From 103c186c6f8a89ceea7b5b2a72747a9950915f08 Mon Sep 17 00:00:00 2001 From: Robert Fratto Date: Wed, 28 Feb 2024 09:42:03 -0500 Subject: [PATCH 001/136] ci: temporarily disable some CI jobs (#7) --- .drone/drone.yml | 454 +----------------- .drone/pipelines/build_images.jsonnet | 5 + .drone/pipelines/publish.jsonnet | 7 + ...la-pr.yml => bump-formula-pr.yml.disabled} | 0 ...l => check-linux-build-image.yml.disabled} | 0 ...=> check-windows-build-image.yml.disabled} | 0 ...-release.yml => helm-release.yml.disabled} | 0 ...> publish-documentation-next.yml.disabled} | 0 ...lish-documentation-versioned.yml.disabled} | 0 Makefile | 2 +- 10 files changed, 14 insertions(+), 454 deletions(-) rename .github/workflows/{bump-formula-pr.yml => bump-formula-pr.yml.disabled} (100%) rename .github/workflows/{check-linux-build-image.yml => check-linux-build-image.yml.disabled} (100%) rename .github/workflows/{check-windows-build-image.yml => check-windows-build-image.yml.disabled} (100%) rename .github/workflows/{helm-release.yml => helm-release.yml.disabled} (100%) rename .github/workflows/{publish-documentation-next.yml => publish-documentation-next.yml.disabled} (100%) rename .github/workflows/{publish-documentation-versioned.yml => publish-documentation-versioned.yml.disabled} (100%) diff --git a/.drone/drone.yml b/.drone/drone.yml index f521c097fe..8833018b6f 100644 --- a/.drone/drone.yml +++ b/.drone/drone.yml @@ -1,107 +1,5 @@ --- kind: pipeline -name: Create Linux build image -platform: - arch: amd64 - os: linux -steps: -- commands: - - export IMAGE_TAG=${DRONE_TAG##build-image/v} - - docker login -u $DOCKER_LOGIN -p $DOCKER_PASSWORD - - docker run --rm --privileged multiarch/qemu-user-static --reset -p yes - - docker buildx create --name multiarch --driver docker-container --use - - docker buildx build --build-arg="GO_RUNTIME=golang:1.22.0-bullseye" --push --platform - linux/amd64,linux/arm64 -t grafana/agent-build-image:$IMAGE_TAG ./build-image - environment: - DOCKER_LOGIN: - from_secret: docker_login - DOCKER_PASSWORD: - from_secret: docker_password - image: docker - name: Build - volumes: - - name: docker - path: /var/run/docker.sock -trigger: - event: - - tag - ref: - - refs/tags/build-image/v* -type: docker -volumes: -- host: - path: /var/run/docker.sock - name: docker ---- -kind: pipeline -name: Create Linux build image for boringcrypto -platform: - arch: amd64 - os: linux -steps: -- commands: - - export IMAGE_TAG=${DRONE_TAG##build-image/v}-boringcrypto - - docker login -u $DOCKER_LOGIN -p $DOCKER_PASSWORD - - docker run --rm --privileged multiarch/qemu-user-static --reset -p yes - - docker buildx create --name multiarch --driver docker-container --use - - docker buildx build --build-arg="GO_RUNTIME=mcr.microsoft.com/oss/go/microsoft/golang:1.22-bullseye" - --push --platform linux/amd64,linux/arm64 -t grafana/agent-build-image:$IMAGE_TAG - ./build-image - environment: - DOCKER_LOGIN: - from_secret: docker_login - DOCKER_PASSWORD: - from_secret: docker_password - image: docker - name: Build - volumes: - - name: docker - path: /var/run/docker.sock -trigger: - event: - - tag - ref: - - refs/tags/build-image/v* -type: docker -volumes: -- host: - path: /var/run/docker.sock - name: docker ---- -kind: pipeline -name: Create Windows build image -platform: - arch: amd64 - os: windows - version: "1809" -steps: -- commands: - - $IMAGE_TAG="${DRONE_TAG##build-image/v}-windows" - - docker login -u $Env:DOCKER_LOGIN -p $Env:DOCKER_PASSWORD - - docker build -t grafana/agent-build-image:$IMAGE_TAG 
./build-image/windows - - docker push grafana/agent-build-image:$IMAGE_TAG - environment: - DOCKER_LOGIN: - from_secret: docker_login - DOCKER_PASSWORD: - from_secret: docker_password - image: docker:windowsservercore-1809 - name: Build - volumes: - - name: docker - path: //./pipe/docker_engine/ -trigger: - event: - - tag - ref: - - refs/tags/build-image/v* -type: docker -volumes: -- host: - path: //./pipe/docker_engine/ - name: docker ---- -kind: pipeline name: Lint platform: arch: amd64 @@ -896,356 +794,6 @@ trigger: type: docker --- kind: pipeline -name: Publish Linux agent container -platform: - arch: amd64 - os: linux -steps: -- commands: - - docker run --rm --privileged multiarch/qemu-user-static --reset -p yes - failure: ignore - image: grafana/agent-build-image:0.31.0 - name: Configure QEMU - volumes: - - name: docker - path: /var/run/docker.sock -- commands: - - mkdir -p $HOME/.docker - - printenv GCR_CREDS > $HOME/.docker/config.json - - docker login -u $DOCKER_LOGIN -p $DOCKER_PASSWORD - - docker buildx create --name multiarch-agent-agent-${DRONE_COMMIT_SHA} --driver - docker-container --use - - ./tools/ci/docker-containers agent - - docker buildx rm multiarch-agent-agent-${DRONE_COMMIT_SHA} - environment: - DOCKER_LOGIN: - from_secret: docker_login - DOCKER_PASSWORD: - from_secret: docker_password - GCR_CREDS: - from_secret: gcr_admin - image: grafana/agent-build-image:0.31.0 - name: Publish container - volumes: - - name: docker - path: /var/run/docker.sock -trigger: - ref: - - refs/heads/main - - refs/tags/v* -type: docker -volumes: -- host: - path: /var/run/docker.sock - name: docker ---- -kind: pipeline -name: Publish Linux agent-boringcrypto container -platform: - arch: amd64 - os: linux -steps: -- commands: - - docker run --rm --privileged multiarch/qemu-user-static --reset -p yes - failure: ignore - image: grafana/agent-build-image:0.31.0 - name: Configure QEMU - volumes: - - name: docker - path: /var/run/docker.sock -- commands: - - mkdir -p $HOME/.docker - - printenv GCR_CREDS > $HOME/.docker/config.json - - docker login -u $DOCKER_LOGIN -p $DOCKER_PASSWORD - - docker buildx create --name multiarch-agent-agent-boringcrypto-${DRONE_COMMIT_SHA} - --driver docker-container --use - - ./tools/ci/docker-containers agent-boringcrypto - - docker buildx rm multiarch-agent-agent-boringcrypto-${DRONE_COMMIT_SHA} - environment: - DOCKER_LOGIN: - from_secret: docker_login - DOCKER_PASSWORD: - from_secret: docker_password - GCR_CREDS: - from_secret: gcr_admin - image: grafana/agent-build-image:0.31.0 - name: Publish container - volumes: - - name: docker - path: /var/run/docker.sock -trigger: - ref: - - refs/heads/main - - refs/tags/v* -type: docker -volumes: -- host: - path: /var/run/docker.sock - name: docker ---- -kind: pipeline -name: Publish Linux agentctl container -platform: - arch: amd64 - os: linux -steps: -- commands: - - docker run --rm --privileged multiarch/qemu-user-static --reset -p yes - failure: ignore - image: grafana/agent-build-image:0.31.0 - name: Configure QEMU - volumes: - - name: docker - path: /var/run/docker.sock -- commands: - - mkdir -p $HOME/.docker - - printenv GCR_CREDS > $HOME/.docker/config.json - - docker login -u $DOCKER_LOGIN -p $DOCKER_PASSWORD - - docker buildx create --name multiarch-agent-agentctl-${DRONE_COMMIT_SHA} --driver - docker-container --use - - ./tools/ci/docker-containers agentctl - - docker buildx rm multiarch-agent-agentctl-${DRONE_COMMIT_SHA} - environment: - DOCKER_LOGIN: - from_secret: docker_login - DOCKER_PASSWORD: - 
from_secret: docker_password - GCR_CREDS: - from_secret: gcr_admin - image: grafana/agent-build-image:0.31.0 - name: Publish container - volumes: - - name: docker - path: /var/run/docker.sock -trigger: - ref: - - refs/heads/main - - refs/tags/v* -type: docker -volumes: -- host: - path: /var/run/docker.sock - name: docker ---- -kind: pipeline -name: Publish Linux agent-operator container -platform: - arch: amd64 - os: linux -steps: -- commands: - - docker run --rm --privileged multiarch/qemu-user-static --reset -p yes - failure: ignore - image: grafana/agent-build-image:0.31.0 - name: Configure QEMU - volumes: - - name: docker - path: /var/run/docker.sock -- commands: - - mkdir -p $HOME/.docker - - printenv GCR_CREDS > $HOME/.docker/config.json - - docker login -u $DOCKER_LOGIN -p $DOCKER_PASSWORD - - docker buildx create --name multiarch-agent-agent-operator-${DRONE_COMMIT_SHA} - --driver docker-container --use - - ./tools/ci/docker-containers agent-operator - - docker buildx rm multiarch-agent-agent-operator-${DRONE_COMMIT_SHA} - environment: - DOCKER_LOGIN: - from_secret: docker_login - DOCKER_PASSWORD: - from_secret: docker_password - GCR_CREDS: - from_secret: gcr_admin - image: grafana/agent-build-image:0.31.0 - name: Publish container - volumes: - - name: docker - path: /var/run/docker.sock -trigger: - ref: - - refs/heads/main - - refs/tags/v* -type: docker -volumes: -- host: - path: /var/run/docker.sock - name: docker ---- -kind: pipeline -name: Publish Windows agent container -platform: - arch: amd64 - os: windows - version: "1809" -steps: -- commands: - - '& "C:/Program Files/git/bin/bash.exe" ./tools/ci/docker-containers-windows agent' - environment: - DOCKER_LOGIN: - from_secret: docker_login - DOCKER_PASSWORD: - from_secret: docker_password - image: grafana/agent-build-image:0.31.0-windows - name: Build containers - volumes: - - name: docker - path: //./pipe/docker_engine/ -trigger: - ref: - - refs/heads/main - - refs/tags/v* -type: docker -volumes: -- host: - path: //./pipe/docker_engine/ - name: docker ---- -kind: pipeline -name: Publish Windows agentctl container -platform: - arch: amd64 - os: windows - version: "1809" -steps: -- commands: - - '& "C:/Program Files/git/bin/bash.exe" ./tools/ci/docker-containers-windows agentctl' - environment: - DOCKER_LOGIN: - from_secret: docker_login - DOCKER_PASSWORD: - from_secret: docker_password - image: grafana/agent-build-image:0.31.0-windows - name: Build containers - volumes: - - name: docker - path: //./pipe/docker_engine/ -trigger: - ref: - - refs/heads/main - - refs/tags/v* -type: docker -volumes: -- host: - path: //./pipe/docker_engine/ - name: docker ---- -depends_on: -- Publish Linux agent container -- Publish Linux agent-boringcrypto container -- Publish Linux agentctl container -- Publish Linux agent-operator container -image_pull_secrets: -- dockerconfigjson -kind: pipeline -name: Deploy to deployment_tools -platform: - arch: amd64 - os: linux -steps: -- commands: - - apk update && apk add git - - echo "$(sh ./tools/image-tag)" > .tag-only - - echo "grafana/agent:$(sh ./tools/image-tag)" > .image-tag - image: alpine - name: Create .image-tag -- image: us.gcr.io/kubernetes-dev/drone/plugins/updater - name: Update deployment_tools - settings: - config_json: | - { - "git_committer_name": "updater-for-ci[bot]", - "git_author_name": "updater-for-ci[bot]", - "git_committer_email": "119986603+updater-for-ci[bot]@users.noreply.github.com", - "git_author_email": "119986603+updater-for-ci[bot]@users.noreply.github.com", - 
"destination_branch": "master", - "repo_name": "deployment_tools", - "update_jsonnet_attribute_configs": [ - { - "file_path": "ksonnet/environments/kowalski/dev-us-central-0.kowalski-dev/main.jsonnet", - "jsonnet_key": "agent_image", - "jsonnet_value_file": ".image-tag" - }, - { - "file_path": "ksonnet/environments/grafana-agent/waves/agent.libsonnet", - "jsonnet_key": "dev_canary", - "jsonnet_value_file": ".image-tag" - }, - { - "file_path": "ksonnet/environments/pyroscope-ebpf/waves/ebpf.libsonnet", - "jsonnet_key": "dev_canary", - "jsonnet_value_file": ".image-tag" - } - ] - } - github_app_id: - from_secret: updater_app_id - github_app_installation_id: - from_secret: updater_app_installation_id - github_app_private_key: - from_secret: updater_private_key -trigger: - ref: - - refs/heads/main -type: docker ---- -depends_on: -- Publish Linux agent container -- Publish Linux agent-boringcrypto container -- Publish Linux agentctl container -- Publish Linux agent-operator container -- Publish Windows agent container -- Publish Windows agentctl container -image_pull_secrets: -- dockerconfigjson -kind: pipeline -name: Publish release -platform: - arch: amd64 - os: linux -steps: -- commands: - - /usr/bin/github-app-external-token > /drone/src/gh-token.txt - environment: - GITHUB_APP_ID: - from_secret: updater_app_id - GITHUB_APP_INSTALLATION_ID: - from_secret: updater_app_installation_id - GITHUB_APP_PRIVATE_KEY: - from_secret: updater_private_key - image: us.gcr.io/kubernetes-dev/github-app-secret-writer:latest - name: Generate GitHub token -- commands: - - export GITHUB_TOKEN=$(cat /drone/src/gh-token.txt) - - docker login -u $DOCKER_LOGIN -p $DOCKER_PASSWORD - - make -j4 RELEASE_BUILD=1 VERSION=${DRONE_TAG} dist - - | - VERSION=${DRONE_TAG} RELEASE_DOC_TAG=$(echo ${DRONE_TAG} | awk -F '.' '{print $1"."$2}') ./tools/release - environment: - DOCKER_LOGIN: - from_secret: docker_login - DOCKER_PASSWORD: - from_secret: docker_password - GPG_PASSPHRASE: - from_secret: gpg_passphrase - GPG_PRIVATE_KEY: - from_secret: gpg_private_key - GPG_PUBLIC_KEY: - from_secret: gpg_public_key - image: grafana/agent-build-image:0.31.0 - name: Publish release - volumes: - - name: docker - path: /var/run/docker.sock -trigger: - ref: - - refs/tags/v* -type: docker -volumes: -- host: - path: /var/run/docker.sock - name: docker ---- -kind: pipeline name: Test Linux system packages platform: arch: amd64 @@ -1352,6 +900,6 @@ kind: secret name: updater_private_key --- kind: signature -hmac: 2e439110a89f33a78d745a71635d47f9b1a99de6028bb84c258a0be9c09840f2 +hmac: 25f83467a6323fb131c803ff1e9b9b0676d28c40ab0d55f374307685111eb61f ... diff --git a/.drone/pipelines/build_images.jsonnet b/.drone/pipelines/build_images.jsonnet index 328c7fc344..9378bb703e 100644 --- a/.drone/pipelines/build_images.jsonnet +++ b/.drone/pipelines/build_images.jsonnet @@ -17,6 +17,10 @@ local locals = { }; [ + // TODO(rfratto): The following are temporarily diasbled as grafana/alloy + // gets set up. 
+ + /* pipelines.linux('Create Linux build image') { trigger: locals.on_build_image_tag, steps: [{ @@ -88,4 +92,5 @@ local locals = { host: { path: '//./pipe/docker_engine/' }, }], }, + */ ] diff --git a/.drone/pipelines/publish.jsonnet b/.drone/pipelines/publish.jsonnet index 6f8c4c2b20..bd578daca7 100644 --- a/.drone/pipelines/publish.jsonnet +++ b/.drone/pipelines/publish.jsonnet @@ -91,6 +91,12 @@ local windows_containers_jobs = std.map(function(container) ( } ), windows_containers); +// TODO(rfratto): The following are TEMPORARILY disabled as grafana/alloy gets +// set up. Remove the line below in favor of the comment block to reenable the +// publish jobs. +[] + +/* linux_containers_jobs + windows_containers_jobs + [ pipelines.linux('Deploy to deployment_tools') { trigger: { @@ -196,3 +202,4 @@ linux_containers_jobs + windows_containers_jobs + [ }], }, ] +*/ diff --git a/.github/workflows/bump-formula-pr.yml b/.github/workflows/bump-formula-pr.yml.disabled similarity index 100% rename from .github/workflows/bump-formula-pr.yml rename to .github/workflows/bump-formula-pr.yml.disabled diff --git a/.github/workflows/check-linux-build-image.yml b/.github/workflows/check-linux-build-image.yml.disabled similarity index 100% rename from .github/workflows/check-linux-build-image.yml rename to .github/workflows/check-linux-build-image.yml.disabled diff --git a/.github/workflows/check-windows-build-image.yml b/.github/workflows/check-windows-build-image.yml.disabled similarity index 100% rename from .github/workflows/check-windows-build-image.yml rename to .github/workflows/check-windows-build-image.yml.disabled diff --git a/.github/workflows/helm-release.yml b/.github/workflows/helm-release.yml.disabled similarity index 100% rename from .github/workflows/helm-release.yml rename to .github/workflows/helm-release.yml.disabled diff --git a/.github/workflows/publish-documentation-next.yml b/.github/workflows/publish-documentation-next.yml.disabled similarity index 100% rename from .github/workflows/publish-documentation-next.yml rename to .github/workflows/publish-documentation-next.yml.disabled diff --git a/.github/workflows/publish-documentation-versioned.yml b/.github/workflows/publish-documentation-versioned.yml.disabled similarity index 100% rename from .github/workflows/publish-documentation-versioned.yml rename to .github/workflows/publish-documentation-versioned.yml.disabled diff --git a/Makefile b/Makefile index f08f0f9d89..aabf28a9c6 100644 --- a/Makefile +++ b/Makefile @@ -331,7 +331,7 @@ endif .PHONY: drone drone: generate-drone drone lint .drone/drone.yml --trusted - drone --server https://drone.grafana.net sign --save grafana/agent .drone/drone.yml + drone --server https://drone.grafana.net sign --save grafana/alloy .drone/drone.yml .PHONY: clean clean: clean-dist clean-build-container-cache From b550b6e70dcb692bc4678aa3b335d62a59eb3efa Mon Sep 17 00:00:00 2001 From: Robert Fratto Date: Wed, 28 Feb 2024 11:12:28 -0500 Subject: [PATCH 002/136] docs/developer: change release branch naming convention (#8) Change the release branch naming convention to `release/vMAJOR.MINOR`. This naming convention makes it easier to write branch protection rules for release branches without colliding with branches pushed by contributors. 
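As a purely illustrative sketch (the version number and base commit below are hypothetical, not taken from this change), cutting a release branch under the new convention could look like:

    # Create the release branch from the agreed-upon base commit, then push it.
    # Branch protection rules can then match the release/ prefix without also
    # matching contributor branches whose names merely start with "release".
    git checkout -b release/v0.31 <base-commit>
    git push origin release/v0.31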
--- docs/developer/release/1-create-release-branch.md | 8 ++++---- docs/developer/release/3-update-version-in-code.md | 6 +++--- 2 files changed, 7 insertions(+), 7 deletions(-) diff --git a/docs/developer/release/1-create-release-branch.md b/docs/developer/release/1-create-release-branch.md index 91a6ed0b3a..5e7a3c6508 100644 --- a/docs/developer/release/1-create-release-branch.md +++ b/docs/developer/release/1-create-release-branch.md @@ -14,13 +14,13 @@ Patch Releases for that major or minor version of the agent. 2. Create and push the release branch from the selected base commit: - The name of the release branch should be `release-VERSION_PREFIX` - defined above, such as `release-v0.31`. + The name of the release branch should be `release/VERSION_PREFIX` + defined above, such as `release/v0.31`. - > **NOTE**: Branches are only made for VERSION_PREFIX; do not create branches for the full VERSION such as `release-v0.31-rc.0` or `release-v0.31.0`. + > **NOTE**: Branches are only made for VERSION_PREFIX; do not create branches for the full VERSION such as `release/v0.31-rc.0` or `release/v0.31.0`. - If the consensus commit is the latest commit from main you can branch from main. - If the consensus commit is not the latest commit from main, branch from that instead. > **NOTE**: Don't create any other branches that are prefixed with `release` when creating PRs or - those branches will collide with our automated release build publish rules. \ No newline at end of file + those branches will collide with our automated release build publish rules. diff --git a/docs/developer/release/3-update-version-in-code.md b/docs/developer/release/3-update-version-in-code.md index b3de341873..73026115de 100644 --- a/docs/developer/release/3-update-version-in-code.md +++ b/docs/developer/release/3-update-version-in-code.md @@ -40,7 +40,7 @@ The project must be updated to reference the upcoming release tag whenever a new - Stable Release example PR [here](https://github.com/grafana/agent/pull/3119) - Patch Release example PR [here](https://github.com/grafana/agent/pull/3191) -4. Create a branch from `release-VERSION_PREFIX` for [grafana/agent](https://github.com/grafana/agent). +4. Create a branch from `release/VERSION_PREFIX` for [grafana/agent](https://github.com/grafana/agent). 5. Cherry pick the commit on main from the merged PR in Step 3 into the new branch from Step 4: @@ -50,9 +50,9 @@ The project must be updated to reference the upcoming release tag whenever a new Delete the `Main (unreleased)` header and anything underneath it as part of the cherry-pick. Alternatively, do it after the cherry-pick is completed. -6. Create a PR to merge to `release-VERSION_PREFIX` (must be merged before continuing). +6. Create a PR to merge to `release/VERSION_PREFIX` (must be merged before continuing). - Release Candidate example PR [here](https://github.com/grafana/agent/pull/3066) - Stable Release example PR [here](https://github.com/grafana/agent/pull/3123) - Patch Release example PR [here](https://github.com/grafana/agent/pull/3193) - - The `CHANGELOG.md` was updated in cherry-pick commits prior for this example. Make sure it is all set on this PR. \ No newline at end of file + - The `CHANGELOG.md` was updated in cherry-pick commits prior for this example. Make sure it is all set on this PR.
From c681d220e17ffcf7ea51249be7f60c42ad327ac4 Mon Sep 17 00:00:00 2001 From: Robert Fratto Date: Wed, 28 Feb 2024 21:03:39 -0500 Subject: [PATCH 003/136] ci: temporarily disable dependabot (#10) As grafana/agent is currently the source of truth for code changes, we don't want to merge any code here that could be merged to grafana/agent first. This configuration file will be re-added following the grafana/alloy release. --- .github/{dependabot.yml => dependabot.yml.disabled} | 0 1 file changed, 0 insertions(+), 0 deletions(-) rename .github/{dependabot.yml => dependabot.yml.disabled} (100%) diff --git a/.github/dependabot.yml b/.github/dependabot.yml.disabled similarity index 100% rename from .github/dependabot.yml rename to .github/dependabot.yml.disabled From 813d60b3439dc50f743757c051f24267458a14f8 Mon Sep 17 00:00:00 2001 From: Robert Fratto Date: Thu, 29 Feb 2024 13:29:18 -0500 Subject: [PATCH 004/136] Remove static mode and static mode operator (#15) * Remove cmd/grafana-agentctl and its presence from release assets * Remove cmd/grafana-agent-operator and its Docker image * Remove cmd/grafana-agent-flow and its presence from release assets * Remove all static mode code from cmd/grafana-agent * Replace packages with the ones used for grafana-agent-flow (but renamed) * Temporarily remove example/docker-compose, which was heavily tailored to static mode. --- .drone/drone.yml | 516 +--- .drone/pipelines/check_containers.jsonnet | 3 - .drone/pipelines/crosscompile.jsonnet | 11 +- .drone/pipelines/publish.jsonnet | 4 +- .drone/pipelines/test.jsonnet | 34 - .drone/pipelines/test_packages.jsonnet | 2 - Makefile | 125 +- cmd/grafana-agent-flow/main.go | 32 - cmd/grafana-agent-operator/DEVELOPERS.md | 189 - cmd/grafana-agent-operator/Dockerfile | 37 - cmd/grafana-agent-operator/README.md | 38 - .../agent-example-config.yaml | 260 -- .../example-grafana.yaml | 144 - cmd/grafana-agent-operator/example-loki.yaml | 112 - .../example-prometheus.yaml | 96 - cmd/grafana-agent-operator/main.go | 94 - cmd/grafana-agent-service/config_windows.go | 2 +- cmd/grafana-agent-service/main_windows.go | 2 +- cmd/grafana-agent/Dockerfile | 4 +- cmd/grafana-agent/Dockerfile.windows | 4 +- cmd/grafana-agent/agent-local-config.yaml | 17 - cmd/grafana-agent/entrypoint.go | 465 --- cmd/grafana-agent/main.go | 65 +- cmd/grafana-agent/mode.go | 30 - cmd/grafana-agent/run-config.river | 5 - cmd/grafana-agent/service.go | 15 - cmd/grafana-agent/service_windows.go | 109 - cmd/grafana-agentctl/Dockerfile | 38 - cmd/grafana-agentctl/Dockerfile.windows | 19 - cmd/grafana-agentctl/main.go | 477 --- example/docker-compose/README.md | 58 - .../docker-compose/agent/config/agent.yaml | 155 - example/docker-compose/docker-compose.yaml | 233 -- .../docker-compose/grafana/config/grafana.ini | 9 - .../dashboards-provisioning/dashboards.yaml | 14 - .../dashboards/agent-logs-pipeline.json | 1082 ----- .../grafana/dashboards/agent-operational.json | 1189 ------ .../dashboards/agent-remote-write.json | 1512 ------- .../dashboards/agent-tracing-pipeline.json | 1065 ----- .../grafana/dashboards/agent.json | 786 ---- .../grafana/dashboards/template.jsonnet | 14 - .../grafana/datasources/datasource.yml | 42 - example/docker-compose/jsonnetfile.json | 14 - example/docker-compose/jsonnetfile.lock.json | 34 - .../docker-compose/mimir/config/mimir.yaml | 63 - internal/cmd/integration-tests/utils.go | 4 +- .../static/operator/apis/monitoring/doc.go | 4 - .../apis/monitoring/v1alpha1/deployment.go | 54 - .../operator/apis/monitoring/v1alpha1/doc.go | 7 -
.../apis/monitoring/v1alpha1/group.go | 36 - .../apis/monitoring/v1alpha1/types.go | 206 - .../monitoring/v1alpha1/types_integrations.go | 134 - .../apis/monitoring/v1alpha1/types_logs.go | 584 --- .../apis/monitoring/v1alpha1/types_metrics.go | 274 -- .../v1alpha1/zz_generated.deepcopy.go | 1491 ------- internal/static/operator/assets/assets.go | 60 - internal/static/operator/build_hierarchy.go | 297 -- .../static/operator/build_hierarchy_test.go | 202 - .../static/operator/clientutil/clientutil.go | 268 -- internal/static/operator/clientutil/merge.go | 64 - internal/static/operator/config/config.go | 176 - .../operator/config/config_references.go | 75 - .../operator/config/config_references_test.go | 84 - .../static/operator/config/config_test.go | 492 --- .../static/operator/config/fs_importer.go | 71 - .../config/integration_templates_test.go | 66 - .../operator/config/logs_templates_test.go | 762 ---- .../operator/config/metrics_templates_test.go | 1161 ------ .../templates/agent-integrations.libsonnet | 135 - .../config/templates/agent-logs.libsonnet | 44 - .../config/templates/agent-metrics.libsonnet | 64 - .../templates/component/logs/client.libsonnet | 51 - .../component/logs/external_labels.libsonnet | 27 - .../component/logs/pod_logs.libsonnet | 152 - .../component/logs/relabel_config.libsonnet | 12 - .../templates/component/logs/stages.libsonnet | 145 - .../metrics/external_labels.libsonnet | 38 - .../metrics/kube_sd_config.libsonnet | 42 - .../component/metrics/pod_monitor.libsonnet | 248 -- .../component/metrics/probe.libsonnet | 216 - .../metrics/relabel_config.libsonnet | 12 - .../component/metrics/remote_write.libsonnet | 87 - .../metrics/safe_tls_config.libsonnet | 13 - .../metrics/service_monitor.libsonnet | 277 -- .../component/metrics/tls_config.libsonnet | 28 - .../config/templates/ext/marshal.libsonnet | 12 - .../config/templates/ext/optionals.libsonnet | 41 - .../config/templates/ext/secrets.libsonnet | 65 - .../config/templates/integrations.libsonnet | 9 - .../operator/config/templates/logs.libsonnet | 74 - .../config/templates/metrics.libsonnet | 133 - .../config/templates/utils/k8s.libsonnet | 49 - internal/static/operator/config/utils.go | 132 - internal/static/operator/config/utils_test.go | 22 - internal/static/operator/defaults.go | 15 - internal/static/operator/defaults.go.t | 15 - .../static/operator/hierarchy/hierarchy.go | 149 - .../operator/hierarchy/hierarchy_test.go | 146 - internal/static/operator/hierarchy/list.go | 50 - .../static/operator/hierarchy/selector.go | 86 - internal/static/operator/kubelet.go | 158 - internal/static/operator/kubelet_test.go | 117 - internal/static/operator/logutil/log.go | 79 - internal/static/operator/operator.go | 276 -- internal/static/operator/operator_test.go | 186 - internal/static/operator/reconciler.go | 136 - .../operator/reconciler_integrations.go | 122 - .../operator/reconciler_integrations_test.go | 82 - internal/static/operator/reconciler_logs.go | 55 - .../static/operator/reconciler_metrics.go | 207 - .../static/operator/resources_integrations.go | 237 -- internal/static/operator/resources_logs.go | 95 - internal/static/operator/resources_metrics.go | 254 -- .../static/operator/resources_metrics_test.go | 25 - .../static/operator/resources_pod_template.go | 319 -- .../operator/resources_pod_template_test.go | 171 - .../testdata/test-custom-mounts.in.yaml | 70 - .../testdata/test-custom-mounts.out.yaml | 148 - .../testdata/test-integrations.in.yaml | 237 -- .../testdata/test-integrations.out.yaml | 151 - 
.../testdata/test-metrics-instance.in.yaml | 84 - .../testdata/test-metrics-instance.out.yaml | 153 - .../testdata/test-resource-hierarchy.yaml | 205 - .../automaticloggingprocessor.go | 8 +- .../agent_linux_packages_test.go | 10 +- .../flow_linux_packages_test.go | 119 - internal/util/k8s/k8s.go | 132 - internal/util/k8s/k8s_test.go | 35 - internal/util/k8s/objects.go | 191 - internal/util/k8s/resources.go | 101 - internal/util/sanitize.go | 10 + .../alerts.libsonnet | 0 .../alerts/clustering.libsonnet | 0 .../alerts/controller.libsonnet | 0 .../alerts/opentelemetry.libsonnet | 0 .../alerts/utils/alert.jsonnet | 0 .../dashboards.libsonnet | 0 .../dashboards/cluster-node.libsonnet | 0 .../dashboards/cluster-overview.libsonnet | 0 .../dashboards/controller.libsonnet | 0 .../dashboards/opentelemetry.libsonnet | 0 .../dashboards/prometheus.libsonnet | 0 .../dashboards/resources.libsonnet | 0 .../dashboards/utils/dashboard.jsonnet | 6 +- .../dashboards/utils/panel.jsonnet | 0 .../grizzly.jsonnet | 0 .../grizzly/alerts.jsonnet | 0 .../grizzly/dashboards.jsonnet | 0 .../jsonnetfile.json | 0 .../mixin.libsonnet | 0 .../agent-static-mixin/alerts.libsonnet | 288 -- .../agent-static-mixin/config.libsonnet | 13 - .../agent-static-mixin/dashboards.libsonnet | 789 ---- .../agent-static-mixin/debugging.libsonnet | 128 - .../agent-static-mixin/jsonnetfile.json | 25 - operations/agent-static-mixin/mixin.libsonnet | 4 - operations/agent-static-mixin/utils.libsonnet | 34 - .../monitoring.coreos.com_podmonitors.yaml | 424 -- .../crds/monitoring.coreos.com_probes.yaml | 458 -- ...monitoring.coreos.com_servicemonitors.yaml | 436 -- .../monitoring.grafana.com_grafanaagents.yaml | 3711 ----------------- .../monitoring.grafana.com_integrations.yaml | 810 ---- .../monitoring.grafana.com_logsinstances.yaml | 299 -- ...nitoring.grafana.com_metricsinstances.yaml | 495 --- .../crds/monitoring.grafana.com_podlogs.yaml | 308 -- .../templates/agent-operator.yaml | 645 --- .../grafana-agent-flow/deb/control/postinst | 45 - .../grafana-agent-flow/deb/control/prerm | 12 - .../deb/grafana-agent-flow.service | 20 - packaging/grafana-agent-flow/environment-file | 16 - .../grafana-agent-flow/rpm/control/postinst | 49 - .../grafana-agent-flow/rpm/control/prerm | 20 - packaging/grafana-agent-flow/rpm/gpg-sign.sh | 36 - .../rpm/grafana-agent-flow.service | 20 - .../windows/install_script.nsis | 200 - packaging/grafana-agent-flow/windows/logo.ico | Bin 15086 -> 0 bytes packaging/grafana-agent/deb/control/postinst | 4 +- .../grafana-agent/deb/grafana-agent.service | 8 +- packaging/grafana-agent/environment-file | 12 +- .../grafana-agent.river} | 4 +- packaging/grafana-agent/grafana-agent.yaml | 27 - packaging/grafana-agent/rpm/control/postinst | 8 +- .../grafana-agent/rpm/grafana-agent.service | 8 +- packaging/grafana-agent/windows/.gitignore | 2 - packaging/grafana-agent/windows/Dockerfile | 4 - .../windows/config.river | 0 .../grafana-agent/windows/install_script.nsis | 381 +- .../windows/macros.nsis | 0 tools/ci/docker-containers | 28 +- tools/generate-crds.bash | 23 - tools/make/packaging.mk | 225 +- tools/release | 1 - 192 files changed, 277 insertions(+), 32302 deletions(-) delete mode 100644 cmd/grafana-agent-flow/main.go delete mode 100644 cmd/grafana-agent-operator/DEVELOPERS.md delete mode 100644 cmd/grafana-agent-operator/Dockerfile delete mode 100644 cmd/grafana-agent-operator/README.md delete mode 100644 cmd/grafana-agent-operator/agent-example-config.yaml delete mode 100644 
cmd/grafana-agent-operator/example-grafana.yaml delete mode 100644 cmd/grafana-agent-operator/example-loki.yaml delete mode 100644 cmd/grafana-agent-operator/example-prometheus.yaml delete mode 100644 cmd/grafana-agent-operator/main.go delete mode 100644 cmd/grafana-agent/agent-local-config.yaml delete mode 100644 cmd/grafana-agent/entrypoint.go delete mode 100644 cmd/grafana-agent/mode.go delete mode 100644 cmd/grafana-agent/run-config.river delete mode 100644 cmd/grafana-agent/service.go delete mode 100644 cmd/grafana-agent/service_windows.go delete mode 100644 cmd/grafana-agentctl/Dockerfile delete mode 100644 cmd/grafana-agentctl/Dockerfile.windows delete mode 100644 cmd/grafana-agentctl/main.go delete mode 100644 example/docker-compose/README.md delete mode 100644 example/docker-compose/agent/config/agent.yaml delete mode 100644 example/docker-compose/docker-compose.yaml delete mode 100644 example/docker-compose/grafana/config/grafana.ini delete mode 100644 example/docker-compose/grafana/dashboards-provisioning/dashboards.yaml delete mode 100644 example/docker-compose/grafana/dashboards/agent-logs-pipeline.json delete mode 100644 example/docker-compose/grafana/dashboards/agent-operational.json delete mode 100644 example/docker-compose/grafana/dashboards/agent-remote-write.json delete mode 100644 example/docker-compose/grafana/dashboards/agent-tracing-pipeline.json delete mode 100644 example/docker-compose/grafana/dashboards/agent.json delete mode 100644 example/docker-compose/grafana/dashboards/template.jsonnet delete mode 100644 example/docker-compose/grafana/datasources/datasource.yml delete mode 100644 example/docker-compose/jsonnetfile.json delete mode 100644 example/docker-compose/jsonnetfile.lock.json delete mode 100644 example/docker-compose/mimir/config/mimir.yaml delete mode 100644 internal/static/operator/apis/monitoring/doc.go delete mode 100644 internal/static/operator/apis/monitoring/v1alpha1/deployment.go delete mode 100644 internal/static/operator/apis/monitoring/v1alpha1/doc.go delete mode 100644 internal/static/operator/apis/monitoring/v1alpha1/group.go delete mode 100644 internal/static/operator/apis/monitoring/v1alpha1/types.go delete mode 100644 internal/static/operator/apis/monitoring/v1alpha1/types_integrations.go delete mode 100644 internal/static/operator/apis/monitoring/v1alpha1/types_logs.go delete mode 100644 internal/static/operator/apis/monitoring/v1alpha1/types_metrics.go delete mode 100644 internal/static/operator/apis/monitoring/v1alpha1/zz_generated.deepcopy.go delete mode 100644 internal/static/operator/assets/assets.go delete mode 100644 internal/static/operator/build_hierarchy.go delete mode 100644 internal/static/operator/build_hierarchy_test.go delete mode 100644 internal/static/operator/clientutil/clientutil.go delete mode 100644 internal/static/operator/clientutil/merge.go delete mode 100644 internal/static/operator/config/config.go delete mode 100644 internal/static/operator/config/config_references.go delete mode 100644 internal/static/operator/config/config_references_test.go delete mode 100644 internal/static/operator/config/config_test.go delete mode 100644 internal/static/operator/config/fs_importer.go delete mode 100644 internal/static/operator/config/integration_templates_test.go delete mode 100644 internal/static/operator/config/logs_templates_test.go delete mode 100644 internal/static/operator/config/metrics_templates_test.go delete mode 100644 internal/static/operator/config/templates/agent-integrations.libsonnet delete mode 100644 
internal/static/operator/config/templates/agent-logs.libsonnet delete mode 100644 internal/static/operator/config/templates/agent-metrics.libsonnet delete mode 100644 internal/static/operator/config/templates/component/logs/client.libsonnet delete mode 100644 internal/static/operator/config/templates/component/logs/external_labels.libsonnet delete mode 100644 internal/static/operator/config/templates/component/logs/pod_logs.libsonnet delete mode 100644 internal/static/operator/config/templates/component/logs/relabel_config.libsonnet delete mode 100644 internal/static/operator/config/templates/component/logs/stages.libsonnet delete mode 100644 internal/static/operator/config/templates/component/metrics/external_labels.libsonnet delete mode 100644 internal/static/operator/config/templates/component/metrics/kube_sd_config.libsonnet delete mode 100644 internal/static/operator/config/templates/component/metrics/pod_monitor.libsonnet delete mode 100644 internal/static/operator/config/templates/component/metrics/probe.libsonnet delete mode 100644 internal/static/operator/config/templates/component/metrics/relabel_config.libsonnet delete mode 100644 internal/static/operator/config/templates/component/metrics/remote_write.libsonnet delete mode 100644 internal/static/operator/config/templates/component/metrics/safe_tls_config.libsonnet delete mode 100644 internal/static/operator/config/templates/component/metrics/service_monitor.libsonnet delete mode 100644 internal/static/operator/config/templates/component/metrics/tls_config.libsonnet delete mode 100644 internal/static/operator/config/templates/ext/marshal.libsonnet delete mode 100644 internal/static/operator/config/templates/ext/optionals.libsonnet delete mode 100644 internal/static/operator/config/templates/ext/secrets.libsonnet delete mode 100644 internal/static/operator/config/templates/integrations.libsonnet delete mode 100644 internal/static/operator/config/templates/logs.libsonnet delete mode 100644 internal/static/operator/config/templates/metrics.libsonnet delete mode 100644 internal/static/operator/config/templates/utils/k8s.libsonnet delete mode 100644 internal/static/operator/config/utils.go delete mode 100644 internal/static/operator/config/utils_test.go delete mode 100644 internal/static/operator/defaults.go delete mode 100644 internal/static/operator/defaults.go.t delete mode 100644 internal/static/operator/hierarchy/hierarchy.go delete mode 100644 internal/static/operator/hierarchy/hierarchy_test.go delete mode 100644 internal/static/operator/hierarchy/list.go delete mode 100644 internal/static/operator/hierarchy/selector.go delete mode 100644 internal/static/operator/kubelet.go delete mode 100644 internal/static/operator/kubelet_test.go delete mode 100644 internal/static/operator/logutil/log.go delete mode 100644 internal/static/operator/operator.go delete mode 100644 internal/static/operator/operator_test.go delete mode 100644 internal/static/operator/reconciler.go delete mode 100644 internal/static/operator/reconciler_integrations.go delete mode 100644 internal/static/operator/reconciler_integrations_test.go delete mode 100644 internal/static/operator/reconciler_logs.go delete mode 100644 internal/static/operator/reconciler_metrics.go delete mode 100644 internal/static/operator/resources_integrations.go delete mode 100644 internal/static/operator/resources_logs.go delete mode 100644 internal/static/operator/resources_metrics.go delete mode 100644 internal/static/operator/resources_metrics_test.go delete mode 100644 
internal/static/operator/resources_pod_template.go delete mode 100644 internal/static/operator/resources_pod_template_test.go delete mode 100644 internal/static/operator/testdata/test-custom-mounts.in.yaml delete mode 100644 internal/static/operator/testdata/test-custom-mounts.out.yaml delete mode 100644 internal/static/operator/testdata/test-integrations.in.yaml delete mode 100644 internal/static/operator/testdata/test-integrations.out.yaml delete mode 100644 internal/static/operator/testdata/test-metrics-instance.in.yaml delete mode 100644 internal/static/operator/testdata/test-metrics-instance.out.yaml delete mode 100644 internal/static/operator/testdata/test-resource-hierarchy.yaml delete mode 100644 internal/tools/packaging_test/flow_linux_packages_test.go delete mode 100644 internal/util/k8s/k8s.go delete mode 100644 internal/util/k8s/k8s_test.go delete mode 100644 internal/util/k8s/objects.go delete mode 100644 internal/util/k8s/resources.go create mode 100644 internal/util/sanitize.go rename operations/{agent-flow-mixin => agent-mixin}/alerts.libsonnet (100%) rename operations/{agent-flow-mixin => agent-mixin}/alerts/clustering.libsonnet (100%) rename operations/{agent-flow-mixin => agent-mixin}/alerts/controller.libsonnet (100%) rename operations/{agent-flow-mixin => agent-mixin}/alerts/opentelemetry.libsonnet (100%) rename operations/{agent-flow-mixin => agent-mixin}/alerts/utils/alert.jsonnet (100%) rename operations/{agent-flow-mixin => agent-mixin}/dashboards.libsonnet (100%) rename operations/{agent-flow-mixin => agent-mixin}/dashboards/cluster-node.libsonnet (100%) rename operations/{agent-flow-mixin => agent-mixin}/dashboards/cluster-overview.libsonnet (100%) rename operations/{agent-flow-mixin => agent-mixin}/dashboards/controller.libsonnet (100%) rename operations/{agent-flow-mixin => agent-mixin}/dashboards/opentelemetry.libsonnet (100%) rename operations/{agent-flow-mixin => agent-mixin}/dashboards/prometheus.libsonnet (100%) rename operations/{agent-flow-mixin => agent-mixin}/dashboards/resources.libsonnet (100%) rename operations/{agent-flow-mixin => agent-mixin}/dashboards/utils/dashboard.jsonnet (94%) rename operations/{agent-flow-mixin => agent-mixin}/dashboards/utils/panel.jsonnet (100%) rename operations/{agent-flow-mixin => agent-mixin}/grizzly.jsonnet (100%) rename operations/{agent-flow-mixin => agent-mixin}/grizzly/alerts.jsonnet (100%) rename operations/{agent-flow-mixin => agent-mixin}/grizzly/dashboards.jsonnet (100%) rename operations/{agent-flow-mixin => agent-mixin}/jsonnetfile.json (100%) rename operations/{agent-flow-mixin => agent-mixin}/mixin.libsonnet (100%) delete mode 100644 operations/agent-static-mixin/alerts.libsonnet delete mode 100644 operations/agent-static-mixin/config.libsonnet delete mode 100644 operations/agent-static-mixin/dashboards.libsonnet delete mode 100644 operations/agent-static-mixin/debugging.libsonnet delete mode 100644 operations/agent-static-mixin/jsonnetfile.json delete mode 100644 operations/agent-static-mixin/mixin.libsonnet delete mode 100644 operations/agent-static-mixin/utils.libsonnet delete mode 100644 operations/agent-static-operator/crds/monitoring.coreos.com_podmonitors.yaml delete mode 100644 operations/agent-static-operator/crds/monitoring.coreos.com_probes.yaml delete mode 100644 operations/agent-static-operator/crds/monitoring.coreos.com_servicemonitors.yaml delete mode 100644 operations/agent-static-operator/crds/monitoring.grafana.com_grafanaagents.yaml delete mode 100644 
operations/agent-static-operator/crds/monitoring.grafana.com_integrations.yaml delete mode 100644 operations/agent-static-operator/crds/monitoring.grafana.com_logsinstances.yaml delete mode 100644 operations/agent-static-operator/crds/monitoring.grafana.com_metricsinstances.yaml delete mode 100644 operations/agent-static-operator/crds/monitoring.grafana.com_podlogs.yaml delete mode 100644 operations/agent-static-operator/templates/agent-operator.yaml delete mode 100644 packaging/grafana-agent-flow/deb/control/postinst delete mode 100644 packaging/grafana-agent-flow/deb/control/prerm delete mode 100644 packaging/grafana-agent-flow/deb/grafana-agent-flow.service delete mode 100644 packaging/grafana-agent-flow/environment-file delete mode 100644 packaging/grafana-agent-flow/rpm/control/postinst delete mode 100644 packaging/grafana-agent-flow/rpm/control/prerm delete mode 100755 packaging/grafana-agent-flow/rpm/gpg-sign.sh delete mode 100644 packaging/grafana-agent-flow/rpm/grafana-agent-flow.service delete mode 100644 packaging/grafana-agent-flow/windows/install_script.nsis delete mode 100644 packaging/grafana-agent-flow/windows/logo.ico rename packaging/{grafana-agent-flow/grafana-agent-flow.river => grafana-agent/grafana-agent.river} (89%) delete mode 100644 packaging/grafana-agent/grafana-agent.yaml delete mode 100644 packaging/grafana-agent/windows/.gitignore delete mode 100644 packaging/grafana-agent/windows/Dockerfile rename packaging/{grafana-agent-flow => grafana-agent}/windows/config.river (100%) rename packaging/{grafana-agent-flow => grafana-agent}/windows/macros.nsis (100%) delete mode 100755 tools/generate-crds.bash diff --git a/.drone/drone.yml b/.drone/drone.yml index ad7e093e0f..1e1fb6d2da 100644 --- a/.drone/drone.yml +++ b/.drone/drone.yml @@ -16,42 +16,6 @@ trigger: type: docker --- kind: pipeline -name: Test dashboards -platform: - arch: amd64 - os: linux -steps: -- commands: - - make generate-dashboards - - ERR_MSG="Dashboard definitions are out of date. Please run 'make generate-dashboards' - and commit changes!" - - if [ ! -z "$(git status --porcelain)" ]; then echo $ERR_MSG >&2; exit 1; fi - image: grafana/agent-build-image:0.32.0 - name: Regenerate dashboards -trigger: - event: - - pull_request -type: docker ---- -kind: pipeline -name: Test crds -platform: - arch: amd64 - os: linux -steps: -- commands: - - make generate-crds - - ERR_MSG="Custom Resource Definitions are out of date. Please run 'make generate-crds' - and commit changes!" - - if [ ! 
-z "$(git status --porcelain)" ]; then echo $ERR_MSG >&2; exit 1; fi - image: grafana/agent-build-image:0.32.0 - name: Regenerate crds -trigger: - event: - - pull_request -type: docker ---- -kind: pipeline name: Test platform: arch: amd64 @@ -130,56 +94,6 @@ volumes: name: docker --- kind: pipeline -name: Check Linux container (grafana/agentctl) -platform: - arch: amd64 - os: linux -steps: -- commands: - - make agentctl-image - image: grafana/agent-build-image:0.32.0 - name: Build container - volumes: - - name: docker - path: /var/run/docker.sock -trigger: - paths: - - cmd/grafana-agentctl/Dockerfile - - tools/ci/docker-containers - ref: - - refs/heads/main -type: docker -volumes: -- host: - path: /var/run/docker.sock - name: docker ---- -kind: pipeline -name: Check Linux container (grafana/agent-operator) -platform: - arch: amd64 - os: linux -steps: -- commands: - - make operator-image - image: grafana/agent-build-image:0.32.0 - name: Build container - volumes: - - name: docker - path: /var/run/docker.sock -trigger: - paths: - - cmd/grafana-agent-operator/Dockerfile - - tools/ci/docker-containers - ref: - - refs/heads/main -type: docker -volumes: -- host: - path: /var/run/docker.sock - name: docker ---- -kind: pipeline name: Check Windows container (grafana/agent) platform: arch: amd64 @@ -206,32 +120,6 @@ volumes: name: docker --- kind: pipeline -name: Check Windows container (grafana/agentctl) -platform: - arch: amd64 - os: windows - version: "1809" -steps: -- commands: - - '& "C:/Program Files/git/bin/bash.exe" ./tools/ci/docker-containers-windows agentctl' - image: grafana/agent-build-image:0.32.0-windows - name: Build container - volumes: - - name: docker - path: //./pipe/docker_engine/ -trigger: - paths: - - cmd/grafana-agentctl/Dockerfile.windows - - tools/ci/docker-containers-windows - ref: - - refs/heads/main -type: docker -volumes: -- host: - path: //./pipe/docker_engine/ - name: docker ---- -kind: pipeline name: Build agent (Linux amd64) platform: arch: amd64 @@ -364,402 +252,6 @@ trigger: type: docker --- kind: pipeline -name: Build agent-flow (Linux amd64) -platform: - arch: amd64 - os: linux -steps: -- commands: - - make generate-ui - - GO_TAGS="builtinassets promtail_journal_enabled" GOOS=linux GOARCH=amd64 GOARM= - make agent-flow - image: grafana/agent-build-image:0.32.0 - name: Build -trigger: - event: - - pull_request -type: docker ---- -kind: pipeline -name: Build agent-flow (Linux arm64) -platform: - arch: amd64 - os: linux -steps: -- commands: - - make generate-ui - - GO_TAGS="builtinassets promtail_journal_enabled" GOOS=linux GOARCH=arm64 GOARM= - make agent-flow - image: grafana/agent-build-image:0.32.0 - name: Build -trigger: - event: - - pull_request -type: docker ---- -kind: pipeline -name: Build agent-flow (Linux ppc64le) -platform: - arch: amd64 - os: linux -steps: -- commands: - - make generate-ui - - GO_TAGS="builtinassets promtail_journal_enabled" GOOS=linux GOARCH=ppc64le GOARM= - make agent-flow - image: grafana/agent-build-image:0.32.0 - name: Build -trigger: - event: - - pull_request -type: docker ---- -kind: pipeline -name: Build agent-flow (Linux s390x) -platform: - arch: amd64 - os: linux -steps: -- commands: - - make generate-ui - - GO_TAGS="builtinassets promtail_journal_enabled" GOOS=linux GOARCH=s390x GOARM= - make agent-flow - image: grafana/agent-build-image:0.32.0 - name: Build -trigger: - event: - - pull_request -type: docker ---- -kind: pipeline -name: Build agent-flow (macOS Intel) -platform: - arch: amd64 - os: linux -steps: -- commands: - 
- make generate-ui - - GO_TAGS="builtinassets" GOOS=darwin GOARCH=amd64 GOARM= make agent-flow - image: grafana/agent-build-image:0.32.0 - name: Build -trigger: - event: - - pull_request -type: docker ---- -kind: pipeline -name: Build agent-flow (macOS Apple Silicon) -platform: - arch: amd64 - os: linux -steps: -- commands: - - make generate-ui - - GO_TAGS="builtinassets" GOOS=darwin GOARCH=arm64 GOARM= make agent-flow - image: grafana/agent-build-image:0.32.0 - name: Build -trigger: - event: - - pull_request -type: docker ---- -kind: pipeline -name: Build agent-flow (Windows amd64) -platform: - arch: amd64 - os: linux -steps: -- commands: - - make generate-ui - - GO_TAGS="builtinassets" GOOS=windows GOARCH=amd64 GOARM= make agent-flow - image: grafana/agent-build-image:0.32.0 - name: Build -trigger: - event: - - pull_request -type: docker ---- -kind: pipeline -name: Build agent-flow (FreeBSD amd64) -platform: - arch: amd64 - os: linux -steps: -- commands: - - make generate-ui - - GO_TAGS="builtinassets" GOOS=freebsd GOARCH=amd64 GOARM= make agent-flow - image: grafana/agent-build-image:0.32.0 - name: Build -trigger: - event: - - pull_request -type: docker ---- -kind: pipeline -name: Build agentctl (Linux amd64) -platform: - arch: amd64 - os: linux -steps: -- commands: - - make generate-ui - - GO_TAGS="builtinassets promtail_journal_enabled" GOOS=linux GOARCH=amd64 GOARM= - make agentctl - image: grafana/agent-build-image:0.32.0 - name: Build -trigger: - event: - - pull_request -type: docker ---- -kind: pipeline -name: Build agentctl (Linux arm64) -platform: - arch: amd64 - os: linux -steps: -- commands: - - make generate-ui - - GO_TAGS="builtinassets promtail_journal_enabled" GOOS=linux GOARCH=arm64 GOARM= - make agentctl - image: grafana/agent-build-image:0.32.0 - name: Build -trigger: - event: - - pull_request -type: docker ---- -kind: pipeline -name: Build agentctl (Linux ppc64le) -platform: - arch: amd64 - os: linux -steps: -- commands: - - make generate-ui - - GO_TAGS="builtinassets promtail_journal_enabled" GOOS=linux GOARCH=ppc64le GOARM= - make agentctl - image: grafana/agent-build-image:0.32.0 - name: Build -trigger: - event: - - pull_request -type: docker ---- -kind: pipeline -name: Build agentctl (Linux s390x) -platform: - arch: amd64 - os: linux -steps: -- commands: - - make generate-ui - - GO_TAGS="builtinassets promtail_journal_enabled" GOOS=linux GOARCH=s390x GOARM= - make agentctl - image: grafana/agent-build-image:0.32.0 - name: Build -trigger: - event: - - pull_request -type: docker ---- -kind: pipeline -name: Build agentctl (macOS Intel) -platform: - arch: amd64 - os: linux -steps: -- commands: - - make generate-ui - - GO_TAGS="builtinassets" GOOS=darwin GOARCH=amd64 GOARM= make agentctl - image: grafana/agent-build-image:0.32.0 - name: Build -trigger: - event: - - pull_request -type: docker ---- -kind: pipeline -name: Build agentctl (macOS Apple Silicon) -platform: - arch: amd64 - os: linux -steps: -- commands: - - make generate-ui - - GO_TAGS="builtinassets" GOOS=darwin GOARCH=arm64 GOARM= make agentctl - image: grafana/agent-build-image:0.32.0 - name: Build -trigger: - event: - - pull_request -type: docker ---- -kind: pipeline -name: Build agentctl (Windows amd64) -platform: - arch: amd64 - os: linux -steps: -- commands: - - make generate-ui - - GO_TAGS="builtinassets" GOOS=windows GOARCH=amd64 GOARM= make agentctl - image: grafana/agent-build-image:0.32.0 - name: Build -trigger: - event: - - pull_request -type: docker ---- -kind: pipeline -name: Build agentctl 
(FreeBSD amd64) -platform: - arch: amd64 - os: linux -steps: -- commands: - - make generate-ui - - GO_TAGS="builtinassets" GOOS=freebsd GOARCH=amd64 GOARM= make agentctl - image: grafana/agent-build-image:0.32.0 - name: Build -trigger: - event: - - pull_request -type: docker ---- -kind: pipeline -name: Build operator (Linux amd64) -platform: - arch: amd64 - os: linux -steps: -- commands: - - make generate-ui - - GO_TAGS="builtinassets promtail_journal_enabled" GOOS=linux GOARCH=amd64 GOARM= - make operator - image: grafana/agent-build-image:0.32.0 - name: Build -trigger: - event: - - pull_request -type: docker ---- -kind: pipeline -name: Build operator (Linux arm64) -platform: - arch: amd64 - os: linux -steps: -- commands: - - make generate-ui - - GO_TAGS="builtinassets promtail_journal_enabled" GOOS=linux GOARCH=arm64 GOARM= - make operator - image: grafana/agent-build-image:0.32.0 - name: Build -trigger: - event: - - pull_request -type: docker ---- -kind: pipeline -name: Build operator (Linux ppc64le) -platform: - arch: amd64 - os: linux -steps: -- commands: - - make generate-ui - - GO_TAGS="builtinassets promtail_journal_enabled" GOOS=linux GOARCH=ppc64le GOARM= - make operator - image: grafana/agent-build-image:0.32.0 - name: Build -trigger: - event: - - pull_request -type: docker ---- -kind: pipeline -name: Build operator (Linux s390x) -platform: - arch: amd64 - os: linux -steps: -- commands: - - make generate-ui - - GO_TAGS="builtinassets promtail_journal_enabled" GOOS=linux GOARCH=s390x GOARM= - make operator - image: grafana/agent-build-image:0.32.0 - name: Build -trigger: - event: - - pull_request -type: docker ---- -kind: pipeline -name: Build operator (macOS Intel) -platform: - arch: amd64 - os: linux -steps: -- commands: - - make generate-ui - - GO_TAGS="builtinassets" GOOS=darwin GOARCH=amd64 GOARM= make operator - image: grafana/agent-build-image:0.32.0 - name: Build -trigger: - event: - - pull_request -type: docker ---- -kind: pipeline -name: Build operator (macOS Apple Silicon) -platform: - arch: amd64 - os: linux -steps: -- commands: - - make generate-ui - - GO_TAGS="builtinassets" GOOS=darwin GOARCH=arm64 GOARM= make operator - image: grafana/agent-build-image:0.32.0 - name: Build -trigger: - event: - - pull_request -type: docker ---- -kind: pipeline -name: Build operator (Windows amd64) -platform: - arch: amd64 - os: linux -steps: -- commands: - - make generate-ui - - GO_TAGS="builtinassets" GOOS=windows GOARCH=amd64 GOARM= make operator - image: grafana/agent-build-image:0.32.0 - name: Build -trigger: - event: - - pull_request -type: docker ---- -kind: pipeline -name: Build operator (FreeBSD amd64) -platform: - arch: amd64 - os: linux -steps: -- commands: - - make generate-ui - - GO_TAGS="builtinassets" GOOS=freebsd GOARCH=amd64 GOARM= make operator - image: grafana/agent-build-image:0.32.0 - name: Build -trigger: - event: - - pull_request -type: docker ---- -kind: pipeline name: Build agent-boringcrypto (Linux amd64 boringcrypto) platform: arch: amd64 @@ -794,7 +286,7 @@ trigger: type: docker --- kind: pipeline -name: Build agent-flow-windows-boringcrypto (Windows amd64) +name: Build agent-windows-boringcrypto (Windows amd64) platform: arch: amd64 os: linux @@ -802,7 +294,7 @@ steps: - commands: - make generate-ui - GO_TAGS="builtinassets" GOOS=windows GOARCH=amd64 GOARM= GOEXPERIMENT=cngcrypto - make agent-flow-windows-boringcrypto + make agent-windows-boringcrypto image: grafana/agent-build-image:0.32.0-boringcrypto name: Build trigger: @@ -818,8 +310,6 @@ platform: 
steps: - commands: - DOCKER_OPTS="" make dist/grafana-agent-linux-amd64 - - DOCKER_OPTS="" make dist/grafana-agentctl-linux-amd64 - - DOCKER_OPTS="" make dist.temp/grafana-agent-flow-linux-amd64 - DOCKER_OPTS="" make test-packages image: grafana/agent-build-image:0.32.0 name: Test Linux system packages @@ -917,6 +407,6 @@ kind: secret name: updater_private_key --- kind: signature -hmac: 76dea65bd9dfc9b5f7b8e6c21d51e98a512879e6ff95e44b33c84025d801b959 +hmac: 118cf0f805912a99d8c268f18a179dbe69fa701fbf11f9a9effac933b63091e0 ... diff --git a/.drone/pipelines/check_containers.jsonnet b/.drone/pipelines/check_containers.jsonnet index bf070c44c1..28f4362310 100644 --- a/.drone/pipelines/check_containers.jsonnet +++ b/.drone/pipelines/check_containers.jsonnet @@ -3,13 +3,10 @@ local pipelines = import '../util/pipelines.jsonnet'; local linux_containers = [ { name: 'grafana/agent', make: 'make agent-image', path: 'cmd/grafana-agent/Dockerfile' }, - { name: 'grafana/agentctl', make: 'make agentctl-image', path: 'cmd/grafana-agentctl/Dockerfile' }, - { name: 'grafana/agent-operator', make: 'make operator-image', path: 'cmd/grafana-agent-operator/Dockerfile' }, ]; local windows_containers = [ { name: 'grafana/agent', argument: 'agent', path: 'cmd/grafana-agent/Dockerfile.windows' }, - { name: 'grafana/agentctl', argument: 'agentctl', path: 'cmd/grafana-agentctl/Dockerfile.windows' }, ]; ( diff --git a/.drone/pipelines/crosscompile.jsonnet b/.drone/pipelines/crosscompile.jsonnet index 6c6f7b2446..58c28575ad 100644 --- a/.drone/pipelines/crosscompile.jsonnet +++ b/.drone/pipelines/crosscompile.jsonnet @@ -30,16 +30,13 @@ local os_arch_tuples = [ local targets = [ 'agent', - 'agent-flow', - 'agentctl', - 'operator', ]; local targets_boringcrypto = [ 'agent-boringcrypto', ]; local targets_boringcrypto_windows = [ - 'agent-flow-windows-boringcrypto', + 'agent-windows-boringcrypto', ]; @@ -64,7 +61,7 @@ local build_environments(targets, tuples, image) = std.flatMap(function(target) target: target, tags: go_tags[platform.os], - } + (if 'experiment' in platform then { GOEXPERIMENT: platform.experiment } else { }), + } + (if 'experiment' in platform then { GOEXPERIMENT: platform.experiment } else {}), trigger: { event: ['pull_request'], @@ -75,7 +72,7 @@ local build_environments(targets, tuples, image) = std.flatMap(function(target) image: image, commands: [ 'make generate-ui', - (if 'GOEXPERIMENT' in env + (if 'GOEXPERIMENT' in env then 'GO_TAGS="%(tags)s" GOOS=%(GOOS)s GOARCH=%(GOARCH)s GOARM=%(GOARM)s GOEXPERIMENT=%(GOEXPERIMENT)s make %(target)s' % env else 'GO_TAGS="%(tags)s" GOOS=%(GOOS)s GOARCH=%(GOARCH)s GOARM=%(GOARM)s make %(target)s') % env, ], @@ -86,4 +83,4 @@ local build_environments(targets, tuples, image) = std.flatMap(function(target) build_environments(targets, os_arch_tuples, build_image.linux) + build_environments(targets_boringcrypto, os_arch_types_boringcrypto, build_image.linux) + -build_environments(targets_boringcrypto_windows, windows_os_arch_types_boringcrypto, build_image.boringcrypto) \ No newline at end of file +build_environments(targets_boringcrypto_windows, windows_os_arch_types_boringcrypto, build_image.boringcrypto) diff --git a/.drone/pipelines/publish.jsonnet b/.drone/pipelines/publish.jsonnet index bd578daca7..e3117042a3 100644 --- a/.drone/pipelines/publish.jsonnet +++ b/.drone/pipelines/publish.jsonnet @@ -6,7 +6,7 @@ local ghTokenFilename = '/drone/src/gh-token.txt'; // job_names gets the list of job names for use in depends_on. 
local job_names = function(jobs) std.map(function(job) job.name, jobs);

-local linux_containers = ['agent', 'agent-boringcrypto', 'agentctl', 'agent-operator'];
+local linux_containers = ['agent', 'agent-boringcrypto'];

local linux_containers_jobs = std.map(function(container) (
 pipelines.linux('Publish Linux %s container' % container) {
 trigger: {
@@ -60,7 +60,7 @@ local linux_containers_jobs = std.map(function(container) (
 }
), linux_containers);

-local windows_containers = ['agent', 'agentctl'];
+local windows_containers = ['agent'];
 local windows_containers_jobs = std.map(function(container) (
 pipelines.windows('Publish Windows %s container' % container) {
 trigger: {
diff --git a/.drone/pipelines/test.jsonnet b/.drone/pipelines/test.jsonnet
index 6daa113d20..76e37c8b18 100644
--- a/.drone/pipelines/test.jsonnet
+++ b/.drone/pipelines/test.jsonnet
@@ -16,40 +16,6 @@ local pipelines = import '../util/pipelines.jsonnet';
 }],
 },

- pipelines.linux('Test dashboards') {
- trigger: {
- event: ['pull_request'],
- },
- steps: [{
- name: 'Regenerate dashboards',
- image: build_image.linux,
-
- commands: [
- 'make generate-dashboards',
- 'ERR_MSG="Dashboard definitions are out of date. Please run \'make generate-dashboards\' and commit changes!"',
- // "git status --porcelain" reports if there's any new, modified, or deleted files.
- 'if [ ! -z "$(git status --porcelain)" ]; then echo $ERR_MSG >&2; exit 1; fi',
- ],
- }],
- },
-
- pipelines.linux('Test crds') {
- trigger: {
- event: ['pull_request'],
- },
- steps: [{
- name: 'Regenerate crds',
- image: build_image.linux,
-
- commands: [
- 'make generate-crds',
- 'ERR_MSG="Custom Resource Definitions are out of date. Please run \'make generate-crds\' and commit changes!"',
- // "git status --porcelain" reports if there's any new, modified, or deleted files.
- 'if [ ! -z "$(git status --porcelain)" ]; then echo $ERR_MSG >&2; exit 1; fi',
- ],
- }],
- },
-
 pipelines.linux('Test') {
 trigger: {
 event: ['pull_request'],
diff --git a/.drone/pipelines/test_packages.jsonnet b/.drone/pipelines/test_packages.jsonnet
index c08ac0c0d3..b1b7d20715 100644
--- a/.drone/pipelines/test_packages.jsonnet
+++ b/.drone/pipelines/test_packages.jsonnet
@@ -19,8 +19,6 @@ local pipelines = import '../util/pipelines.jsonnet';
 }],
 commands: [
 'DOCKER_OPTS="" make dist/grafana-agent-linux-amd64',
- 'DOCKER_OPTS="" make dist/grafana-agentctl-linux-amd64',
- 'DOCKER_OPTS="" make dist.temp/grafana-agent-flow-linux-amd64',
 'DOCKER_OPTS="" make test-packages',
 ],
 }],
diff --git a/Makefile b/Makefile
index b88d9f1e49..c01014d475 100644
--- a/Makefile
+++ b/Makefile
@@ -24,37 +24,28 @@
 ## binaries Compiles all binaries.
 ## agent Compiles cmd/grafana-agent to $(AGENT_BINARY)
 ## agent-boringcrypto Compiles cmd/grafana-agent with GOEXPERIMENT=boringcrypto to $(AGENT_BORINGCRYPTO_BINARY)
-## agent-flow Compiles cmd/grafana-agent-flow to $(FLOW_BINARY)
-## agent-flow-windows-boringcrypto Compiles cmd/grafana-agent-flow to $(FLOW_BINARY)-windows-boringcrypto
+## agent-windows-boringcrypto Compiles cmd/grafana-agent to $(AGENT_BORINGCRYPTO_WINDOWS_BINARY)
 ## agent-service Compiles cmd/grafana-agent-service to $(SERVICE_BINARY)
-## agentctl Compiles cmd/grafana-agentctl to $(AGENTCTL_BINARY)
-## operator Compiles cmd/grafana-agent-operator to $(OPERATOR_BINARY)
 ##
 ## Targets for building Docker images:
 ##
 ## images Builds all Docker images.
 ## agent-image Builds agent Docker image.
 ## agent-boringcrypto-image Builds agent Docker image with boringcrypto. 
-## agentctl-image Builds agentctl Docker image.
-## operator-image Builds operator Docker image.
 ##
 ## Targets for packaging:
 ##
 ## dist Produce release assets for everything.
 ## dist-agent-binaries Produce release-ready agent binaries.
-## dist-agentctl-binaries Produce release-ready agentctl binaries.
 ## dist-packages Produce release-ready DEB and RPM packages.
 ## dist-agent-installer Produce a Windows installer for Grafana Agent.
 ##
 ## Targets for generating assets:
 ##
 ## generate Generate everything.
-## generate-crds Generate Grafana Agent Operator CRDs and its documentation.
 ## generate-drone Generate the Drone YAML from Jsonnet.
 ## generate-helm-docs Generate Helm chart documentation.
 ## generate-helm-tests Generate Helm chart tests.
-## generate-dashboards Generate dashboards in example/docker-compose after
-## changing Jsonnet.
 ## generate-protos Generate protobuf files.
 ## generate-ui Generate the UI assets.
 ## generate-versioned-files Generate versioned files.
@@ -70,40 +61,31 @@
 ##
 ## Environment variables:
 ##
-## USE_CONTAINER Set to 1 to enable proxying commands to build container
-## AGENT_IMAGE Image name:tag built by `make agent-image`
-## AGENTCTL_IMAGE Image name:tag built by `make agentctl-image`
-## OPERATOR_IMAGE Image name:tag built by `make operator-image`
-## BUILD_IMAGE Image name:tag used by USE_CONTAINER=1
-## AGENT_BINARY Output path of `make agent` (default build/grafana-agent)
-## AGENT_BORINGCRYPTO_BINARY Output path of `make agent-boringcrypto` (default build/grafana-agent-boringcrypto)
-## FLOW_BINARY Output path of `make agent-flow` (default build/grafana-agent-flow)
-## SERVICE_BINARY Output path of `make agent-service` (default build/grafana-agent-service)
-## AGENTCTL_BINARY Output path of `make agentctl` (default build/grafana-agentctl)
-## OPERATOR_BINARY Output path of `make operator` (default build/grafana-agent-operator)
-## GOOS Override OS to build binaries for
-## GOARCH Override target architecture to build binaries for
-## GOARM Override ARM version (6 or 7) when GOARCH=arm
-## CGO_ENABLED Set to 0 to disable Cgo for binaries.
-## RELEASE_BUILD Set to 1 to build release binaries.
-## VERSION Version to inject into built binaries.
-## GO_TAGS Extra tags to use when building.
-## DOCKER_PLATFORM Overrides platform to build Docker images for (defaults to host platform).
-## GOEXPERIMENT Used to enable features, most likely boringcrypto via GOEXPERIMENT=boringcrypto.
+## USE_CONTAINER Set to 1 to enable proxying commands to build container
+## AGENT_IMAGE Image name:tag built by `make agent-image`
+## BUILD_IMAGE Image name:tag used by USE_CONTAINER=1
+## AGENT_BINARY Output path of `make agent` (default build/grafana-agent)
+## AGENT_BORINGCRYPTO_BINARY Output path of `make agent-boringcrypto` (default build/grafana-agent-boringcrypto)
+## AGENT_BORINGCRYPTO_WINDOWS_BINARY Output path of `make agent-windows-boringcrypto` (default build/grafana-agent-windows-boringcrypto.exe)
+## SERVICE_BINARY Output path of `make agent-service` (default build/grafana-agent-service)
+## GOOS Override OS to build binaries for
+## GOARCH Override target architecture to build binaries for
+## GOARM Override ARM version (6 or 7) when GOARCH=arm
+## CGO_ENABLED Set to 0 to disable Cgo for binaries.
+## RELEASE_BUILD Set to 1 to build release binaries.
+## VERSION Version to inject into built binaries.
+## GO_TAGS Extra tags to use when building.
+## DOCKER_PLATFORM Overrides platform to build Docker images for (defaults to host platform). 
+## GOEXPERIMENT Used to enable features, most likely boringcrypto via GOEXPERIMENT=boringcrypto. include tools/make/*.mk AGENT_IMAGE ?= grafana/agent:latest AGENT_BORINGCRYPTO_IMAGE ?= grafana/agent-boringcrypto:latest -AGENTCTL_IMAGE ?= grafana/agentctl:latest -OPERATOR_IMAGE ?= grafana/agent-operator:latest AGENT_BINARY ?= build/grafana-agent AGENT_BORINGCRYPTO_BINARY ?= build/grafana-agent-boringcrypto -AGENT_BORINGCRYPTO_WINDOWS_BINARY ?= build/agent-flow-windows-boringcrypto.exe -FLOW_BINARY ?= build/grafana-agent-flow +AGENT_BORINGCRYPTO_WINDOWS_BINARY ?= build/grafana-agent-windows-boringcrypto.exe SERVICE_BINARY ?= build/grafana-agent-service -AGENTCTL_BINARY ?= build/grafana-agentctl -OPERATOR_BINARY ?= build/grafana-agent-operator AGENTLINT_BINARY ?= build/agentlint GOOS ?= $(shell go env GOOS) GOARCH ?= $(shell go env GOARCH) @@ -115,9 +97,9 @@ GOEXPERIMENT ?= $(shell go env GOEXPERIMENT) # List of all environment variables which will propagate to the build # container. USE_CONTAINER must _not_ be included to avoid infinite recursion. PROPAGATE_VARS := \ - AGENT_IMAGE AGENTCTL_IMAGE OPERATOR_IMAGE \ + AGENT_IMAGE \ BUILD_IMAGE GOOS GOARCH GOARM CGO_ENABLED RELEASE_BUILD \ - AGENT_BINARY AGENT_BORINGCRYPTO_BINARY FLOW_BINARY AGENTCTL_BINARY OPERATOR_BINARY \ + AGENT_BINARY AGENT_BORINGCRYPTO_BINARY \ VERSION GO_TAGS GOEXPERIMENT # @@ -149,8 +131,7 @@ endif # # Targets for running tests # -# These targets currently don't support proxying to a build container due to -# difficulties with testing ./internal/util/k8s and testing packages. +# These targets currently don't support proxying to a build container. # .PHONY: lint @@ -163,7 +144,7 @@ lint: agentlint # more without -race for packages that have known race detection issues. test: $(GO_ENV) go test $(GO_FLAGS) -race $(shell go list ./... | grep -v /integration-tests/) - $(GO_ENV) go test $(GO_FLAGS) ./internal/static/integrations/node_exporter ./internal/static/logs ./internal/static/operator ./internal/util/k8s ./internal/component/otelcol/processor/tail_sampling ./internal/component/loki/source/file ./internal/component/loki/source/docker + $(GO_ENV) go test $(GO_FLAGS) ./internal/static/integrations/node_exporter ./internal/static/logs ./internal/component/otelcol/processor/tail_sampling ./internal/component/loki/source/file ./internal/component/loki/source/docker test-packages: docker pull $(BUILD_IMAGE) @@ -177,8 +158,8 @@ integration-test: # Targets for building binaries # -.PHONY: binaries agent agent-boringcrypto agent-flow agentctl operator -binaries: agent agent-boringcrypto agent-flow agentctl operator +.PHONY: binaries agent agent-boringcrypto +binaries: agent agent-boringcrypto agent: ifeq ($(USE_CONTAINER),1) @@ -194,19 +175,11 @@ else GOEXPERIMENT=boringcrypto $(GO_ENV) go build $(GO_FLAGS) -o $(AGENT_BORINGCRYPTO_BINARY) ./cmd/grafana-agent endif -agent-flow-windows-boringcrypto: +agent-windows-boringcrypto: ifeq ($(USE_CONTAINER),1) $(RERUN_IN_CONTAINER) else - GOEXPERIMENT=cngcrypto $(GO_ENV) go build $(GO_FLAGS) -tags cngcrypto -o $(AGENT_BORINGCRYPTO_WINDOWS_BINARY) ./cmd/grafana-agent-flow -endif - - -agent-flow: -ifeq ($(USE_CONTAINER),1) - $(RERUN_IN_CONTAINER) -else - $(GO_ENV) go build $(GO_FLAGS) -o $(FLOW_BINARY) ./cmd/grafana-agent-flow + GOEXPERIMENT=cngcrypto $(GO_ENV) go build $(GO_FLAGS) -tags cngcrypto -o $(AGENT_BORINGCRYPTO_WINDOWS_BINARY) ./cmd/grafana-agent endif # agent-service is not included in binaries since it's Windows-only. 
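#
# For example, a hypothetical cross-compile of the Windows boringcrypto binary
# from a Linux checkout (GOOS/GOARCH overrides as documented in the help text):
#
#   GOOS=windows GOARCH=amd64 make agent-windows-boringcrypto
#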
@@ -217,20 +190,6 @@ else
 $(GO_ENV) go build $(GO_FLAGS) -o $(SERVICE_BINARY) ./cmd/grafana-agent-service
endif

-agentctl:
-ifeq ($(USE_CONTAINER),1)
- $(RERUN_IN_CONTAINER)
-else
- $(GO_ENV) go build $(GO_FLAGS) -o $(AGENTCTL_BINARY) ./cmd/grafana-agentctl
-endif
-
-operator:
-ifeq ($(USE_CONTAINER),1)
- $(RERUN_IN_CONTAINER)
-else
- $(GO_ENV) go build $(GO_FLAGS) -o $(OPERATOR_BINARY) ./cmd/grafana-agent-operator
-endif
-
 agentlint:
ifeq ($(USE_CONTAINER),1)
 $(RERUN_IN_CONTAINER)
@@ -248,32 +207,20 @@ ifneq ($(DOCKER_PLATFORM),)
DOCKER_FLAGS += --platform=$(DOCKER_PLATFORM)
endif

-.PHONY: images agent-image agentctl-image operator-image
-images: agent-image agentctl-image operator-image
+.PHONY: images agent-image
+images: agent-image

agent-image:
	DOCKER_BUILDKIT=1 docker build $(DOCKER_FLAGS) -t $(AGENT_IMAGE) -f cmd/grafana-agent/Dockerfile .
-agentctl-image:
-	DOCKER_BUILDKIT=1 docker build $(DOCKER_FLAGS) -t $(AGENTCTL_IMAGE) -f cmd/grafana-agentctl/Dockerfile .
agent-boringcrypto-image:
	DOCKER_BUILDKIT=1 docker build $(DOCKER_FLAGS) --build-arg GOEXPERIMENT=boringcrypto -t $(AGENT_BORINGCRYPTO_IMAGE) -f cmd/grafana-agent/Dockerfile .
-operator-image:
-	DOCKER_BUILDKIT=1 docker build $(DOCKER_FLAGS) -t $(OPERATOR_IMAGE) -f cmd/grafana-agent-operator/Dockerfile .

#
# Targets for generating assets
#

-.PHONY: generate generate-crds generate-drone generate-helm-docs generate-helm-tests generate-dashboards generate-protos generate-ui generate-versioned-files
-generate: generate-crds generate-drone generate-helm-docs generate-helm-tests generate-dashboards generate-protos generate-ui generate-versioned-files generate-docs
-
-generate-crds:
-ifeq ($(USE_CONTAINER),1)
-	$(RERUN_IN_CONTAINER)
-else
-	bash ./tools/generate-crds.bash
-	gen-crd-api-reference-docs -config tools/gen-crd-docs/config.json -api-dir "github.com/grafana/agent/internal/static/operator/apis/monitoring/" -out-file docs/sources/operator/api.md -template-dir tools/gen-crd-docs/template
-endif
+.PHONY: generate generate-drone generate-helm-docs generate-helm-tests generate-protos generate-ui generate-versioned-files
+generate: generate-drone generate-helm-docs generate-helm-tests generate-protos generate-ui generate-versioned-files generate-docs

generate-drone:
	drone jsonnet -V BUILD_IMAGE_VERSION=$(BUILD_IMAGE_VERSION) --stream --format --source .drone/drone.jsonnet --target .drone/drone.yml
@@ -292,14 +239,6 @@ else
	bash ./operations/helm/scripts/rebuild-tests.sh
endif

-generate-dashboards:
-ifeq ($(USE_CONTAINER),1)
-	$(RERUN_IN_CONTAINER)
-else
-	cd example/docker-compose && jb install && \
-	cd grafana/dashboards && jsonnet template.jsonnet -J ../../vendor -m .
-endif
-
generate-protos:
ifeq ($(USE_CONTAINER),1)
	$(RERUN_IN_CONTAINER)
@@ -350,12 +289,8 @@ clean: clean-dist clean-build-container-cache
info:
	@printf "USE_CONTAINER = $(USE_CONTAINER)\n"
	@printf "AGENT_IMAGE = $(AGENT_IMAGE)\n"
-	@printf "AGENTCTL_IMAGE = $(AGENTCTL_IMAGE)\n"
-	@printf "OPERATOR_IMAGE = $(OPERATOR_IMAGE)\n"
	@printf "BUILD_IMAGE = $(BUILD_IMAGE)\n"
	@printf "AGENT_BINARY = $(AGENT_BINARY)\n"
-	@printf "AGENTCTL_BINARY = $(AGENTCTL_BINARY)\n"
-	@printf "OPERATOR_BINARY = $(OPERATOR_BINARY)\n"
	@printf "GOOS = $(GOOS)\n"
	@printf "GOARCH = $(GOARCH)\n"
	@printf "GOARM = $(GOARM)\n"
diff --git a/cmd/grafana-agent-flow/main.go b/cmd/grafana-agent-flow/main.go
deleted file mode 100644
index 6ba087e38f..0000000000
--- a/cmd/grafana-agent-flow/main.go
+++ /dev/null
@@ -1,32 +0,0 @@
-// Command grafana-agent-flow is a Flow mode-only binary. 
It acts as an
-// alternative to grafana-agent in environments where users want to run Flow
-// mode alongside static mode and control versions separately.
-//
-// Use grafana-agent instead for a binary which can switch between static mode
-// and Flow mode at runtime.
-package main
-
-import (
- "github.com/grafana/agent/internal/build"
- "github.com/grafana/agent/internal/flowmode"
- "github.com/prometheus/client_golang/prometheus"
-
- // Register Prometheus SD components
- _ "github.com/grafana/loki/clients/pkg/promtail/discovery/consulagent"
- _ "github.com/prometheus/prometheus/discovery/install"
-
- // Register integrations
- _ "github.com/grafana/agent/internal/static/integrations/install"
-
- // Embed a set of fallback X.509 trusted roots
- // Allows the app to work correctly even when the OS does not provide a verifier or system roots pool
- _ "golang.org/x/crypto/x509roots/fallback"
-)
-
-func init() {
- prometheus.MustRegister(build.NewCollector("agent"))
-}
-
-func main() {
- flowmode.Run()
-}
diff --git a/cmd/grafana-agent-operator/DEVELOPERS.md b/cmd/grafana-agent-operator/DEVELOPERS.md
deleted file mode 100644
index 58f7be9ae8..0000000000
--- a/cmd/grafana-agent-operator/DEVELOPERS.md
+++ /dev/null
@@ -1,189 +0,0 @@
-# Developer's Guide
-
-This document contains maintainer-specific information.
-
-Table of Contents:
-
-1. [Introduction](#introduction)
-2. [Updating CRDs](#updating-crds)
-3. [Testing Locally](#testing-locally)
-4. [Development Architecture](#development-architecture)
-
-## Introduction
-
-Kubernetes Operators are designed to automate the behavior of human operators
-for pieces of software. The Grafana Agent Operator, in particular, is based off
-of the very popular [Prometheus
-Operator](https://github.com/prometheus-operator/prometheus-operator):
-
-1. We use the same v1 CRDs from the official project.
-2. We aim to generate the same remote_write and scrape_configs that the
- Prometheus Operator does.
-
-That being said, we're not fully compatible, and the Grafana Agent Operator has
-the same trade-offs that the Grafana Agent does: no recording rules, no alerts,
-no local storage for querying metrics.
-
-The public [Grafana Agent Operator design
-doc](https://docs.google.com/document/d/1nlwhJLspTkkm8vLgrExJgf02b9GCAWv_Ci_a9DliI_s)
-goes into more detail about the context and design decisions being made.
-
-## Updating CRDs
-
-The `make generate-crds` command at the root of this repository will generate CRDs and
-other code used by the operator. This calls the [generate-crds
-script](../../tools/generate-crds.bash) in a container. If you wish to call this
-script manually, you must also install `controller-gen` and `gen-crd-api-reference-docs`.
-Be sure to keep their versions in sync with what's defined in the `Dockerfile`.
-
-```
-go install sigs.k8s.io/controller-tools/cmd/controller-gen@v0.9.2
-go install github.com/ahmetb/gen-crd-api-reference-docs@v0.3.1-0.20220618162802-424739b250f5
-```
-
-Use the following to run the script in a container:
-
-```
-USE_CONTAINER=1 make generate-crds
-```
-
-## Testing Locally
-
-Create a k3d cluster (requires k3d v4.x):
-
-```
-k3d cluster create agent-operator \
- --port 30080:80@loadbalancer \
- --api-port 50043 \
- --kubeconfig-update-default=true \
- --kubeconfig-switch-context=true \
- --wait
-```
-
-### Deploy Prometheus
-
-An example Prometheus server is provided in `./example-prometheus.yaml`. 
Deploy
-it with the following, from the root of the repository:
-
-```
-kubectl apply -f ./cmd/grafana-agent-operator/example-prometheus.yaml
-```
-
-You can view it at http://prometheus.k3d.localhost:30080 once the k3d cluster is
-running.
-
-### Apply the CRDs
-
-Generated CRDs used by the operator can be found in [the Production
-folder](../../operations/agent-static-operator/crds). Deploy them from the root of the
-repository with:
-
-```
-kubectl apply -f production/operator/crds
-```
-
-### Run the Operator
-
-Now that the CRDs are applied, you can run the operator from the root of the
-repository:
-
-```
-go run ./cmd/grafana-agent-operator
-```
-
-### Apply a GrafanaAgent custom resource
-
-Finally, you can apply an example GrafanaAgent custom resource. One is [provided
-for you](../../cmd/grafana-agent-operator/agent-example-config.yaml). From the root of the repository, run:
-
-```
-kubectl apply -f ./cmd/grafana-agent-operator/agent-example-config.yaml
-```
-
-If you are running the operator, you should see it pick up the change and start
-mutating the cluster.
-
-## Development Architecture
-
-This project makes heavy use of the [Kubernetes SIG Controller
-Runtime](https://pkg.go.dev/sigs.k8s.io/controller-runtime) project. That
-project has its own documentation, but for a high level overview of how it
-relates to this project:
-
-1. The Grafana Agent Operator is composed of a single _controller_. A
- _controller_ is responsible for responding to changes to Kubernetes resources.
-
-2. Controllers can be notified about changes to:
-
- 1. One Primary resource (i.e., the GrafanaAgent CR)
-
- 2. Any number of secondary resources used to deploy the managed software
- (e.g., ServiceMonitor, PodMonitors). This is done using a custom event
- handler, which we'll detail below.
-
- 3. Any number of resources the Operator deploys (ConfigMaps, Secrets,
- StatefulSets). This is done using
- [ownerReferences](https://kubernetes.io/docs/concepts/workloads/controllers/garbage-collection/#owners-and-dependents).
-
-3. Controllers have one _reconciler_. The reconciler handles updating managed
- resources for one specific primary resource. The `GrafanaAgent` CRD is
- the primary resource, and the reconciler will handle updating managed
- resources for all discovered GrafanaAgent CRs. Each reconcile request is for
- a specific CR, such as `agent-1` or `agent-2`.
-
-4. A _manager_ initializes all controllers for a project. It provides a caching
- Kubernetes client and propagates Kubernetes events to controllers.
-
-An `EnqueueRequestForSelector` event handler was added to handle changes to
-secondary resources, which is not a concept in the official Controller
-Runtime project. This works by allowing the reconciler to request events for
-a given primary resource if one of the secondary resources changes. This
-means that multiple primary resources can watch a ServiceMonitor and cause a
-reconcile when it changes.
-
-Event handlers are specific to a resource, so there is one
-`EnqueueRequestForSelector` handler per secondary resource.
-
-Reconciles are supposed to be idempotent, so deletes, updates, and creates
-should be treated the same. All managed resources are deployed with
-ownerReferences set, so managed resources will be automatically deleted by
-Kubernetes' garbage collector when the primary resource gets deleted by the
-user.
-
-### Flow
-
-This section walks through what happens when a user deploys a new GrafanaAgent
-CR:
-
-1. 
A GrafanaAgent CR `default/agent` gets deployed to a cluster
-
-2. The Controller's event handlers get notified about the event and queue a
- reconcile request for `default/agent`.
-
-3. The reconciler discovers all secondary `MetricsInstance` resources referenced by
- `default/agent`.
-
-4. The reconciler discovers all secondary `ServiceMonitor`, `PodMonitor` and
- `Probe` resources that are referenced by the discovered `MetricsInstance`
- resources.
-
-5. The reconciler informs the appropriate `EnqueueRequestForSelector` event
- handlers that changes to those resources should cause a new reconcile for
- `default/agent`.
-
-6. The reconciler discovers all `Secrets` referenced across all current
- resources. The contents of the secrets are held in-memory to statically
- configure Grafana Agent fields that do not support reading in from a file
- (e.g., basic auth username).
-
-7. All the discovered secrets are copied to a new Secret in the `default`
- namespace. This is done in case a `ServiceMonitor` is found in a different
- namespace than where the Agent will be deployed.
-
-8. A new Secret is created for the configuration of the Grafana Agent.
-
-9. A StatefulSet is generated for the Grafana Agent.
-
-When `default/agent` gets deleted, all `EnqueueRequestForSelector` event
-handlers get notified to stop sending events for `default/agent`.
-
-
diff --git a/cmd/grafana-agent-operator/Dockerfile b/cmd/grafana-agent-operator/Dockerfile
deleted file mode 100644
index 3759851610..0000000000
--- a/cmd/grafana-agent-operator/Dockerfile
+++ /dev/null
@@ -1,37 +0,0 @@
-# syntax=docker/dockerfile:1.4
-
-# NOTE: This Dockerfile can only be built using BuildKit. BuildKit is used by
-# default when running `docker buildx build` or when DOCKER_BUILDKIT=1 is set
-# in environment variables.
-
-FROM --platform=$BUILDPLATFORM grafana/agent-build-image:0.32.0 as build
-ARG BUILDPLATFORM
-ARG TARGETPLATFORM
-ARG TARGETOS
-ARG TARGETARCH
-ARG TARGETVARIANT
-ARG RELEASE_BUILD=1
-ARG VERSION
-
-COPY . /src/agent
-WORKDIR /src/agent
-
-RUN --mount=type=cache,target=/root/.cache/go-build \
- --mount=type=cache,target=/go/pkg/mod \
- GOOS=$TARGETOS GOARCH=$TARGETARCH GOARM=${TARGETVARIANT#v} GO_TAGS=netgo \
- RELEASE_BUILD=${RELEASE_BUILD} VERSION=${VERSION} \
- make operator
-
-FROM ubuntu:mantic
-
-LABEL org.opencontainers.image.source="https://github.com/grafana/agent"
-
-# Install dependencies needed at runtime. 
-RUN < duration.Seconds() { - http.Error(rw, "duration value exceeds the server's write timeout", http.StatusBadRequest) - return - } - duration = time.Duration(d) * time.Second - } - ctx, cancel := context.WithTimeout(context.Background(), duration) - defer cancel() - - ep.mut.Lock() - var ( - enabledFeatures = ep.cfg.EnabledFeatures - httpSrvAddress = ep.cfg.ServerFlags.HTTP.ListenAddress - ) - ep.mut.Unlock() - - var logsBuffer bytes.Buffer - logger := log.NewSyncLogger(log.NewLogfmtLogger(&logsBuffer)) - defer func() { - ep.log.HookLogger.Set(nil) - }() - ep.log.HookLogger.Set(logger) - - var configBytes []byte - var err error - if cfg.EnableConfigEndpoints { - configBytes, err = yaml.Marshal(cfg) - if err != nil { - http.Error(rw, fmt.Sprintf("failed to marshal config: %s", err), http.StatusInternalServerError) - } - } - - bundle, err := supportbundle.Export(ctx, enabledFeatures, configBytes, httpSrvAddress, ep.srv.DialContext) - if err != nil { - http.Error(rw, err.Error(), http.StatusInternalServerError) - return - } - if err := supportbundle.Serve(rw, bundle, &logsBuffer); err != nil { - http.Error(rw, err.Error(), http.StatusInternalServerError) - return - } -} - -// TriggerReload will cause the Entrypoint to re-request the config file and -// apply the latest config. TriggerReload returns true if the reload was -// successful. -func (ep *Entrypoint) TriggerReload() bool { - level.Info(ep.log).Log("msg", "reload of config file requested") - - cfg, err := ep.reloader(ep.log) - if err != nil { - level.Error(ep.log).Log("msg", "failed to reload config file", "err", err) - return false - } - cfg.LogDeprecations(ep.log) - - err = ep.ApplyConfig(*cfg) - if err != nil { - level.Error(ep.log).Log("msg", "failed to reload config file", "err", err) - return false - } - - return true -} - -// pollConfig triggers a reload of the config on each tick of the ticker until the context -// completes. -func (ep *Entrypoint) pollConfig(ctx context.Context, sleepTime time.Duration) error { - // Add an initial jitter to requests - time.Sleep(ep.cfg.AgentManagement.JitterTime()) - - t := time.NewTicker(sleepTime) - for { - select { - case <-ctx.Done(): - return nil - case <-t.C: - ok := ep.TriggerReload() - if !ok { - level.Error(ep.log).Log("msg", "config reload did not succeed") - } - } - } -} - -// Stop stops the Entrypoint and all subsystems. -func (ep *Entrypoint) Stop() { - ep.mut.Lock() - defer ep.mut.Unlock() - - ep.integrations.Stop() - ep.lokiLogs.Stop() - ep.promMetrics.Stop() - ep.tempoTraces.Stop() - ep.srv.Close() - - if ep.reloadServer != nil { - ep.reloadServer.Close() - } -} - -// Start starts the server used by the Entrypoint, and will block until a -// termination signal is sent to the process. -func (ep *Entrypoint) Start() error { - var g run.Group - - // Create a signal handler that will stop the Entrypoint once a termination - // signal is received. 
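- // Each subsystem below is registered with the run.Group as an
- // execute/interrupt pair of functions: g.Run starts every execute function
- // and, once the first of them returns, calls every interrupt function and
- // waits for the remaining ones to exit.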
- signalHandler := signals.NewHandler(ep.log) - - notifier := make(chan os.Signal, 1) - signal.Notify(notifier, syscall.SIGHUP) - - defer func() { - signal.Stop(notifier) - close(notifier) - }() - - g.Add(func() error { - signalHandler.Loop() - return nil - }, func(e error) { - signalHandler.Stop() - }) - - if ep.reloadServer != nil && ep.reloadListener != nil { - g.Add(func() error { - return ep.reloadServer.Serve(ep.reloadListener) - }, func(e error) { - ep.reloadServer.Close() - }) - } - - if ep.cfg.AgentManagement.Enabled { - managementContext, managementCancel := context.WithCancel(context.Background()) - defer managementCancel() - - sleepTime := ep.cfg.AgentManagement.SleepTime() - g.Add(func() error { - return ep.pollConfig(managementContext, sleepTime) - }, func(e error) { - managementCancel() - }) - } - - srvContext, srvCancel := context.WithCancel(context.Background()) - defer srvCancel() - defer ep.srv.Close() - - g.Add(func() error { - return ep.srv.Run(srvContext) - }, func(e error) { - srvCancel() - }) - - ep.mut.Lock() - cfg := ep.cfg - ep.mut.Unlock() - if cfg.EnableUsageReport { - g.Add(func() error { - return ep.reporter.Start(srvContext, ep.getReporterMetrics) - }, func(e error) { - srvCancel() - }) - } - - go func() { - for range notifier { - ep.TriggerReload() - } - }() - - return g.Run() -} diff --git a/cmd/grafana-agent/main.go b/cmd/grafana-agent/main.go index 750cecead2..5bedc67367 100644 --- a/cmd/grafana-agent/main.go +++ b/cmd/grafana-agent/main.go @@ -1,18 +1,8 @@ package main import ( - "flag" - "log" - "os" - - "github.com/go-kit/log/level" - "github.com/grafana/agent/internal/boringcrypto" "github.com/grafana/agent/internal/build" "github.com/grafana/agent/internal/flowmode" - "github.com/grafana/agent/internal/static/config" - "github.com/grafana/agent/internal/static/server" - util_log "github.com/grafana/agent/internal/util/log" - "github.com/prometheus/client_golang/prometheus" // Register Prometheus SD components @@ -32,58 +22,5 @@ func init() { } func main() { - // If Windows is trying to run as a service, go through that - // path instead. - if IsWindowsService() { - err := RunService() - if err != nil { - log.Fatalln(err) - } - return - } - - runMode, err := getRunMode() - if err != nil { - log.Fatalln(err) - } - - // NOTE(rfratto): Flow when run through the primary Grafana Agent binary does - // not support being run as a Windows service. To run Flow mode as a Windows - // service, use cmd/grafana-agent-service and cmd/grafana-agent-flow instead. - if runMode == runModeFlow { - flowmode.Run() - return - } - - // Set up logging using default values before loading the config - defaultCfg := server.DefaultConfig() - logger := server.NewLogger(&defaultCfg) - - reloader := func(log *server.Logger) (*config.Config, error) { - fs := flag.NewFlagSet(os.Args[0], flag.ExitOnError) - return config.Load(fs, os.Args[1:], log) - } - cfg, err := reloader(logger) - if err != nil { - log.Fatalln(err) - } - - // After this point we can start using go-kit logging. 
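- // (The logger created before loading the config used default server
- // settings; recreating it here lets the configured log level and format
- // take effect.)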
- logger = server.NewLogger(cfg.Server) - util_log.Logger = logger - - level.Info(logger).Log("boringcrypto enabled", boringcrypto.Enabled) - ep, err := NewEntrypoint(logger, cfg, reloader) - if err != nil { - level.Error(logger).Log("msg", "error creating the agent server entrypoint", "err", err) - os.Exit(1) - } - - if err = ep.Start(); err != nil { - level.Error(logger).Log("msg", "error running agent", "err", err) - // Don't os.Exit here; we want to do cleanup by stopping promMetrics - } - - ep.Stop() - level.Info(logger).Log("msg", "agent exiting") + flowmode.Run() } diff --git a/cmd/grafana-agent/mode.go b/cmd/grafana-agent/mode.go deleted file mode 100644 index 7ff417bc48..0000000000 --- a/cmd/grafana-agent/mode.go +++ /dev/null @@ -1,30 +0,0 @@ -package main - -import ( - "fmt" - "os" -) - -type runMode int8 - -const ( - runModeInvalid runMode = iota - runModeStatic - runModeFlow -) - -func getRunMode() (runMode, error) { - key, found := os.LookupEnv("AGENT_MODE") - if !found { - return runModeStatic, nil - } - - switch key { - case "flow": - return runModeFlow, nil - case "static", "": - return runModeStatic, nil - default: - return runModeInvalid, fmt.Errorf("unrecognized run mode %q", key) - } -} diff --git a/cmd/grafana-agent/run-config.river b/cmd/grafana-agent/run-config.river deleted file mode 100644 index eaf6f795b0..0000000000 --- a/cmd/grafana-agent/run-config.river +++ /dev/null @@ -1,5 +0,0 @@ -server { - http { - listen_addr = "127.0.0.1:12381" - } -} diff --git a/cmd/grafana-agent/service.go b/cmd/grafana-agent/service.go deleted file mode 100644 index cbdbe1a73a..0000000000 --- a/cmd/grafana-agent/service.go +++ /dev/null @@ -1,15 +0,0 @@ -//go:build !windows - -package main - -// IsWindowsService returns whether the current process is running as a Windows -// Service. On non-Windows platforms, this always returns false. -func IsWindowsService() bool { - return false -} - -// RunService runs the current process as a Windows service. On non-Windows platforms, -// this is always a no-op. -func RunService() error { - return nil -} diff --git a/cmd/grafana-agent/service_windows.go b/cmd/grafana-agent/service_windows.go deleted file mode 100644 index 1c94c71272..0000000000 --- a/cmd/grafana-agent/service_windows.go +++ /dev/null @@ -1,109 +0,0 @@ -//go:build windows - -package main - -import ( - "flag" - "log" - "os" - - "github.com/go-kit/log/level" - "github.com/grafana/agent/internal/static/config" - "github.com/grafana/agent/internal/static/server" - util_log "github.com/grafana/agent/internal/util/log" - - "golang.org/x/sys/windows/svc" -) - -const cmdsAccepted = svc.AcceptStop | svc.AcceptShutdown - -// AgentService runs the Grafana Agent as a service. -type AgentService struct{} - -// Execute starts the AgentService. 
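-// It implements svc.Handler: the Windows service control manager calls
-// Execute once the service starts, state transitions are reported on the
-// changes channel, and stop/shutdown requests arrive on serviceRequests.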
-func (m *AgentService) Execute(args []string, serviceRequests <-chan svc.ChangeRequest, changes chan<- svc.Status) (ssec bool, errno uint32) {
- changes <- svc.Status{State: svc.StartPending}
-
- // Executable name and any command line parameters will be placed into os.Args; this comes from
- // registry key `Computer\HKEY_LOCAL_MACHINE\SYSTEM\ControlSet001\Services\\ImagePath`
- // oddly enough args is blank
-
- // Set up logging using default values before loading the config
- defaultServerCfg := server.DefaultConfig()
- logger := server.NewWindowsEventLogger(&defaultServerCfg)
-
- reloader := func(log *server.Logger) (*config.Config, error) {
- fs := flag.NewFlagSet(os.Args[0], flag.ExitOnError)
- return config.Load(fs, os.Args[1:], log)
- }
- cfg, err := reloader(logger)
- if err != nil {
- log.Fatalln(err)
- }
-
- // Pause is not accepted; we immediately set the service as running and trigger the entrypoint load in the background.
- // This is because the WAL is reloaded and the timeout for a Windows service starting is 30 seconds. In this case
- // the service is running but Agent may still be starting up, reading the WAL and doing other operations.
- changes <- svc.Status{State: svc.Running, Accepts: cmdsAccepted}
-
- // After this point we can start using go-kit logging.
- logger = server.NewWindowsEventLogger(cfg.Server)
- util_log.Logger = logger
-
- entrypointExit := make(chan error)
-
- // Kick off the server in the background so that we can respond to status queries
- var ep *Entrypoint
- go func() {
- ep, err = NewEntrypoint(logger, cfg, reloader)
- if err != nil {
- level.Error(logger).Log("msg", "error creating the agent server entrypoint", "err", err)
- os.Exit(1)
- }
- entrypointExit <- ep.Start()
- }()
-
-loop:
- for {
- select {
- case c := <-serviceRequests:
- switch c.Cmd {
- case svc.Interrogate:
- changes <- c.CurrentStatus
- case svc.Stop, svc.Shutdown:
- break loop
- case svc.Pause:
- case svc.Continue:
- default:
- break loop
- }
- case err := <-entrypointExit:
- level.Error(logger).Log("msg", "error while running agent server entrypoint", "err", err)
- break loop
- }
- }
- // There is a chance the entrypoint may not be set up yet; in that case we don't want to stop.
- // Since it is in another go func it may start after this has returned; in either case the program
- // will exit.
- if ep != nil {
- ep.Stop()
- }
- changes <- svc.Status{State: svc.StopPending}
- return
-}
-
-// IsWindowsService returns whether the current process is running as a Windows
-// Service. On non-Windows platforms, this always returns false.
-func IsWindowsService() bool {
- isService, err := svc.IsWindowsService()
- if err != nil {
- return false
- }
- return isService
-}
-
-// RunService runs the current process as a Windows service. On non-Windows platforms,
-// this is always a no-op.
-func RunService() error {
- return svc.Run(server.ServiceName, &AgentService{})
-}
diff --git a/cmd/grafana-agentctl/Dockerfile b/cmd/grafana-agentctl/Dockerfile
deleted file mode 100644
index a96ac1a6a8..0000000000
--- a/cmd/grafana-agentctl/Dockerfile
+++ /dev/null
@@ -1,38 +0,0 @@
-# syntax=docker/dockerfile:1.4
-
-# NOTE: This Dockerfile can only be built using BuildKit. BuildKit is used by
-# default when running `docker buildx build` or when DOCKER_BUILDKIT=1 is set
-# in environment variables.
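-#
-# A hypothetical manual build (the Makefile's agentctl-image target ran the
-# equivalent with its own image tag):
-#
-#   DOCKER_BUILDKIT=1 docker build -t grafana/agentctl:dev -f cmd/grafana-agentctl/Dockerfile .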
- -FROM --platform=$BUILDPLATFORM grafana/agent-build-image:0.32.0 as build -ARG BUILDPLATFORM -ARG TARGETPLATFORM -ARG TARGETOS -ARG TARGETARCH -ARG TARGETVARIANT -ARG RELEASE_BUILD=1 -ARG VERSION - -COPY . /src/agent -WORKDIR /src/agent - -RUN --mount=type=cache,target=/root/.cache/go-build \ - --mount=type=cache,target=/go/pkg/mod \ - GOOS="$TARGETOS" GOARCH="$TARGETARCH" GOARM="${TARGETVARIANT#v}" \ - RELEASE_BUILD="${RELEASE_BUILD}" VERSION="${VERSION}" \ - GO_TAGS="netgo promtail_journal_enabled" \ - make agentctl - -FROM ubuntu:mantic - -LABEL org.opencontainers.image.source="https://github.com/grafana/agent" - -# Install dependencies needed at runtime. -RUN < cardinality[j].Instances - }) - - fmt.Printf("Metric cardinality:\n\n") - - for _, metric := range cardinality { - fmt.Printf("%s: %d\n", metric.Metric, metric.Instances) - } - }, - } - - cmd.Flags().StringVarP(&jobLabel, "job", "j", "", "job label to search for") - cmd.Flags().StringVarP(&instanceLabel, "instance", "i", "", "instance label to search for") - must(cmd.MarkFlagRequired("job")) - must(cmd.MarkFlagRequired("instance")) - return cmd -} - -func walStatsCmd() *cobra.Command { - return &cobra.Command{ - Use: "wal-stats [WAL directory]", - Short: "Collect stats on the WAL", - Long: `wal-stats reads a WAL directory and collects information on the series and -samples within it. - -The "Hash Collisions" value refers to the number of ref IDs a label's hash was -assigned to. A non-zero amount of collisions has no negative effect on the data -sent to the Remote Write endpoint, but may have an impact on memory usage. Labels -may collide with multiple ref IDs normally if a series flaps (i.e., gets marked for -deletion but then comes back at some point).`, - Args: cobra.ExactArgs(1), - - Run: func(_ *cobra.Command, args []string) { - directory := args[0] - if _, err := os.Stat(directory); os.IsNotExist(err) { - fmt.Printf("%s does not exist\n", directory) - os.Exit(1) - } else if err != nil { - fmt.Printf("error getting wal: %v\n", err) - os.Exit(1) - } - - // Check if ./wal is a subdirectory, use that instead. 
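- // (You can pass either an instance directory or its wal/ subdirectory;
- // both resolve to the same WAL.)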
- if _, err := os.Stat(filepath.Join(directory, "wal")); err == nil { - directory = filepath.Join(directory, "wal") - } - - stats, err := waltools.CalculateStats(directory) - if err != nil { - fmt.Printf("failed to get WAL stats: %v\n", err) - os.Exit(1) - } - - fmt.Printf("Oldest Sample: %s\n", stats.From) - fmt.Printf("Newest Sample: %s\n", stats.To) - fmt.Printf("Total Series: %d\n", stats.Series()) - fmt.Printf("Total Samples: %d\n", stats.Samples()) - fmt.Printf("Hash Collisions: %d\n", stats.HashCollisions) - fmt.Printf("Invalid Refs: %d\n", stats.InvalidRefs) - fmt.Printf("Checkpoint Segment: %d\n", stats.CheckpointNumber) - fmt.Printf("First Segment: %d\n", stats.FirstSegment) - fmt.Printf("Latest Segment: %d\n", stats.LastSegment) - - fmt.Printf("\nPer-target stats:\n") - - table := tablewriter.NewWriter(os.Stdout) - defer table.Render() - - table.SetHeader([]string{"Job", "Instance", "Series", "Samples"}) - - sort.Sort(waltools.BySeriesCount(stats.Targets)) - - for _, t := range stats.Targets { - seriesStr := fmt.Sprintf("%d", t.Series) - samplesStr := fmt.Sprintf("%d", t.Samples) - table.Append([]string{t.Job, t.Instance, seriesStr, samplesStr}) - } - }, - } -} - -func operatorDetachCmd() *cobra.Command { - cmd := &cobra.Command{ - Use: "operator-detach", - Short: "Detaches any Operator-Managed resource so CRDs can temporarily be deleted", - Long: `operator-detach will find Grafana Agent Operator-Managed resources across the cluster and edit them to remove the OwnerReferences tying them to a GrafanaAgent CRD. This allows the CRDs to be modified without losing the deployment of Grafana Agents.`, - Args: cobra.ExactArgs(0), - - RunE: func(_ *cobra.Command, args []string) error { - logger := log.NewLogfmtLogger(log.NewSyncWriter(os.Stdout)) - scheme := runtime.NewScheme() - hadErrors := false - - for _, add := range []func(*runtime.Scheme) error{ - core_v1.AddToScheme, - apps_v1.AddToScheme, - } { - if err := add(scheme); err != nil { - return fmt.Errorf("unable to register scheme: %w", err) - } - } - - cli, err := kclient.New(kconfig.GetConfigOrDie(), kclient.Options{ - Scheme: scheme, - Mapper: nil, - }) - if err != nil { - return fmt.Errorf("unable to generate Kubernetes client: %w", err) - } - - // Resources to list - lists := []kclient.ObjectList{ - &apps_v1.StatefulSetList{}, - &apps_v1.DaemonSetList{}, - &core_v1.SecretList{}, - &core_v1.ServiceList{}, - } - for _, l := range lists { - gvk, err := apiutil.GVKForObject(l, scheme) - if err != nil { - return fmt.Errorf("failed to get GroupVersionKind: %w", err) - } - level.Info(logger).Log("msg", "getting objects for resource", "resource", gvk.Kind) - - err = cli.List(context.Background(), l, &kclient.ListOptions{ - LabelSelector: labels.Everything(), - FieldSelector: fields.Everything(), - Namespace: "", - }) - if err != nil { - level.Error(logger).Log("msg", "failed to list resource", "resource", gvk.Kind, "err", err) - hadErrors = true - continue - } - - elements, err := meta.ExtractList(l) - if err != nil { - level.Error(logger).Log("msg", "failed to get elements for resource", "resource", gvk.Kind, "err", err) - hadErrors = true - continue - } - for _, e := range elements { - obj := e.(kclient.Object) - - filtered, changed := filterAgentOwners(obj.GetOwnerReferences()) - if !changed { - continue - } - - level.Info(logger).Log("msg", "detaching ownerreferences for object", "resource", gvk.Kind, "namespace", obj.GetNamespace(), "name", obj.GetName()) - obj.SetOwnerReferences(filtered) - - if err := 
cli.Update(context.Background(), obj); err != nil { - level.Error(logger).Log("msg", "failed to update object", "resource", gvk.Kind, "namespace", obj.GetNamespace(), "name", obj.GetName(), "err", err) - hadErrors = true - continue - } - } - } - - if hadErrors { - return fmt.Errorf("encountered errors during execution") - } - return nil - }, - } - - return cmd -} - -func filterAgentOwners(refs []meta_v1.OwnerReference) (filtered []meta_v1.OwnerReference, changed bool) { - filtered = make([]meta_v1.OwnerReference, 0, len(refs)) - - for _, ref := range refs { - if ref.Kind == "GrafanaAgent" && strings.HasPrefix(ref.APIVersion, "monitoring.grafana.com/") { - changed = true - continue - } - filtered = append(filtered, ref) - } - return -} - -func testLogs() *cobra.Command { - cmd := &cobra.Command{ - Use: "test-logs [config file]", - Short: "Collect logs but print entries instead of sending them to Loki.", - Long: `Starts Promtail using its '--dry-run' flag, which will only print logs instead of sending them to the remote server. - This can be useful for debugging and understanding how logs are being parsed.`, - Args: cobra.ExactArgs(1), - - Run: func(_ *cobra.Command, args []string) { - file := args[0] - - cfg := config.Config{} - err := config.LoadFile(file, false, &cfg) - if err != nil { - fmt.Fprintf(os.Stderr, "failed to validate config: %s\n", err) - os.Exit(1) - } - - logger := log.NewLogfmtLogger(log.NewSyncWriter(os.Stderr)) - l, err := logs.New(prometheus.NewRegistry(), cfg.Logs, logger, true) - if err != nil { - fmt.Fprintf(os.Stderr, "failed to start log collection: %s\n", err) - os.Exit(1) - } - defer l.Stop() - - // Block until a shutdown signal is received. - sigs := make(chan os.Signal, 1) - signal.Notify(sigs, syscall.SIGINT, syscall.SIGTERM) - sig := <-sigs - fmt.Fprintf(os.Stderr, "received shutdown %v signal, stopping...", sig) - }, - } - - return cmd -} - -func must(err error) { - if err != nil { - panic(err) - } -} diff --git a/example/docker-compose/README.md b/example/docker-compose/README.md deleted file mode 100644 index 5480f028ff..0000000000 --- a/example/docker-compose/README.md +++ /dev/null @@ -1,58 +0,0 @@ -# Example - -This directory contains a Docker Compose 3 environment that can be used to test -Grafana Agent. - -By default, the following services are exposed: - -1. Mimir for storing metrics (localhost:9009) -2. Grafana for visualizing telemetry (localhost:3000) -3. Loki for storing logs (localhost:3100) -4. Tempo for storing traces (localhost:3200) -5. Avalanche for a sample /metrics endpoint to scrape (localhost:9001). - -Run the following to bring up the environment: - -``` -docker-compose up -d -``` - -By default, the Docker Compose environment doesn't include a Grafana Agent -container. This lets you test the agent externally, especially useful when -validating code changes. You can enable the included Grafana Agent by passing -`agent` to the profiles list: `docker compose --profile=agent up -d`. When -running, the Agent exposes its HTTP endpoint at localhost:12345. This address -can be changed with the `--server.http.address` flag (e.g., -`--server.http.address=127.0.0.1:8000`). - -The Docker Compose environment heavily relies on profiles to enable optional -features. You can pass multiple profiles by passing the flag multiple times: -`docker compose --profile agent --profile integrations up -d`. - -## Running Integrations - -The Docker Compose environment includes example services to point integrations -at (e.g., mysql, redis, consul, etc.). 
You can run all integrations using the -`integrations` profile, or run individual services with a profile name matching -the integration (i.e., enabling the `dnsmasq_exporter` profile to specifically -enable the dnsmasq service). - -Enabling specific integration profiles is useful when you only want to test a -single integration, as the `integrations` profile can be resource intensive. - -## Visualizing - -Grafana is exposed at `http://localhost:3000`, and includes some useful -dashboards: - -* The `Agent` dashboard gives a very high-level overview of running agents. - -* The `Agent Prometheus Remote Write` dashboard visualizes the current state of - writing metrics to Mimir. - -* The `Agent Tracing Pipeline` dashboard visualizes the current state of the - tracing pipeline (if spans are being processed). - -* The `Agent Operational` dashboard shows resource consumption of Grafana - Agent. Not all panels will have data here, as they rely on metrics from other - sources (i.e., cAdvisor). diff --git a/example/docker-compose/agent/config/agent.yaml b/example/docker-compose/agent/config/agent.yaml deleted file mode 100644 index e611de88f5..0000000000 --- a/example/docker-compose/agent/config/agent.yaml +++ /dev/null @@ -1,155 +0,0 @@ -# This file serves as an example agent configuration to interact with the -# docker compose environment. -# -# You should pass the following command line flags when running the agent -# locally and using this file: -# -# -enable-features=integrations-next -config.expand-env -config.enable-read-api -# -# -enable-features=integrations-next is required as the file is configured for -# the integrations revamp. -# -# -config.expand-env is required to expand environment variables. Environment -# variables are used when running the agent inside of docker-compose to connect -# to the other services. When running the agent externally, the expressions will -# default to the appropriate values of the exposed ports. -# -# -config.enable-read-api is optional, but allows you to invoke the /-/config -# endpoint to examine the generated config. - -server: - log_level: debug - -metrics: - global: - scrape_interval: 60s - remote_write: - - url: http://${REMOTE_WRITE_HOST:-localhost:9009}/api/v1/push - configs: - - name: default - scrape_configs: - - job_name: avalanche - static_configs: - - targets: ['${AVALANCHE_HOST:-localhost:9001}'] - -logs: - configs: - - name: default - clients: - - url: http://${LOKI_HOST:-localhost:3100}/loki/api/v1/push - positions: - filename: /tmp/positions.yaml - scrape_configs: - ## Uncomment to read logs from /var/log - - job_name: system - static_configs: - - targets: [localhost] - labels: - job: varlogs - __path__: /var/log/*log - -traces: - configs: - - name: default - remote_write: - - endpoint: ${TEMPO_HOST:-localhost:4317} - insecure: true - receivers: - jaeger: - protocols: - thrift_http: - -# -# Integrations -# -# Uncomment individual integrations below to enable them. Some integrations are -# enabled by default. -# - -integrations: - metrics: - autoscrape: - enable: true - metrics_instance: default - - # agent - agent: - # The Agent dashboards are written to assume Kubernetes, so we inject some - # fake Kubernetes labels here. 
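- # (These labels are added to the metrics scraped from this integration, so
- # the dashboards can group by cluster/namespace/pod as they would on
- # Kubernetes.)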
- extra_labels: - cluster: docker-compose - namespace: docker-compose - container: grafana-agent - pod: grafana-agent-${HOSTNAME:-example} - - ## node_exporter - # node_exporter: {} - - ## process - # process: - # process_names: - # - name: "{{.Comm}}" - # cmdline: - # - '.+' - - ## mysql (requires docker-compose mysql profile) - # mysql_configs: - # - data_source_name: root@(${MYSQL_HOST:-localhost:3306})/ - - ## postgres (requires docker-compose postgres profile) - # postgres_configs: - # - data_source_names: - # - postgresql://postgres:password@localhost:5432/postgres?sslmode=disable - - ## redis (requires docker-compose redis profile) - # redis_configs: - # - redis_addr: ${REDIS_HOST:-localhost:6379} - - ## dnsmasq (requires docker-compose dnsmasq profile) - # dnsmasq_configs: - # - dnsmasq_address: ${DNSMASQ_HOST:-localhost:30053} - # leases_path: /tmp/dnsmasq-leases/dnsmasq.leases - - ## memcached (requires docker-compose memcached profile) - # memcached_configs: - # - memcached_address: ${MEMCACHED_HOST:-localhost:11211} - # timeout: 10s - - ## statsd - # statsd: {} - - ## consul (requires docker-compose consul profile) - # consul_configs: - # - server: http://${CONSUL_HOST:-localhost:8500} - - ## elasticsearch (requires docker-compose elasticsearch profile) - # elasticsearch_configs: - # - address: http://${ELASTICSEARCH_HOST:-localhost:9200} - - ## kafka (requires docker-compose kafka profile) - # kafka_configs: - # - kafka_uris: [${KAFKA_HOST:-localhost:9093}] - - ## github (requires docker-compose github profile) - # github_configs: - # - repositories: - # - grafana/agent - - ## mongodb (requires docker-compose mongodb profile) - # mongodb_configs: - # - mongodb_uri: mongodb://${MONGODB_HOST:-mongodb:27017} - # relabel_configs: - # - source_labels: [__address__] - # target_label: service_name - # replacement: 'mongodb' - # - source_labels: [__address__] - # target_label: mongodb_cluster - # replacement: 'mongodb-cluster' - - ## cadvisor - # cadvisor: - # disabled_metrics: - # - disk - # enabled_metrics: - # - percpu - diff --git a/example/docker-compose/docker-compose.yaml b/example/docker-compose/docker-compose.yaml deleted file mode 100644 index c3b278b4d4..0000000000 --- a/example/docker-compose/docker-compose.yaml +++ /dev/null @@ -1,233 +0,0 @@ -version: "3" -services: - # - # Core services. These services allow a Grafana Agent to send data somewhere - # and visualize it in Grafana. 
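- # Each backend publishes its port to the host (Grafana on 3000, Loki on
- # 3100, Tempo on 3200, Mimir on 9009), so an agent running outside of
- # Docker Compose can reach the same endpoints.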
- # - # Backends: grafana, loki, mimir, tempo - # Example services: avalanche - # - - grafana: - image: grafana/grafana:10.0.3 - entrypoint: - - /usr/share/grafana/bin/grafana-server - - --homepath=/usr/share/grafana - - --config=/etc/grafana-config/grafana.ini - volumes: - - ./grafana/config:/etc/grafana-config - - ./grafana/datasources:/etc/grafana/provisioning/datasources - - ./grafana/dashboards-provisioning:/etc/grafana/provisioning/dashboards - - ./grafana/dashboards:/var/lib/grafana/dashboards - ports: - - "3000:3000" - - loki: - image: grafana/loki:2.8.3 - command: -config.file=/etc/loki/local-config.yaml - ports: - - "3100:3100" - - mimir: - image: grafana/mimir:2.9.0 - volumes: - - ./mimir/config:/etc/mimir-config - entrypoint: - - /bin/mimir - - -config.file=/etc/mimir-config/mimir.yaml - ports: - - "9009:9009" - - tempo: - image: grafana/tempo:2.1.0 - command: - - "-storage.trace.backend=local" # tell tempo where to permanently put traces - - "-storage.trace.local.path=/tmp/tempo/traces" - - "-storage.trace.wal.path=/tmp/tempo/wal" # tell tempo where to store the wal - - "-auth.enabled=false" # disables the requirement for the X-Scope-OrgID header - - "-server.http-listen-port=3200" - ports: - - "3200:3200" - - "4317:4317" - - avalanche: - image: quay.io/freshtracks.io/avalanche:latest - command: - - --metric-count=3000 - - --series-interval=3600 - - --metric-interval=7200 - ports: - - "9001:9001" - - hotrod: - profiles: [agent] - image: yurishkuro/microsim:latest - ports: - - "8080:8080" - command: - - "-j=http://agent:14268/api/traces" - - "-d=1h" - depends_on: - - agent - - # - # Optional Grafana Agent which can collect telemetry and send it to - # Loki/Mimir/Tempo. - # - # Enable with the "agent" profile. - # - - agent: - profiles: [agent] - image: grafana/agent:latest - volumes: - - ./agent/config:/etc/agent-config - entrypoint: - - /bin/grafana-agent - - -server.http.address=0.0.0.0:12345 - - -config.file=/etc/agent-config/agent.yaml - - -metrics.wal-directory=/tmp/agent/wal - - -enable-features=integrations-next - - -config.expand-env - - -config.enable-read-api - environment: - HOSTNAME: agent - REMOTE_WRITE_HOST: mimir:9009 - LOKI_HOST: loki:3100 - TEMPO_HOST: tempo:4317 - AVALANCHE_HOST: avalanche:9001 - MYSQL_HOST: mysql:3306 - POSTGRES_HOST: postgres:5432 - REDIS_HOST: redis:6379 - DNSMASQ_HOST: dnsmasq:53 - MEMCACHED_HOST: memcached:11211 - CONSUL_HOST: consul:8500 - ELASTICSEARCH_HOST: elasticsearch:9200 - KAFKA_HOST: kafka:9093 - MONGODB_HOST: mongodb:27017 - ports: - - "12345:12345" - depends_on: - - mimir - - loki - - tempo - - # - # Integrations. These services act as sample SUOs that you can test - # integrations against. - # - # They are disabled by default. Enable the "integrations" profile to enable - # all of them, or pass an integration by name (i.e., mysql) to enable a - # specific one. 
- # - - mysql: - profiles: [integrations,mysql] - image: mysql/mysql-server:5.7 - environment: - - MYSQL_ALLOW_EMPTY_PASSWORD=yes - - MYSQL_ROOT_HOST=% - ports: - - 127.0.0.1:3306:3306 - - postgres: - profiles: [integrations,postgres] - image: postgres:13.0 - environment: - - POSTGRES_USER=postgres - - POSTGRES_PASSWORD=password - ports: - - 5432:5432 - - redis: - profiles: [integrations,redis] - image: redis:6 - ports: - - "6379:6379" - - dnsmasq: - profiles: [integrations,dnsmasq] - image: andyshinn/dnsmasq:2.81 - cap_add: [NET_ADMIN] - volumes: - - /tmp/dnsmasq-leases:/var/lib/misc - ports: - - "30053:53/udp" - - memcached: - profiles: [integrations,memcached] - image: memcached - ports: - - "11211:11211" - - consul: - profiles: [integrations,consul] - image: hashicorp/consul - ports: - - "8500:8500" - - elasticsearch: - profiles: [integrations,elasticsearch] - image: docker.elastic.co/elasticsearch/elasticsearch:7.10.1 - environment: - - node.name=elasticsearch - - cluster.name=es-grafana-agent-cluster - - discovery.type=single-node - volumes: - - elasticsearch_data:/usr/share/elasticsearch/data - ports: - - "9200:9200" - - zookeeper: - profiles: [integrations,zookeeper] - image: wurstmeister/zookeeper:3.4.6 - expose: - - "2181" - restart: always - - kafka: - profiles: [integrations,kafka] - image: wurstmeister/kafka:2.12-2.3.0 - depends_on: - - zookeeper - ports: - - "127.0.0.1:9093:9093" - expose: - - "9092" - - "9094" - environment: - KAFKA_CREATE_TOPICS: "sarama_topic:2:1" - KAFKA_ADVERTISED_LISTENERS: INSIDE://kafka:9092,OUTSIDE://127.0.0.1:9093,DOCKER://kafka:9094 - KAFKA_LISTENER_SECURITY_PROTOCOL_MAP: INSIDE:PLAINTEXT,OUTSIDE:PLAINTEXT,DOCKER:PLAINTEXT - KAFKA_LISTENERS: INSIDE://kafka:9092,OUTSIDE://:9093,DOCKER://kafka:9094 - KAFKA_ZOOKEEPER_CONNECT: zookeeper:2181 - KAFKA_INTER_BROKER_LISTENER_NAME: INSIDE - restart: always - - kafka-producer: - profiles: [integrations,kafka] - image: gaantunes/kafka-client:latest - depends_on: - - kafka - command: - --producer --kafka.server kafka:9094 - restart: always - - kafka-consumer: - profiles: [integrations,kafka] - image: gaantunes/kafka-client:latest - depends_on: - - kafka - command: - --consumer --kafka.server kafka:9094 - restart: always - - mongodb: - profiles: [integrations,mongodb] - image: mongo:4.2 - ports: - - "127.0.0.1:27017:27017" - -volumes: - elasticsearch_data: - driver: local diff --git a/example/docker-compose/grafana/config/grafana.ini b/example/docker-compose/grafana/config/grafana.ini deleted file mode 100644 index ba1078c518..0000000000 --- a/example/docker-compose/grafana/config/grafana.ini +++ /dev/null @@ -1,9 +0,0 @@ -[analytics] -reporting_enabled = false -[auth.anonymous] -enabled = true -org_role = Admin -[explore] -enabled = true -[users] -default_theme = dark diff --git a/example/docker-compose/grafana/dashboards-provisioning/dashboards.yaml b/example/docker-compose/grafana/dashboards-provisioning/dashboards.yaml deleted file mode 100644 index c038adf8e3..0000000000 --- a/example/docker-compose/grafana/dashboards-provisioning/dashboards.yaml +++ /dev/null @@ -1,14 +0,0 @@ -apiVersion: 1 - -providers: -- name: 'dashboards' - orgId: 1 - folder: '' - folderUid: '' - type: file - disableDeletion: true - editable: true - updateIntervalSeconds: 10 - allowUiUpdates: false - options: - path: /var/lib/grafana/dashboards diff --git a/example/docker-compose/grafana/dashboards/agent-logs-pipeline.json b/example/docker-compose/grafana/dashboards/agent-logs-pipeline.json deleted file mode 100644 index 
305136a5dd..0000000000 --- a/example/docker-compose/grafana/dashboards/agent-logs-pipeline.json +++ /dev/null @@ -1,1082 +0,0 @@ -{ - "__inputs": [ ], - "__requires": [ ], - "annotations": { - "list": [ ] - }, - "editable": true, - "gnetId": null, - "graphTooltip": 0, - "hideControls": false, - "id": null, - "links": [ ], - "refresh": "30s", - "rows": [ - { - "collapse": false, - "collapsed": false, - "height": 500, - "panels": [ - { - "aliasColors": { }, - "bars": false, - "dashLength": 10, - "dashes": false, - "datasource": "$datasource", - "fill": 1, - "fillGradient": 0, - "gridPos": { }, - "id": 2, - "interval": "1m", - "legend": { - "alignAsTable": false, - "avg": false, - "current": false, - "max": false, - "min": false, - "rightSide": false, - "show": true, - "sideWidth": null, - "total": false, - "values": false - }, - "lines": true, - "linewidth": 1, - "links": [ ], - "nullPointMode": "null", - "percentage": false, - "pointradius": 5, - "points": false, - "renderer": "flot", - "repeat": null, - "seriesOverrides": [ ], - "spaceLength": 10, - "span": 6, - "stack": true, - "steppedLine": false, - "targets": [ - { - "expr": "sum by($groupBy) (rate(promtail_dropped_bytes_total{cluster=~\"$cluster\",namespace=~\"$namespace\",container=~\"$container\",pod=~\"$pod\"}[$__rate_interval]))\n", - "format": "time_series", - "intervalFactor": 2, - "legendFormat": "{{$groupBy}}", - "refId": "A" - } - ], - "thresholds": [ ], - "timeFrom": null, - "timeShift": null, - "title": "Dropped bytes rate [B/s]", - "tooltip": { - "shared": true, - "sort": 0, - "value_type": "individual" - }, - "type": "graph", - "xaxis": { - "buckets": null, - "mode": "time", - "name": null, - "show": true, - "values": [ ] - }, - "yaxes": [ - { - "format": "Bps", - "label": null, - "logBase": 1, - "max": null, - "min": null, - "show": true - }, - { - "format": "Bps", - "label": null, - "logBase": 1, - "max": null, - "min": null, - "show": true - } - ] - }, - { - "aliasColors": { }, - "bars": false, - "dashLength": 10, - "dashes": false, - "datasource": "$datasource", - "fill": 0, - "fillGradient": 0, - "gridPos": { }, - "id": 3, - "interval": "1m", - "legend": { - "alignAsTable": false, - "avg": false, - "current": false, - "max": false, - "min": false, - "rightSide": false, - "show": true, - "sideWidth": null, - "total": false, - "values": false - }, - "lines": true, - "linewidth": 1, - "links": [ ], - "nullPointMode": "null", - "percentage": false, - "pointradius": 5, - "points": false, - "renderer": "flot", - "repeat": null, - "seriesOverrides": [ ], - "spaceLength": 10, - "span": 6, - "stack": false, - "steppedLine": false, - "targets": [ - { - "expr": "sum by($groupBy) (rate(promtail_request_duration_seconds_bucket{status_code=~\"2..\", cluster=~\"$cluster\",namespace=~\"$namespace\",container=~\"$container\",pod=~\"$pod\"}[$__rate_interval]))\n/\nsum by($groupBy) (rate(promtail_request_duration_seconds_bucket{cluster=~\"$cluster\",namespace=~\"$namespace\",container=~\"$container\",pod=~\"$pod\"}[$__rate_interval]))\n* 100\n", - "format": "time_series", - "intervalFactor": 2, - "legendFormat": "{{$groupBy}}", - "refId": "A" - } - ], - "thresholds": [ ], - "timeFrom": null, - "timeShift": null, - "title": "Write requests success rate [%]", - "tooltip": { - "shared": true, - "sort": 0, - "value_type": "individual" - }, - "type": "graph", - "xaxis": { - "buckets": null, - "mode": "time", - "name": null, - "show": true, - "values": [ ] - }, - "yaxes": [ - { - "format": "%", - "label": null, - "logBase": 1, - "max": null, 
- "min": null, - "show": true - }, - { - "format": "%", - "label": null, - "logBase": 1, - "max": null, - "min": null, - "show": true - } - ] - } - ], - "repeat": null, - "repeatIteration": null, - "repeatRowId": null, - "showTitle": true, - "title": "Errors", - "titleSize": "h6", - "type": "row" - }, - { - "collapse": false, - "collapsed": false, - "height": 500, - "panels": [ - { - "aliasColors": { }, - "bars": false, - "dashLength": 10, - "dashes": false, - "datasource": "$datasource", - "fill": 0, - "fillGradient": 0, - "gridPos": { }, - "id": 4, - "interval": "1m", - "legend": { - "alignAsTable": false, - "avg": false, - "current": false, - "max": false, - "min": false, - "rightSide": false, - "show": true, - "sideWidth": null, - "total": false, - "values": false - }, - "lines": true, - "linewidth": 1, - "links": [ ], - "nullPointMode": "null", - "percentage": false, - "pointradius": 5, - "points": false, - "renderer": "flot", - "repeat": null, - "seriesOverrides": [ ], - "spaceLength": 10, - "span": 6, - "stack": false, - "steppedLine": false, - "targets": [ - { - "expr": "histogram_quantile(\n 0.990000, \n sum by (le, $groupBy)\n (rate(promtail_request_duration_seconds_bucket{cluster=~\"$cluster\",namespace=~\"$namespace\",container=~\"$container\",pod=~\"$pod\"}[$__rate_interval]))\n)\n", - "format": "time_series", - "intervalFactor": 2, - "legendFormat": "{{$groupBy}}", - "refId": "A" - } - ], - "thresholds": [ ], - "timeFrom": null, - "timeShift": null, - "title": "Write latencies p99 [s]", - "tooltip": { - "shared": true, - "sort": 0, - "value_type": "individual" - }, - "type": "graph", - "xaxis": { - "buckets": null, - "mode": "time", - "name": null, - "show": true, - "values": [ ] - }, - "yaxes": [ - { - "format": "s", - "label": null, - "logBase": 1, - "max": null, - "min": null, - "show": true - }, - { - "format": "s", - "label": null, - "logBase": 1, - "max": null, - "min": null, - "show": true - } - ] - }, - { - "aliasColors": { }, - "bars": false, - "dashLength": 10, - "dashes": false, - "datasource": "$datasource", - "fill": 0, - "fillGradient": 0, - "gridPos": { }, - "id": 5, - "interval": "1m", - "legend": { - "alignAsTable": false, - "avg": false, - "current": false, - "max": false, - "min": false, - "rightSide": false, - "show": true, - "sideWidth": null, - "total": false, - "values": false - }, - "lines": true, - "linewidth": 1, - "links": [ ], - "nullPointMode": "null", - "percentage": false, - "pointradius": 5, - "points": false, - "renderer": "flot", - "repeat": null, - "seriesOverrides": [ ], - "spaceLength": 10, - "span": 6, - "stack": false, - "steppedLine": false, - "targets": [ - { - "expr": "histogram_quantile(\n 0.900000, \n sum by (le, $groupBy)\n (rate(promtail_request_duration_seconds_bucket{cluster=~\"$cluster\",namespace=~\"$namespace\",container=~\"$container\",pod=~\"$pod\"}[$__rate_interval]))\n)\n", - "format": "time_series", - "intervalFactor": 2, - "legendFormat": "{{$groupBy}}", - "refId": "A" - } - ], - "thresholds": [ ], - "timeFrom": null, - "timeShift": null, - "title": "Write latencies p90 [s]", - "tooltip": { - "shared": true, - "sort": 0, - "value_type": "individual" - }, - "type": "graph", - "xaxis": { - "buckets": null, - "mode": "time", - "name": null, - "show": true, - "values": [ ] - }, - "yaxes": [ - { - "format": "s", - "label": null, - "logBase": 1, - "max": null, - "min": null, - "show": true - }, - { - "format": "s", - "label": null, - "logBase": 1, - "max": null, - "min": null, - "show": true - } - ] - }, - { - "aliasColors": 
{ }, - "bars": false, - "dashLength": 10, - "dashes": false, - "datasource": "$datasource", - "fill": 0, - "fillGradient": 0, - "gridPos": { }, - "id": 6, - "interval": "1m", - "legend": { - "alignAsTable": false, - "avg": false, - "current": false, - "max": false, - "min": false, - "rightSide": false, - "show": true, - "sideWidth": null, - "total": false, - "values": false - }, - "lines": true, - "linewidth": 1, - "links": [ ], - "nullPointMode": "null", - "percentage": false, - "pointradius": 5, - "points": false, - "renderer": "flot", - "repeat": null, - "seriesOverrides": [ ], - "spaceLength": 10, - "span": 6, - "stack": false, - "steppedLine": false, - "targets": [ - { - "expr": "histogram_quantile(\n 0.500000, \n sum by (le, $groupBy)\n (rate(promtail_request_duration_seconds_bucket{cluster=~\"$cluster\",namespace=~\"$namespace\",container=~\"$container\",pod=~\"$pod\"}[$__rate_interval]))\n)\n", - "format": "time_series", - "intervalFactor": 2, - "legendFormat": "{{$groupBy}}", - "refId": "A" - } - ], - "thresholds": [ ], - "timeFrom": null, - "timeShift": null, - "title": "Write latencies p50 [s]", - "tooltip": { - "shared": true, - "sort": 0, - "value_type": "individual" - }, - "type": "graph", - "xaxis": { - "buckets": null, - "mode": "time", - "name": null, - "show": true, - "values": [ ] - }, - "yaxes": [ - { - "format": "s", - "label": null, - "logBase": 1, - "max": null, - "min": null, - "show": true - }, - { - "format": "s", - "label": null, - "logBase": 1, - "max": null, - "min": null, - "show": true - } - ] - }, - { - "aliasColors": { }, - "bars": false, - "dashLength": 10, - "dashes": false, - "datasource": "$datasource", - "fill": 0, - "fillGradient": 0, - "gridPos": { }, - "id": 7, - "interval": "1m", - "legend": { - "alignAsTable": false, - "avg": false, - "current": false, - "max": false, - "min": false, - "rightSide": false, - "show": true, - "sideWidth": null, - "total": false, - "values": false - }, - "lines": true, - "linewidth": 1, - "links": [ ], - "nullPointMode": "null", - "percentage": false, - "pointradius": 5, - "points": false, - "renderer": "flot", - "repeat": null, - "seriesOverrides": [ ], - "spaceLength": 10, - "span": 6, - "stack": false, - "steppedLine": false, - "targets": [ - { - "expr": "(sum by (le, $groupBy) (rate(promtail_request_duration_seconds_sum{cluster=~\"$cluster\",namespace=~\"$namespace\",container=~\"$container\",pod=~\"$pod\"}[$__rate_interval])))\n/\n(sum by (le, $groupBy) (rate(promtail_request_duration_seconds_count{cluster=~\"$cluster\",namespace=~\"$namespace\",container=~\"$container\",pod=~\"$pod\"}[$__rate_interval])))\n", - "format": "time_series", - "intervalFactor": 2, - "legendFormat": "{{$groupBy}}", - "refId": "A" - } - ], - "thresholds": [ ], - "timeFrom": null, - "timeShift": null, - "title": "Write latencies average [s]", - "tooltip": { - "shared": true, - "sort": 0, - "value_type": "individual" - }, - "type": "graph", - "xaxis": { - "buckets": null, - "mode": "time", - "name": null, - "show": true, - "values": [ ] - }, - "yaxes": [ - { - "format": "s", - "label": null, - "logBase": 1, - "max": null, - "min": null, - "show": true - }, - { - "format": "s", - "label": null, - "logBase": 1, - "max": null, - "min": null, - "show": true - } - ] - } - ], - "repeat": null, - "repeatIteration": null, - "repeatRowId": null, - "showTitle": true, - "title": "Latencies", - "titleSize": "h6", - "type": "row" - }, - { - "collapse": false, - "collapsed": false, - "height": 500, - "panels": [ - { - "aliasColors": { }, - "bars": 
false, - "dashLength": 10, - "dashes": false, - "datasource": "$datasource", - "fill": 1, - "fillGradient": 0, - "gridPos": { }, - "id": 8, - "interval": "1m", - "legend": { - "alignAsTable": false, - "avg": false, - "current": false, - "max": false, - "min": false, - "rightSide": false, - "show": true, - "sideWidth": null, - "total": false, - "values": false - }, - "lines": true, - "linewidth": 1, - "links": [ ], - "nullPointMode": "null", - "percentage": false, - "pointradius": 5, - "points": false, - "renderer": "flot", - "repeat": null, - "seriesOverrides": [ ], - "spaceLength": 10, - "span": 6, - "stack": true, - "steppedLine": false, - "targets": [ - { - "expr": "sum by($groupBy) (rate(promtail_read_bytes_total{cluster=~\"$cluster\",namespace=~\"$namespace\",container=~\"$container\",pod=~\"$pod\"}[$__rate_interval]))\n", - "format": "time_series", - "intervalFactor": 2, - "legendFormat": "{{$groupBy}}", - "refId": "A" - } - ], - "thresholds": [ ], - "timeFrom": null, - "timeShift": null, - "title": "Bytes read rate [B/s]", - "tooltip": { - "shared": true, - "sort": 0, - "value_type": "individual" - }, - "type": "graph", - "xaxis": { - "buckets": null, - "mode": "time", - "name": null, - "show": true, - "values": [ ] - }, - "yaxes": [ - { - "format": "Bps", - "label": null, - "logBase": 1, - "max": null, - "min": null, - "show": true - }, - { - "format": "Bps", - "label": null, - "logBase": 1, - "max": null, - "min": null, - "show": true - } - ] - }, - { - "aliasColors": { }, - "bars": false, - "dashLength": 10, - "dashes": false, - "datasource": "$datasource", - "fill": 1, - "fillGradient": 0, - "gridPos": { }, - "id": 9, - "interval": "1m", - "legend": { - "alignAsTable": false, - "avg": false, - "current": false, - "max": false, - "min": false, - "rightSide": false, - "show": true, - "sideWidth": null, - "total": false, - "values": false - }, - "lines": true, - "linewidth": 1, - "links": [ ], - "nullPointMode": "null", - "percentage": false, - "pointradius": 5, - "points": false, - "renderer": "flot", - "repeat": null, - "seriesOverrides": [ ], - "spaceLength": 10, - "span": 6, - "stack": true, - "steppedLine": false, - "targets": [ - { - "expr": "sum by($groupBy) (rate(promtail_read_lines_total{cluster=~\"$cluster\",namespace=~\"$namespace\",container=~\"$container\",pod=~\"$pod\"}[$__rate_interval]))\n", - "format": "time_series", - "intervalFactor": 2, - "legendFormat": "{{$groupBy}}", - "refId": "A" - } - ], - "thresholds": [ ], - "timeFrom": null, - "timeShift": null, - "title": "Lines read rate [lines/s]", - "tooltip": { - "shared": true, - "sort": 0, - "value_type": "individual" - }, - "type": "graph", - "xaxis": { - "buckets": null, - "mode": "time", - "name": null, - "show": true, - "values": [ ] - }, - "yaxes": [ - { - "format": "short", - "label": null, - "logBase": 1, - "max": null, - "min": null, - "show": true - }, - { - "format": "short", - "label": null, - "logBase": 1, - "max": null, - "min": null, - "show": true - } - ] - }, - { - "aliasColors": { }, - "bars": false, - "dashLength": 10, - "dashes": false, - "datasource": "$datasource", - "fill": 1, - "fillGradient": 0, - "gridPos": { }, - "id": 10, - "interval": "1m", - "legend": { - "alignAsTable": false, - "avg": false, - "current": false, - "max": false, - "min": false, - "rightSide": false, - "show": true, - "sideWidth": null, - "total": false, - "values": false - }, - "lines": true, - "linewidth": 1, - "links": [ ], - "nullPointMode": "null", - "percentage": false, - "pointradius": 5, - "points": false, - 
"renderer": "flot", - "repeat": null, - "seriesOverrides": [ ], - "spaceLength": 10, - "span": 6, - "stack": true, - "steppedLine": false, - "targets": [ - { - "expr": "sum by($groupBy) (promtail_files_active_total{cluster=~\"$cluster\",namespace=~\"$namespace\",container=~\"$container\",pod=~\"$pod\"})\n", - "format": "time_series", - "intervalFactor": 2, - "legendFormat": "{{$groupBy}}", - "refId": "A" - } - ], - "thresholds": [ ], - "timeFrom": null, - "timeShift": null, - "title": "Active files count", - "tooltip": { - "shared": true, - "sort": 0, - "value_type": "individual" - }, - "type": "graph", - "xaxis": { - "buckets": null, - "mode": "time", - "name": null, - "show": true, - "values": [ ] - }, - "yaxes": [ - { - "format": "short", - "label": null, - "logBase": 1, - "max": null, - "min": null, - "show": true - }, - { - "format": "short", - "label": null, - "logBase": 1, - "max": null, - "min": null, - "show": true - } - ] - }, - { - "aliasColors": { }, - "bars": false, - "dashLength": 10, - "dashes": false, - "datasource": "$datasource", - "fill": 1, - "fillGradient": 0, - "gridPos": { }, - "id": 11, - "interval": "1m", - "legend": { - "alignAsTable": false, - "avg": false, - "current": false, - "max": false, - "min": false, - "rightSide": false, - "show": true, - "sideWidth": null, - "total": false, - "values": false - }, - "lines": true, - "linewidth": 1, - "links": [ ], - "nullPointMode": "null", - "percentage": false, - "pointradius": 5, - "points": false, - "renderer": "flot", - "repeat": null, - "seriesOverrides": [ ], - "spaceLength": 10, - "span": 6, - "stack": true, - "steppedLine": false, - "targets": [ - { - "expr": "sum by($groupBy) (rate(promtail_sent_entries_total{cluster=~\"$cluster\",namespace=~\"$namespace\",container=~\"$container\",pod=~\"$pod\"}[$__rate_interval]))\n", - "format": "time_series", - "intervalFactor": 2, - "legendFormat": "{{$groupBy}}", - "refId": "A" - } - ], - "thresholds": [ ], - "timeFrom": null, - "timeShift": null, - "title": "Entries sent rate [entries/s]", - "tooltip": { - "shared": true, - "sort": 0, - "value_type": "individual" - }, - "type": "graph", - "xaxis": { - "buckets": null, - "mode": "time", - "name": null, - "show": true, - "values": [ ] - }, - "yaxes": [ - { - "format": "short", - "label": null, - "logBase": 1, - "max": null, - "min": null, - "show": true - }, - { - "format": "short", - "label": null, - "logBase": 1, - "max": null, - "min": null, - "show": true - } - ] - } - ], - "repeat": null, - "repeatIteration": null, - "repeatRowId": null, - "showTitle": true, - "title": "Logs volume", - "titleSize": "h6", - "type": "row" - } - ], - "schemaVersion": 14, - "style": "dark", - "tags": [ - "grafana-agent-mixin" - ], - "templating": { - "list": [ - { - "hide": 0, - "label": null, - "name": "datasource", - "options": [ ], - "query": "prometheus", - "refresh": 1, - "regex": "", - "type": "datasource" - }, - { - "allValue": null, - "current": { - "text": { - "selected": true, - "text": "All", - "value": "$__all" - }, - "value": { - "selected": true, - "text": "All", - "value": "$__all" - } - }, - "datasource": "$datasource", - "hide": 0, - "includeAll": true, - "label": null, - "multi": false, - "name": "cluster", - "options": [ ], - "query": "label_values(agent_build_info, cluster)", - "refresh": 2, - "regex": "", - "sort": 0, - "tagValuesQuery": "", - "tags": [ ], - "tagsQuery": "", - "type": "query", - "useTags": false - }, - { - "allValue": null, - "current": { - "text": { - "selected": true, - "text": "All", - "value": 
"$__all" - }, - "value": { - "selected": true, - "text": "All", - "value": "$__all" - } - }, - "datasource": "$datasource", - "hide": 0, - "includeAll": true, - "label": null, - "multi": false, - "name": "namespace", - "options": [ ], - "query": "label_values(agent_build_info, namespace)", - "refresh": 2, - "regex": "", - "sort": 0, - "tagValuesQuery": "", - "tags": [ ], - "tagsQuery": "", - "type": "query", - "useTags": false - }, - { - "allValue": null, - "current": { - "text": { - "selected": true, - "text": "All", - "value": "$__all" - }, - "value": { - "selected": true, - "text": "All", - "value": "$__all" - } - }, - "datasource": "$datasource", - "hide": 0, - "includeAll": true, - "label": null, - "multi": false, - "name": "container", - "options": [ ], - "query": "label_values(agent_build_info, container)", - "refresh": 2, - "regex": "", - "sort": 0, - "tagValuesQuery": "", - "tags": [ ], - "tagsQuery": "", - "type": "query", - "useTags": false - }, - { - "allValue": null, - "current": { - "text": { - "selected": true, - "text": "All", - "value": "$__all" - }, - "value": { - "selected": true, - "text": "All", - "value": "$__all" - } - }, - "datasource": "$datasource", - "hide": 0, - "includeAll": true, - "label": null, - "multi": false, - "name": "pod", - "options": [ ], - "query": "label_values(agent_build_info{container=~\"$container\"}, pod)", - "refresh": 2, - "regex": "", - "sort": 0, - "tagValuesQuery": "", - "tags": [ ], - "tagsQuery": "", - "type": "query", - "useTags": false - }, - { - "allValue": null, - "current": { - "text": "pod", - "value": "pod" - }, - "hide": 0, - "includeAll": false, - "label": "", - "multi": false, - "name": "groupBy", - "options": [ - { - "text": "pod", - "value": "pod" - }, - { - "text": "cluster", - "value": "cluster" - }, - { - "text": "namespace", - "value": "namespace" - } - ], - "query": "pod,cluster,namespace", - "refresh": 0, - "type": "custom" - } - ] - }, - "time": { - "from": "now-1h", - "to": "now" - }, - "timepicker": { - "refresh_intervals": [ - "5s", - "10s", - "30s", - "1m", - "5m", - "15m", - "30m", - "1h", - "2h", - "1d" - ], - "time_options": [ - "5m", - "15m", - "1h", - "6h", - "12h", - "24h", - "2d", - "7d", - "30d" - ] - }, - "timezone": "", - "title": "Agent Logs Pipeline", - "version": 0 -} diff --git a/example/docker-compose/grafana/dashboards/agent-operational.json b/example/docker-compose/grafana/dashboards/agent-operational.json deleted file mode 100644 index 6cc2a65a47..0000000000 --- a/example/docker-compose/grafana/dashboards/agent-operational.json +++ /dev/null @@ -1,1189 +0,0 @@ -{ - "annotations": { - "list": [ ] - }, - "editable": true, - "gnetId": null, - "graphTooltip": 0, - "hideControls": false, - "links": [ ], - "refresh": "30s", - "rows": [ - { - "collapse": false, - "height": "250px", - "panels": [ - { - "aliasColors": { }, - "bars": false, - "dashLength": 10, - "dashes": false, - "datasource": "$datasource", - "fill": 1, - "id": 1, - "legend": { - "avg": false, - "current": false, - "max": false, - "min": false, - "show": true, - "total": false, - "values": false - }, - "lines": true, - "linewidth": 1, - "links": [ ], - "nullPointMode": "null as zero", - "percentage": false, - "pointradius": 5, - "points": false, - "renderer": "flot", - "seriesOverrides": [ ], - "spaceLength": 10, - "span": 2, - "stack": false, - "steppedLine": false, - "targets": [ - { - "expr": "rate(go_gc_duration_seconds_count{cluster=~\"$cluster\", namespace=~\"$namespace\", container=~\"$container\", pod=~\"$pod\"}[5m])", - 
"format": "time_series", - "intervalFactor": 2, - "legendFormat": "{{pod}}", - "legendLink": null, - "step": 10 - } - ], - "thresholds": [ ], - "timeFrom": null, - "timeShift": null, - "title": "GCs [count/s]", - "tooltip": { - "shared": true, - "sort": 2, - "value_type": "individual" - }, - "type": "graph", - "xaxis": { - "buckets": null, - "mode": "time", - "name": null, - "show": true, - "values": [ ] - }, - "yaxes": [ - { - "format": "short", - "label": null, - "logBase": 1, - "max": null, - "min": 0, - "show": true - }, - { - "format": "short", - "label": null, - "logBase": 1, - "max": null, - "min": null, - "show": false - } - ] - }, - { - "aliasColors": { }, - "bars": false, - "dashLength": 10, - "dashes": false, - "datasource": "$datasource", - "fill": 1, - "id": 2, - "legend": { - "avg": false, - "current": false, - "max": false, - "min": false, - "show": true, - "total": false, - "values": false - }, - "lines": true, - "linewidth": 1, - "links": [ ], - "nullPointMode": "null as zero", - "percentage": false, - "pointradius": 5, - "points": false, - "renderer": "flot", - "seriesOverrides": [ ], - "spaceLength": 10, - "span": 2, - "stack": false, - "steppedLine": false, - "targets": [ - { - "expr": "go_memstats_heap_inuse_bytes{cluster=~\"$cluster\", namespace=~\"$namespace\", container=~\"$container\", pod=~\"$pod\"}", - "format": "time_series", - "intervalFactor": 2, - "legendFormat": "{{pod}}", - "legendLink": null, - "step": 10 - } - ], - "thresholds": [ ], - "timeFrom": null, - "timeShift": null, - "title": "Go Heap In Use", - "tooltip": { - "shared": true, - "sort": 2, - "value_type": "individual" - }, - "type": "graph", - "xaxis": { - "buckets": null, - "mode": "time", - "name": null, - "show": true, - "values": [ ] - }, - "yaxes": [ - { - "format": "decbytes", - "label": null, - "logBase": 1, - "max": null, - "min": 0, - "show": true - }, - { - "format": "short", - "label": null, - "logBase": 1, - "max": null, - "min": null, - "show": false - } - ] - }, - { - "aliasColors": { }, - "bars": false, - "dashLength": 10, - "dashes": false, - "datasource": "$datasource", - "fill": 1, - "id": 3, - "legend": { - "avg": false, - "current": false, - "max": false, - "min": false, - "show": true, - "total": false, - "values": false - }, - "lines": true, - "linewidth": 1, - "links": [ ], - "nullPointMode": "null as zero", - "percentage": false, - "pointradius": 5, - "points": false, - "renderer": "flot", - "seriesOverrides": [ ], - "spaceLength": 10, - "span": 2, - "stack": false, - "steppedLine": false, - "targets": [ - { - "expr": "go_goroutines{cluster=~\"$cluster\", namespace=~\"$namespace\", container=~\"$container\", pod=~\"$pod\"}", - "format": "time_series", - "intervalFactor": 2, - "legendFormat": "{{pod}}", - "legendLink": null, - "step": 10 - } - ], - "thresholds": [ ], - "timeFrom": null, - "timeShift": null, - "title": "Goroutines", - "tooltip": { - "shared": true, - "sort": 2, - "value_type": "individual" - }, - "type": "graph", - "xaxis": { - "buckets": null, - "mode": "time", - "name": null, - "show": true, - "values": [ ] - }, - "yaxes": [ - { - "format": "short", - "label": null, - "logBase": 1, - "max": null, - "min": 0, - "show": true - }, - { - "format": "short", - "label": null, - "logBase": 1, - "max": null, - "min": null, - "show": false - } - ] - }, - { - "aliasColors": { }, - "bars": false, - "dashLength": 10, - "dashes": false, - "datasource": "$datasource", - "fill": 1, - "id": 4, - "legend": { - "avg": false, - "current": false, - "max": false, - "min": false, 
- "show": true, - "total": false, - "values": false - }, - "lines": true, - "linewidth": 1, - "links": [ ], - "nullPointMode": "null as zero", - "percentage": false, - "pointradius": 5, - "points": false, - "renderer": "flot", - "seriesOverrides": [ ], - "spaceLength": 10, - "span": 2, - "stack": false, - "steppedLine": false, - "targets": [ - { - "expr": "rate(container_cpu_usage_seconds_total{cluster=~\"$cluster\", namespace=~\"$namespace\", container=~\"$container\", pod=~\"$pod\"}[5m])", - "format": "time_series", - "intervalFactor": 2, - "legendFormat": "{{pod}}", - "legendLink": null, - "step": 10 - } - ], - "thresholds": [ ], - "timeFrom": null, - "timeShift": null, - "title": "CPU Usage [time/s]", - "tooltip": { - "shared": true, - "sort": 2, - "value_type": "individual" - }, - "type": "graph", - "xaxis": { - "buckets": null, - "mode": "time", - "name": null, - "show": true, - "values": [ ] - }, - "yaxes": [ - { - "format": "short", - "label": null, - "logBase": 1, - "max": null, - "min": 0, - "show": true - }, - { - "format": "short", - "label": null, - "logBase": 1, - "max": null, - "min": null, - "show": false - } - ] - }, - { - "aliasColors": { }, - "bars": false, - "dashLength": 10, - "dashes": false, - "datasource": "$datasource", - "fill": 1, - "id": 5, - "legend": { - "avg": false, - "current": false, - "max": false, - "min": false, - "show": true, - "total": false, - "values": false - }, - "lines": true, - "linewidth": 1, - "links": [ ], - "nullPointMode": "null as zero", - "percentage": false, - "pointradius": 5, - "points": false, - "renderer": "flot", - "seriesOverrides": [ ], - "spaceLength": 10, - "span": 2, - "stack": false, - "steppedLine": false, - "targets": [ - { - "expr": "container_memory_working_set_bytes{cluster=~\"$cluster\", namespace=~\"$namespace\", container=~\"$container\", pod=~\"$pod\"}", - "format": "time_series", - "intervalFactor": 2, - "legendFormat": "{{pod}}", - "legendLink": null, - "step": 10 - } - ], - "thresholds": [ ], - "timeFrom": null, - "timeShift": null, - "title": "Working Set Size", - "tooltip": { - "shared": true, - "sort": 2, - "value_type": "individual" - }, - "type": "graph", - "xaxis": { - "buckets": null, - "mode": "time", - "name": null, - "show": true, - "values": [ ] - }, - "yaxes": [ - { - "format": "decbytes", - "label": null, - "logBase": 1, - "max": null, - "min": 0, - "show": true - }, - { - "format": "short", - "label": null, - "logBase": 1, - "max": null, - "min": null, - "show": false - } - ] - }, - { - "aliasColors": { }, - "bars": false, - "dashLength": 10, - "dashes": false, - "datasource": "$datasource", - "fill": 1, - "id": 6, - "legend": { - "avg": false, - "current": false, - "max": false, - "min": false, - "show": true, - "total": false, - "values": false - }, - "lines": true, - "linewidth": 1, - "links": [ ], - "nullPointMode": "null as zero", - "percentage": false, - "pointradius": 5, - "points": false, - "renderer": "flot", - "seriesOverrides": [ ], - "spaceLength": 10, - "span": 2, - "stack": false, - "steppedLine": false, - "targets": [ - { - "expr": "rate(promtail_custom_bad_words_total{cluster=~\"$cluster\", exported_namespace=~\"$namespace\", exported_job=~\"$job\"}[5m])", - "format": "time_series", - "intervalFactor": 2, - "legendFormat": "{{job}}", - "legendLink": null, - "step": 10 - } - ], - "thresholds": [ ], - "timeFrom": null, - "timeShift": null, - "title": "Promtail Bad Words", - "tooltip": { - "shared": true, - "sort": 2, - "value_type": "individual" - }, - "type": "graph", - "xaxis": { - 
"buckets": null, - "mode": "time", - "name": null, - "show": true, - "values": [ ] - }, - "yaxes": [ - { - "format": "short", - "label": null, - "logBase": 1, - "max": null, - "min": 0, - "show": true - }, - { - "format": "short", - "label": null, - "logBase": 1, - "max": null, - "min": null, - "show": false - } - ] - } - ], - "repeat": null, - "repeatIteration": null, - "repeatRowId": null, - "showTitle": true, - "title": "General", - "titleSize": "h6" - }, - { - "collapse": false, - "height": "250px", - "panels": [ - { - "aliasColors": { }, - "bars": false, - "dashLength": 10, - "dashes": false, - "datasource": "$datasource", - "fill": 1, - "id": 7, - "legend": { - "avg": false, - "current": false, - "max": false, - "min": false, - "show": true, - "total": false, - "values": false - }, - "lines": true, - "linewidth": 1, - "links": [ ], - "nullPointMode": "null as zero", - "percentage": false, - "pointradius": 5, - "points": false, - "renderer": "flot", - "seriesOverrides": [ ], - "spaceLength": 10, - "span": 6, - "stack": false, - "steppedLine": false, - "targets": [ - { - "expr": "sum by (pod) (rate(container_network_receive_bytes_total{cluster=~\"$cluster\", namespace=~\"$namespace\", pod=~\"$pod\"}[5m]))", - "format": "time_series", - "intervalFactor": 2, - "legendFormat": "{{pod}}", - "legendLink": null, - "step": 10 - } - ], - "thresholds": [ ], - "timeFrom": null, - "timeShift": null, - "title": "Received Bytes [B/s]", - "tooltip": { - "shared": true, - "sort": 2, - "value_type": "individual" - }, - "type": "graph", - "xaxis": { - "buckets": null, - "mode": "time", - "name": null, - "show": true, - "values": [ ] - }, - "yaxes": [ - { - "format": "Bps", - "label": null, - "logBase": 1, - "max": null, - "min": 0, - "show": true - }, - { - "format": "short", - "label": null, - "logBase": 1, - "max": null, - "min": null, - "show": false - } - ] - }, - { - "aliasColors": { }, - "bars": false, - "dashLength": 10, - "dashes": false, - "datasource": "$datasource", - "fill": 1, - "id": 8, - "legend": { - "avg": false, - "current": false, - "max": false, - "min": false, - "show": true, - "total": false, - "values": false - }, - "lines": true, - "linewidth": 1, - "links": [ ], - "nullPointMode": "null as zero", - "percentage": false, - "pointradius": 5, - "points": false, - "renderer": "flot", - "seriesOverrides": [ ], - "spaceLength": 10, - "span": 6, - "stack": false, - "steppedLine": false, - "targets": [ - { - "expr": "sum by (pod) (rate(container_network_transmit_bytes_total{cluster=~\"$cluster\", namespace=~\"$namespace\", pod=~\"$pod\"}[5m]))", - "format": "time_series", - "intervalFactor": 2, - "legendFormat": "{{pod}}", - "legendLink": null, - "step": 10 - } - ], - "thresholds": [ ], - "timeFrom": null, - "timeShift": null, - "title": "Transmitted Bytes [B/s]", - "tooltip": { - "shared": true, - "sort": 2, - "value_type": "individual" - }, - "type": "graph", - "xaxis": { - "buckets": null, - "mode": "time", - "name": null, - "show": true, - "values": [ ] - }, - "yaxes": [ - { - "format": "Bps", - "label": null, - "logBase": 1, - "max": null, - "min": 0, - "show": true - }, - { - "format": "short", - "label": null, - "logBase": 1, - "max": null, - "min": null, - "show": false - } - ] - } - ], - "repeat": null, - "repeatIteration": null, - "repeatRowId": null, - "showTitle": true, - "title": "Network", - "titleSize": "h6" - }, - { - "collapse": false, - "height": "250px", - "panels": [ - { - "aliasColors": { }, - "bars": false, - "dashLength": 10, - "dashes": false, - "datasource": 
"$datasource", - "fill": 1, - "id": 9, - "legend": { - "avg": false, - "current": false, - "max": false, - "min": false, - "show": true, - "total": false, - "values": false - }, - "lines": true, - "linewidth": 1, - "links": [ ], - "nullPointMode": "null as zero", - "percentage": false, - "pointradius": 5, - "points": false, - "renderer": "flot", - "seriesOverrides": [ ], - "spaceLength": 10, - "span": 2, - "stack": false, - "steppedLine": false, - "targets": [ - { - "expr": "(sum by (pod) (avg_over_time(go_memstats_heap_inuse_bytes{cluster=~\"$cluster\", namespace=~\"$namespace\", container=~\"$container\", pod=~\"$pod\"}[1m])))\n/\n(sum by (pod) (agent_wal_storage_active_series{cluster=~\"$cluster\", namespace=~\"$namespace\", container=~\"$container\", pod=~\"$pod\"}))\n", - "format": "time_series", - "intervalFactor": 2, - "legendFormat": "{{pod}}", - "legendLink": null, - "step": 10 - } - ], - "thresholds": [ ], - "timeFrom": null, - "timeShift": null, - "title": "Heap Used per Series per Pod", - "tooltip": { - "shared": true, - "sort": 2, - "value_type": "individual" - }, - "type": "graph", - "xaxis": { - "buckets": null, - "mode": "time", - "name": null, - "show": true, - "values": [ ] - }, - "yaxes": [ - { - "format": "decbytes", - "label": null, - "logBase": 1, - "max": null, - "min": 0, - "show": true - }, - { - "format": "short", - "label": null, - "logBase": 1, - "max": null, - "min": null, - "show": false - } - ] - }, - { - "aliasColors": { }, - "bars": false, - "dashLength": 10, - "dashes": false, - "datasource": "$datasource", - "fill": 1, - "id": 10, - "legend": { - "avg": false, - "current": false, - "max": false, - "min": false, - "show": true, - "total": false, - "values": false - }, - "lines": true, - "linewidth": 1, - "links": [ ], - "nullPointMode": "null as zero", - "percentage": false, - "pointradius": 5, - "points": false, - "renderer": "flot", - "seriesOverrides": [ ], - "spaceLength": 10, - "span": 2, - "stack": false, - "steppedLine": false, - "targets": [ - { - "expr": "(sum by (container) (avg_over_time(go_memstats_heap_inuse_bytes{cluster=~\"$cluster\", namespace=~\"$namespace\", container=~\"$container\", pod=~\"$pod\"}[1m])))\n/\n(sum by (container) (agent_wal_storage_active_series{cluster=~\"$cluster\", namespace=~\"$namespace\", container=~\"$container\", pod=~\"$pod\"}))\n", - "format": "time_series", - "intervalFactor": 2, - "legendFormat": "{{container}}", - "legendLink": null, - "step": 10 - } - ], - "thresholds": [ ], - "timeFrom": null, - "timeShift": null, - "title": "Avg Heap Used per Series", - "tooltip": { - "shared": true, - "sort": 2, - "value_type": "individual" - }, - "type": "graph", - "xaxis": { - "buckets": null, - "mode": "time", - "name": null, - "show": true, - "values": [ ] - }, - "yaxes": [ - { - "format": "decbytes", - "label": null, - "logBase": 1, - "max": null, - "min": 0, - "show": true - }, - { - "format": "short", - "label": null, - "logBase": 1, - "max": null, - "min": null, - "show": false - } - ] - }, - { - "aliasColors": { }, - "bars": false, - "dashLength": 10, - "dashes": false, - "datasource": "$datasource", - "fill": 1, - "id": 11, - "legend": { - "avg": false, - "current": false, - "max": false, - "min": false, - "show": true, - "total": false, - "values": false - }, - "lines": true, - "linewidth": 1, - "links": [ ], - "nullPointMode": "null as zero", - "percentage": false, - "pointradius": 5, - "points": false, - "renderer": "flot", - "seriesOverrides": [ ], - "spaceLength": 10, - "span": 2, - "stack": false, - 
"steppedLine": false, - "targets": [ - { - "expr": "sum by (pod) (agent_wal_storage_active_series{cluster=~\"$cluster\", namespace=~\"$namespace\", container=~\"$container\", pod=~\"$pod\"})", - "format": "time_series", - "intervalFactor": 2, - "legendFormat": "{{pod}}", - "legendLink": null, - "step": 10 - } - ], - "thresholds": [ ], - "timeFrom": null, - "timeShift": null, - "title": "Series Count per Pod", - "tooltip": { - "shared": true, - "sort": 2, - "value_type": "individual" - }, - "type": "graph", - "xaxis": { - "buckets": null, - "mode": "time", - "name": null, - "show": true, - "values": [ ] - }, - "yaxes": [ - { - "format": "short", - "label": null, - "logBase": 1, - "max": null, - "min": 0, - "show": true - }, - { - "format": "short", - "label": null, - "logBase": 1, - "max": null, - "min": null, - "show": false - } - ] - }, - { - "aliasColors": { }, - "bars": false, - "dashLength": 10, - "dashes": false, - "datasource": "$datasource", - "fill": 1, - "id": 12, - "legend": { - "avg": false, - "current": false, - "max": false, - "min": false, - "show": true, - "total": false, - "values": false - }, - "lines": true, - "linewidth": 1, - "links": [ ], - "nullPointMode": "null as zero", - "percentage": false, - "pointradius": 5, - "points": false, - "renderer": "flot", - "seriesOverrides": [ ], - "spaceLength": 10, - "span": 2, - "stack": false, - "steppedLine": false, - "targets": [ - { - "expr": "sum by (instance_group_name) (agent_wal_storage_active_series{cluster=~\"$cluster\", namespace=~\"$namespace\", container=~\"$container\", pod=~\"$pod\"})", - "format": "time_series", - "intervalFactor": 2, - "legendFormat": "{{instance_group_name}}", - "legendLink": null, - "step": 10 - } - ], - "thresholds": [ ], - "timeFrom": null, - "timeShift": null, - "title": "Series per Config", - "tooltip": { - "shared": true, - "sort": 2, - "value_type": "individual" - }, - "type": "graph", - "xaxis": { - "buckets": null, - "mode": "time", - "name": null, - "show": true, - "values": [ ] - }, - "yaxes": [ - { - "format": "short", - "label": null, - "logBase": 1, - "max": null, - "min": 0, - "show": true - }, - { - "format": "short", - "label": null, - "logBase": 1, - "max": null, - "min": null, - "show": false - } - ] - }, - { - "aliasColors": { }, - "bars": false, - "dashLength": 10, - "dashes": false, - "datasource": "$datasource", - "fill": 1, - "id": 13, - "legend": { - "avg": false, - "current": false, - "max": false, - "min": false, - "show": true, - "total": false, - "values": false - }, - "lines": true, - "linewidth": 1, - "links": [ ], - "nullPointMode": "null as zero", - "percentage": false, - "pointradius": 5, - "points": false, - "renderer": "flot", - "seriesOverrides": [ ], - "spaceLength": 10, - "span": 2, - "stack": false, - "steppedLine": false, - "targets": [ - { - "expr": "sum by (container) (agent_wal_storage_active_series{cluster=~\"$cluster\", namespace=~\"$namespace\", container=~\"$container\", pod=~\"$pod\"})", - "format": "time_series", - "intervalFactor": 2, - "legendFormat": "{{container}}", - "legendLink": null, - "step": 10 - } - ], - "thresholds": [ ], - "timeFrom": null, - "timeShift": null, - "title": "Total Series", - "tooltip": { - "shared": true, - "sort": 2, - "value_type": "individual" - }, - "type": "graph", - "xaxis": { - "buckets": null, - "mode": "time", - "name": null, - "show": true, - "values": [ ] - }, - "yaxes": [ - { - "format": "short", - "label": null, - "logBase": 1, - "max": null, - "min": 0, - "show": true - }, - { - "format": "short", - 
"label": null, - "logBase": 1, - "max": null, - "min": null, - "show": false - } - ] - } - ], - "repeat": null, - "repeatIteration": null, - "repeatRowId": null, - "showTitle": true, - "title": "Prometheus Read", - "titleSize": "h6" - } - ], - "schemaVersion": 14, - "style": "dark", - "tags": [ - "grafana-agent-mixin" - ], - "templating": { - "list": [ - { - "current": { - "text": "default", - "value": "default" - }, - "hide": 0, - "label": "Data Source", - "name": "datasource", - "options": [ ], - "query": "prometheus", - "refresh": 1, - "regex": "", - "type": "datasource" - }, - { - "allValue": ".+", - "current": { - "selected": true, - "text": "All", - "value": "$__all" - }, - "datasource": "$datasource", - "hide": 0, - "includeAll": true, - "label": "cluster", - "multi": true, - "name": "cluster", - "options": [ ], - "query": "label_values(agent_build_info, cluster)", - "refresh": 1, - "regex": "", - "sort": 2, - "tagValuesQuery": "", - "tags": [ ], - "tagsQuery": "", - "type": "query", - "useTags": false - }, - { - "allValue": ".+", - "current": { - "selected": true, - "text": "All", - "value": "$__all" - }, - "datasource": "$datasource", - "hide": 0, - "includeAll": true, - "label": "namespace", - "multi": true, - "name": "namespace", - "options": [ ], - "query": "label_values(agent_build_info{cluster=~\"$cluster\"}, namespace)", - "refresh": 1, - "regex": "", - "sort": 2, - "tagValuesQuery": "", - "tags": [ ], - "tagsQuery": "", - "type": "query", - "useTags": false - }, - { - "allValue": ".+", - "current": { - "selected": true, - "text": "All", - "value": "$__all" - }, - "datasource": "$datasource", - "hide": 0, - "includeAll": true, - "label": "container", - "multi": true, - "name": "container", - "options": [ ], - "query": "label_values(agent_build_info{cluster=~\"$cluster\", namespace=\"$namespace\"}, container)", - "refresh": 1, - "regex": "", - "sort": 2, - "tagValuesQuery": "", - "tags": [ ], - "tagsQuery": "", - "type": "query", - "useTags": false - }, - { - "allValue": ".+", - "current": { - "selected": true, - "text": "All", - "value": "$__all" - }, - "datasource": "$datasource", - "hide": 0, - "includeAll": true, - "label": "pod", - "multi": true, - "name": "pod", - "options": [ ], - "query": "label_values(agent_build_info{cluster=~\"$cluster\", namespace=\"$namespace\", container=\"$container\"}, pod)", - "refresh": 1, - "regex": "", - "sort": 2, - "tagValuesQuery": "", - "tags": [ ], - "tagsQuery": "", - "type": "query", - "useTags": false - } - ] - }, - "time": { - "from": "now-1h", - "to": "now" - }, - "timepicker": { - "refresh_intervals": [ - "5s", - "10s", - "30s", - "1m", - "5m", - "15m", - "30m", - "1h", - "2h", - "1d" - ], - "time_options": [ - "5m", - "15m", - "1h", - "6h", - "12h", - "24h", - "2d", - "7d", - "30d" - ] - }, - "timezone": "", - "title": "Agent Operational", - "uid": "", - "version": 0 -} diff --git a/example/docker-compose/grafana/dashboards/agent-remote-write.json b/example/docker-compose/grafana/dashboards/agent-remote-write.json deleted file mode 100644 index f222774607..0000000000 --- a/example/docker-compose/grafana/dashboards/agent-remote-write.json +++ /dev/null @@ -1,1512 +0,0 @@ -{ - "__inputs": [ ], - "__requires": [ ], - "annotations": { - "list": [ ] - }, - "editable": true, - "gnetId": null, - "graphTooltip": 0, - "hideControls": false, - "id": null, - "links": [ ], - "refresh": "30s", - "rows": [ - { - "collapse": false, - "collapsed": false, - "panels": [ - { - "aliasColors": { }, - "bars": false, - "dashLength": 10, - "dashes": 
false, - "datasource": "$datasource", - "fill": 1, - "fillGradient": 0, - "gridPos": { }, - "id": 2, - "legend": { - "alignAsTable": false, - "avg": false, - "current": false, - "max": false, - "min": false, - "rightSide": false, - "show": true, - "sideWidth": null, - "total": false, - "values": false - }, - "lines": true, - "linewidth": 1, - "links": [ ], - "nullPointMode": "null", - "percentage": false, - "pointradius": 5, - "points": false, - "renderer": "flot", - "repeat": null, - "seriesOverrides": [ ], - "spaceLength": 10, - "span": 6, - "stack": false, - "steppedLine": false, - "targets": [ - { - "expr": "(\n prometheus_remote_storage_highest_timestamp_in_seconds{cluster=~\"$cluster\", namespace=~\"$namespace\", container=~\"$container\"}\n -\n ignoring(url, remote_name) group_right(pod)\n prometheus_remote_storage_queue_highest_sent_timestamp_seconds{cluster=~\"$cluster\", namespace=~\"$namespace\", container=~\"$container\"}\n)\n", - "format": "time_series", - "intervalFactor": 2, - "legendFormat": "{{cluster}}:{{pod}}-{{instance_group_name}}-{{url}}", - "refId": "A" - } - ], - "thresholds": [ ], - "timeFrom": null, - "timeShift": null, - "title": "Highest Timestamp In vs. Highest Timestamp Sent", - "tooltip": { - "shared": true, - "sort": 0, - "value_type": "individual" - }, - "type": "graph", - "xaxis": { - "buckets": null, - "mode": "time", - "name": null, - "show": true, - "values": [ ] - }, - "yaxes": [ - { - "format": "short", - "label": null, - "logBase": 1, - "max": null, - "min": null, - "show": true - }, - { - "format": "short", - "label": null, - "logBase": 1, - "max": null, - "min": null, - "show": true - } - ] - }, - { - "aliasColors": { }, - "bars": false, - "dashLength": 10, - "dashes": false, - "datasource": "$datasource", - "fill": 1, - "fillGradient": 0, - "gridPos": { }, - "id": 3, - "legend": { - "alignAsTable": false, - "avg": false, - "current": false, - "max": false, - "min": false, - "rightSide": false, - "show": true, - "sideWidth": null, - "total": false, - "values": false - }, - "lines": true, - "linewidth": 1, - "links": [ ], - "nullPointMode": "null", - "percentage": false, - "pointradius": 5, - "points": false, - "renderer": "flot", - "repeat": null, - "seriesOverrides": [ ], - "spaceLength": 10, - "span": 6, - "stack": false, - "steppedLine": false, - "targets": [ - { - "expr": "rate(prometheus_remote_storage_sent_batch_duration_seconds_sum{cluster=~\"$cluster\", namespace=~\"$namespace\", container=~\"$container\"}[1m]) / rate(prometheus_remote_storage_sent_batch_duration_seconds_count{cluster=~\"$cluster\", namespace=~\"$namespace\", container=~\"$container\"}[1m])", - "format": "time_series", - "intervalFactor": 2, - "legendFormat": "mean {{cluster}}:{{pod}}-{{instance_group_name}}-{{url}}", - "refId": "A" - }, - { - "expr": "histogram_quantile(0.99, rate(prometheus_remote_storage_sent_batch_duration_seconds_bucket{cluster=~\"$cluster\", namespace=~\"$namespace\", container=~\"$container\"}[1m]))", - "format": "time_series", - "intervalFactor": 2, - "legendFormat": "p99 {{cluster}}:{{pod}}-{{instance_group_name}}-{{url}}", - "refId": "B" - } - ], - "thresholds": [ ], - "timeFrom": null, - "timeShift": null, - "title": "Latency [1m]", - "tooltip": { - "shared": true, - "sort": 0, - "value_type": "individual" - }, - "type": "graph", - "xaxis": { - "buckets": null, - "mode": "time", - "name": null, - "show": true, - "values": [ ] - }, - "yaxes": [ - { - "format": "short", - "label": null, - "logBase": 1, - "max": null, - "min": null, - "show": true - 
}, - { - "format": "short", - "label": null, - "logBase": 1, - "max": null, - "min": null, - "show": true - } - ] - } - ], - "repeat": null, - "repeatIteration": null, - "repeatRowId": null, - "showTitle": true, - "title": "Timestamps", - "titleSize": "h6", - "type": "row" - }, - { - "collapse": false, - "collapsed": false, - "panels": [ - { - "aliasColors": { }, - "bars": false, - "dashLength": 10, - "dashes": false, - "datasource": "$datasource", - "fill": 1, - "fillGradient": 0, - "gridPos": { }, - "id": 4, - "legend": { - "alignAsTable": false, - "avg": false, - "current": false, - "max": false, - "min": false, - "rightSide": false, - "show": true, - "sideWidth": null, - "total": false, - "values": false - }, - "lines": true, - "linewidth": 1, - "links": [ ], - "nullPointMode": "null", - "percentage": false, - "pointradius": 5, - "points": false, - "renderer": "flot", - "repeat": null, - "seriesOverrides": [ ], - "spaceLength": 10, - "span": 6, - "stack": false, - "steppedLine": false, - "targets": [ - { - "expr": "rate(agent_wal_samples_appended_total{cluster=~\"$cluster\", namespace=~\"$namespace\", container=~\"$container\"}[5m])", - "format": "time_series", - "intervalFactor": 2, - "legendFormat": "{{cluster}}:{{pod}}-{{instance_group_name}}-{{url}}", - "refId": "A" - } - ], - "thresholds": [ ], - "timeFrom": null, - "timeShift": null, - "title": "Rate in [5m]", - "tooltip": { - "shared": true, - "sort": 0, - "value_type": "individual" - }, - "type": "graph", - "xaxis": { - "buckets": null, - "mode": "time", - "name": null, - "show": true, - "values": [ ] - }, - "yaxes": [ - { - "format": "short", - "label": null, - "logBase": 1, - "max": null, - "min": null, - "show": true - }, - { - "format": "short", - "label": null, - "logBase": 1, - "max": null, - "min": null, - "show": true - } - ] - }, - { - "aliasColors": { }, - "bars": false, - "dashLength": 10, - "dashes": false, - "datasource": "$datasource", - "fill": 1, - "fillGradient": 0, - "gridPos": { }, - "id": 5, - "legend": { - "alignAsTable": false, - "avg": false, - "current": false, - "max": false, - "min": false, - "rightSide": false, - "show": true, - "sideWidth": null, - "total": false, - "values": false - }, - "lines": true, - "linewidth": 1, - "links": [ ], - "nullPointMode": "null", - "percentage": false, - "pointradius": 5, - "points": false, - "renderer": "flot", - "repeat": null, - "seriesOverrides": [ ], - "spaceLength": 10, - "span": 6, - "stack": false, - "steppedLine": false, - "targets": [ - { - "expr": "rate(prometheus_remote_storage_succeeded_samples_total{cluster=~\"$cluster\", namespace=~\"$namespace\", container=~\"$container\"}[5m]) or rate(prometheus_remote_storage_samples_total{cluster=~\"$cluster\", namespace=~\"$namespace\", container=~\"$container\"}[5m])", - "format": "time_series", - "intervalFactor": 2, - "legendFormat": "{{cluster}}:{{pod}}-{{instance_group_name}}-{{url}}", - "refId": "A" - } - ], - "thresholds": [ ], - "timeFrom": null, - "timeShift": null, - "title": "Rate succeeded [5m]", - "tooltip": { - "shared": true, - "sort": 0, - "value_type": "individual" - }, - "type": "graph", - "xaxis": { - "buckets": null, - "mode": "time", - "name": null, - "show": true, - "values": [ ] - }, - "yaxes": [ - { - "format": "short", - "label": null, - "logBase": 1, - "max": null, - "min": null, - "show": true - }, - { - "format": "short", - "label": null, - "logBase": 1, - "max": null, - "min": null, - "show": true - } - ] - }, - { - "aliasColors": { }, - "bars": false, - "dashLength": 10, - "dashes": 
false, - "datasource": "$datasource", - "fill": 1, - "fillGradient": 0, - "gridPos": { }, - "id": 6, - "legend": { - "alignAsTable": false, - "avg": false, - "current": false, - "max": false, - "min": false, - "rightSide": false, - "show": true, - "sideWidth": null, - "total": false, - "values": false - }, - "lines": true, - "linewidth": 1, - "links": [ ], - "nullPointMode": "null", - "percentage": false, - "pointradius": 5, - "points": false, - "renderer": "flot", - "repeat": null, - "seriesOverrides": [ ], - "spaceLength": 10, - "span": 6, - "stack": false, - "steppedLine": false, - "targets": [ - { - "expr": "prometheus_remote_storage_samples_pending{cluster=~\"$cluster\", namespace=~\"$namespace\", container=~\"$container\"}", - "format": "time_series", - "intervalFactor": 2, - "legendFormat": "{{cluster}}:{{pod}}-{{instance_group_name}}-{{url}}", - "refId": "A" - } - ], - "thresholds": [ ], - "timeFrom": null, - "timeShift": null, - "title": "Pending Samples", - "tooltip": { - "shared": true, - "sort": 0, - "value_type": "individual" - }, - "type": "graph", - "xaxis": { - "buckets": null, - "mode": "time", - "name": null, - "show": true, - "values": [ ] - }, - "yaxes": [ - { - "format": "short", - "label": null, - "logBase": 1, - "max": null, - "min": null, - "show": true - }, - { - "format": "short", - "label": null, - "logBase": 1, - "max": null, - "min": null, - "show": true - } - ] - }, - { - "aliasColors": { }, - "bars": false, - "dashLength": 10, - "dashes": false, - "datasource": "$datasource", - "fill": 1, - "fillGradient": 0, - "gridPos": { }, - "id": 7, - "legend": { - "alignAsTable": false, - "avg": false, - "current": false, - "max": false, - "min": false, - "rightSide": false, - "show": true, - "sideWidth": null, - "total": false, - "values": false - }, - "lines": true, - "linewidth": 1, - "links": [ ], - "nullPointMode": "null", - "percentage": false, - "pointradius": 5, - "points": false, - "renderer": "flot", - "repeat": null, - "seriesOverrides": [ ], - "spaceLength": 10, - "span": 6, - "stack": false, - "steppedLine": false, - "targets": [ - { - "expr": "rate(prometheus_remote_storage_samples_dropped_total{cluster=~\"$cluster\", namespace=~\"$namespace\", container=~\"$container\"}[5m])", - "format": "time_series", - "intervalFactor": 2, - "legendFormat": "{{cluster}}:{{pod}}-{{instance_group_name}}-{{url}}", - "refId": "A" - } - ], - "thresholds": [ ], - "timeFrom": null, - "timeShift": null, - "title": "Dropped Samples", - "tooltip": { - "shared": true, - "sort": 0, - "value_type": "individual" - }, - "type": "graph", - "xaxis": { - "buckets": null, - "mode": "time", - "name": null, - "show": true, - "values": [ ] - }, - "yaxes": [ - { - "format": "short", - "label": null, - "logBase": 1, - "max": null, - "min": null, - "show": true - }, - { - "format": "short", - "label": null, - "logBase": 1, - "max": null, - "min": null, - "show": true - } - ] - }, - { - "aliasColors": { }, - "bars": false, - "dashLength": 10, - "dashes": false, - "datasource": "$datasource", - "fill": 1, - "fillGradient": 0, - "gridPos": { }, - "id": 8, - "legend": { - "alignAsTable": false, - "avg": false, - "current": false, - "max": false, - "min": false, - "rightSide": false, - "show": true, - "sideWidth": null, - "total": false, - "values": false - }, - "lines": true, - "linewidth": 1, - "links": [ ], - "nullPointMode": "null", - "percentage": false, - "pointradius": 5, - "points": false, - "renderer": "flot", - "repeat": null, - "seriesOverrides": [ ], - "spaceLength": 10, - "span": 6, - 
"stack": false, - "steppedLine": false, - "targets": [ - { - "expr": "rate(prometheus_remote_storage_samples_failed_total{cluster=~\"$cluster\", namespace=~\"$namespace\", container=~\"$container\"}[5m])", - "format": "time_series", - "intervalFactor": 2, - "legendFormat": "{{cluster}}:{{pod}}-{{instance_group_name}}-{{url}}", - "refId": "A" - } - ], - "thresholds": [ ], - "timeFrom": null, - "timeShift": null, - "title": "Failed Samples", - "tooltip": { - "shared": true, - "sort": 0, - "value_type": "individual" - }, - "type": "graph", - "xaxis": { - "buckets": null, - "mode": "time", - "name": null, - "show": true, - "values": [ ] - }, - "yaxes": [ - { - "format": "short", - "label": null, - "logBase": 1, - "max": null, - "min": null, - "show": true - }, - { - "format": "short", - "label": null, - "logBase": 1, - "max": null, - "min": null, - "show": true - } - ] - }, - { - "aliasColors": { }, - "bars": false, - "dashLength": 10, - "dashes": false, - "datasource": "$datasource", - "fill": 1, - "fillGradient": 0, - "gridPos": { }, - "id": 9, - "legend": { - "alignAsTable": false, - "avg": false, - "current": false, - "max": false, - "min": false, - "rightSide": false, - "show": true, - "sideWidth": null, - "total": false, - "values": false - }, - "lines": true, - "linewidth": 1, - "links": [ ], - "nullPointMode": "null", - "percentage": false, - "pointradius": 5, - "points": false, - "renderer": "flot", - "repeat": null, - "seriesOverrides": [ ], - "spaceLength": 10, - "span": 6, - "stack": false, - "steppedLine": false, - "targets": [ - { - "expr": "rate(prometheus_remote_storage_samples_retried_total{cluster=~\"$cluster\", namespace=~\"$namespace\", container=~\"$container\"}[5m])", - "format": "time_series", - "intervalFactor": 2, - "legendFormat": "{{cluster}}:{{pod}}-{{instance_group_name}}-{{url}}", - "refId": "A" - } - ], - "thresholds": [ ], - "timeFrom": null, - "timeShift": null, - "title": "Retried Samples", - "tooltip": { - "shared": true, - "sort": 0, - "value_type": "individual" - }, - "type": "graph", - "xaxis": { - "buckets": null, - "mode": "time", - "name": null, - "show": true, - "values": [ ] - }, - "yaxes": [ - { - "format": "short", - "label": null, - "logBase": 1, - "max": null, - "min": null, - "show": true - }, - { - "format": "short", - "label": null, - "logBase": 1, - "max": null, - "min": null, - "show": true - } - ] - } - ], - "repeat": null, - "repeatIteration": null, - "repeatRowId": null, - "showTitle": true, - "title": "Samples", - "titleSize": "h6", - "type": "row" - }, - { - "collapse": false, - "collapsed": false, - "panels": [ - { - "aliasColors": { }, - "bars": false, - "dashLength": 10, - "dashes": false, - "datasource": "$datasource", - "fill": 1, - "fillGradient": 0, - "gridPos": { }, - "id": 10, - "legend": { - "alignAsTable": false, - "avg": false, - "current": false, - "max": false, - "min": false, - "rightSide": false, - "show": true, - "sideWidth": null, - "total": false, - "values": false - }, - "lines": true, - "linewidth": 1, - "links": [ ], - "minSpan": 6, - "nullPointMode": "null", - "percentage": false, - "pointradius": 5, - "points": false, - "renderer": "flot", - "repeat": null, - "seriesOverrides": [ ], - "spaceLength": 10, - "span": 12, - "stack": false, - "steppedLine": false, - "targets": [ - { - "expr": "prometheus_remote_storage_shards{cluster=~\"$cluster\", namespace=~\"$namespace\", container=~\"$container\"}", - "format": "time_series", - "intervalFactor": 2, - "legendFormat": 
"{{cluster}}:{{pod}}-{{instance_group_name}}-{{url}}", - "refId": "A" - } - ], - "thresholds": [ ], - "timeFrom": null, - "timeShift": null, - "title": "Current Shards", - "tooltip": { - "shared": true, - "sort": 0, - "value_type": "individual" - }, - "type": "graph", - "xaxis": { - "buckets": null, - "mode": "time", - "name": null, - "show": true, - "values": [ ] - }, - "yaxes": [ - { - "format": "short", - "label": null, - "logBase": 1, - "max": null, - "min": null, - "show": true - }, - { - "format": "short", - "label": null, - "logBase": 1, - "max": null, - "min": null, - "show": true - } - ] - }, - { - "aliasColors": { }, - "bars": false, - "dashLength": 10, - "dashes": false, - "datasource": "$datasource", - "fill": 1, - "fillGradient": 0, - "gridPos": { }, - "id": 11, - "legend": { - "alignAsTable": false, - "avg": false, - "current": false, - "max": false, - "min": false, - "rightSide": false, - "show": true, - "sideWidth": null, - "total": false, - "values": false - }, - "lines": true, - "linewidth": 1, - "links": [ ], - "nullPointMode": "null", - "percentage": false, - "pointradius": 5, - "points": false, - "renderer": "flot", - "repeat": null, - "seriesOverrides": [ ], - "spaceLength": 10, - "span": 4, - "stack": false, - "steppedLine": false, - "targets": [ - { - "expr": "prometheus_remote_storage_shards_max{cluster=~\"$cluster\", namespace=~\"$namespace\", container=~\"$container\"}", - "format": "time_series", - "intervalFactor": 2, - "legendFormat": "{{cluster}}:{{pod}}-{{instance_group_name}}-{{url}}", - "refId": "A" - } - ], - "thresholds": [ ], - "timeFrom": null, - "timeShift": null, - "title": "Max Shards", - "tooltip": { - "shared": true, - "sort": 0, - "value_type": "individual" - }, - "type": "graph", - "xaxis": { - "buckets": null, - "mode": "time", - "name": null, - "show": true, - "values": [ ] - }, - "yaxes": [ - { - "format": "short", - "label": null, - "logBase": 1, - "max": null, - "min": null, - "show": true - }, - { - "format": "short", - "label": null, - "logBase": 1, - "max": null, - "min": null, - "show": true - } - ] - }, - { - "aliasColors": { }, - "bars": false, - "dashLength": 10, - "dashes": false, - "datasource": "$datasource", - "fill": 1, - "fillGradient": 0, - "gridPos": { }, - "id": 12, - "legend": { - "alignAsTable": false, - "avg": false, - "current": false, - "max": false, - "min": false, - "rightSide": false, - "show": true, - "sideWidth": null, - "total": false, - "values": false - }, - "lines": true, - "linewidth": 1, - "links": [ ], - "nullPointMode": "null", - "percentage": false, - "pointradius": 5, - "points": false, - "renderer": "flot", - "repeat": null, - "seriesOverrides": [ ], - "spaceLength": 10, - "span": 4, - "stack": false, - "steppedLine": false, - "targets": [ - { - "expr": "prometheus_remote_storage_shards_min{cluster=~\"$cluster\", namespace=~\"$namespace\", container=~\"$container\"}", - "format": "time_series", - "intervalFactor": 2, - "legendFormat": "{{cluster}}:{{pod}}-{{instance_group_name}}-{{url}}", - "refId": "A" - } - ], - "thresholds": [ ], - "timeFrom": null, - "timeShift": null, - "title": "Min Shards", - "tooltip": { - "shared": true, - "sort": 0, - "value_type": "individual" - }, - "type": "graph", - "xaxis": { - "buckets": null, - "mode": "time", - "name": null, - "show": true, - "values": [ ] - }, - "yaxes": [ - { - "format": "short", - "label": null, - "logBase": 1, - "max": null, - "min": null, - "show": true - }, - { - "format": "short", - "label": null, - "logBase": 1, - "max": null, - "min": null, - 
"show": true - } - ] - }, - { - "aliasColors": { }, - "bars": false, - "dashLength": 10, - "dashes": false, - "datasource": "$datasource", - "fill": 1, - "fillGradient": 0, - "gridPos": { }, - "id": 13, - "legend": { - "alignAsTable": false, - "avg": false, - "current": false, - "max": false, - "min": false, - "rightSide": false, - "show": true, - "sideWidth": null, - "total": false, - "values": false - }, - "lines": true, - "linewidth": 1, - "links": [ ], - "nullPointMode": "null", - "percentage": false, - "pointradius": 5, - "points": false, - "renderer": "flot", - "repeat": null, - "seriesOverrides": [ ], - "spaceLength": 10, - "span": 4, - "stack": false, - "steppedLine": false, - "targets": [ - { - "expr": "prometheus_remote_storage_shards_desired{cluster=~\"$cluster\", namespace=~\"$namespace\", container=~\"$container\"}", - "format": "time_series", - "intervalFactor": 2, - "legendFormat": "{{cluster}}:{{pod}}-{{instance_group_name}}-{{url}}", - "refId": "A" - } - ], - "thresholds": [ ], - "timeFrom": null, - "timeShift": null, - "title": "Desired Shards", - "tooltip": { - "shared": true, - "sort": 0, - "value_type": "individual" - }, - "type": "graph", - "xaxis": { - "buckets": null, - "mode": "time", - "name": null, - "show": true, - "values": [ ] - }, - "yaxes": [ - { - "format": "short", - "label": null, - "logBase": 1, - "max": null, - "min": null, - "show": true - }, - { - "format": "short", - "label": null, - "logBase": 1, - "max": null, - "min": null, - "show": true - } - ] - } - ], - "repeat": null, - "repeatIteration": null, - "repeatRowId": null, - "showTitle": true, - "title": "Shards", - "titleSize": "h6", - "type": "row" - }, - { - "collapse": false, - "collapsed": false, - "panels": [ - { - "aliasColors": { }, - "bars": false, - "dashLength": 10, - "dashes": false, - "datasource": "$datasource", - "fill": 1, - "fillGradient": 0, - "gridPos": { }, - "id": 14, - "legend": { - "alignAsTable": false, - "avg": false, - "current": false, - "max": false, - "min": false, - "rightSide": false, - "show": true, - "sideWidth": null, - "total": false, - "values": false - }, - "lines": true, - "linewidth": 1, - "links": [ ], - "nullPointMode": "null", - "percentage": false, - "pointradius": 5, - "points": false, - "renderer": "flot", - "repeat": null, - "seriesOverrides": [ ], - "spaceLength": 10, - "span": 6, - "stack": false, - "steppedLine": false, - "targets": [ - { - "expr": "prometheus_remote_storage_shard_capacity{cluster=~\"$cluster\", namespace=~\"$namespace\", container=~\"$container\"}", - "format": "time_series", - "intervalFactor": 2, - "legendFormat": "{{cluster}}:{{pod}}-{{instance_group_name}}-{{url}}", - "refId": "A" - } - ], - "thresholds": [ ], - "timeFrom": null, - "timeShift": null, - "title": "Shard Capacity", - "tooltip": { - "shared": true, - "sort": 0, - "value_type": "individual" - }, - "type": "graph", - "xaxis": { - "buckets": null, - "mode": "time", - "name": null, - "show": true, - "values": [ ] - }, - "yaxes": [ - { - "format": "short", - "label": null, - "logBase": 1, - "max": null, - "min": null, - "show": true - }, - { - "format": "short", - "label": null, - "logBase": 1, - "max": null, - "min": null, - "show": true - } - ] - } - ], - "repeat": null, - "repeatIteration": null, - "repeatRowId": null, - "showTitle": true, - "title": "Shard Details", - "titleSize": "h6", - "type": "row" - }, - { - "collapse": false, - "collapsed": false, - "panels": [ - { - "aliasColors": { }, - "bars": false, - "dashLength": 10, - "dashes": false, - "datasource": 
"$datasource", - "fill": 1, - "fillGradient": 0, - "gridPos": { }, - "id": 15, - "legend": { - "alignAsTable": false, - "avg": false, - "current": false, - "max": false, - "min": false, - "rightSide": false, - "show": true, - "sideWidth": null, - "total": false, - "values": false - }, - "lines": true, - "linewidth": 1, - "links": [ ], - "nullPointMode": "null", - "percentage": false, - "pointradius": 5, - "points": false, - "renderer": "flot", - "repeat": null, - "seriesOverrides": [ ], - "spaceLength": 10, - "span": 6, - "stack": false, - "steppedLine": false, - "targets": [ - { - "expr": "prometheus_wal_watcher_current_segment{cluster=~\"$cluster\", namespace=~\"$namespace\", container=~\"$container\"}", - "format": "time_series", - "intervalFactor": 2, - "legendFormat": "{{cluster}}:{{pod}}-{{instance_group_name}}-{{url}}", - "refId": "A" - } - ], - "thresholds": [ ], - "timeFrom": null, - "timeShift": null, - "title": "Remote Write Current Segment", - "tooltip": { - "shared": true, - "sort": 0, - "value_type": "individual" - }, - "type": "graph", - "xaxis": { - "buckets": null, - "mode": "time", - "name": null, - "show": true, - "values": [ ] - }, - "yaxes": [ - { - "format": "none", - "label": null, - "logBase": 1, - "max": null, - "min": null, - "show": true - }, - { - "format": "short", - "label": null, - "logBase": 1, - "max": null, - "min": null, - "show": true - } - ] - } - ], - "repeat": null, - "repeatIteration": null, - "repeatRowId": null, - "showTitle": true, - "title": "Segments", - "titleSize": "h6", - "type": "row" - }, - { - "collapse": false, - "collapsed": false, - "panels": [ - { - "aliasColors": { }, - "bars": false, - "dashLength": 10, - "dashes": false, - "datasource": "$datasource", - "fill": 1, - "fillGradient": 0, - "gridPos": { }, - "id": 16, - "legend": { - "alignAsTable": false, - "avg": false, - "current": false, - "max": false, - "min": false, - "rightSide": false, - "show": true, - "sideWidth": null, - "total": false, - "values": false - }, - "lines": true, - "linewidth": 1, - "links": [ ], - "nullPointMode": "null", - "percentage": false, - "pointradius": 5, - "points": false, - "renderer": "flot", - "repeat": null, - "seriesOverrides": [ ], - "spaceLength": 10, - "span": 6, - "stack": false, - "steppedLine": false, - "targets": [ - { - "expr": "rate(prometheus_remote_storage_enqueue_retries_total{cluster=~\"$cluster\", namespace=~\"$namespace\", container=~\"$container\"}[5m])", - "format": "time_series", - "intervalFactor": 2, - "legendFormat": "{{cluster}}:{{pod}}-{{instance_group_name}}-{{url}}", - "refId": "A" - } - ], - "thresholds": [ ], - "timeFrom": null, - "timeShift": null, - "title": "Enqueue Retries", - "tooltip": { - "shared": true, - "sort": 0, - "value_type": "individual" - }, - "type": "graph", - "xaxis": { - "buckets": null, - "mode": "time", - "name": null, - "show": true, - "values": [ ] - }, - "yaxes": [ - { - "format": "short", - "label": null, - "logBase": 1, - "max": null, - "min": null, - "show": true - }, - { - "format": "short", - "label": null, - "logBase": 1, - "max": null, - "min": null, - "show": true - } - ] - } - ], - "repeat": null, - "repeatIteration": null, - "repeatRowId": null, - "showTitle": true, - "title": "Misc. 
Rates", - "titleSize": "h6", - "type": "row" - } - ], - "schemaVersion": 14, - "style": "dark", - "tags": [ - "grafana-agent-mixin" - ], - "templating": { - "list": [ - { - "hide": 0, - "label": null, - "name": "datasource", - "options": [ ], - "query": "prometheus", - "refresh": 1, - "regex": "", - "type": "datasource" - }, - { - "allValue": null, - "current": { - "text": { - "selected": true, - "text": "All", - "value": "$__all" - }, - "value": { - "selected": true, - "text": "All", - "value": "$__all" - } - }, - "datasource": "$datasource", - "hide": 0, - "includeAll": true, - "label": null, - "multi": false, - "name": "cluster", - "options": [ ], - "query": "label_values(agent_build_info, cluster)", - "refresh": 2, - "regex": "", - "sort": 0, - "tagValuesQuery": "", - "tags": [ ], - "tagsQuery": "", - "type": "query", - "useTags": false - }, - { - "allValue": null, - "current": { - "text": { - "selected": true, - "text": "All", - "value": "$__all" - }, - "value": { - "selected": true, - "text": "All", - "value": "$__all" - } - }, - "datasource": "$datasource", - "hide": 0, - "includeAll": true, - "label": null, - "multi": false, - "name": "namespace", - "options": [ ], - "query": "label_values(agent_build_info, namespace)", - "refresh": 2, - "regex": "", - "sort": 0, - "tagValuesQuery": "", - "tags": [ ], - "tagsQuery": "", - "type": "query", - "useTags": false - }, - { - "allValue": null, - "current": { - "text": { - "selected": true, - "text": "All", - "value": "$__all" - }, - "value": { - "selected": true, - "text": "All", - "value": "$__all" - } - }, - "datasource": "$datasource", - "hide": 0, - "includeAll": true, - "label": null, - "multi": false, - "name": "container", - "options": [ ], - "query": "label_values(agent_build_info, container)", - "refresh": 2, - "regex": "", - "sort": 0, - "tagValuesQuery": "", - "tags": [ ], - "tagsQuery": "", - "type": "query", - "useTags": false - }, - { - "allValue": null, - "current": { - "text": { - "selected": true, - "text": "All", - "value": "$__all" - }, - "value": { - "selected": true, - "text": "All", - "value": "$__all" - } - }, - "datasource": "$datasource", - "hide": 0, - "includeAll": true, - "label": null, - "multi": false, - "name": "pod", - "options": [ ], - "query": "label_values(agent_build_info{container=~\"$container\"}, pod)", - "refresh": 2, - "regex": "", - "sort": 0, - "tagValuesQuery": "", - "tags": [ ], - "tagsQuery": "", - "type": "query", - "useTags": false - }, - { - "allValue": null, - "current": { }, - "datasource": "$datasource", - "hide": 0, - "includeAll": true, - "label": null, - "multi": false, - "name": "url", - "options": [ ], - "query": "label_values(prometheus_remote_storage_shards{cluster=~\"$cluster\", pod=~\"$pod\"}, url)", - "refresh": 2, - "regex": "", - "sort": 0, - "tagValuesQuery": "", - "tags": [ ], - "tagsQuery": "", - "type": "query", - "useTags": false - } - ] - }, - "time": { - "from": "now-1h", - "to": "now" - }, - "timepicker": { - "refresh_intervals": [ - "5s", - "10s", - "30s", - "1m", - "5m", - "15m", - "30m", - "1h", - "2h", - "1d" - ], - "time_options": [ - "5m", - "15m", - "1h", - "6h", - "12h", - "24h", - "2d", - "7d", - "30d" - ] - }, - "timezone": "", - "title": "Agent Prometheus Remote Write", - "version": 0 -} diff --git a/example/docker-compose/grafana/dashboards/agent-tracing-pipeline.json b/example/docker-compose/grafana/dashboards/agent-tracing-pipeline.json deleted file mode 100644 index 3e9415b919..0000000000 --- 
a/example/docker-compose/grafana/dashboards/agent-tracing-pipeline.json +++ /dev/null @@ -1,1065 +0,0 @@ -{ - "__inputs": [ ], - "__requires": [ ], - "annotations": { - "list": [ ] - }, - "editable": true, - "gnetId": null, - "graphTooltip": 0, - "hideControls": false, - "id": null, - "links": [ ], - "refresh": "30s", - "rows": [ - { - "collapse": false, - "collapsed": false, - "panels": [ - { - "aliasColors": { }, - "bars": false, - "dashLength": 10, - "dashes": false, - "datasource": "$datasource", - "fill": 0, - "fillGradient": 0, - "gridPos": { }, - "id": 2, - "interval": "1m", - "legend": { - "alignAsTable": false, - "avg": false, - "current": false, - "max": false, - "min": false, - "rightSide": false, - "show": false, - "sideWidth": null, - "total": false, - "values": false - }, - "lines": true, - "linewidth": 1, - "links": [ ], - "nullPointMode": "null", - "percentage": false, - "pointradius": 5, - "points": false, - "renderer": "flot", - "repeat": null, - "seriesOverrides": [ ], - "spaceLength": 10, - "span": 3, - "stack": false, - "steppedLine": false, - "targets": [ - { - "expr": "rate(traces_receiver_accepted_spans{cluster=~\"$cluster\",namespace=~\"$namespace\",container=~\"$container\",pod=~\"$pod\",receiver!=\"otlp/lb\"}[$__rate_interval])\n", - "format": "time_series", - "intervalFactor": 2, - "legendFormat": "{{ pod }} - {{ receiver }}/{{ transport }}", - "refId": "A" - } - ], - "thresholds": [ ], - "timeFrom": null, - "timeShift": null, - "title": "Accepted spans", - "tooltip": { - "shared": true, - "sort": 0, - "value_type": "individual" - }, - "type": "graph", - "xaxis": { - "buckets": null, - "mode": "time", - "name": null, - "show": true, - "values": [ ] - }, - "yaxes": [ - { - "format": "short", - "label": null, - "logBase": 1, - "max": null, - "min": null, - "show": true - }, - { - "format": "short", - "label": null, - "logBase": 1, - "max": null, - "min": null, - "show": true - } - ] - }, - { - "aliasColors": { }, - "bars": false, - "dashLength": 10, - "dashes": false, - "datasource": "$datasource", - "fill": 0, - "fillGradient": 0, - "gridPos": { }, - "id": 3, - "interval": "1m", - "legend": { - "alignAsTable": false, - "avg": false, - "current": false, - "max": false, - "min": false, - "rightSide": false, - "show": false, - "sideWidth": null, - "total": false, - "values": false - }, - "lines": true, - "linewidth": 1, - "links": [ ], - "nullPointMode": "null", - "percentage": false, - "pointradius": 5, - "points": false, - "renderer": "flot", - "repeat": null, - "seriesOverrides": [ ], - "spaceLength": 10, - "span": 3, - "stack": false, - "steppedLine": false, - "targets": [ - { - "expr": "rate(traces_receiver_refused_spans{cluster=~\"$cluster\",namespace=~\"$namespace\",container=~\"$container\",pod=~\"$pod\",receiver!=\"otlp/lb\"}[$__rate_interval])\n", - "format": "time_series", - "intervalFactor": 2, - "legendFormat": "{{ pod }} - {{ receiver }}/{{ transport }}", - "refId": "A" - } - ], - "thresholds": [ ], - "timeFrom": null, - "timeShift": null, - "title": "Refused spans", - "tooltip": { - "shared": true, - "sort": 0, - "value_type": "individual" - }, - "type": "graph", - "xaxis": { - "buckets": null, - "mode": "time", - "name": null, - "show": true, - "values": [ ] - }, - "yaxes": [ - { - "format": "short", - "label": null, - "logBase": 1, - "max": null, - "min": null, - "show": true - }, - { - "format": "short", - "label": null, - "logBase": 1, - "max": null, - "min": null, - "show": true - } - ] - }, - { - "aliasColors": { }, - "bars": false, - 
"dashLength": 10, - "dashes": false, - "datasource": "$datasource", - "fill": 0, - "fillGradient": 0, - "gridPos": { }, - "id": 4, - "interval": "1m", - "legend": { - "alignAsTable": false, - "avg": false, - "current": false, - "max": false, - "min": false, - "rightSide": false, - "show": false, - "sideWidth": null, - "total": false, - "values": false - }, - "lines": true, - "linewidth": 1, - "links": [ ], - "nullPointMode": "null", - "percentage": false, - "pointradius": 5, - "points": false, - "renderer": "flot", - "repeat": null, - "seriesOverrides": [ ], - "spaceLength": 10, - "span": 3, - "stack": false, - "steppedLine": false, - "targets": [ - { - "expr": "rate(traces_exporter_sent_spans{cluster=~\"$cluster\",namespace=~\"$namespace\",container=~\"$container\",pod=~\"$pod\",exporter!=\"otlp\"}[$__rate_interval])\n", - "format": "time_series", - "intervalFactor": 2, - "legendFormat": "{{ pod }} - {{ exporter }}", - "refId": "A" - } - ], - "thresholds": [ ], - "timeFrom": null, - "timeShift": null, - "title": "Exported spans", - "tooltip": { - "shared": true, - "sort": 0, - "value_type": "individual" - }, - "type": "graph", - "xaxis": { - "buckets": null, - "mode": "time", - "name": null, - "show": true, - "values": [ ] - }, - "yaxes": [ - { - "format": "short", - "label": null, - "logBase": 1, - "max": null, - "min": null, - "show": true - }, - { - "format": "short", - "label": null, - "logBase": 1, - "max": null, - "min": null, - "show": true - } - ] - }, - { - "aliasColors": { }, - "bars": false, - "dashLength": 10, - "dashes": false, - "datasource": "$datasource", - "fill": 0, - "fillGradient": 0, - "gridPos": { }, - "id": 5, - "interval": "1m", - "legend": { - "alignAsTable": false, - "avg": false, - "current": false, - "max": false, - "min": false, - "rightSide": false, - "show": false, - "sideWidth": null, - "total": false, - "values": false - }, - "lines": true, - "linewidth": 1, - "links": [ ], - "nullPointMode": "null", - "percentage": false, - "pointradius": 5, - "points": false, - "renderer": "flot", - "repeat": null, - "seriesOverrides": [ ], - "spaceLength": 10, - "span": 3, - "stack": false, - "steppedLine": false, - "targets": [ - { - "expr": "rate(traces_exporter_send_failed_spans{cluster=~\"$cluster\",namespace=~\"$namespace\",container=~\"$container\",pod=~\"$pod\",exporter!=\"otlp\"}[$__rate_interval])\n", - "format": "time_series", - "intervalFactor": 2, - "legendFormat": "{{ pod }} - {{ exporter }}", - "refId": "A" - } - ], - "thresholds": [ ], - "timeFrom": null, - "timeShift": null, - "title": "Exported failed spans", - "tooltip": { - "shared": true, - "sort": 0, - "value_type": "individual" - }, - "type": "graph", - "xaxis": { - "buckets": null, - "mode": "time", - "name": null, - "show": true, - "values": [ ] - }, - "yaxes": [ - { - "format": "short", - "label": null, - "logBase": 1, - "max": null, - "min": null, - "show": true - }, - { - "format": "short", - "label": null, - "logBase": 1, - "max": null, - "min": null, - "show": true - } - ] - }, - { - "aliasColors": { }, - "bars": false, - "dashLength": 10, - "dashes": false, - "datasource": "$datasource", - "fill": 1, - "fillGradient": 0, - "gridPos": { }, - "id": 6, - "interval": "1m", - "legend": { - "alignAsTable": false, - "avg": false, - "current": false, - "max": false, - "min": false, - "rightSide": false, - "show": true, - "sideWidth": null, - "total": false, - "values": false - }, - "lines": true, - "linewidth": 1, - "links": [ ], - "nullPointMode": "null", - "percentage": false, - "pointradius": 5, 
- "points": false, - "renderer": "flot", - "repeat": null, - "seriesOverrides": [ ], - "spaceLength": 10, - "span": 6, - "stack": false, - "steppedLine": false, - "targets": [ - { - "expr": "sum(rate(traces_receiver_accepted_spans{cluster=~\"$cluster\",namespace=~\"$namespace\",container=~\"$container\",pod=~\"$pod\",receiver!=\"otlp/lb\"}[$__rate_interval]))\n", - "format": "time_series", - "intervalFactor": 2, - "legendFormat": "Accepted", - "refId": "A" - }, - { - "expr": "sum(rate(traces_receiver_refused_spans{cluster=~\"$cluster\",namespace=~\"$namespace\",container=~\"$container\",pod=~\"$pod\",receiver!=\"otlp/lb\"}[$__rate_interval]))\n", - "format": "time_series", - "intervalFactor": 2, - "legendFormat": "Refused", - "refId": "B" - } - ], - "thresholds": [ ], - "timeFrom": null, - "timeShift": null, - "title": "Received spans", - "tooltip": { - "shared": true, - "sort": 0, - "value_type": "individual" - }, - "type": "graph", - "xaxis": { - "buckets": null, - "mode": "time", - "name": null, - "show": true, - "values": [ ] - }, - "yaxes": [ - { - "format": "short", - "label": null, - "logBase": 1, - "max": null, - "min": null, - "show": true - }, - { - "format": "short", - "label": null, - "logBase": 1, - "max": null, - "min": null, - "show": true - } - ] - }, - { - "aliasColors": { }, - "bars": false, - "dashLength": 10, - "dashes": false, - "datasource": "$datasource", - "fill": 1, - "fillGradient": 0, - "gridPos": { }, - "id": 7, - "interval": "1m", - "legend": { - "alignAsTable": false, - "avg": false, - "current": false, - "max": false, - "min": false, - "rightSide": false, - "show": true, - "sideWidth": null, - "total": false, - "values": false - }, - "lines": true, - "linewidth": 1, - "links": [ ], - "nullPointMode": "null", - "percentage": false, - "pointradius": 5, - "points": false, - "renderer": "flot", - "repeat": null, - "seriesOverrides": [ ], - "spaceLength": 10, - "span": 6, - "stack": false, - "steppedLine": false, - "targets": [ - { - "expr": "sum(rate(traces_exporter_sent_spans{cluster=~\"$cluster\",namespace=~\"$namespace\",container=~\"$container\",pod=~\"$pod\",exporter!=\"otlp\"}[$__rate_interval]))\n", - "format": "time_series", - "intervalFactor": 2, - "legendFormat": "Sent", - "refId": "A" - }, - { - "expr": "sum(rate(traces_exporter_send_failed_spans{cluster=~\"$cluster\",namespace=~\"$namespace\",container=~\"$container\",pod=~\"$pod\",exporter!=\"otlp\"}[$__rate_interval]))\n", - "format": "time_series", - "intervalFactor": 2, - "legendFormat": "Send failed", - "refId": "B" - } - ], - "thresholds": [ ], - "timeFrom": null, - "timeShift": null, - "title": "Exported spans", - "tooltip": { - "shared": true, - "sort": 0, - "value_type": "individual" - }, - "type": "graph", - "xaxis": { - "buckets": null, - "mode": "time", - "name": null, - "show": true, - "values": [ ] - }, - "yaxes": [ - { - "format": "short", - "label": null, - "logBase": 1, - "max": null, - "min": null, - "show": true - }, - { - "format": "short", - "label": null, - "logBase": 1, - "max": null, - "min": null, - "show": true - } - ] - } - ], - "repeat": null, - "repeatIteration": null, - "repeatRowId": null, - "showTitle": true, - "title": "Write / Read", - "titleSize": "h6", - "type": "row" - }, - { - "collapse": false, - "collapsed": false, - "panels": [ - { - "aliasColors": { }, - "bars": false, - "dashLength": 10, - "dashes": false, - "datasource": "$datasource", - "fill": 1, - "fillGradient": 0, - "gridPos": { }, - "id": 8, - "interval": "1m", - "legend": { - "alignAsTable": false, - 
"avg": false, - "current": false, - "max": false, - "min": false, - "rightSide": false, - "show": true, - "sideWidth": null, - "total": false, - "values": false - }, - "lines": true, - "linewidth": 1, - "links": [ ], - "nullPointMode": "null", - "percentage": false, - "pointradius": 5, - "points": false, - "renderer": "flot", - "repeat": null, - "seriesOverrides": [ ], - "spaceLength": 10, - "span": 3, - "stack": true, - "steppedLine": false, - "targets": [ - { - "expr": "rate(traces_loadbalancer_backend_outcome{cluster=~\"$cluster\",namespace=~\"$namespace\",success=\"true\",container=~\"$container\",pod=~\"$pod\"}[$__rate_interval])\n", - "format": "time_series", - "intervalFactor": 2, - "legendFormat": "{{ pod }}", - "refId": "A" - } - ], - "thresholds": [ ], - "timeFrom": null, - "timeShift": null, - "title": "Load-balanced spans", - "tooltip": { - "shared": true, - "sort": 0, - "value_type": "individual" - }, - "type": "graph", - "xaxis": { - "buckets": null, - "mode": "time", - "name": null, - "show": true, - "values": [ ] - }, - "yaxes": [ - { - "format": "short", - "label": null, - "logBase": 1, - "max": null, - "min": null, - "show": true - }, - { - "format": "short", - "label": null, - "logBase": 1, - "max": null, - "min": null, - "show": true - } - ] - }, - { - "aliasColors": { }, - "bars": false, - "dashLength": 10, - "dashes": false, - "datasource": "$datasource", - "fill": 0, - "fillGradient": 0, - "gridPos": { }, - "id": 9, - "interval": "1m", - "legend": { - "alignAsTable": false, - "avg": false, - "current": false, - "max": false, - "min": false, - "rightSide": false, - "show": false, - "sideWidth": null, - "total": false, - "values": false - }, - "lines": true, - "linewidth": 1, - "links": [ ], - "nullPointMode": "null", - "percentage": false, - "pointradius": 5, - "points": false, - "renderer": "flot", - "repeat": null, - "seriesOverrides": [ ], - "spaceLength": 10, - "span": 3, - "stack": false, - "steppedLine": false, - "targets": [ - { - "expr": "traces_loadbalancer_num_backends{cluster=~\"$cluster\",namespace=~\"$namespace\",container=~\"$container\",pod=~\"$pod\"}\n", - "format": "time_series", - "intervalFactor": 2, - "legendFormat": "{{ pod }}", - "refId": "A" - } - ], - "thresholds": [ ], - "timeFrom": null, - "timeShift": null, - "title": "Number of peers", - "tooltip": { - "shared": true, - "sort": 0, - "value_type": "individual" - }, - "type": "graph", - "xaxis": { - "buckets": null, - "mode": "time", - "name": null, - "show": true, - "values": [ ] - }, - "yaxes": [ - { - "format": "short", - "label": null, - "logBase": 1, - "max": null, - "min": null, - "show": true - }, - { - "format": "short", - "label": null, - "logBase": 1, - "max": null, - "min": null, - "show": true - } - ] - }, - { - "aliasColors": { }, - "bars": false, - "dashLength": 10, - "dashes": false, - "datasource": "$datasource", - "fill": 1, - "fillGradient": 0, - "gridPos": { }, - "id": 10, - "interval": "1m", - "legend": { - "alignAsTable": false, - "avg": false, - "current": false, - "max": false, - "min": false, - "rightSide": false, - "show": true, - "sideWidth": null, - "total": false, - "values": false - }, - "lines": true, - "linewidth": 1, - "links": [ ], - "nullPointMode": "null", - "percentage": false, - "pointradius": 5, - "points": false, - "renderer": "flot", - "repeat": null, - "seriesOverrides": [ ], - "spaceLength": 10, - "span": 3, - "stack": false, - "steppedLine": false, - "targets": [ - { - "expr": 
"sum(rate(traces_receiver_accepted_spans{cluster=~\"$cluster\",namespace=~\"$namespace\",container=~\"$container\",pod=~\"$pod\",receiver=\"otlp/lb\"}[$__rate_interval]))\n", - "format": "time_series", - "intervalFactor": 2, - "legendFormat": "Accepted", - "refId": "A" - }, - { - "expr": "sum(rate(traces_receiver_refused_spans{cluster=~\"$cluster\",namespace=~\"$namespace\",container=~\"$container\",pod=~\"$pod\",receiver=\"otlp/lb\"}[$__rate_interval]))\n", - "format": "time_series", - "intervalFactor": 2, - "legendFormat": "Refused", - "refId": "B" - } - ], - "thresholds": [ ], - "timeFrom": null, - "timeShift": null, - "title": "Received spans", - "tooltip": { - "shared": true, - "sort": 0, - "value_type": "individual" - }, - "type": "graph", - "xaxis": { - "buckets": null, - "mode": "time", - "name": null, - "show": true, - "values": [ ] - }, - "yaxes": [ - { - "format": "short", - "label": null, - "logBase": 1, - "max": null, - "min": null, - "show": true - }, - { - "format": "short", - "label": null, - "logBase": 1, - "max": null, - "min": null, - "show": true - } - ] - }, - { - "aliasColors": { }, - "bars": false, - "dashLength": 10, - "dashes": false, - "datasource": "$datasource", - "fill": 1, - "fillGradient": 0, - "gridPos": { }, - "id": 11, - "interval": "1m", - "legend": { - "alignAsTable": false, - "avg": false, - "current": false, - "max": false, - "min": false, - "rightSide": false, - "show": true, - "sideWidth": null, - "total": false, - "values": false - }, - "lines": true, - "linewidth": 1, - "links": [ ], - "nullPointMode": "null", - "percentage": false, - "pointradius": 5, - "points": false, - "renderer": "flot", - "repeat": null, - "seriesOverrides": [ ], - "spaceLength": 10, - "span": 3, - "stack": false, - "steppedLine": false, - "targets": [ - { - "expr": "sum(rate(traces_exporter_sent_spans{cluster=~\"$cluster\",namespace=~\"$namespace\",container=~\"$container\",pod=~\"$pod\",exporter=\"otlp\"}[$__rate_interval]))\n", - "format": "time_series", - "intervalFactor": 2, - "legendFormat": "Sent", - "refId": "A" - }, - { - "expr": "sum(rate(traces_exporter_send_failed_spans{cluster=~\"$cluster\",namespace=~\"$namespace\",container=~\"$container\",pod=~\"$pod\",exporter=\"otlp\"}[$__rate_interval]))\n", - "format": "time_series", - "intervalFactor": 2, - "legendFormat": "Send failed", - "refId": "B" - } - ], - "thresholds": [ ], - "timeFrom": null, - "timeShift": null, - "title": "Exported spans", - "tooltip": { - "shared": true, - "sort": 0, - "value_type": "individual" - }, - "type": "graph", - "xaxis": { - "buckets": null, - "mode": "time", - "name": null, - "show": true, - "values": [ ] - }, - "yaxes": [ - { - "format": "short", - "label": null, - "logBase": 1, - "max": null, - "min": null, - "show": true - }, - { - "format": "short", - "label": null, - "logBase": 1, - "max": null, - "min": null, - "show": true - } - ] - } - ], - "repeat": null, - "repeatIteration": null, - "repeatRowId": null, - "showTitle": true, - "title": "Load balancing", - "titleSize": "h6", - "type": "row" - } - ], - "schemaVersion": 14, - "style": "dark", - "tags": [ - "grafana-agent-mixin" - ], - "templating": { - "list": [ - { - "hide": 0, - "label": null, - "name": "datasource", - "options": [ ], - "query": "prometheus", - "refresh": 1, - "regex": "", - "type": "datasource" - }, - { - "allValue": null, - "current": { - "text": { - "selected": true, - "text": "All", - "value": "$__all" - }, - "value": { - "selected": true, - "text": "All", - "value": "$__all" - } - }, - "datasource": 
"$datasource", - "hide": 0, - "includeAll": true, - "label": null, - "multi": false, - "name": "cluster", - "options": [ ], - "query": "label_values(agent_build_info, cluster)", - "refresh": 2, - "regex": "", - "sort": 0, - "tagValuesQuery": "", - "tags": [ ], - "tagsQuery": "", - "type": "query", - "useTags": false - }, - { - "allValue": null, - "current": { - "text": { - "selected": true, - "text": "All", - "value": "$__all" - }, - "value": { - "selected": true, - "text": "All", - "value": "$__all" - } - }, - "datasource": "$datasource", - "hide": 0, - "includeAll": true, - "label": null, - "multi": false, - "name": "namespace", - "options": [ ], - "query": "label_values(agent_build_info, namespace)", - "refresh": 2, - "regex": "", - "sort": 0, - "tagValuesQuery": "", - "tags": [ ], - "tagsQuery": "", - "type": "query", - "useTags": false - }, - { - "allValue": null, - "current": { - "text": { - "selected": true, - "text": "All", - "value": "$__all" - }, - "value": { - "selected": true, - "text": "All", - "value": "$__all" - } - }, - "datasource": "$datasource", - "hide": 0, - "includeAll": true, - "label": null, - "multi": false, - "name": "container", - "options": [ ], - "query": "label_values(agent_build_info, container)", - "refresh": 2, - "regex": "", - "sort": 0, - "tagValuesQuery": "", - "tags": [ ], - "tagsQuery": "", - "type": "query", - "useTags": false - }, - { - "allValue": null, - "current": { - "text": { - "selected": true, - "text": "All", - "value": "$__all" - }, - "value": { - "selected": true, - "text": "All", - "value": "$__all" - } - }, - "datasource": "$datasource", - "hide": 0, - "includeAll": true, - "label": null, - "multi": false, - "name": "pod", - "options": [ ], - "query": "label_values(agent_build_info{container=~\"$container\"}, pod)", - "refresh": 2, - "regex": "", - "sort": 0, - "tagValuesQuery": "", - "tags": [ ], - "tagsQuery": "", - "type": "query", - "useTags": false - } - ] - }, - "time": { - "from": "now-1h", - "to": "now" - }, - "timepicker": { - "refresh_intervals": [ - "5s", - "10s", - "30s", - "1m", - "5m", - "15m", - "30m", - "1h", - "2h", - "1d" - ], - "time_options": [ - "5m", - "15m", - "1h", - "6h", - "12h", - "24h", - "2d", - "7d", - "30d" - ] - }, - "timezone": "", - "title": "Agent Tracing Pipeline", - "version": 0 -} diff --git a/example/docker-compose/grafana/dashboards/agent.json b/example/docker-compose/grafana/dashboards/agent.json deleted file mode 100644 index 768fccb011..0000000000 --- a/example/docker-compose/grafana/dashboards/agent.json +++ /dev/null @@ -1,786 +0,0 @@ -{ - "annotations": { - "list": [ ] - }, - "editable": true, - "gnetId": null, - "graphTooltip": 0, - "hideControls": false, - "links": [ ], - "refresh": "30s", - "rows": [ - { - "collapse": false, - "height": "250px", - "panels": [ - { - "aliasColors": { }, - "bars": false, - "dashLength": 10, - "dashes": false, - "datasource": "$datasource", - "fill": 1, - "id": 1, - "legend": { - "avg": false, - "current": false, - "max": false, - "min": false, - "show": true, - "total": false, - "values": false - }, - "lines": true, - "linewidth": 1, - "links": [ ], - "nullPointMode": "null as zero", - "percentage": false, - "pointradius": 5, - "points": false, - "renderer": "flot", - "seriesOverrides": [ ], - "spaceLength": 10, - "span": 12, - "stack": false, - "steppedLine": false, - "styles": [ - { - "alias": "Time", - "dateFormat": "YYYY-MM-DD HH:mm:ss", - "pattern": "Time", - "type": "hidden" - }, - { - "alias": "Count", - "colorMode": null, - "colors": [ ], - 
"dateFormat": "YYYY-MM-DD HH:mm:ss", - "decimals": 2, - "link": false, - "linkTargetBlank": false, - "linkTooltip": "Drill down", - "linkUrl": "", - "pattern": "Value #A", - "thresholds": [ ], - "type": "hidden", - "unit": "short" - }, - { - "alias": "Uptime", - "colorMode": null, - "colors": [ ], - "dateFormat": "YYYY-MM-DD HH:mm:ss", - "decimals": 2, - "link": false, - "linkTargetBlank": false, - "linkTooltip": "Drill down", - "linkUrl": "", - "pattern": "Value #B", - "thresholds": [ ], - "type": "number", - "unit": "short" - }, - { - "alias": "Container", - "colorMode": null, - "colors": [ ], - "dateFormat": "YYYY-MM-DD HH:mm:ss", - "decimals": 2, - "link": false, - "linkTargetBlank": false, - "linkTooltip": "Drill down", - "linkUrl": "", - "pattern": "container", - "thresholds": [ ], - "type": "number", - "unit": "short" - }, - { - "alias": "Pod", - "colorMode": null, - "colors": [ ], - "dateFormat": "YYYY-MM-DD HH:mm:ss", - "decimals": 2, - "link": false, - "linkTargetBlank": false, - "linkTooltip": "Drill down", - "linkUrl": "", - "pattern": "pod", - "thresholds": [ ], - "type": "number", - "unit": "short" - }, - { - "alias": "Version", - "colorMode": null, - "colors": [ ], - "dateFormat": "YYYY-MM-DD HH:mm:ss", - "decimals": 2, - "link": false, - "linkTargetBlank": false, - "linkTooltip": "Drill down", - "linkUrl": "", - "pattern": "version", - "thresholds": [ ], - "type": "number", - "unit": "short" - }, - { - "alias": "", - "colorMode": null, - "colors": [ ], - "dateFormat": "YYYY-MM-DD HH:mm:ss", - "decimals": 2, - "pattern": "/.*/", - "thresholds": [ ], - "type": "string", - "unit": "short" - } - ], - "targets": [ - { - "expr": "count by (pod, container, version) (agent_build_info{cluster=~\"$cluster\", namespace=~\"$namespace\", container=~\"$container\"})", - "format": "table", - "instant": true, - "intervalFactor": 2, - "legendFormat": "", - "refId": "A", - "step": 10 - }, - { - "expr": "max by (pod, container) (time() - process_start_time_seconds{cluster=~\"$cluster\", namespace=~\"$namespace\", container=~\"$container\"})", - "format": "table", - "instant": true, - "intervalFactor": 2, - "legendFormat": "", - "refId": "B", - "step": 10 - } - ], - "thresholds": [ ], - "timeFrom": null, - "timeShift": null, - "title": "Agent Stats", - "tooltip": { - "shared": true, - "sort": 2, - "value_type": "individual" - }, - "transform": "table", - "type": "table", - "xaxis": { - "buckets": null, - "mode": "time", - "name": null, - "show": true, - "values": [ ] - }, - "yaxes": [ - { - "format": "short", - "label": null, - "logBase": 1, - "max": null, - "min": 0, - "show": true - }, - { - "format": "short", - "label": null, - "logBase": 1, - "max": null, - "min": null, - "show": false - } - ] - } - ], - "repeat": null, - "repeatIteration": null, - "repeatRowId": null, - "showTitle": true, - "title": "Agent Stats", - "titleSize": "h6" - }, - { - "collapse": false, - "height": "250px", - "panels": [ - { - "aliasColors": { }, - "bars": false, - "dashLength": 10, - "dashes": false, - "datasource": "$datasource", - "fill": 1, - "id": 2, - "legend": { - "avg": false, - "current": false, - "max": false, - "min": false, - "show": true, - "total": false, - "values": false - }, - "lines": true, - "linewidth": 1, - "links": [ ], - "nullPointMode": "null as zero", - "percentage": false, - "pointradius": 5, - "points": false, - "renderer": "flot", - "seriesOverrides": [ ], - "spaceLength": 10, - "span": 6, - "stack": false, - "steppedLine": false, - "targets": [ - { - "expr": 
"sum(rate(prometheus_target_sync_length_seconds_sum{cluster=~\"$cluster\", namespace=~\"$namespace\", container=~\"$container\"}[5m])) by (pod, scrape_job) * 1e3", - "format": "time_series", - "intervalFactor": 2, - "legendFormat": "{{pod}}/{{scrape_job}}", - "legendLink": null, - "step": 10 - } - ], - "thresholds": [ ], - "timeFrom": null, - "timeShift": null, - "title": "Target Sync", - "tooltip": { - "shared": true, - "sort": 2, - "value_type": "individual" - }, - "type": "graph", - "xaxis": { - "buckets": null, - "mode": "time", - "name": null, - "show": true, - "values": [ ] - }, - "yaxes": [ - { - "format": "ms", - "label": null, - "logBase": 1, - "max": null, - "min": 0, - "show": true - }, - { - "format": "short", - "label": null, - "logBase": 1, - "max": null, - "min": null, - "show": false - } - ] - }, - { - "aliasColors": { }, - "bars": false, - "dashLength": 10, - "dashes": false, - "datasource": "$datasource", - "fill": 10, - "id": 3, - "legend": { - "avg": false, - "current": false, - "max": false, - "min": false, - "show": true, - "total": false, - "values": false - }, - "lines": true, - "linewidth": 0, - "links": [ ], - "nullPointMode": "null as zero", - "percentage": false, - "pointradius": 5, - "points": false, - "renderer": "flot", - "seriesOverrides": [ ], - "spaceLength": 10, - "span": 6, - "stack": true, - "steppedLine": false, - "targets": [ - { - "expr": "sum by (pod) (prometheus_sd_discovered_targets{cluster=~\"$cluster\", namespace=~\"$namespace\", container=~\"$container\"})", - "format": "time_series", - "intervalFactor": 2, - "legendFormat": "{{pod}}", - "legendLink": null, - "step": 10 - } - ], - "thresholds": [ ], - "timeFrom": null, - "timeShift": null, - "title": "Targets", - "tooltip": { - "shared": true, - "sort": 2, - "value_type": "individual" - }, - "type": "graph", - "xaxis": { - "buckets": null, - "mode": "time", - "name": null, - "show": true, - "values": [ ] - }, - "yaxes": [ - { - "format": "short", - "label": null, - "logBase": 1, - "max": null, - "min": 0, - "show": true - }, - { - "format": "short", - "label": null, - "logBase": 1, - "max": null, - "min": null, - "show": false - } - ] - } - ], - "repeat": null, - "repeatIteration": null, - "repeatRowId": null, - "showTitle": true, - "title": "Prometheus Discovery", - "titleSize": "h6" - }, - { - "collapse": false, - "height": "250px", - "panels": [ - { - "aliasColors": { }, - "bars": false, - "dashLength": 10, - "dashes": false, - "datasource": "$datasource", - "fill": 1, - "id": 4, - "legend": { - "avg": false, - "current": false, - "max": false, - "min": false, - "show": true, - "total": false, - "values": false - }, - "lines": true, - "linewidth": 1, - "links": [ ], - "nullPointMode": "null as zero", - "percentage": false, - "pointradius": 5, - "points": false, - "renderer": "flot", - "seriesOverrides": [ ], - "spaceLength": 10, - "span": 4, - "stack": false, - "steppedLine": false, - "targets": [ - { - "expr": "rate(prometheus_target_interval_length_seconds_sum{cluster=~\"$cluster\", namespace=~\"$namespace\", container=~\"$container\"}[5m])\n/\nrate(prometheus_target_interval_length_seconds_count{cluster=~\"$cluster\", namespace=~\"$namespace\", container=~\"$container\"}[5m])\n* 1e3\n", - "format": "time_series", - "intervalFactor": 2, - "legendFormat": "{{pod}} {{interval}} configured", - "legendLink": null, - "step": 10 - } - ], - "thresholds": [ ], - "timeFrom": null, - "timeShift": null, - "title": "Average Scrape Interval Duration", - "tooltip": { - "shared": true, - "sort": 2, - 
"value_type": "individual" - }, - "type": "graph", - "xaxis": { - "buckets": null, - "mode": "time", - "name": null, - "show": true, - "values": [ ] - }, - "yaxes": [ - { - "format": "ms", - "label": null, - "logBase": 1, - "max": null, - "min": 0, - "show": true - }, - { - "format": "short", - "label": null, - "logBase": 1, - "max": null, - "min": null, - "show": false - } - ] - }, - { - "aliasColors": { }, - "bars": false, - "dashLength": 10, - "dashes": false, - "datasource": "$datasource", - "fill": 10, - "id": 5, - "legend": { - "avg": false, - "current": false, - "max": false, - "min": false, - "show": true, - "total": false, - "values": false - }, - "lines": true, - "linewidth": 0, - "links": [ ], - "nullPointMode": "null as zero", - "percentage": false, - "pointradius": 5, - "points": false, - "renderer": "flot", - "seriesOverrides": [ ], - "spaceLength": 10, - "span": 4, - "stack": true, - "steppedLine": false, - "targets": [ - { - "expr": "sum by (job) (rate(prometheus_target_scrapes_exceeded_sample_limit_total{cluster=~\"$cluster\", namespace=~\"$namespace\", container=~\"$container\"}[1m]))", - "format": "time_series", - "intervalFactor": 2, - "legendFormat": "exceeded sample limit: {{job}}", - "legendLink": null, - "step": 10 - }, - { - "expr": "sum by (job) (rate(prometheus_target_scrapes_sample_duplicate_timestamp_total{cluster=~\"$cluster\", namespace=~\"$namespace\", container=~\"$container\"}[1m]))", - "format": "time_series", - "intervalFactor": 2, - "legendFormat": "duplicate timestamp: {{job}}", - "legendLink": null, - "step": 10 - }, - { - "expr": "sum by (job) (rate(prometheus_target_scrapes_sample_out_of_bounds_total{cluster=~\"$cluster\", namespace=~\"$namespace\", container=~\"$container\"}[1m]))", - "format": "time_series", - "intervalFactor": 2, - "legendFormat": "out of bounds: {{job}}", - "legendLink": null, - "step": 10 - }, - { - "expr": "sum by (job) (rate(prometheus_target_scrapes_sample_out_of_order_total{cluster=~\"$cluster\", namespace=~\"$namespace\", container=~\"$container\"}[1m]))", - "format": "time_series", - "intervalFactor": 2, - "legendFormat": "out of order: {{job}}", - "legendLink": null, - "step": 10 - } - ], - "thresholds": [ ], - "timeFrom": null, - "timeShift": null, - "title": "Scrape failures", - "tooltip": { - "shared": true, - "sort": 2, - "value_type": "individual" - }, - "type": "graph", - "xaxis": { - "buckets": null, - "mode": "time", - "name": null, - "show": true, - "values": [ ] - }, - "yaxes": [ - { - "format": "short", - "label": null, - "logBase": 1, - "max": null, - "min": 0, - "show": true - }, - { - "format": "short", - "label": null, - "logBase": 1, - "max": null, - "min": null, - "show": false - } - ] - }, - { - "aliasColors": { }, - "bars": false, - "dashLength": 10, - "dashes": false, - "datasource": "$datasource", - "fill": 10, - "id": 6, - "legend": { - "avg": false, - "current": false, - "max": false, - "min": false, - "show": true, - "total": false, - "values": false - }, - "lines": true, - "linewidth": 0, - "links": [ ], - "nullPointMode": "null as zero", - "percentage": false, - "pointradius": 5, - "points": false, - "renderer": "flot", - "seriesOverrides": [ ], - "spaceLength": 10, - "span": 4, - "stack": true, - "steppedLine": false, - "targets": [ - { - "expr": "sum by (job, instance_group_name) (rate(agent_wal_samples_appended_total{cluster=~\"$cluster\", namespace=~\"$namespace\", container=~\"$container\"}[5m]))", - "format": "time_series", - "intervalFactor": 2, - "legendFormat": "{{job}} 
{{instance_group_name}}", - "legendLink": null, - "step": 10 - } - ], - "thresholds": [ ], - "timeFrom": null, - "timeShift": null, - "title": "Appended Samples", - "tooltip": { - "shared": true, - "sort": 2, - "value_type": "individual" - }, - "type": "graph", - "xaxis": { - "buckets": null, - "mode": "time", - "name": null, - "show": true, - "values": [ ] - }, - "yaxes": [ - { - "format": "short", - "label": null, - "logBase": 1, - "max": null, - "min": 0, - "show": true - }, - { - "format": "short", - "label": null, - "logBase": 1, - "max": null, - "min": null, - "show": false - } - ] - } - ], - "repeat": null, - "repeatIteration": null, - "repeatRowId": null, - "showTitle": true, - "title": "Prometheus Retrieval", - "titleSize": "h6" - } - ], - "schemaVersion": 14, - "style": "dark", - "tags": [ - "grafana-agent-mixin" - ], - "templating": { - "list": [ - { - "current": { - "text": "default", - "value": "default" - }, - "hide": 0, - "label": "Data Source", - "name": "datasource", - "options": [ ], - "query": "prometheus", - "refresh": 1, - "regex": "", - "type": "datasource" - }, - { - "allValue": ".+", - "current": { - "selected": true, - "text": "All", - "value": "$__all" - }, - "datasource": "$datasource", - "hide": 0, - "includeAll": true, - "label": "cluster", - "multi": true, - "name": "cluster", - "options": [ ], - "query": "label_values(agent_build_info, cluster)", - "refresh": 1, - "regex": "", - "sort": 2, - "tagValuesQuery": "", - "tags": [ ], - "tagsQuery": "", - "type": "query", - "useTags": false - }, - { - "allValue": ".+", - "current": { - "selected": true, - "text": "All", - "value": "$__all" - }, - "datasource": "$datasource", - "hide": 0, - "includeAll": true, - "label": "namespace", - "multi": true, - "name": "namespace", - "options": [ ], - "query": "label_values(agent_build_info, namespace)", - "refresh": 1, - "regex": "", - "sort": 2, - "tagValuesQuery": "", - "tags": [ ], - "tagsQuery": "", - "type": "query", - "useTags": false - }, - { - "allValue": ".+", - "current": { - "selected": true, - "text": "All", - "value": "$__all" - }, - "datasource": "$datasource", - "hide": 0, - "includeAll": true, - "label": "container", - "multi": true, - "name": "container", - "options": [ ], - "query": "label_values(agent_build_info, container)", - "refresh": 1, - "regex": "", - "sort": 2, - "tagValuesQuery": "", - "tags": [ ], - "tagsQuery": "", - "type": "query", - "useTags": false - }, - { - "allValue": "grafana-agent-.*", - "current": { - "selected": true, - "text": "All", - "value": "$__all" - }, - "datasource": "$datasource", - "hide": 0, - "includeAll": true, - "label": "pod", - "multi": true, - "name": "pod", - "options": [ ], - "query": "label_values(agent_build_info{container=~\"$container\"}, pod)", - "refresh": 1, - "regex": "", - "sort": 2, - "tagValuesQuery": "", - "tags": [ ], - "tagsQuery": "", - "type": "query", - "useTags": false - } - ] - }, - "time": { - "from": "now-1h", - "to": "now" - }, - "timepicker": { - "refresh_intervals": [ - "5s", - "10s", - "30s", - "1m", - "5m", - "15m", - "30m", - "1h", - "2h", - "1d" - ], - "time_options": [ - "5m", - "15m", - "1h", - "6h", - "12h", - "24h", - "2d", - "7d", - "30d" - ] - }, - "timezone": "", - "title": "Agent", - "uid": "", - "version": 0 -} diff --git a/example/docker-compose/grafana/dashboards/template.jsonnet b/example/docker-compose/grafana/dashboards/template.jsonnet deleted file mode 100644 index 157860f0a3..0000000000 --- a/example/docker-compose/grafana/dashboards/template.jsonnet +++ /dev/null @@ 
-1,14 +0,0 @@ -local agentDashboards = import 'agent-static-mixin/dashboards.libsonnet'; -local agentDebugging = import 'agent-static-mixin/debugging.libsonnet'; - -local result = agentDashboards + agentDebugging { - files: { - [name]: $.grafanaDashboards[name] { - // Use local timezone for local testing - timezone: '', - } - for name in std.objectFields($.grafanaDashboards) - }, -}; - -result.files diff --git a/example/docker-compose/grafana/datasources/datasource.yml b/example/docker-compose/grafana/datasources/datasource.yml deleted file mode 100644 index 0a1efc20bc..0000000000 --- a/example/docker-compose/grafana/datasources/datasource.yml +++ /dev/null @@ -1,42 +0,0 @@ -apiVersion: 1 - -deleteDatasources: - - name: Mimir - -datasources: -- name: Mimir - type: prometheus - access: proxy - orgId: 1 - url: http://mimir:9009/prometheus - basicAuth: false - isDefault: false - version: 1 - editable: false -- name: Tempo - type: tempo - access: proxy - orgId: 1 - url: http://tempo:3200 - basicAuth: false - isDefault: false - version: 1 - editable: false - apiVersion: 1 - uid: tempo -- name: Loki - type: loki - access: proxy - orgId: 1 - url: http://loki:3100 - basicAuth: false - isDefault: false - version: 1 - editable: false - jsonData: - derivedFields: - - datasourceUid: tempo - matcherRegex: tid=(\w+) - name: TraceID - url: $${__value.raw} - diff --git a/example/docker-compose/jsonnetfile.json b/example/docker-compose/jsonnetfile.json deleted file mode 100644 index bee24c1dcf..0000000000 --- a/example/docker-compose/jsonnetfile.json +++ /dev/null @@ -1,14 +0,0 @@ -{ - "version": 1, - "dependencies": [ - { - "source": { - "local": { - "directory": "../../operations/agent-static-mixin" - } - }, - "version": "" - } - ], - "legacyImports": true -} diff --git a/example/docker-compose/jsonnetfile.lock.json b/example/docker-compose/jsonnetfile.lock.json deleted file mode 100644 index 463fc7a677..0000000000 --- a/example/docker-compose/jsonnetfile.lock.json +++ /dev/null @@ -1,34 +0,0 @@ -{ - "version": 1, - "dependencies": [ - { - "source": { - "git": { - "remote": "https://github.com/grafana/grafonnet-lib.git", - "subdir": "grafonnet" - } - }, - "version": "3626fc4dc2326931c530861ac5bebe39444f6cbf", - "sum": "gF8foHByYcB25jcUOBqP6jxk0OPifQMjPvKY0HaCk6w=" - }, - { - "source": { - "git": { - "remote": "https://github.com/grafana/jsonnet-libs.git", - "subdir": "grafana-builder" - } - }, - "version": "4452566af0a58f25cda10b3e568fac979fda85c3", - "sum": "0KkygBQd/AFzUvVzezE4qF/uDYgrwUXVpZfINBti0oc=" - }, - { - "source": { - "local": { - "directory": "../../operations/agent-static-mixin" - } - }, - "version": "" - } - ], - "legacyImports": false -} diff --git a/example/docker-compose/mimir/config/mimir.yaml b/example/docker-compose/mimir/config/mimir.yaml deleted file mode 100644 index e48447f24c..0000000000 --- a/example/docker-compose/mimir/config/mimir.yaml +++ /dev/null @@ -1,63 +0,0 @@ -# Do not use this configuration in production. -# It is for demonstration purposes only. -multitenancy_enabled: false - -activity_tracker: {} - -alertmanager: {} - -alertmanager_storage: - backend: local - -server: - http_listen_port: 9009 - - # Configure the server to allow messages up to 100MB. 
- grpc_server_max_recv_msg_size: 104857600 - grpc_server_max_send_msg_size: 104857600 - grpc_server_max_concurrent_streams: 1000 - -distributor: - pool: - health_check_ingesters: true - -ingester_client: - grpc_client_config: - grpc_compression: gzip - max_recv_msg_size: 104857600 - max_send_msg_size: 104857600 - -ingester: - ring: - final_sleep: 0s - kvstore: - store: inmemory - min_ready_duration: 0s - num_tokens: 512 - replication_factor: 1 - -blocks_storage: - backend: filesystem - bucket_store: - sync_dir: /tmp/mimir/tsdb-sync - filesystem: - dir: /tmp/mimir/blocks - tsdb: - dir: /tmp/mimir/tsdb - -compactor: - sharding_ring: - kvstore: - store: inmemory - -ruler: - enable_api: true - -ruler_storage: - backend: filesystem - local: - directory: /tmp/mimir/rules - -limits: - ingestion_burst_size: 500000 - ingestion_rate: 250000 diff --git a/internal/cmd/integration-tests/utils.go b/internal/cmd/integration-tests/utils.go index a7d94650a7..4b4291f3a2 100644 --- a/internal/cmd/integration-tests/utils.go +++ b/internal/cmd/integration-tests/utils.go @@ -12,7 +12,7 @@ import ( ) const ( - agentBinaryPath = "../../../../../build/grafana-agent-flow" + agentBinaryPath = "../../../../../build/grafana-agent" ) type TestLog struct { @@ -34,7 +34,7 @@ func executeCommand(command string, args []string, taskDescription string) { } func buildAgent() { - executeCommand("make", []string{"-C", "../../..", "agent-flow"}, "Building agent") + executeCommand("make", []string{"-C", "../../..", "agent"}, "Building agent") } func setupEnvironment() { diff --git a/internal/static/operator/apis/monitoring/doc.go b/internal/static/operator/apis/monitoring/doc.go deleted file mode 100644 index ec1cd225ee..0000000000 --- a/internal/static/operator/apis/monitoring/doc.go +++ /dev/null @@ -1,4 +0,0 @@ -// File needed for gen-crd-api-reference-docs tool -// to generate CRD docs reference. - -package monitoring diff --git a/internal/static/operator/apis/monitoring/v1alpha1/deployment.go b/internal/static/operator/apis/monitoring/v1alpha1/deployment.go deleted file mode 100644 index 0b8d2d1503..0000000000 --- a/internal/static/operator/apis/monitoring/v1alpha1/deployment.go +++ /dev/null @@ -1,54 +0,0 @@ -package v1alpha1 - -import ( - "github.com/grafana/agent/internal/static/operator/assets" - promv1 "github.com/prometheus-operator/prometheus-operator/pkg/apis/monitoring/v1" -) - -// +genclient - -// Deployment is a set of discovered resources relative to a GrafanaAgent. The -// tree of resources contained in a Deployment form the resource hierarchy used -// for reconciling a GrafanaAgent. -type Deployment struct { - // Root resource in the deployment. - Agent *GrafanaAgent - // Metrics resources discovered by Agent. - Metrics []MetricsDeployment - // Logs resources discovered by Agent. - Logs []LogsDeployment - // Integrations resources discovered by Agent. - Integrations []IntegrationsDeployment - // The full list of Secrets referenced by resources in the Deployment. - Secrets assets.SecretStore -} - -// +genclient - -// MetricsDeployment is a set of discovered resources relative to a -// MetricsInstance. -type MetricsDeployment struct { - Instance *MetricsInstance - ServiceMonitors []*promv1.ServiceMonitor - PodMonitors []*promv1.PodMonitor - Probes []*promv1.Probe -} - -// +genclient - -// LogsDeployment is a set of discovered resources relative to a LogsInstance. 
-type LogsDeployment struct { - Instance *LogsInstance - PodLogs []*PodLogs -} - -// +genclient - -// IntegrationsDeployment is a set of discovered resources relative to an -// IntegrationsDeployment. -type IntegrationsDeployment struct { - Instance *Integration - - // NOTE(rfratto): Integration doesn't have any children resources, but we - // define a *Deployment type for consistency with Metrics and Logs. -} diff --git a/internal/static/operator/apis/monitoring/v1alpha1/doc.go b/internal/static/operator/apis/monitoring/v1alpha1/doc.go deleted file mode 100644 index 5fcb427f30..0000000000 --- a/internal/static/operator/apis/monitoring/v1alpha1/doc.go +++ /dev/null @@ -1,7 +0,0 @@ -// File needed for gen-crd-api-reference-docs tool -// to generate CRD docs reference. - -// +kubebuilder:object:generate=true -// +groupName=monitoring.grafana.com - -package v1alpha1 diff --git a/internal/static/operator/apis/monitoring/v1alpha1/group.go b/internal/static/operator/apis/monitoring/v1alpha1/group.go deleted file mode 100644 index f66ff4833d..0000000000 --- a/internal/static/operator/apis/monitoring/v1alpha1/group.go +++ /dev/null @@ -1,36 +0,0 @@ -// +kubebuilder:object:generate=true -// +groupName=monitoring.grafana.com - -package v1alpha1 - -import ( - "k8s.io/apimachinery/pkg/runtime/schema" - "sigs.k8s.io/controller-runtime/pkg/scheme" -) - -var ( - // SchemeGroupVersion is the group version used to register CRDs for this - // package. - SchemeGroupVersion = schema.GroupVersion{Group: "monitoring.grafana.com", Version: "v1alpha1"} - - // SchemeBuilder is used to add Go types to the GroupVersionKind scheme. - SchemeBuilder = &scheme.Builder{GroupVersion: SchemeGroupVersion} - - // AddToScheme is required by client packages. - AddToScheme = SchemeBuilder.AddToScheme -) - -func init() { - SchemeBuilder.Register( - &GrafanaAgent{}, - &GrafanaAgentList{}, - &MetricsInstance{}, - &MetricsInstanceList{}, - &LogsInstance{}, - &LogsInstanceList{}, - &PodLogs{}, - &PodLogsList{}, - &Integration{}, - &IntegrationList{}, - ) -} diff --git a/internal/static/operator/apis/monitoring/v1alpha1/types.go b/internal/static/operator/apis/monitoring/v1alpha1/types.go deleted file mode 100644 index 5b54b74f3a..0000000000 --- a/internal/static/operator/apis/monitoring/v1alpha1/types.go +++ /dev/null @@ -1,206 +0,0 @@ -package v1alpha1 - -import ( - prom_v1 "github.com/prometheus-operator/prometheus-operator/pkg/apis/monitoring/v1" - v1 "k8s.io/api/core/v1" - metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" - "sigs.k8s.io/controller-runtime/pkg/client" -) - -// +genclient -// +kubebuilder:object:root=true -// +kubebuilder:resource:path="grafanaagents" -// +kubebuilder:resource:singular="grafanaagent" -// +kubebuilder:resource:categories="agent-operator" - -// GrafanaAgent defines a Grafana Agent deployment. -type GrafanaAgent struct { - metav1.TypeMeta `json:",inline"` - metav1.ObjectMeta `json:"metadata,omitempty"` - - // Spec holds the specification of the desired behavior for the Grafana Agent - // cluster. - Spec GrafanaAgentSpec `json:"spec,omitempty"` -} - -// MetricsInstanceSelector returns a selector to find MetricsInstances. -func (a *GrafanaAgent) MetricsInstanceSelector() ObjectSelector { - return ObjectSelector{ - ObjectType: &MetricsInstance{}, - ParentNamespace: a.Namespace, - NamespaceSelector: a.Spec.Metrics.InstanceNamespaceSelector, - Labels: a.Spec.Metrics.InstanceSelector, - } -} - -// LogsInstanceSelector returns a selector to find LogsInstances. 
-func (a *GrafanaAgent) LogsInstanceSelector() ObjectSelector { - return ObjectSelector{ - ObjectType: &LogsInstance{}, - ParentNamespace: a.Namespace, - NamespaceSelector: a.Spec.Logs.InstanceNamespaceSelector, - Labels: a.Spec.Logs.InstanceSelector, - } -} - -// IntegrationsSelector returns a selector to find Integrations. -func (a *GrafanaAgent) IntegrationsSelector() ObjectSelector { - return ObjectSelector{ - ObjectType: &Integration{}, - ParentNamespace: a.Namespace, - NamespaceSelector: a.Spec.Integrations.NamespaceSelector, - Labels: a.Spec.Integrations.Selector, - } -} - -// +kubebuilder:object:root=true - -// GrafanaAgentList is a list of GrafanaAgents. -type GrafanaAgentList struct { - metav1.TypeMeta `json:",inline"` - metav1.ListMeta `json:"metadata,omitempty"` - // Items is the list of GrafanaAgents. - Items []*GrafanaAgent `json:"items"` -} - -// GrafanaAgentSpec is a specification of the desired behavior of the Grafana -// Agent cluster. -type GrafanaAgentSpec struct { - // LogLevel controls the log level of the generated pods. Defaults to "info" if not set. - LogLevel string `json:"logLevel,omitempty"` - // LogFormat controls the logging format of the generated pods. Defaults to "logfmt" if not set. - LogFormat string `json:"logFormat,omitempty"` - // APIServerConfig lets you specify a host and auth methods to access the - // Kubernetes API server. If left empty, the Agent assumes that it is - // running inside of the cluster and will discover API servers automatically - // and use the pod's CA certificate and bearer token file at - // /var/run/secrets/kubernetes.io/serviceaccount. - APIServerConfig *prom_v1.APIServerConfig `json:"apiServer,omitempty"` - // PodMetadata configures Labels and Annotations which are propagated to - // created Grafana Agent pods. - PodMetadata *prom_v1.EmbeddedObjectMetadata `json:"podMetadata,omitempty"` - // Version of Grafana Agent to be deployed. - Version string `json:"version,omitempty"` - // Paused prevents actions except for deletion to be performed on the - // underlying managed objects. - Paused bool `json:"paused,omitempty"` - // Image, when specified, overrides the image used to run Agent. Specify - // the image along with a tag. You still need to set the version to ensure - // Grafana Agent Operator knows which version of Grafana Agent is being - // configured. - Image *string `json:"image,omitempty"` - - // Version of Config Reloader to be deployed. - ConfigReloaderVersion string `json:"configReloaderVersion,omitempty"` - // Image, when specified, overrides the image used to run Config Reloader. Specify - // the image along with a tag. You still need to set the version to ensure - // Grafana Agent Operator knows which version of Grafana Agent is being - // configured. - ConfigReloaderImage *string `json:"configReloaderImage,omitempty"` - - // ImagePullSecrets holds an optional list of references to Secrets within - // the same namespace used for pulling the Grafana Agent image from - // registries. - // More info: https://kubernetes.io/docs/concepts/containers/images/#specifying-imagepullsecrets-on-a-pod - ImagePullSecrets []v1.LocalObjectReference `json:"imagePullSecrets,omitempty"` - // Storage spec to specify how storage will be used. - Storage *prom_v1.StorageSpec `json:"storage,omitempty"` - // Volumes allows configuration of additional volumes on the output - // StatefulSet definition. The volumes specified are appended to other - // volumes that are generated as a result of StorageSpec objects. 
- Volumes []v1.Volume `json:"volumes,omitempty"` - // VolumeMounts lets you configure additional VolumeMounts on the output - // StatefulSet definition. Specified VolumeMounts are appended to other - // VolumeMounts generated as a result of StorageSpec objects - // in the Grafana Agent container. - VolumeMounts []v1.VolumeMount `json:"volumeMounts,omitempty"` - // Resources holds requests and limits for individual pods. - Resources v1.ResourceRequirements `json:"resources,omitempty"` - // NodeSelector defines which nodes pods should be scheduling on. - NodeSelector map[string]string `json:"nodeSelector,omitempty"` - // ServiceAccountName is the name of the ServiceAccount to use for running Grafana Agent pods. - ServiceAccountName string `json:"serviceAccountName,omitempty"` - // Secrets is a list of secrets in the same namespace as the GrafanaAgent - // object which will be mounted into each running Grafana Agent pod. - // The secrets are mounted into /var/lib/grafana-agent/extra-secrets/. - Secrets []string `json:"secrets,omitempty"` - // ConfigMaps is a list of config maps in the same namespace as the - // GrafanaAgent object which will be mounted into each running Grafana Agent - // pod. - // The ConfigMaps are mounted into /var/lib/grafana-agent/extra-configmaps/. - ConfigMaps []string `json:"configMaps,omitempty"` - // Affinity, if specified, controls pod scheduling constraints. - Affinity *v1.Affinity `json:"affinity,omitempty"` - // Tolerations, if specified, controls the pod's tolerations. - Tolerations []v1.Toleration `json:"tolerations,omitempty"` - // TopologySpreadConstraints, if specified, controls the pod's topology spread constraints. - TopologySpreadConstraints []v1.TopologySpreadConstraint `json:"topologySpreadConstraints,omitempty"` - // SecurityContext holds pod-level security attributes and common container - // settings. When unspecified, defaults to the default PodSecurityContext. - SecurityContext *v1.PodSecurityContext `json:"securityContext,omitempty"` - // Containers lets you inject additional containers or modify operator-generated - // containers. This can be used to add an authentication - // proxy to a Grafana Agent pod or to change the behavior of an - // operator-generated container. Containers described here modify an - // operator-generated container if they share the same name and if modifications are done - // via a strategic merge patch. The current container names are: - // `grafana-agent` and `config-reloader`. Overriding containers is entirely - // outside the scope of what the Grafana Agent team supports and by doing - // so, you accept that this behavior may break at any time without notice. - Containers []v1.Container `json:"containers,omitempty"` - // InitContainers let you add initContainers to the pod definition. These - // can be used to, for example, fetch secrets for injection into the Grafana - // Agent configuration from external sources. Errors during the execution - // of an initContainer cause the pod to restart. - // More info: https://kubernetes.io/docs/concepts/workloads/pods/init-containers/ - // Using initContainers for any use case other than secret fetching is - // entirely outside the scope of what the Grafana Agent maintainers - // support and by doing so, you accept that this behavior may break at any - // time without notice. - InitContainers []v1.Container `json:"initContainers,omitempty"` - // PriorityClassName is the priority class assigned to pods. 
- PriorityClassName string `json:"priorityClassName,omitempty"`
-
- // RuntimeClassName is the runtime class assigned to pods.
- RuntimeClassName *string `json:"runtimeClassName,omitempty"`
-
- // Port name used for the pods and governing service. This defaults to agent-metrics.
- PortName string `json:"portName,omitempty"`
-
- // Metrics controls the metrics subsystem of the Agent and settings
- // unique to metrics-specific pods that are deployed.
- Metrics MetricsSubsystemSpec `json:"metrics,omitempty"`
-
- // Logs controls the logging subsystem of the Agent and settings unique to
- // logging-specific pods that are deployed.
- Logs LogsSubsystemSpec `json:"logs,omitempty"`
-
- // Integrations controls the integration subsystem of the Agent and settings
- // unique to deployed integration-specific pods.
- Integrations IntegrationsSubsystemSpec `json:"integrations,omitempty"`
-
- // enableConfigReadAPI enables the read API for viewing the currently running
- // config on port 8080 of the agent.
- // +kubebuilder:default=false
- EnableConfigReadAPI bool `json:"enableConfigReadAPI,omitempty"`
-
- // disableReporting disables reporting of enabled feature flags to Grafana.
- // +kubebuilder:default=false
- DisableReporting bool `json:"disableReporting,omitempty"`
-
- // disableSupportBundle disables the generation of support bundles.
- // +kubebuilder:default=false
- DisableSupportBundle bool `json:"disableSupportBundle,omitempty"`
-}
-
-// +kubebuilder:object:generate=false
-
-// ObjectSelector is a set of selectors to use for finding an object in the
-// resource hierarchy. When NamespaceSelector is nil, search for objects
-// directly in the ParentNamespace.
-type ObjectSelector struct {
- ObjectType client.Object
- ParentNamespace string
- NamespaceSelector *metav1.LabelSelector
- Labels *metav1.LabelSelector
-}
diff --git a/internal/static/operator/apis/monitoring/v1alpha1/types_integrations.go b/internal/static/operator/apis/monitoring/v1alpha1/types_integrations.go
deleted file mode 100644
index 9a6035b41e..0000000000
--- a/internal/static/operator/apis/monitoring/v1alpha1/types_integrations.go
+++ /dev/null
@@ -1,134 +0,0 @@
-package v1alpha1
-
-import (
- corev1 "k8s.io/api/core/v1"
- apiextv1 "k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1"
- metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
-)
-
-// IntegrationsSubsystemSpec defines global settings to apply across the
-// integrations subsystem.
-type IntegrationsSubsystemSpec struct {
- // Label selector to find Integration resources to run. When nil, no
- // integration resources will be defined.
- Selector *metav1.LabelSelector `json:"selector,omitempty"`
-
- // Label selector for namespaces to search when discovering integration
- // resources. If nil, integration resources are only discovered in the
- // namespace of the GrafanaAgent resource.
- //
- // Set to `{}` to search all namespaces.
- NamespaceSelector *metav1.LabelSelector `json:"namespaceSelector,omitempty"`
-}
-
-// +kubebuilder:object:root=true
-// +kubebuilder:resource:path="integrations"
-// +kubebuilder:resource:singular="integration"
-// +kubebuilder:resource:categories="agent-operator"
-
-// Integration runs a single Grafana Agent integration. Integrations that
-// generate telemetry must be configured to send that telemetry somewhere, such
-// as autoscrape for exporter-based integrations.
-//
-// Integrations have access to the LogsInstances and MetricsInstances in the
-// same GrafanaAgent resource set, referenced by the <namespace>/<name> of the
-// Instance resource. 
-//
-// For example, if there is a default/production MetricsInstance, you can
-// configure a supported integration's autoscrape block with:
-//
-// autoscrape:
-// enable: true
-// metrics_instance: default/production
-//
-// There is currently no way for telemetry created by an Operator-managed
-// integration to be collected from outside of the integration itself.
-type Integration struct {
- metav1.TypeMeta `json:",inline"`
- metav1.ObjectMeta `json:"metadata,omitempty"`
-
- // Specifies the desired behavior of the Integration.
- Spec IntegrationSpec `json:"spec,omitempty"`
-}
-
-// IntegrationSpec specifies the desired behavior of a metrics
-// integration.
-type IntegrationSpec struct {
- // Name of the integration to run (e.g., "node_exporter", "mysqld_exporter").
- Name string `json:"name"`
-
- // Type informs Grafana Agent Operator about how to manage the integration being
- // configured.
- Type IntegrationType `json:"type"`
-
- // +kubebuilder:validation:Type=object
-
- // The configuration for the named integration. Note that Integrations are
- // deployed with the integrations-next feature flag, which has different
- // common settings:
- //
- // https://grafana.com/docs/agent/latest/configuration/integrations/integrations-next/
- Config apiextv1.JSON `json:"config"`
-
- // An extra list of Volumes to be associated with the Grafana Agent pods
- // running this integration. Volume names are mutated to be unique across
- // all Integrations. Note that the specified volumes should be able to
- // tolerate existing on multiple pods at once when type is daemonset.
- //
- // Don't use volumes for loading Secrets or ConfigMaps from the same namespace
- // as the Integration; use the Secrets and ConfigMaps fields instead.
- Volumes []corev1.Volume `json:"volumes,omitempty"`
-
- // An extra list of VolumeMounts to be associated with the Grafana Agent pods
- // running this integration. VolumeMount names are mutated to be unique
- // across all used IntegrationSpecs.
- //
- // Mount paths should include the namespace/name of the Integration CR to
- // avoid potentially colliding with other resources.
- VolumeMounts []corev1.VolumeMount `json:"volumeMounts,omitempty"`
-
- // An extra list of keys from Secrets in the same namespace as the
- // Integration which will be mounted into the Grafana Agent pod running this
- // Integration.
- //
- // Secrets will be mounted at
- // /etc/grafana-agent/integrations/secrets/<namespace>/<name>/<key>.
- Secrets []corev1.SecretKeySelector `json:"secrets,omitempty"`
-
- // An extra list of keys from ConfigMaps in the same namespace as the
- // Integration which will be mounted into the Grafana Agent pod running this
- // Integration.
- //
- // ConfigMaps are mounted at
- // /etc/grafana-agent/integrations/configMaps/<namespace>/<name>/<key>.
- ConfigMaps []corev1.ConfigMapKeySelector `json:"configMaps,omitempty"`
-}
-
-// IntegrationType determines specific behaviors of a configured integration.
-type IntegrationType struct {
- // +kubebuilder:validation:Optional
-
- // When true, the configured integration should be run on every Node in the
- // cluster. This is required for Integrations that generate Node-specific
- // metrics like node_exporter, otherwise it must be false to avoid generating
- // duplicate metrics.
- AllNodes bool `json:"allNodes"`
-
- // +kubebuilder:validation:Optional
-
- // Whether this integration can only be defined once for a Grafana Agent
- // process, such as statsd_exporter. 
It is invalid for a GrafanaAgent to
- // discover multiple unique Integrations with the same Integration name
- // (i.e., a single GrafanaAgent cannot deploy two statsd_exporters).
- Unique bool `json:"unique"`
-}
-
-// +kubebuilder:object:root=true
-
-// IntegrationList is a list of Integration.
-type IntegrationList struct {
- metav1.TypeMeta `json:",inline"`
- metav1.ListMeta `json:"metadata,omitempty"`
- // Items is the list of Integration.
- Items []*Integration `json:"items"`
-}
diff --git a/internal/static/operator/apis/monitoring/v1alpha1/types_logs.go b/internal/static/operator/apis/monitoring/v1alpha1/types_logs.go
deleted file mode 100644
index ae8de88ffd..0000000000
--- a/internal/static/operator/apis/monitoring/v1alpha1/types_logs.go
+++ /dev/null
@@ -1,584 +0,0 @@
-package v1alpha1
-
-import (
- prom_v1 "github.com/prometheus-operator/prometheus-operator/pkg/apis/monitoring/v1"
- v1 "k8s.io/api/core/v1"
- metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
-)
-
-// LogsSubsystemSpec defines global settings to apply across the logging
-// subsystem.
-type LogsSubsystemSpec struct {
- // A global set of clients to use when a discovered LogsInstance does not
- // have any clients defined.
- Clients []LogsClientSpec `json:"clients,omitempty"`
- // LogsExternalLabelName is the name of the external label used to
- // denote Grafana Agent cluster. Defaults to "cluster". The external label is
- // _not_ added when the value is set to the empty string.
- LogsExternalLabelName *string `json:"logsExternalLabelName,omitempty"`
- // InstanceSelector determines which LogsInstances should be selected
- // for running. Each instance runs its own set of Prometheus components,
- // including service discovery, scraping, and remote_write.
- InstanceSelector *metav1.LabelSelector `json:"instanceSelector,omitempty"`
- // InstanceNamespaceSelector is the set of labels used to determine which
- // namespaces to watch for LogsInstances. If not provided, it only checks its
- // own namespace.
- InstanceNamespaceSelector *metav1.LabelSelector `json:"instanceNamespaceSelector,omitempty"`
-
- // IgnoreNamespaceSelectors, if true, will ignore NamespaceSelector settings
- // from the PodLogs configs, and they will only discover endpoints within
- // their current namespace.
- IgnoreNamespaceSelectors bool `json:"ignoreNamespaceSelectors,omitempty"`
- // EnforcedNamespaceLabel enforces adding a namespace label of origin for
- // each metric that is user-created. The label value will always be the
- // namespace of the object that is being created.
- EnforcedNamespaceLabel string `json:"enforcedNamespaceLabel,omitempty"`
-}
-
-// LogsClientSpec defines the client integration for logs, indicating which
-// Loki server to send logs to.
-type LogsClientSpec struct {
- // URL is the URL where Loki is listening. Must be a full HTTP URL, including
- // protocol. Required.
- // Example: https://logs-prod-us-central1.grafana.net/loki/api/v1/push.
- URL string `json:"url"`
- // Tenant ID used by default to push logs to Loki. If omitted, assumes the
- // remote Loki server is running in single-tenant mode or that an
- // authentication layer is used to inject an X-Scope-OrgID header.
- TenantID string `json:"tenantId,omitempty"`
- // Maximum amount of time to wait before sending a batch, even if that batch
- // isn't full.
- BatchWait string `json:"batchWait,omitempty"`
- // Maximum batch size (in bytes) of logs to accumulate before sending the
- // batch to Loki.
- BatchSize int `json:"batchSize,omitempty"`
- // BasicAuth for the Loki server. 
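
Putting the client fields above together, a hypothetical clients entry that also
uses the basicAuth field defined next might look like this (the Secret name
loki-credentials is made up):

    clients:
    - url: https://logs-prod-us-central1.grafana.net/loki/api/v1/push
      tenantId: "1234"
      batchWait: 1s
      basicAuth:
        username:
          name: loki-credentials
          key: username
        password:
          name: loki-credentials
          key: password
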
- BasicAuth *prom_v1.BasicAuth `json:"basicAuth,omitempty"`
- // Oauth2 for URL
- OAuth2 *prom_v1.OAuth2 `json:"oauth2,omitempty"`
- // BearerToken used for remote_write.
- BearerToken string `json:"bearerToken,omitempty"`
- // BearerTokenFile used to read bearer token.
- BearerTokenFile string `json:"bearerTokenFile,omitempty"`
- // ProxyURL to proxy requests through. Optional.
- ProxyURL string `json:"proxyUrl,omitempty"`
- // TLSConfig to use for the client. Only used when the protocol of the URL
- // is https.
- TLSConfig *prom_v1.TLSConfig `json:"tlsConfig,omitempty"`
- // Configures how to retry requests to Loki when a request fails.
- // Defaults to a minPeriod of 500ms, maxPeriod of 5m, and maxRetries of 10.
- BackoffConfig *LogsBackoffConfigSpec `json:"backoffConfig,omitempty"`
- // ExternalLabels are labels to add to any time series when sending data to
- // Loki.
- ExternalLabels map[string]string `json:"externalLabels,omitempty"`
- // Maximum time to wait for a server to respond to a request.
- Timeout string `json:"timeout,omitempty"`
-}
-
-// LogsBackoffConfigSpec configures timing for retrying failed requests.
-type LogsBackoffConfigSpec struct {
- // Initial backoff time between retries. Time between retries is
- // increased exponentially.
- MinPeriod string `json:"minPeriod,omitempty"`
- // Maximum backoff time between retries.
- MaxPeriod string `json:"maxPeriod,omitempty"`
- // Maximum number of retries to perform before giving up a request.
- MaxRetries int `json:"maxRetries,omitempty"`
-}
-
-// +kubebuilder:object:root=true
-// +kubebuilder:resource:path="logsinstances"
-// +kubebuilder:resource:singular="logsinstance"
-// +kubebuilder:resource:categories="agent-operator"
-
-// LogsInstance controls an individual logs instance within a Grafana Agent
-// deployment.
-type LogsInstance struct {
- metav1.TypeMeta `json:",inline"`
- metav1.ObjectMeta `json:"metadata,omitempty"`
-
- // Spec holds the specification of the desired behavior for the logs
- // instance.
- Spec LogsInstanceSpec `json:"spec,omitempty"`
-}
-
-// PodLogsSelector returns the selector to discover PodLogs.
-func (i *LogsInstance) PodLogsSelector() ObjectSelector {
- return ObjectSelector{
- ObjectType: &PodLogs{},
- ParentNamespace: i.Namespace,
- NamespaceSelector: i.Spec.PodLogsNamespaceSelector,
- Labels: i.Spec.PodLogsSelector,
- }
-}
-
-// LogsInstanceSpec controls how an individual instance will be used to
-// discover PodLogs.
-type LogsInstanceSpec struct {
- // Clients controls where logs are written to for this instance.
- Clients []LogsClientSpec `json:"clients,omitempty"`
-
- // Determines which PodLogs should be selected for inclusion in this
- // instance.
- PodLogsSelector *metav1.LabelSelector `json:"podLogsSelector,omitempty"`
- // Set of labels to determine which namespaces should be watched
- // for PodLogs. If not provided, it checks only the namespace of the
- // instance.
- PodLogsNamespaceSelector *metav1.LabelSelector `json:"podLogsNamespaceSelector,omitempty"`
-
- // AdditionalScrapeConfigs allows specifying a key of a Secret containing
- // additional Grafana Agent logging scrape configurations. Scrape
- // configurations specified are appended to the configurations generated by
- // the Grafana Agent Operator. 
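
For illustration, a hypothetical LogsInstance that combines a client, a PodLogs
selector, and a Secret-backed additional scrape config as described above (the
Secret name and key are made up):

    apiVersion: monitoring.grafana.com/v1alpha1
    kind: LogsInstance
    metadata:
      name: primary
      namespace: default
      labels:
        agent: grafana-agent
    spec:
      clients:
      - url: https://loki.example.com/loki/api/v1/push
      podLogsSelector:
        matchLabels:
          instance: primary
      additionalScrapeConfigs:
        name: logs-extra-scrape
        key: extra-scrape.yaml
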
- //
- // Job configurations specified must have the form specified in the
- // official Promtail documentation:
- //
- // https://grafana.com/docs/loki/latest/clients/promtail/configuration/#scrape_configs
- //
- // As scrape configs are appended, the user is responsible for making sure
- // they are valid. Note that using this feature may break future upgrades of
- // Grafana Agent. It is advised to review both Grafana Agent and
- // Promtail release notes to ensure that no incompatible scrape configs are
- // going to break Grafana Agent after the upgrade.
- AdditionalScrapeConfigs *v1.SecretKeySelector `json:"additionalScrapeConfigs,omitempty"`
-
- // Configures how tailed targets are watched.
- TargetConfig *LogsTargetConfigSpec `json:"targetConfig,omitempty"`
-}
-
-// LogsTargetConfigSpec configures how tailed targets are watched.
-type LogsTargetConfigSpec struct {
- // Period to resync directories being watched and files being tailed to discover
- // new ones or stop watching removed ones.
- SyncPeriod string `json:"syncPeriod,omitempty"`
-}
-
-// +kubebuilder:object:root=true
-
-// LogsInstanceList is a list of LogsInstance.
-type LogsInstanceList struct {
- metav1.TypeMeta `json:",inline"`
- metav1.ListMeta `json:"metadata,omitempty"`
- // Items is the list of LogsInstance.
- Items []*LogsInstance `json:"items"`
-}
-
-// +kubebuilder:object:root=true
-// +kubebuilder:resource:categories="agent-operator"
-
-// PodLogs defines how to collect logs for a pod.
-type PodLogs struct {
- metav1.TypeMeta `json:",inline"`
- metav1.ObjectMeta `json:"metadata,omitempty"`
-
- // Spec holds the specification of the desired behavior for the PodLogs.
- Spec PodLogsSpec `json:"spec,omitempty"`
-}
-
-// PodLogsSpec defines how to collect logs for a pod.
-type PodLogsSpec struct {
- // The label to use to retrieve the job name from.
- JobLabel string `json:"jobLabel,omitempty"`
- // PodTargetLabels transfers labels on the Kubernetes Pod onto the target.
- PodTargetLabels []string `json:"podTargetLabels,omitempty"`
- // Selector to select Pod objects. Required.
- Selector metav1.LabelSelector `json:"selector"`
- // Selector to select which namespaces the Pod objects are discovered from.
- NamespaceSelector prom_v1.NamespaceSelector `json:"namespaceSelector,omitempty"`
-
- // Pipeline stages for this pod. Pipeline stages support transforming and
- // filtering log lines.
- PipelineStages []*PipelineStageSpec `json:"pipelineStages,omitempty"`
-
- // RelabelConfigs to apply to logs before delivering.
- // Grafana Agent Operator automatically adds relabelings for a few standard
- // Kubernetes fields and replaces original scrape job name with
- // __tmp_logs_job_name.
- //
- // More info: https://grafana.com/docs/loki/latest/clients/promtail/configuration/#relabel_configs
- RelabelConfigs []*prom_v1.RelabelConfig `json:"relabelings,omitempty"`
-}
-
-// +kubebuilder:object:root=true
-
-// PodLogsList is a list of PodLogs.
-type PodLogsList struct {
- metav1.TypeMeta `json:",inline"`
- metav1.ListMeta `json:"metadata,omitempty"`
- // Items is the list of PodLogs.
- Items []*PodLogs `json:"items"`
-}
-
-// PipelineStageSpec defines an individual pipeline stage. Each stage type is
-// mutually exclusive and no more than one may be set per stage. 
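
Tying the PodLogsSpec above together, a hypothetical PodLogs resource that
selects application Pods and applies a single CRI parsing stage (names and
labels made up):

    apiVersion: monitoring.grafana.com/v1alpha1
    kind: PodLogs
    metadata:
      name: app-logs
      namespace: default
      labels:
        instance: primary
    spec:
      jobLabel: app.kubernetes.io/name
      selector:
        matchLabels:
          app.kubernetes.io/name: my-app
      pipelineStages:
      - cri: {}
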
-//
-// More information on pipelines can be found in the Promtail documentation:
-// https://grafana.com/docs/loki/latest/clients/promtail/pipelines/
-type PipelineStageSpec struct {
- // CRI is a parsing stage that reads log lines using the standard
- // CRI logging format. Supply cri: {} to enable.
- CRI *CRIStageSpec `json:"cri,omitempty"`
- // Docker is a parsing stage that reads log lines using the standard
- // Docker logging format. Supply docker: {} to enable.
- Docker *DockerStageSpec `json:"docker,omitempty"`
- // Drop is a filtering stage that lets you drop certain logs.
- Drop *DropStageSpec `json:"drop,omitempty"`
- // JSON is a parsing stage that reads the log line as JSON and accepts
- // JMESPath expressions to extract data.
- //
- // Information on JMESPath: http://jmespath.org/
- JSON *JSONStageSpec `json:"json,omitempty"`
- // LabelAllow is an action stage that only allows the provided labels to be
- // included in the label set that is sent to Loki with the log entry.
- LabelAllow []string `json:"labelAllow,omitempty"`
- // LabelDrop is an action stage that drops labels from the label set that
- // is sent to Loki with the log entry.
- LabelDrop []string `json:"labelDrop,omitempty"`
- // Labels is an action stage that takes data from the extracted map and
- // modifies the label set that is sent to Loki with the log entry.
- //
- // The key is REQUIRED and represents the name for the label that will
- // be created. Value is optional and will be the name from extracted data
- // to use for the value of the label. If the value is not provided, it
- // defaults to match the key.
- Labels map[string]string `json:"labels,omitempty"`
- // Limit is a rate-limiting stage that throttles logs based on
- // several options.
- Limit *LimitStageSpec `json:"limit,omitempty"`
- // Match is a filtering stage that conditionally applies a set of stages
- // or drops entries when a log entry matches a configurable LogQL stream
- // selector and filter expressions.
- Match *MatchStageSpec `json:"match,omitempty"`
- // Metrics is an action stage that supports defining and updating metrics
- // based on data from the extracted map. Created metrics are not pushed to
- // Loki or Prometheus and are instead exposed via the /metrics endpoint of
- // the Grafana Agent pod. The Grafana Agent Operator should be configured
- // with a MetricsInstance that discovers the logging DaemonSet to collect
- // metrics created by this stage.
- Metrics map[string]MetricsStageSpec `json:"metrics,omitempty"`
- // Multiline stage merges multiple lines into a multiline block before
- // passing it on to the next stage in the pipeline.
- Multiline *MultilineStageSpec `json:"multiline,omitempty"`
- // Output stage is an action stage that takes data from the extracted map and
- // changes the log line that will be sent to Loki.
- Output *OutputStageSpec `json:"output,omitempty"`
- // Pack is a transform stage that lets you embed extracted values and labels
- // into the log line by packing the log line and labels inside of a JSON
- // object.
- Pack *PackStageSpec `json:"pack,omitempty"`
- // Regex is a parsing stage that parses a log line using a regular
- // expression. Named capture groups in the regex allow for adding data into
- // the extracted map.
- Regex *RegexStageSpec `json:"regex,omitempty"`
- // Replace is a parsing stage that parses a log line using a regular
- // expression and replaces the log line. Named capture groups in the regex
- // allow for adding data into the extracted map. 
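
As a concrete sketch of chaining the stages above (each list entry sets exactly
one stage type), a pipeline that extracts fields with a regex, promotes one to
a label, and rewrites the log line might look like this; the capture-group
names are arbitrary:

    pipelineStages:
    - regex:
        expression: '.*level=(?P<level>\w+).*msg="(?P<message>[^"]*)"'
    - labels:
        level: ''
    - output:
        source: message
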
- Replace *ReplaceStageSpec `json:"replace,omitempty"`
- // Template is a transform stage that manipulates the values in the extracted
- // map using Go's template syntax.
- Template *TemplateStageSpec `json:"template,omitempty"`
- // Tenant is an action stage that sets the tenant ID for the log entry picking it from a
- // field in the extracted data map. If the field is missing, the default
- // LogsClientSpec.tenantId will be used.
- Tenant *TenantStageSpec `json:"tenant,omitempty"`
- // Timestamp is an action stage that can change the timestamp of a log line
- // before it is sent to Loki. If not present, the timestamp of a log line
- // defaults to the time when the log line was read.
- Timestamp *TimestampStageSpec `json:"timestamp,omitempty"`
-}
-
-// CRIStageSpec is a parsing stage that reads log lines using the standard CRI
-// logging format. It needs no defined fields.
-type CRIStageSpec struct{}
-
-// DockerStageSpec is a parsing stage that reads log lines using the standard
-// Docker logging format. It needs no defined fields.
-type DockerStageSpec struct{}
-
-// DropStageSpec is a filtering stage that lets you drop certain logs.
-type DropStageSpec struct {
- // Name from the extracted data to parse. If empty, uses the log message.
- Source string `json:"source,omitempty"`
-
- // RE2 regular expression.
- //
- // If source is provided, the regex attempts
- // to match the source.
- //
- // If no source is provided, then the regex attempts
- // to match the log line.
- //
- // If the provided regex matches the log line or a provided source, the
- // line is dropped.
- Expression string `json:"expression,omitempty"`
-
- // Value can only be specified when source is specified. If the value
- // provided is an exact match for the given source then the line will be
- // dropped.
- //
- // Mutually exclusive with expression.
- Value string `json:"value,omitempty"`
-
- // OlderThan will be parsed as a Go duration. If the log line's timestamp
- // is older than the current time minus the provided duration, it will be
- // dropped.
- OlderThan string `json:"olderThan,omitempty"`
-
- // LongerThan will drop a log line if its content is longer than this
- // value (in bytes). Can be expressed as an integer (8192) or a number with a
- // suffix (8kb).
- LongerThan string `json:"longerThan,omitempty"`
-
- // Every time a log line is dropped, the metric logentry_dropped_lines_total
- // is incremented. A "reason" label is added, and can be customized by
- // providing a custom value here. Defaults to "drop_stage".
- DropCounterReason string `json:"dropCounterReason,omitempty"`
-}
-
-// JSONStageSpec is a parsing stage that reads the log line as JSON and accepts
-// JMESPath expressions to extract data.
-type JSONStageSpec struct {
- // Name from the extracted data to parse as JSON. If empty, uses entire log
- // message.
- Source string `json:"source,omitempty"`
-
- // Set of the key/value pairs of JMESPath expressions. The key will be the
- // key in the extracted data while the expression will be the value,
- // evaluated as a JMESPath from the source data.
- //
- // Literal JMESPath expressions can be used by wrapping a key in double
- // quotes, which then must be wrapped again in single quotes in YAML
- // so they get passed to the JMESPath parser.
- Expressions map[string]string `json:"expressions,omitempty"`
-}
-
-// The limit stage is a rate-limiting stage that throttles logs based on
-// several options. 
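
A minimal sketch of the limit stage described just above, throttling a pipeline
to 10 lines per second with a burst of 20 and discarding the excess:

    pipelineStages:
    - limit:
        rate: 10
        burst: 20
        drop: true
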
-type LimitStageSpec struct {
- // The rate limit in lines per second that Promtail will push to Loki.
- Rate int `json:"rate,omitempty"`
-
- // The cap in the quantity of burst lines that Promtail will push to Loki.
- Burst int `json:"burst,omitempty"`
-
- // When drop is true, log lines that exceed the current rate limit are discarded.
- // When drop is false, log lines that exceed the current rate limit wait
- // to enter the back pressure mode.
- //
- // Defaults to false.
- Drop bool `json:"drop,omitempty"`
-}
-
-// MatchStageSpec is a filtering stage that conditionally applies a set of
-// stages or drops entries when a log entry matches a configurable LogQL stream
-// selector and filter expressions.
-type MatchStageSpec struct {
- // LogQL stream selector and filter expressions. Required.
- Selector string `json:"selector"`
-
- // Names the pipeline. When defined, creates an additional label
- // in the pipeline_duration_seconds histogram, where the value is
- // concatenated with job_name using an underscore.
- PipelineName string `json:"pipelineName,omitempty"`
-
- // Determines what action is taken when the selector matches the log line.
- // Can be keep or drop. Defaults to keep. When set to drop, entries are
- // dropped and no later metrics are recorded.
- // Stages must be empty when dropping metrics.
- Action string `json:"action,omitempty"`
-
- // Every time a log line is dropped, the metric logentry_dropped_lines_total
- // is incremented. A "reason" label is added, and can be customized by
- // providing a custom value here. Defaults to "match_stage".
- DropCounterReason string `json:"dropCounterReason,omitempty"`
-
- // Nested set of pipeline stages to execute when action is keep and the log
- // line matches selector.
- //
- // An example value for stages may be:
- //
- // stages: |
- // - json: {}
- // - labelAllow: [foo, bar]
- //
- // Note that stages is a string because SIG API Machinery does not
- // support recursive types, and so it cannot be validated for correctness. Be
- // careful not to mistype anything.
- Stages string `json:"stages,omitempty"`
-}
-
-// MetricsStageSpec is an action stage that allows for defining and updating
-// metrics based on data from the extracted map. Created metrics are not pushed
-// to Loki or Prometheus and are instead exposed via the /metrics endpoint of
-// the Grafana Agent pod. The Grafana Agent Operator should be configured with
-// a MetricsInstance that discovers the logging DaemonSet to collect metrics
-// created by this stage.
-type MetricsStageSpec struct {
- // The metric type to create. Must be one of counter, gauge, histogram.
- // Required.
- Type string `json:"type"`
-
- // Sets the description for the created metric.
- Description string `json:"description,omitempty"`
-
- // Sets the custom prefix name for the metric. Defaults to "promtail_custom_".
- Prefix string `json:"prefix,omitempty"`
-
- // Key from the extracted data map to use for the metric. Defaults to the
- // metric name if not present.
- Source string `json:"source,omitempty"`
-
- // Label values on metrics are dynamic, which can cause exported metrics
- // to go stale. To prevent unbounded cardinality, any metrics not updated
- // within MaxIdleDuration are removed.
- //
- // Must be greater than or equal to 1s. Defaults to 5m.
- MaxIdleDuration string `json:"maxIdleDuration,omitempty"`
-
- // If true, all log lines are counted without attempting to match the
- // source to the extracted map. Mutually exclusive with value. 
- //
- // Only valid for type: counter.
- MatchAll *bool `json:"matchAll,omitempty"`
-
- // If true, all log line bytes are counted. Can only be set with
- // matchAll: true and action: add.
- //
- // Only valid for type: counter.
- CountEntryBytes *bool `json:"countEntryBytes,omitempty"`
-
- // Filters down source data and only changes the metric if the targeted
- // value matches the provided string exactly. If not present, all
- // data matches.
- Value string `json:"value,omitempty"`
-
- // The action to take against the metric. Required.
- //
- // Must be either "inc" or "add" for type: counter or type: histogram.
- // When type: gauge, must be one of "set", "inc", "dec", "add", or "sub".
- //
- // "add", "set", or "sub" requires the extracted value to be convertible
- // to a positive float.
- Action string `json:"action"`
-
- // Buckets to create. Bucket values must be convertible to float64s. Extremely
- // large or small numbers are subject to some loss of precision.
- // Only valid for type: histogram.
- Buckets []string `json:"buckets,omitempty"`
-}
-
-// MultilineStageSpec merges multiple lines into a multiline block before
-// passing it on to the next stage in the pipeline.
-type MultilineStageSpec struct {
- // RE2 regular expression. Creates a new multiline block when matched.
- // Required.
- FirstLine string `json:"firstLine"`
-
- // Maximum time to wait before passing on the multiline block to the next
- // stage if no new lines are received. Defaults to 3s.
- MaxWaitTime string `json:"maxWaitTime,omitempty"`
-
- // Maximum number of lines a block can have. A new block is started if
- // the number of lines surpasses this value. Defaults to 128.
- MaxLines int `json:"maxLines,omitempty"`
-}
-
-// OutputStageSpec is an action stage that takes data from the extracted map
-// and changes the log line that will be sent to Loki.
-type OutputStageSpec struct {
- // Name from extracted data to use for the log entry. Required.
- Source string `json:"source"`
-}
-
-// PackStageSpec is a transform stage that lets you embed extracted values and
-// labels into the log line by packing the log line and labels inside of a JSON
-// object.
-type PackStageSpec struct {
- // Name from extracted data or line labels. Required.
- // Labels provided here are automatically removed from output labels.
- Labels []string `json:"labels"`
-
- // If the resulting log line should use any existing timestamp or use time.Now()
- // when the line was created. Set to true when combining several log streams from
- // different containers to avoid out of order errors.
- IngestTimestamp bool `json:"ingestTimestamp,omitempty"`
-}
-
-// RegexStageSpec is a parsing stage that parses a log line using a regular
-// expression. Named capture groups in the regex allow for adding data into
-// the extracted map.
-type RegexStageSpec struct {
- // Name from extracted data to parse. If empty, defaults to using the log
- // message.
- Source string `json:"source,omitempty"`
-
- // RE2 regular expression. Each capture group MUST be named. Required.
- Expression string `json:"expression"`
-}
-
-// ReplaceStageSpec is a parsing stage that parses a log line using a regular
-// expression and replaces the log line. Named capture groups in the regex
-// allow for adding data into the extracted map.
-type ReplaceStageSpec struct {
- // Name from extracted data to parse. If empty, defaults to using the log
- // message.
- Source string `json:"source,omitempty"`
-
- // RE2 regular expression. Each capture group MUST be named. 
Required.
- Expression string `json:"expression"`
-
- // Value to replace the captured group with.
- Replace string `json:"replace,omitempty"`
-}
-
-// TemplateStageSpec is a transform stage that manipulates the values in the
-// extracted map using Go's template syntax.
-type TemplateStageSpec struct {
- // Name from extracted data to parse. Required. If empty, defaults to using
- // the log message.
- Source string `json:"source"`
-
- // Go template string to use. Required. In addition to normal template
- // functions, ToLower, ToUpper, Replace, Trim, TrimLeft, TrimRight,
- // TrimPrefix, and TrimSpace are also available.
- Template string `json:"template"`
-}
-
-// TenantStageSpec is an action stage that sets the tenant ID for the log entry
-// picking it from a field in the extracted data map.
-type TenantStageSpec struct {
- // Name from labels whose value should be set as tenant ID. Mutually exclusive with
- // source and value.
- Label string `json:"label,omitempty"`
-
- // Name from extracted data to use as the tenant ID. Mutually exclusive with
- // label and value.
- Source string `json:"source,omitempty"`
-
- // Value to use for the tenant ID. Useful when this stage is used within a
- // conditional pipeline such as match. Mutually exclusive with label and source.
- Value string `json:"value,omitempty"`
-}
-
-// TimestampStageSpec is an action stage that can change the timestamp of a log
-// line before it is sent to Loki.
-type TimestampStageSpec struct {
- // Name from extracted data to use as the timestamp. Required.
- Source string `json:"source"`
-
- // Determines format of the time string. Required. Can be one of:
- // ANSIC, UnixDate, RubyDate, RFC822, RFC822Z, RFC850, RFC1123, RFC1123Z,
- // RFC3339, RFC3339Nano, Unix, UnixMs, UnixUs, UnixNs.
- Format string `json:"format"`
-
- // Fallback formats to try if format fails.
- FallbackFormats []string `json:"fallbackFormats,omitempty"`
-
- // IANA Timezone Database string.
- Location string `json:"location,omitempty"`
-
- // Action to take when the timestamp can't be extracted or parsed.
- // Can be skip or fudge. Defaults to fudge.
- ActionOnFailure string `json:"actionOnFailure,omitempty"`
-}
diff --git a/internal/static/operator/apis/monitoring/v1alpha1/types_metrics.go b/internal/static/operator/apis/monitoring/v1alpha1/types_metrics.go
deleted file mode 100644
index 262cd20ef5..0000000000
--- a/internal/static/operator/apis/monitoring/v1alpha1/types_metrics.go
+++ /dev/null
@@ -1,274 +0,0 @@
-package v1alpha1
-
-import (
- prom_v1 "github.com/prometheus-operator/prometheus-operator/pkg/apis/monitoring/v1"
- v1 "k8s.io/api/core/v1"
- metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
-)
-
-// MetricsSubsystemSpec defines global settings to apply across the
-// Metrics subsystem.
-type MetricsSubsystemSpec struct {
- // RemoteWrite controls default remote_write settings for all instances. If
- // an instance does not provide its own RemoteWrite settings, these will be
- // used instead.
- RemoteWrite []RemoteWriteSpec `json:"remoteWrite,omitempty"`
- // Replicas of each shard to deploy for metrics pods. Number of replicas
- // multiplied by the number of shards is the total number of pods created.
- Replicas *int32 `json:"replicas,omitempty"`
- // Shards to distribute targets onto. Number of replicas multiplied by the
- // number of shards is the total number of pods created. Note that scaling
- // down shards does not reshard data onto remaining instances; it must be
- // manually moved. 
Increasing shards does not reshard data either, but it will - // continue to be available from the same instances. Sharding is performed on - // the content of the __address__ target meta-label. - Shards *int32 `json:"shards,omitempty"` - // ReplicaExternalLabelName is the name of the metrics external label used - // to denote the replica name. Defaults to __replica__. The external label is _not_ - // added when the value is set to the empty string. - ReplicaExternalLabelName *string `json:"replicaExternalLabelName,omitempty"` - // MetricsExternalLabelName is the name of the external label used to - // denote Grafana Agent cluster. Defaults to "cluster." The external label is - // _not_ added when the value is set to the empty string. - MetricsExternalLabelName *string `json:"metricsExternalLabelName,omitempty"` - // ScrapeInterval is the time between consecutive scrapes. - ScrapeInterval string `json:"scrapeInterval,omitempty"` - // ScrapeTimeout is the time to wait for a target to respond before marking a - // scrape as failed. - ScrapeTimeout string `json:"scrapeTimeout,omitempty"` - // ExternalLabels are labels to add to any time series when sending data over - // remote_write. - ExternalLabels map[string]string `json:"externalLabels,omitempty"` - // ArbitraryFSAccessThroughSMs configures whether configuration based on a - // ServiceMonitor can access arbitrary files on the file system of the - // Grafana Agent container, e.g., bearer token files. - ArbitraryFSAccessThroughSMs prom_v1.ArbitraryFSAccessThroughSMsConfig `json:"arbitraryFSAccessThroughSMs,omitempty"` - // OverrideHonorLabels, if true, overrides all configured honor_labels read - // from ServiceMonitor or PodMonitor and sets them to false. - OverrideHonorLabels bool `json:"overrideHonorLabels,omitempty"` - // OverrideHonorTimestamps allows global enforcement for honoring timestamps in all scrape configs. - OverrideHonorTimestamps bool `json:"overrideHonorTimestamps,omitempty"` - // IgnoreNamespaceSelectors, if true, ignores NamespaceSelector settings - // from the PodMonitor and ServiceMonitor configs, so that they only - // discover endpoints within their current namespace. - IgnoreNamespaceSelectors bool `json:"ignoreNamespaceSelectors,omitempty"` - // EnforcedNamespaceLabel enforces adding a namespace label of origin for - // each metric that is user-created. The label value is always the - // namespace of the object that is being created. - EnforcedNamespaceLabel string `json:"enforcedNamespaceLabel,omitempty"` - // EnforcedSampleLimit defines a global limit on the number of scraped samples - // that are accepted. This overrides any SampleLimit set per - // ServiceMonitor and/or PodMonitor. It is meant to be used by admins to - // enforce the SampleLimit to keep the overall number of samples and series - // under the desired limit. Note that if a SampleLimit from a ServiceMonitor - // or PodMonitor is lower, that value is used instead. - EnforcedSampleLimit *uint64 `json:"enforcedSampleLimit,omitempty"` - // EnforcedTargetLimit defines a global limit on the number of scraped - // targets. This overrides any TargetLimit set per ServiceMonitor and/or - // PodMonitor. It is meant to be used by admins to enforce the TargetLimit to - // keep the overall number of targets under the desired limit. Note that if a - // TargetLimit from a ServiceMonitor or PodMonitor is higher, that value is used instead. 
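
Sketching the global metrics settings above on a GrafanaAgent resource,
including the enforcedTargetLimit field defined next (all numbers and labels
are hypothetical):

    spec:
      metrics:
        replicas: 2
        shards: 3
        scrapeInterval: 60s
        enforcedSampleLimit: 500000
        enforcedTargetLimit: 1000
        instanceSelector:
          matchLabels:
            agent: grafana-agent
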
- EnforcedTargetLimit *uint64 `json:"enforcedTargetLimit,omitempty"` - - // InstanceSelector determines which MetricsInstances should be selected - // for running. Each instance runs its own set of Metrics components, - // including service discovery, scraping, and remote_write. - InstanceSelector *metav1.LabelSelector `json:"instanceSelector,omitempty"` - // InstanceNamespaceSelector is the set of labels that determines which - // namespaces to watch for MetricsInstances. If not provided, it only checks its own namespace. - InstanceNamespaceSelector *metav1.LabelSelector `json:"instanceNamespaceSelector,omitempty"` -} - -// RemoteWriteSpec defines the remote_write configuration for Prometheus. -type RemoteWriteSpec struct { - // Name of the remote_write queue. Must be unique if specified. The name is - // used in metrics and logging in order to differentiate queues. - Name string `json:"name,omitempty"` - // URL of the endpoint to send samples to. - URL string `json:"url"` - // RemoteTimeout is the timeout for requests to the remote_write endpoint. - RemoteTimeout string `json:"remoteTimeout,omitempty"` - // Headers is a set of custom HTTP headers to be sent along with each - // remote_write request. Be aware that any headers set by Grafana Agent - // itself can't be overwritten. - Headers map[string]string `json:"headers,omitempty"` - // WriteRelabelConfigs holds relabel_configs to relabel samples before they are - // sent to the remote_write endpoint. - WriteRelabelConfigs []prom_v1.RelabelConfig `json:"writeRelabelConfigs,omitempty"` - // BasicAuth for the URL. - BasicAuth *prom_v1.BasicAuth `json:"basicAuth,omitempty"` - // Oauth2 for URL - OAuth2 *prom_v1.OAuth2 `json:"oauth2,omitempty"` - // BearerToken used for remote_write. - BearerToken string `json:"bearerToken,omitempty"` - // BearerTokenFile used to read bearer token. - BearerTokenFile string `json:"bearerTokenFile,omitempty"` - // SigV4 configures SigV4-based authentication to the remote_write endpoint. - // SigV4-based authentication is used if SigV4 is defined, even with an empty object. - SigV4 *SigV4Config `json:"sigv4,omitempty"` - // TLSConfig to use for remote_write. - TLSConfig *prom_v1.TLSConfig `json:"tlsConfig,omitempty"` - // ProxyURL to proxy requests through. Optional. - ProxyURL string `json:"proxyUrl,omitempty"` - // QueueConfig allows tuning of the remote_write queue parameters. - QueueConfig *QueueConfig `json:"queueConfig,omitempty"` - // MetadataConfig configures the sending of series metadata to remote storage. - MetadataConfig *MetadataConfig `json:"metadataConfig,omitempty"` -} - -// SigV4Config specifies configuration to perform SigV4 authentication. -type SigV4Config struct { - // Region of the AWS endpoint. If blank, the region from the default - // credentials chain is used. - Region string `json:"region,omitempty"` - // AccessKey holds the secret of the AWS API access key to use for signing. - // If not provided, the environment variable AWS_ACCESS_KEY_ID is used. - AccessKey *v1.SecretKeySelector `json:"accessKey,omitempty"` - // SecretKey of the AWS API to use for signing. If blank, the environment - // variable AWS_SECRET_ACCESS_KEY is used. - SecretKey *v1.SecretKeySelector `json:"secretKey,omitempty"` - // Profile is the named AWS profile to use for authentication. - Profile string `json:"profile,omitempty"` - // RoleARN is the AWS Role ARN to use for authentication, as an alternative - // for using the AWS API keys. 
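
To illustrate the SigV4 fields above together with the roleARN field that
follows, a hypothetical remoteWrite entry (the workspace URL, region, and ARN
are placeholders):

    remoteWrite:
    - url: https://aps-workspaces.us-east-1.amazonaws.com/workspaces/my-workspace/api/v1/remote_write
      sigv4:
        region: us-east-1
        roleARN: arn:aws:iam::123456789012:role/prometheus-remote-write
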
- RoleARN string `json:"roleARN,omitempty"` -} - -// QueueConfig allows the tuning of remote_write queue_config parameters. -type QueueConfig struct { - // Capacity is the number of samples to buffer per shard before samples start being dropped. - Capacity int `json:"capacity,omitempty"` - // MinShards is the minimum number of shards, i.e., the amount of concurrency. - MinShards int `json:"minShards,omitempty"` - // MaxShards is the maximum number of shards, i.e., the amount of concurrency. - MaxShards int `json:"maxShards,omitempty"` - // MaxSamplesPerSend is the maximum number of samples per send. - MaxSamplesPerSend int `json:"maxSamplesPerSend,omitempty"` - // BatchSendDeadline is the maximum time a sample will wait in the buffer. - BatchSendDeadline string `json:"batchSendDeadline,omitempty"` - // MaxRetries is the maximum number of times to retry a batch on recoverable errors. - MaxRetries int `json:"maxRetries,omitempty"` - // MinBackoff is the initial retry delay. MinBackoff is doubled for every retry. - MinBackoff string `json:"minBackoff,omitempty"` - // MaxBackoff is the maximum retry delay. - MaxBackoff string `json:"maxBackoff,omitempty"` - // RetryOnRateLimit retries requests when encountering rate limits. - RetryOnRateLimit bool `json:"retryOnRateLimit,omitempty"` -} - -// MetadataConfig configures the sending of series metadata to remote storage. -type MetadataConfig struct { - // Send enables metric metadata to be sent to remote storage. - Send bool `json:"send,omitempty"` - // SendInterval controls how frequently metric metadata is sent to remote storage. - SendInterval string `json:"sendInterval,omitempty"` -} - -// +kubebuilder:object:root=true -// +kubebuilder:resource:path="metricsinstances" -// +kubebuilder:resource:singular="metricsinstance" -// +kubebuilder:resource:categories="agent-operator" - -// MetricsInstance controls an individual Metrics instance within a -// Grafana Agent deployment. -type MetricsInstance struct { - metav1.TypeMeta `json:",inline"` - metav1.ObjectMeta `json:"metadata,omitempty"` - - // Spec holds the specification of the desired behavior for the Metrics - // instance. - Spec MetricsInstanceSpec `json:"spec,omitempty"` -} - -// ServiceMonitorSelector returns a selector to find ServiceMonitors. -func (p *MetricsInstance) ServiceMonitorSelector() ObjectSelector { - return ObjectSelector{ - ObjectType: &prom_v1.ServiceMonitor{}, - ParentNamespace: p.Namespace, - NamespaceSelector: p.Spec.ServiceMonitorNamespaceSelector, - Labels: p.Spec.ServiceMonitorSelector, - } -} - -// PodMonitorSelector returns a selector to find PodMonitors. -func (p *MetricsInstance) PodMonitorSelector() ObjectSelector { - return ObjectSelector{ - ObjectType: &prom_v1.PodMonitor{}, - ParentNamespace: p.Namespace, - NamespaceSelector: p.Spec.PodMonitorNamespaceSelector, - Labels: p.Spec.PodMonitorSelector, - } -} - -// ProbeSelector returns a selector to find Probes. -func (p *MetricsInstance) ProbeSelector() ObjectSelector { - return ObjectSelector{ - ObjectType: &prom_v1.Probe{}, - ParentNamespace: p.Namespace, - NamespaceSelector: p.Spec.ProbeNamespaceSelector, - Labels: p.Spec.ProbeSelector, - } -} - -// MetricsInstanceSpec controls how an individual instance is used to discover PodMonitors. -type MetricsInstanceSpec struct { - // WALTruncateFrequency specifies how frequently to run the WAL truncation process. 
- // Higher values cause the WAL to grow and old series to
- // stay in the WAL longer, but reduce the chance of data loss when
- // remote_write fails for longer than the given frequency.
- WALTruncateFrequency string `json:"walTruncateFrequency,omitempty"`
- // MinWALTime is the minimum amount of time that series and samples can exist in
- // the WAL before being considered for deletion.
- MinWALTime string `json:"minWALTime,omitempty"`
- // MaxWALTime is the maximum amount of time that series and samples can exist in
- // the WAL before being forcibly deleted.
- MaxWALTime string `json:"maxWALTime,omitempty"`
- // RemoteFlushDeadline is the deadline for flushing data when an instance
- // shuts down.
- RemoteFlushDeadline string `json:"remoteFlushDeadline,omitempty"`
- // WriteStaleOnShutdown writes staleness markers on shutdown for all series.
- WriteStaleOnShutdown *bool `json:"writeStaleOnShutdown,omitempty"`
- // ServiceMonitorSelector determines which ServiceMonitors to select
- // for target discovery.
- ServiceMonitorSelector *metav1.LabelSelector `json:"serviceMonitorSelector,omitempty"`
- // ServiceMonitorNamespaceSelector is the set of labels that determines which
- // namespaces to watch for ServiceMonitor discovery. If nil, it only checks its own namespace.
- ServiceMonitorNamespaceSelector *metav1.LabelSelector `json:"serviceMonitorNamespaceSelector,omitempty"`
- // PodMonitorSelector determines which PodMonitors to select for target
- // discovery. Experimental.
- PodMonitorSelector *metav1.LabelSelector `json:"podMonitorSelector,omitempty"`
- // PodMonitorNamespaceSelector is the set of labels that determines which
- // namespaces to watch for PodMonitor discovery. If nil, it only checks its own
- // namespace.
- PodMonitorNamespaceSelector *metav1.LabelSelector `json:"podMonitorNamespaceSelector,omitempty"`
- // ProbeSelector determines which Probes to select for target
- // discovery.
- ProbeSelector *metav1.LabelSelector `json:"probeSelector,omitempty"`
- // ProbeNamespaceSelector is the set of labels that determines which namespaces
- // to watch for Probe discovery. If nil, it only checks its own namespace.
- ProbeNamespaceSelector *metav1.LabelSelector `json:"probeNamespaceSelector,omitempty"`
- // RemoteWrite controls remote_write settings for this instance.
- RemoteWrite []RemoteWriteSpec `json:"remoteWrite,omitempty"`
- // AdditionalScrapeConfigs lets you specify a key of a Secret containing
- // additional Grafana Agent Prometheus scrape configurations. The specified scrape
- // configurations are appended to the configurations generated by
- // Grafana Agent Operator. Specified job configurations must have the
- // form specified in the official Prometheus documentation:
- // https://prometheus.io/docs/prometheus/latest/configuration/configuration/#scrape_config.
- // As scrape configs are appended, you must make sure the configuration is still
- // valid. Note that it's possible that this feature will break future
- // upgrades of Grafana Agent. Review both Grafana Agent and
- // Prometheus release notes to ensure that no incompatible scrape configs will
- // break Grafana Agent after the upgrade.
- AdditionalScrapeConfigs *v1.SecretKeySelector `json:"additionalScrapeConfigs,omitempty"`
-}
-
-// +kubebuilder:object:root=true
-
-// MetricsInstanceList is a list of MetricsInstance.
-type MetricsInstanceList struct {
- metav1.TypeMeta `json:",inline"`
- metav1.ListMeta `json:"metadata,omitempty"`
- // Items is the list of MetricsInstance. 
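
Pulling the instance spec above together, a hypothetical MetricsInstance
manifest (names, labels, and the remote_write URL are made up):

    apiVersion: monitoring.grafana.com/v1alpha1
    kind: MetricsInstance
    metadata:
      name: primary
      namespace: default
      labels:
        agent: grafana-agent
    spec:
      walTruncateFrequency: 60m
      remoteWrite:
      - url: https://prometheus.example.com/api/v1/write
      serviceMonitorSelector:
        matchLabels:
          instance: primary
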
- Items []*MetricsInstance `json:"items"` -} diff --git a/internal/static/operator/apis/monitoring/v1alpha1/zz_generated.deepcopy.go b/internal/static/operator/apis/monitoring/v1alpha1/zz_generated.deepcopy.go deleted file mode 100644 index 97872384f8..0000000000 --- a/internal/static/operator/apis/monitoring/v1alpha1/zz_generated.deepcopy.go +++ /dev/null @@ -1,1491 +0,0 @@ -//go:build !ignore_autogenerated -// +build !ignore_autogenerated - -// Code generated by controller-gen. DO NOT EDIT. - -package v1alpha1 - -import ( - "github.com/grafana/agent/internal/static/operator/assets" - "github.com/prometheus-operator/prometheus-operator/pkg/apis/monitoring/v1" - corev1 "k8s.io/api/core/v1" - metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" - runtime "k8s.io/apimachinery/pkg/runtime" -) - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *CRIStageSpec) DeepCopyInto(out *CRIStageSpec) { - *out = *in -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new CRIStageSpec. -func (in *CRIStageSpec) DeepCopy() *CRIStageSpec { - if in == nil { - return nil - } - out := new(CRIStageSpec) - in.DeepCopyInto(out) - return out -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *Deployment) DeepCopyInto(out *Deployment) { - *out = *in - if in.Agent != nil { - in, out := &in.Agent, &out.Agent - *out = new(GrafanaAgent) - (*in).DeepCopyInto(*out) - } - if in.Metrics != nil { - in, out := &in.Metrics, &out.Metrics - *out = make([]MetricsDeployment, len(*in)) - for i := range *in { - (*in)[i].DeepCopyInto(&(*out)[i]) - } - } - if in.Logs != nil { - in, out := &in.Logs, &out.Logs - *out = make([]LogsDeployment, len(*in)) - for i := range *in { - (*in)[i].DeepCopyInto(&(*out)[i]) - } - } - if in.Integrations != nil { - in, out := &in.Integrations, &out.Integrations - *out = make([]IntegrationsDeployment, len(*in)) - for i := range *in { - (*in)[i].DeepCopyInto(&(*out)[i]) - } - } - if in.Secrets != nil { - in, out := &in.Secrets, &out.Secrets - *out = make(assets.SecretStore, len(*in)) - for key, val := range *in { - (*out)[key] = val - } - } -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Deployment. -func (in *Deployment) DeepCopy() *Deployment { - if in == nil { - return nil - } - out := new(Deployment) - in.DeepCopyInto(out) - return out -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *DockerStageSpec) DeepCopyInto(out *DockerStageSpec) { - *out = *in -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DockerStageSpec. -func (in *DockerStageSpec) DeepCopy() *DockerStageSpec { - if in == nil { - return nil - } - out := new(DockerStageSpec) - in.DeepCopyInto(out) - return out -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *DropStageSpec) DeepCopyInto(out *DropStageSpec) { - *out = *in -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DropStageSpec. -func (in *DropStageSpec) DeepCopy() *DropStageSpec { - if in == nil { - return nil - } - out := new(DropStageSpec) - in.DeepCopyInto(out) - return out -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
-func (in *GrafanaAgent) DeepCopyInto(out *GrafanaAgent) { - *out = *in - out.TypeMeta = in.TypeMeta - in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) - in.Spec.DeepCopyInto(&out.Spec) -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new GrafanaAgent. -func (in *GrafanaAgent) DeepCopy() *GrafanaAgent { - if in == nil { - return nil - } - out := new(GrafanaAgent) - in.DeepCopyInto(out) - return out -} - -// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. -func (in *GrafanaAgent) DeepCopyObject() runtime.Object { - if c := in.DeepCopy(); c != nil { - return c - } - return nil -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *GrafanaAgentList) DeepCopyInto(out *GrafanaAgentList) { - *out = *in - out.TypeMeta = in.TypeMeta - in.ListMeta.DeepCopyInto(&out.ListMeta) - if in.Items != nil { - in, out := &in.Items, &out.Items - *out = make([]*GrafanaAgent, len(*in)) - for i := range *in { - if (*in)[i] != nil { - in, out := &(*in)[i], &(*out)[i] - *out = new(GrafanaAgent) - (*in).DeepCopyInto(*out) - } - } - } -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new GrafanaAgentList. -func (in *GrafanaAgentList) DeepCopy() *GrafanaAgentList { - if in == nil { - return nil - } - out := new(GrafanaAgentList) - in.DeepCopyInto(out) - return out -} - -// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. -func (in *GrafanaAgentList) DeepCopyObject() runtime.Object { - if c := in.DeepCopy(); c != nil { - return c - } - return nil -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
-func (in *GrafanaAgentSpec) DeepCopyInto(out *GrafanaAgentSpec) { - *out = *in - if in.APIServerConfig != nil { - in, out := &in.APIServerConfig, &out.APIServerConfig - *out = new(v1.APIServerConfig) - (*in).DeepCopyInto(*out) - } - if in.PodMetadata != nil { - in, out := &in.PodMetadata, &out.PodMetadata - *out = new(v1.EmbeddedObjectMetadata) - (*in).DeepCopyInto(*out) - } - if in.Image != nil { - in, out := &in.Image, &out.Image - *out = new(string) - **out = **in - } - if in.ConfigReloaderImage != nil { - in, out := &in.ConfigReloaderImage, &out.ConfigReloaderImage - *out = new(string) - **out = **in - } - if in.ImagePullSecrets != nil { - in, out := &in.ImagePullSecrets, &out.ImagePullSecrets - *out = make([]corev1.LocalObjectReference, len(*in)) - copy(*out, *in) - } - if in.Storage != nil { - in, out := &in.Storage, &out.Storage - *out = new(v1.StorageSpec) - (*in).DeepCopyInto(*out) - } - if in.Volumes != nil { - in, out := &in.Volumes, &out.Volumes - *out = make([]corev1.Volume, len(*in)) - for i := range *in { - (*in)[i].DeepCopyInto(&(*out)[i]) - } - } - if in.VolumeMounts != nil { - in, out := &in.VolumeMounts, &out.VolumeMounts - *out = make([]corev1.VolumeMount, len(*in)) - for i := range *in { - (*in)[i].DeepCopyInto(&(*out)[i]) - } - } - in.Resources.DeepCopyInto(&out.Resources) - if in.NodeSelector != nil { - in, out := &in.NodeSelector, &out.NodeSelector - *out = make(map[string]string, len(*in)) - for key, val := range *in { - (*out)[key] = val - } - } - if in.Secrets != nil { - in, out := &in.Secrets, &out.Secrets - *out = make([]string, len(*in)) - copy(*out, *in) - } - if in.ConfigMaps != nil { - in, out := &in.ConfigMaps, &out.ConfigMaps - *out = make([]string, len(*in)) - copy(*out, *in) - } - if in.Affinity != nil { - in, out := &in.Affinity, &out.Affinity - *out = new(corev1.Affinity) - (*in).DeepCopyInto(*out) - } - if in.Tolerations != nil { - in, out := &in.Tolerations, &out.Tolerations - *out = make([]corev1.Toleration, len(*in)) - for i := range *in { - (*in)[i].DeepCopyInto(&(*out)[i]) - } - } - if in.TopologySpreadConstraints != nil { - in, out := &in.TopologySpreadConstraints, &out.TopologySpreadConstraints - *out = make([]corev1.TopologySpreadConstraint, len(*in)) - for i := range *in { - (*in)[i].DeepCopyInto(&(*out)[i]) - } - } - if in.SecurityContext != nil { - in, out := &in.SecurityContext, &out.SecurityContext - *out = new(corev1.PodSecurityContext) - (*in).DeepCopyInto(*out) - } - if in.Containers != nil { - in, out := &in.Containers, &out.Containers - *out = make([]corev1.Container, len(*in)) - for i := range *in { - (*in)[i].DeepCopyInto(&(*out)[i]) - } - } - if in.InitContainers != nil { - in, out := &in.InitContainers, &out.InitContainers - *out = make([]corev1.Container, len(*in)) - for i := range *in { - (*in)[i].DeepCopyInto(&(*out)[i]) - } - } - if in.RuntimeClassName != nil { - in, out := &in.RuntimeClassName, &out.RuntimeClassName - *out = new(string) - **out = **in - } - in.Metrics.DeepCopyInto(&out.Metrics) - in.Logs.DeepCopyInto(&out.Logs) - in.Integrations.DeepCopyInto(&out.Integrations) -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new GrafanaAgentSpec. -func (in *GrafanaAgentSpec) DeepCopy() *GrafanaAgentSpec { - if in == nil { - return nil - } - out := new(GrafanaAgentSpec) - in.DeepCopyInto(out) - return out -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
-func (in *Integration) DeepCopyInto(out *Integration) { - *out = *in - out.TypeMeta = in.TypeMeta - in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) - in.Spec.DeepCopyInto(&out.Spec) -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Integration. -func (in *Integration) DeepCopy() *Integration { - if in == nil { - return nil - } - out := new(Integration) - in.DeepCopyInto(out) - return out -} - -// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. -func (in *Integration) DeepCopyObject() runtime.Object { - if c := in.DeepCopy(); c != nil { - return c - } - return nil -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *IntegrationList) DeepCopyInto(out *IntegrationList) { - *out = *in - out.TypeMeta = in.TypeMeta - in.ListMeta.DeepCopyInto(&out.ListMeta) - if in.Items != nil { - in, out := &in.Items, &out.Items - *out = make([]*Integration, len(*in)) - for i := range *in { - if (*in)[i] != nil { - in, out := &(*in)[i], &(*out)[i] - *out = new(Integration) - (*in).DeepCopyInto(*out) - } - } - } -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new IntegrationList. -func (in *IntegrationList) DeepCopy() *IntegrationList { - if in == nil { - return nil - } - out := new(IntegrationList) - in.DeepCopyInto(out) - return out -} - -// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. -func (in *IntegrationList) DeepCopyObject() runtime.Object { - if c := in.DeepCopy(); c != nil { - return c - } - return nil -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *IntegrationSpec) DeepCopyInto(out *IntegrationSpec) { - *out = *in - out.Type = in.Type - in.Config.DeepCopyInto(&out.Config) - if in.Volumes != nil { - in, out := &in.Volumes, &out.Volumes - *out = make([]corev1.Volume, len(*in)) - for i := range *in { - (*in)[i].DeepCopyInto(&(*out)[i]) - } - } - if in.VolumeMounts != nil { - in, out := &in.VolumeMounts, &out.VolumeMounts - *out = make([]corev1.VolumeMount, len(*in)) - for i := range *in { - (*in)[i].DeepCopyInto(&(*out)[i]) - } - } - if in.Secrets != nil { - in, out := &in.Secrets, &out.Secrets - *out = make([]corev1.SecretKeySelector, len(*in)) - for i := range *in { - (*in)[i].DeepCopyInto(&(*out)[i]) - } - } - if in.ConfigMaps != nil { - in, out := &in.ConfigMaps, &out.ConfigMaps - *out = make([]corev1.ConfigMapKeySelector, len(*in)) - for i := range *in { - (*in)[i].DeepCopyInto(&(*out)[i]) - } - } -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new IntegrationSpec. -func (in *IntegrationSpec) DeepCopy() *IntegrationSpec { - if in == nil { - return nil - } - out := new(IntegrationSpec) - in.DeepCopyInto(out) - return out -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *IntegrationType) DeepCopyInto(out *IntegrationType) { - *out = *in -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new IntegrationType. -func (in *IntegrationType) DeepCopy() *IntegrationType { - if in == nil { - return nil - } - out := new(IntegrationType) - in.DeepCopyInto(out) - return out -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
-func (in *IntegrationsDeployment) DeepCopyInto(out *IntegrationsDeployment) { - *out = *in - if in.Instance != nil { - in, out := &in.Instance, &out.Instance - *out = new(Integration) - (*in).DeepCopyInto(*out) - } -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new IntegrationsDeployment. -func (in *IntegrationsDeployment) DeepCopy() *IntegrationsDeployment { - if in == nil { - return nil - } - out := new(IntegrationsDeployment) - in.DeepCopyInto(out) - return out -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *IntegrationsSubsystemSpec) DeepCopyInto(out *IntegrationsSubsystemSpec) { - *out = *in - if in.Selector != nil { - in, out := &in.Selector, &out.Selector - *out = new(metav1.LabelSelector) - (*in).DeepCopyInto(*out) - } - if in.NamespaceSelector != nil { - in, out := &in.NamespaceSelector, &out.NamespaceSelector - *out = new(metav1.LabelSelector) - (*in).DeepCopyInto(*out) - } -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new IntegrationsSubsystemSpec. -func (in *IntegrationsSubsystemSpec) DeepCopy() *IntegrationsSubsystemSpec { - if in == nil { - return nil - } - out := new(IntegrationsSubsystemSpec) - in.DeepCopyInto(out) - return out -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *JSONStageSpec) DeepCopyInto(out *JSONStageSpec) { - *out = *in - if in.Expressions != nil { - in, out := &in.Expressions, &out.Expressions - *out = make(map[string]string, len(*in)) - for key, val := range *in { - (*out)[key] = val - } - } -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new JSONStageSpec. -func (in *JSONStageSpec) DeepCopy() *JSONStageSpec { - if in == nil { - return nil - } - out := new(JSONStageSpec) - in.DeepCopyInto(out) - return out -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *LimitStageSpec) DeepCopyInto(out *LimitStageSpec) { - *out = *in -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new LimitStageSpec. -func (in *LimitStageSpec) DeepCopy() *LimitStageSpec { - if in == nil { - return nil - } - out := new(LimitStageSpec) - in.DeepCopyInto(out) - return out -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *LogsBackoffConfigSpec) DeepCopyInto(out *LogsBackoffConfigSpec) { - *out = *in -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new LogsBackoffConfigSpec. -func (in *LogsBackoffConfigSpec) DeepCopy() *LogsBackoffConfigSpec { - if in == nil { - return nil - } - out := new(LogsBackoffConfigSpec) - in.DeepCopyInto(out) - return out -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
-func (in *LogsClientSpec) DeepCopyInto(out *LogsClientSpec) { - *out = *in - if in.BasicAuth != nil { - in, out := &in.BasicAuth, &out.BasicAuth - *out = new(v1.BasicAuth) - (*in).DeepCopyInto(*out) - } - if in.OAuth2 != nil { - in, out := &in.OAuth2, &out.OAuth2 - *out = new(v1.OAuth2) - (*in).DeepCopyInto(*out) - } - if in.TLSConfig != nil { - in, out := &in.TLSConfig, &out.TLSConfig - *out = new(v1.TLSConfig) - (*in).DeepCopyInto(*out) - } - if in.BackoffConfig != nil { - in, out := &in.BackoffConfig, &out.BackoffConfig - *out = new(LogsBackoffConfigSpec) - **out = **in - } - if in.ExternalLabels != nil { - in, out := &in.ExternalLabels, &out.ExternalLabels - *out = make(map[string]string, len(*in)) - for key, val := range *in { - (*out)[key] = val - } - } -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new LogsClientSpec. -func (in *LogsClientSpec) DeepCopy() *LogsClientSpec { - if in == nil { - return nil - } - out := new(LogsClientSpec) - in.DeepCopyInto(out) - return out -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *LogsDeployment) DeepCopyInto(out *LogsDeployment) { - *out = *in - if in.Instance != nil { - in, out := &in.Instance, &out.Instance - *out = new(LogsInstance) - (*in).DeepCopyInto(*out) - } - if in.PodLogs != nil { - in, out := &in.PodLogs, &out.PodLogs - *out = make([]*PodLogs, len(*in)) - for i := range *in { - if (*in)[i] != nil { - in, out := &(*in)[i], &(*out)[i] - *out = new(PodLogs) - (*in).DeepCopyInto(*out) - } - } - } -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new LogsDeployment. -func (in *LogsDeployment) DeepCopy() *LogsDeployment { - if in == nil { - return nil - } - out := new(LogsDeployment) - in.DeepCopyInto(out) - return out -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *LogsInstance) DeepCopyInto(out *LogsInstance) { - *out = *in - out.TypeMeta = in.TypeMeta - in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) - in.Spec.DeepCopyInto(&out.Spec) -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new LogsInstance. -func (in *LogsInstance) DeepCopy() *LogsInstance { - if in == nil { - return nil - } - out := new(LogsInstance) - in.DeepCopyInto(out) - return out -} - -// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. -func (in *LogsInstance) DeepCopyObject() runtime.Object { - if c := in.DeepCopy(); c != nil { - return c - } - return nil -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *LogsInstanceList) DeepCopyInto(out *LogsInstanceList) { - *out = *in - out.TypeMeta = in.TypeMeta - in.ListMeta.DeepCopyInto(&out.ListMeta) - if in.Items != nil { - in, out := &in.Items, &out.Items - *out = make([]*LogsInstance, len(*in)) - for i := range *in { - if (*in)[i] != nil { - in, out := &(*in)[i], &(*out)[i] - *out = new(LogsInstance) - (*in).DeepCopyInto(*out) - } - } - } -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new LogsInstanceList. 
-func (in *LogsInstanceList) DeepCopy() *LogsInstanceList { - if in == nil { - return nil - } - out := new(LogsInstanceList) - in.DeepCopyInto(out) - return out -} - -// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. -func (in *LogsInstanceList) DeepCopyObject() runtime.Object { - if c := in.DeepCopy(); c != nil { - return c - } - return nil -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *LogsInstanceSpec) DeepCopyInto(out *LogsInstanceSpec) { - *out = *in - if in.Clients != nil { - in, out := &in.Clients, &out.Clients - *out = make([]LogsClientSpec, len(*in)) - for i := range *in { - (*in)[i].DeepCopyInto(&(*out)[i]) - } - } - if in.PodLogsSelector != nil { - in, out := &in.PodLogsSelector, &out.PodLogsSelector - *out = new(metav1.LabelSelector) - (*in).DeepCopyInto(*out) - } - if in.PodLogsNamespaceSelector != nil { - in, out := &in.PodLogsNamespaceSelector, &out.PodLogsNamespaceSelector - *out = new(metav1.LabelSelector) - (*in).DeepCopyInto(*out) - } - if in.AdditionalScrapeConfigs != nil { - in, out := &in.AdditionalScrapeConfigs, &out.AdditionalScrapeConfigs - *out = new(corev1.SecretKeySelector) - (*in).DeepCopyInto(*out) - } - if in.TargetConfig != nil { - in, out := &in.TargetConfig, &out.TargetConfig - *out = new(LogsTargetConfigSpec) - **out = **in - } -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new LogsInstanceSpec. -func (in *LogsInstanceSpec) DeepCopy() *LogsInstanceSpec { - if in == nil { - return nil - } - out := new(LogsInstanceSpec) - in.DeepCopyInto(out) - return out -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *LogsSubsystemSpec) DeepCopyInto(out *LogsSubsystemSpec) { - *out = *in - if in.Clients != nil { - in, out := &in.Clients, &out.Clients - *out = make([]LogsClientSpec, len(*in)) - for i := range *in { - (*in)[i].DeepCopyInto(&(*out)[i]) - } - } - if in.LogsExternalLabelName != nil { - in, out := &in.LogsExternalLabelName, &out.LogsExternalLabelName - *out = new(string) - **out = **in - } - if in.InstanceSelector != nil { - in, out := &in.InstanceSelector, &out.InstanceSelector - *out = new(metav1.LabelSelector) - (*in).DeepCopyInto(*out) - } - if in.InstanceNamespaceSelector != nil { - in, out := &in.InstanceNamespaceSelector, &out.InstanceNamespaceSelector - *out = new(metav1.LabelSelector) - (*in).DeepCopyInto(*out) - } -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new LogsSubsystemSpec. -func (in *LogsSubsystemSpec) DeepCopy() *LogsSubsystemSpec { - if in == nil { - return nil - } - out := new(LogsSubsystemSpec) - in.DeepCopyInto(out) - return out -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *LogsTargetConfigSpec) DeepCopyInto(out *LogsTargetConfigSpec) { - *out = *in -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new LogsTargetConfigSpec. -func (in *LogsTargetConfigSpec) DeepCopy() *LogsTargetConfigSpec { - if in == nil { - return nil - } - out := new(LogsTargetConfigSpec) - in.DeepCopyInto(out) - return out -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
-func (in *MatchStageSpec) DeepCopyInto(out *MatchStageSpec) { - *out = *in -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new MatchStageSpec. -func (in *MatchStageSpec) DeepCopy() *MatchStageSpec { - if in == nil { - return nil - } - out := new(MatchStageSpec) - in.DeepCopyInto(out) - return out -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *MetadataConfig) DeepCopyInto(out *MetadataConfig) { - *out = *in -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new MetadataConfig. -func (in *MetadataConfig) DeepCopy() *MetadataConfig { - if in == nil { - return nil - } - out := new(MetadataConfig) - in.DeepCopyInto(out) - return out -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *MetricsDeployment) DeepCopyInto(out *MetricsDeployment) { - *out = *in - if in.Instance != nil { - in, out := &in.Instance, &out.Instance - *out = new(MetricsInstance) - (*in).DeepCopyInto(*out) - } - if in.ServiceMonitors != nil { - in, out := &in.ServiceMonitors, &out.ServiceMonitors - *out = make([]*v1.ServiceMonitor, len(*in)) - for i := range *in { - if (*in)[i] != nil { - in, out := &(*in)[i], &(*out)[i] - *out = new(v1.ServiceMonitor) - (*in).DeepCopyInto(*out) - } - } - } - if in.PodMonitors != nil { - in, out := &in.PodMonitors, &out.PodMonitors - *out = make([]*v1.PodMonitor, len(*in)) - for i := range *in { - if (*in)[i] != nil { - in, out := &(*in)[i], &(*out)[i] - *out = new(v1.PodMonitor) - (*in).DeepCopyInto(*out) - } - } - } - if in.Probes != nil { - in, out := &in.Probes, &out.Probes - *out = make([]*v1.Probe, len(*in)) - for i := range *in { - if (*in)[i] != nil { - in, out := &(*in)[i], &(*out)[i] - *out = new(v1.Probe) - (*in).DeepCopyInto(*out) - } - } - } -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new MetricsDeployment. -func (in *MetricsDeployment) DeepCopy() *MetricsDeployment { - if in == nil { - return nil - } - out := new(MetricsDeployment) - in.DeepCopyInto(out) - return out -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *MetricsInstance) DeepCopyInto(out *MetricsInstance) { - *out = *in - out.TypeMeta = in.TypeMeta - in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) - in.Spec.DeepCopyInto(&out.Spec) -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new MetricsInstance. -func (in *MetricsInstance) DeepCopy() *MetricsInstance { - if in == nil { - return nil - } - out := new(MetricsInstance) - in.DeepCopyInto(out) - return out -} - -// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. -func (in *MetricsInstance) DeepCopyObject() runtime.Object { - if c := in.DeepCopy(); c != nil { - return c - } - return nil -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
-func (in *MetricsInstanceList) DeepCopyInto(out *MetricsInstanceList) { - *out = *in - out.TypeMeta = in.TypeMeta - in.ListMeta.DeepCopyInto(&out.ListMeta) - if in.Items != nil { - in, out := &in.Items, &out.Items - *out = make([]*MetricsInstance, len(*in)) - for i := range *in { - if (*in)[i] != nil { - in, out := &(*in)[i], &(*out)[i] - *out = new(MetricsInstance) - (*in).DeepCopyInto(*out) - } - } - } -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new MetricsInstanceList. -func (in *MetricsInstanceList) DeepCopy() *MetricsInstanceList { - if in == nil { - return nil - } - out := new(MetricsInstanceList) - in.DeepCopyInto(out) - return out -} - -// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. -func (in *MetricsInstanceList) DeepCopyObject() runtime.Object { - if c := in.DeepCopy(); c != nil { - return c - } - return nil -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *MetricsInstanceSpec) DeepCopyInto(out *MetricsInstanceSpec) { - *out = *in - if in.WriteStaleOnShutdown != nil { - in, out := &in.WriteStaleOnShutdown, &out.WriteStaleOnShutdown - *out = new(bool) - **out = **in - } - if in.ServiceMonitorSelector != nil { - in, out := &in.ServiceMonitorSelector, &out.ServiceMonitorSelector - *out = new(metav1.LabelSelector) - (*in).DeepCopyInto(*out) - } - if in.ServiceMonitorNamespaceSelector != nil { - in, out := &in.ServiceMonitorNamespaceSelector, &out.ServiceMonitorNamespaceSelector - *out = new(metav1.LabelSelector) - (*in).DeepCopyInto(*out) - } - if in.PodMonitorSelector != nil { - in, out := &in.PodMonitorSelector, &out.PodMonitorSelector - *out = new(metav1.LabelSelector) - (*in).DeepCopyInto(*out) - } - if in.PodMonitorNamespaceSelector != nil { - in, out := &in.PodMonitorNamespaceSelector, &out.PodMonitorNamespaceSelector - *out = new(metav1.LabelSelector) - (*in).DeepCopyInto(*out) - } - if in.ProbeSelector != nil { - in, out := &in.ProbeSelector, &out.ProbeSelector - *out = new(metav1.LabelSelector) - (*in).DeepCopyInto(*out) - } - if in.ProbeNamespaceSelector != nil { - in, out := &in.ProbeNamespaceSelector, &out.ProbeNamespaceSelector - *out = new(metav1.LabelSelector) - (*in).DeepCopyInto(*out) - } - if in.RemoteWrite != nil { - in, out := &in.RemoteWrite, &out.RemoteWrite - *out = make([]RemoteWriteSpec, len(*in)) - for i := range *in { - (*in)[i].DeepCopyInto(&(*out)[i]) - } - } - if in.AdditionalScrapeConfigs != nil { - in, out := &in.AdditionalScrapeConfigs, &out.AdditionalScrapeConfigs - *out = new(corev1.SecretKeySelector) - (*in).DeepCopyInto(*out) - } -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new MetricsInstanceSpec. -func (in *MetricsInstanceSpec) DeepCopy() *MetricsInstanceSpec { - if in == nil { - return nil - } - out := new(MetricsInstanceSpec) - in.DeepCopyInto(out) - return out -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
-func (in *MetricsStageSpec) DeepCopyInto(out *MetricsStageSpec) { - *out = *in - if in.MatchAll != nil { - in, out := &in.MatchAll, &out.MatchAll - *out = new(bool) - **out = **in - } - if in.CountEntryBytes != nil { - in, out := &in.CountEntryBytes, &out.CountEntryBytes - *out = new(bool) - **out = **in - } - if in.Buckets != nil { - in, out := &in.Buckets, &out.Buckets - *out = make([]string, len(*in)) - copy(*out, *in) - } -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new MetricsStageSpec. -func (in *MetricsStageSpec) DeepCopy() *MetricsStageSpec { - if in == nil { - return nil - } - out := new(MetricsStageSpec) - in.DeepCopyInto(out) - return out -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *MetricsSubsystemSpec) DeepCopyInto(out *MetricsSubsystemSpec) { - *out = *in - if in.RemoteWrite != nil { - in, out := &in.RemoteWrite, &out.RemoteWrite - *out = make([]RemoteWriteSpec, len(*in)) - for i := range *in { - (*in)[i].DeepCopyInto(&(*out)[i]) - } - } - if in.Replicas != nil { - in, out := &in.Replicas, &out.Replicas - *out = new(int32) - **out = **in - } - if in.Shards != nil { - in, out := &in.Shards, &out.Shards - *out = new(int32) - **out = **in - } - if in.ReplicaExternalLabelName != nil { - in, out := &in.ReplicaExternalLabelName, &out.ReplicaExternalLabelName - *out = new(string) - **out = **in - } - if in.MetricsExternalLabelName != nil { - in, out := &in.MetricsExternalLabelName, &out.MetricsExternalLabelName - *out = new(string) - **out = **in - } - if in.ExternalLabels != nil { - in, out := &in.ExternalLabels, &out.ExternalLabels - *out = make(map[string]string, len(*in)) - for key, val := range *in { - (*out)[key] = val - } - } - out.ArbitraryFSAccessThroughSMs = in.ArbitraryFSAccessThroughSMs - if in.EnforcedSampleLimit != nil { - in, out := &in.EnforcedSampleLimit, &out.EnforcedSampleLimit - *out = new(uint64) - **out = **in - } - if in.EnforcedTargetLimit != nil { - in, out := &in.EnforcedTargetLimit, &out.EnforcedTargetLimit - *out = new(uint64) - **out = **in - } - if in.InstanceSelector != nil { - in, out := &in.InstanceSelector, &out.InstanceSelector - *out = new(metav1.LabelSelector) - (*in).DeepCopyInto(*out) - } - if in.InstanceNamespaceSelector != nil { - in, out := &in.InstanceNamespaceSelector, &out.InstanceNamespaceSelector - *out = new(metav1.LabelSelector) - (*in).DeepCopyInto(*out) - } -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new MetricsSubsystemSpec. -func (in *MetricsSubsystemSpec) DeepCopy() *MetricsSubsystemSpec { - if in == nil { - return nil - } - out := new(MetricsSubsystemSpec) - in.DeepCopyInto(out) - return out -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *MultilineStageSpec) DeepCopyInto(out *MultilineStageSpec) { - *out = *in -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new MultilineStageSpec. -func (in *MultilineStageSpec) DeepCopy() *MultilineStageSpec { - if in == nil { - return nil - } - out := new(MultilineStageSpec) - in.DeepCopyInto(out) - return out -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
-func (in *OutputStageSpec) DeepCopyInto(out *OutputStageSpec) { - *out = *in -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new OutputStageSpec. -func (in *OutputStageSpec) DeepCopy() *OutputStageSpec { - if in == nil { - return nil - } - out := new(OutputStageSpec) - in.DeepCopyInto(out) - return out -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *PackStageSpec) DeepCopyInto(out *PackStageSpec) { - *out = *in - if in.Labels != nil { - in, out := &in.Labels, &out.Labels - *out = make([]string, len(*in)) - copy(*out, *in) - } -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PackStageSpec. -func (in *PackStageSpec) DeepCopy() *PackStageSpec { - if in == nil { - return nil - } - out := new(PackStageSpec) - in.DeepCopyInto(out) - return out -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *PipelineStageSpec) DeepCopyInto(out *PipelineStageSpec) { - *out = *in - if in.CRI != nil { - in, out := &in.CRI, &out.CRI - *out = new(CRIStageSpec) - **out = **in - } - if in.Docker != nil { - in, out := &in.Docker, &out.Docker - *out = new(DockerStageSpec) - **out = **in - } - if in.Drop != nil { - in, out := &in.Drop, &out.Drop - *out = new(DropStageSpec) - **out = **in - } - if in.JSON != nil { - in, out := &in.JSON, &out.JSON - *out = new(JSONStageSpec) - (*in).DeepCopyInto(*out) - } - if in.LabelAllow != nil { - in, out := &in.LabelAllow, &out.LabelAllow - *out = make([]string, len(*in)) - copy(*out, *in) - } - if in.LabelDrop != nil { - in, out := &in.LabelDrop, &out.LabelDrop - *out = make([]string, len(*in)) - copy(*out, *in) - } - if in.Labels != nil { - in, out := &in.Labels, &out.Labels - *out = make(map[string]string, len(*in)) - for key, val := range *in { - (*out)[key] = val - } - } - if in.Limit != nil { - in, out := &in.Limit, &out.Limit - *out = new(LimitStageSpec) - **out = **in - } - if in.Match != nil { - in, out := &in.Match, &out.Match - *out = new(MatchStageSpec) - **out = **in - } - if in.Metrics != nil { - in, out := &in.Metrics, &out.Metrics - *out = make(map[string]MetricsStageSpec, len(*in)) - for key, val := range *in { - (*out)[key] = *val.DeepCopy() - } - } - if in.Multiline != nil { - in, out := &in.Multiline, &out.Multiline - *out = new(MultilineStageSpec) - **out = **in - } - if in.Output != nil { - in, out := &in.Output, &out.Output - *out = new(OutputStageSpec) - **out = **in - } - if in.Pack != nil { - in, out := &in.Pack, &out.Pack - *out = new(PackStageSpec) - (*in).DeepCopyInto(*out) - } - if in.Regex != nil { - in, out := &in.Regex, &out.Regex - *out = new(RegexStageSpec) - **out = **in - } - if in.Replace != nil { - in, out := &in.Replace, &out.Replace - *out = new(ReplaceStageSpec) - **out = **in - } - if in.Template != nil { - in, out := &in.Template, &out.Template - *out = new(TemplateStageSpec) - **out = **in - } - if in.Tenant != nil { - in, out := &in.Tenant, &out.Tenant - *out = new(TenantStageSpec) - **out = **in - } - if in.Timestamp != nil { - in, out := &in.Timestamp, &out.Timestamp - *out = new(TimestampStageSpec) - (*in).DeepCopyInto(*out) - } -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PipelineStageSpec. 
-func (in *PipelineStageSpec) DeepCopy() *PipelineStageSpec { - if in == nil { - return nil - } - out := new(PipelineStageSpec) - in.DeepCopyInto(out) - return out -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *PodLogs) DeepCopyInto(out *PodLogs) { - *out = *in - out.TypeMeta = in.TypeMeta - in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) - in.Spec.DeepCopyInto(&out.Spec) -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PodLogs. -func (in *PodLogs) DeepCopy() *PodLogs { - if in == nil { - return nil - } - out := new(PodLogs) - in.DeepCopyInto(out) - return out -} - -// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. -func (in *PodLogs) DeepCopyObject() runtime.Object { - if c := in.DeepCopy(); c != nil { - return c - } - return nil -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *PodLogsList) DeepCopyInto(out *PodLogsList) { - *out = *in - out.TypeMeta = in.TypeMeta - in.ListMeta.DeepCopyInto(&out.ListMeta) - if in.Items != nil { - in, out := &in.Items, &out.Items - *out = make([]*PodLogs, len(*in)) - for i := range *in { - if (*in)[i] != nil { - in, out := &(*in)[i], &(*out)[i] - *out = new(PodLogs) - (*in).DeepCopyInto(*out) - } - } - } -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PodLogsList. -func (in *PodLogsList) DeepCopy() *PodLogsList { - if in == nil { - return nil - } - out := new(PodLogsList) - in.DeepCopyInto(out) - return out -} - -// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. -func (in *PodLogsList) DeepCopyObject() runtime.Object { - if c := in.DeepCopy(); c != nil { - return c - } - return nil -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *PodLogsSpec) DeepCopyInto(out *PodLogsSpec) { - *out = *in - if in.PodTargetLabels != nil { - in, out := &in.PodTargetLabels, &out.PodTargetLabels - *out = make([]string, len(*in)) - copy(*out, *in) - } - in.Selector.DeepCopyInto(&out.Selector) - in.NamespaceSelector.DeepCopyInto(&out.NamespaceSelector) - if in.PipelineStages != nil { - in, out := &in.PipelineStages, &out.PipelineStages - *out = make([]*PipelineStageSpec, len(*in)) - for i := range *in { - if (*in)[i] != nil { - in, out := &(*in)[i], &(*out)[i] - *out = new(PipelineStageSpec) - (*in).DeepCopyInto(*out) - } - } - } - if in.RelabelConfigs != nil { - in, out := &in.RelabelConfigs, &out.RelabelConfigs - *out = make([]*v1.RelabelConfig, len(*in)) - for i := range *in { - if (*in)[i] != nil { - in, out := &(*in)[i], &(*out)[i] - *out = new(v1.RelabelConfig) - (*in).DeepCopyInto(*out) - } - } - } -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PodLogsSpec. -func (in *PodLogsSpec) DeepCopy() *PodLogsSpec { - if in == nil { - return nil - } - out := new(PodLogsSpec) - in.DeepCopyInto(out) - return out -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *QueueConfig) DeepCopyInto(out *QueueConfig) { - *out = *in -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new QueueConfig. 
-func (in *QueueConfig) DeepCopy() *QueueConfig { - if in == nil { - return nil - } - out := new(QueueConfig) - in.DeepCopyInto(out) - return out -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *RegexStageSpec) DeepCopyInto(out *RegexStageSpec) { - *out = *in -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new RegexStageSpec. -func (in *RegexStageSpec) DeepCopy() *RegexStageSpec { - if in == nil { - return nil - } - out := new(RegexStageSpec) - in.DeepCopyInto(out) - return out -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *RemoteWriteSpec) DeepCopyInto(out *RemoteWriteSpec) { - *out = *in - if in.Headers != nil { - in, out := &in.Headers, &out.Headers - *out = make(map[string]string, len(*in)) - for key, val := range *in { - (*out)[key] = val - } - } - if in.WriteRelabelConfigs != nil { - in, out := &in.WriteRelabelConfigs, &out.WriteRelabelConfigs - *out = make([]v1.RelabelConfig, len(*in)) - for i := range *in { - (*in)[i].DeepCopyInto(&(*out)[i]) - } - } - if in.BasicAuth != nil { - in, out := &in.BasicAuth, &out.BasicAuth - *out = new(v1.BasicAuth) - (*in).DeepCopyInto(*out) - } - if in.OAuth2 != nil { - in, out := &in.OAuth2, &out.OAuth2 - *out = new(v1.OAuth2) - (*in).DeepCopyInto(*out) - } - if in.SigV4 != nil { - in, out := &in.SigV4, &out.SigV4 - *out = new(SigV4Config) - (*in).DeepCopyInto(*out) - } - if in.TLSConfig != nil { - in, out := &in.TLSConfig, &out.TLSConfig - *out = new(v1.TLSConfig) - (*in).DeepCopyInto(*out) - } - if in.QueueConfig != nil { - in, out := &in.QueueConfig, &out.QueueConfig - *out = new(QueueConfig) - **out = **in - } - if in.MetadataConfig != nil { - in, out := &in.MetadataConfig, &out.MetadataConfig - *out = new(MetadataConfig) - **out = **in - } -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new RemoteWriteSpec. -func (in *RemoteWriteSpec) DeepCopy() *RemoteWriteSpec { - if in == nil { - return nil - } - out := new(RemoteWriteSpec) - in.DeepCopyInto(out) - return out -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *ReplaceStageSpec) DeepCopyInto(out *ReplaceStageSpec) { - *out = *in -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ReplaceStageSpec. -func (in *ReplaceStageSpec) DeepCopy() *ReplaceStageSpec { - if in == nil { - return nil - } - out := new(ReplaceStageSpec) - in.DeepCopyInto(out) - return out -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *SigV4Config) DeepCopyInto(out *SigV4Config) { - *out = *in - if in.AccessKey != nil { - in, out := &in.AccessKey, &out.AccessKey - *out = new(corev1.SecretKeySelector) - (*in).DeepCopyInto(*out) - } - if in.SecretKey != nil { - in, out := &in.SecretKey, &out.SecretKey - *out = new(corev1.SecretKeySelector) - (*in).DeepCopyInto(*out) - } -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SigV4Config. -func (in *SigV4Config) DeepCopy() *SigV4Config { - if in == nil { - return nil - } - out := new(SigV4Config) - in.DeepCopyInto(out) - return out -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
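
Every pointer field in the spec types above is handled with the same generated idiom: re-point in and out at the field, allocate a fresh destination, and copy the pointed-to value (**out = **in). A standalone sketch of why that idiom copies the value rather than aliasing the pointer:

package main

import "fmt"

// copyStringPtr restates the idiom generated for pointer fields: allocate a
// new destination, then copy the pointed-to value. The generated form reads
// **out = **in because in and out have been re-pointed at the fields.
func copyStringPtr(in *string) *string {
	if in == nil {
		return nil
	}
	out := new(string)
	*out = *in
	return out
}

func main() {
	src := "primary"
	dst := copyStringPtr(&src)
	src = "replica"
	fmt.Println(*dst) // still "primary": the value was copied, not the pointer
}
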
-func (in *TemplateStageSpec) DeepCopyInto(out *TemplateStageSpec) {
-	*out = *in
-}
-
-// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new TemplateStageSpec.
-func (in *TemplateStageSpec) DeepCopy() *TemplateStageSpec {
-	if in == nil {
-		return nil
-	}
-	out := new(TemplateStageSpec)
-	in.DeepCopyInto(out)
-	return out
-}
-
-// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
-func (in *TenantStageSpec) DeepCopyInto(out *TenantStageSpec) {
-	*out = *in
-}
-
-// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new TenantStageSpec.
-func (in *TenantStageSpec) DeepCopy() *TenantStageSpec {
-	if in == nil {
-		return nil
-	}
-	out := new(TenantStageSpec)
-	in.DeepCopyInto(out)
-	return out
-}
-
-// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
-func (in *TimestampStageSpec) DeepCopyInto(out *TimestampStageSpec) {
-	*out = *in
-	if in.FallbackFormats != nil {
-		in, out := &in.FallbackFormats, &out.FallbackFormats
-		*out = make([]string, len(*in))
-		copy(*out, *in)
-	}
-}
-
-// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new TimestampStageSpec.
-func (in *TimestampStageSpec) DeepCopy() *TimestampStageSpec {
-	if in == nil {
-		return nil
-	}
-	out := new(TimestampStageSpec)
-	in.DeepCopyInto(out)
-	return out
-}
diff --git a/internal/static/operator/assets/assets.go b/internal/static/operator/assets/assets.go
deleted file mode 100644
index 45602c05ed..0000000000
--- a/internal/static/operator/assets/assets.go
+++ /dev/null
@@ -1,60 +0,0 @@
-// Package assets contains helper types used for loading in static assets when
-// configuring the Grafana Agent.
-package assets
-
-import (
-	"fmt"
-
-	prom_v1 "github.com/prometheus-operator/prometheus-operator/pkg/apis/monitoring/v1"
-	v1 "k8s.io/api/core/v1"
-)
-
-// Key is a path-like identifier representing a Secret or ConfigMap value. It is
-// used for looking up values during config generation that cannot be loaded
-// directly from a file (e.g., BasicAuth Username).
-//
-// The naming convention is either:
-//
-//	/secrets/<namespace>/<name>/<key>
-//
-// or:
-//
-//	/configMaps/<namespace>/<name>/<key>
-//
-// Resources associated with a key should be watched for changes and trigger a
-// reconcile when modified.
-type Key string
-
-// SecretStore is an in-memory cache for secrets, intended to be used for
-// static secrets in generated configuration files.
-type SecretStore map[Key]string
-
-// KeyForSecret returns the key for a given namespace and a secret key
-// selector.
-func KeyForSecret(namespace string, sel *v1.SecretKeySelector) Key {
-	if sel == nil {
-		return Key("")
-	}
-	return Key(fmt.Sprintf("/secrets/%s/%s/%s", namespace, sel.Name, sel.Key))
-}
-
-// KeyForConfigMap returns the key for a given namespace and a config map
-// key selector.
-func KeyForConfigMap(namespace string, sel *v1.ConfigMapKeySelector) Key {
-	if sel == nil {
-		return Key("")
-	}
-	return Key(fmt.Sprintf("/configMaps/%s/%s/%s", namespace, sel.Name, sel.Key))
-}
-
-// KeyForSelector retrieves the key for a SecretOrConfigMap.
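
For context on the assets package deleted here: config generation runs against an in-memory snapshot rather than the live API, so referenced values are cached in a SecretStore under those path-like keys. A hedged usage sketch (the namespace and secret names are made up):

package main

import (
	"fmt"

	"github.com/grafana/agent/internal/static/operator/assets"
	v1 "k8s.io/api/core/v1"
)

func main() {
	sel := &v1.SecretKeySelector{
		LocalObjectReference: v1.LocalObjectReference{Name: "remote-write-creds"},
		Key:                  "username",
	}

	key := assets.KeyForSecret("monitoring", sel)
	fmt.Println(key) // /secrets/monitoring/remote-write-creds/username

	// SecretStore is a plain map keyed by those paths, so generated configs
	// can look values up without a live API server round trip.
	store := assets.SecretStore{key: "agent"}
	fmt.Println(store[key])
}
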
-func KeyForSelector(namespace string, sel *prom_v1.SecretOrConfigMap) Key { - switch { - case sel.ConfigMap != nil: - return KeyForConfigMap(namespace, sel.ConfigMap) - case sel.Secret != nil: - return KeyForSecret(namespace, sel.Secret) - default: - return Key("") - } -} diff --git a/internal/static/operator/build_hierarchy.go b/internal/static/operator/build_hierarchy.go deleted file mode 100644 index 0830ad43ca..0000000000 --- a/internal/static/operator/build_hierarchy.go +++ /dev/null @@ -1,297 +0,0 @@ -package operator - -import ( - "context" - "fmt" - - "github.com/go-kit/log" - "github.com/go-kit/log/level" - gragent "github.com/grafana/agent/internal/static/operator/apis/monitoring/v1alpha1" - "github.com/grafana/agent/internal/static/operator/assets" - "github.com/grafana/agent/internal/static/operator/config" - "github.com/grafana/agent/internal/static/operator/hierarchy" - prom "github.com/prometheus-operator/prometheus-operator/pkg/apis/monitoring/v1" - corev1 "k8s.io/api/core/v1" - "k8s.io/apimachinery/pkg/api/meta" - metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" - "k8s.io/apimachinery/pkg/runtime" - "sigs.k8s.io/controller-runtime/pkg/client" - "sigs.k8s.io/controller-runtime/pkg/client/apiutil" -) - -// buildHierarchy constructs a resource hierarchy starting from root. -func buildHierarchy(ctx context.Context, l log.Logger, cli client.Client, root *gragent.GrafanaAgent) (deployment gragent.Deployment, watchers []hierarchy.Watcher, err error) { - deployment.Agent = root - - // search is used throughout BuildHierarchy, where it will perform a list for - // a set of objects in the hierarchy and populate the watchers return - // variable. - search := func(resources []hierarchyResource) error { - for _, res := range resources { - sel, err := res.Find(ctx, cli) - if err != nil { - gvk, _ := apiutil.GVKForObject(res.List, cli.Scheme()) - return fmt.Errorf("failed to find %q resource: %w", gvk.String(), err) - } - - watchers = append(watchers, hierarchy.Watcher{ - Object: res.Selector.ObjectType, - Owner: client.ObjectKeyFromObject(root), - Selector: sel, - }) - } - return nil - } - - // Root resources - var ( - metricInstances gragent.MetricsInstanceList - logsInstances gragent.LogsInstanceList - integrations gragent.IntegrationList - ) - var roots = []hierarchyResource{ - {List: &metricInstances, Selector: root.MetricsInstanceSelector()}, - {List: &logsInstances, Selector: root.LogsInstanceSelector()}, - {List: &integrations, Selector: root.IntegrationsSelector()}, - } - if err := search(roots); err != nil { - return deployment, nil, err - } - - // Metrics resources - for _, metricsInst := range metricInstances.Items { - var ( - serviceMonitors prom.ServiceMonitorList - podMonitors prom.PodMonitorList - probes prom.ProbeList - ) - var children = []hierarchyResource{ - {List: &serviceMonitors, Selector: metricsInst.ServiceMonitorSelector()}, - {List: &podMonitors, Selector: metricsInst.PodMonitorSelector()}, - {List: &probes, Selector: metricsInst.ProbeSelector()}, - } - if err := search(children); err != nil { - return deployment, nil, err - } - - deployment.Metrics = append(deployment.Metrics, gragent.MetricsDeployment{ - Instance: metricsInst, - ServiceMonitors: filterServiceMonitors(l, root, &serviceMonitors).Items, - PodMonitors: podMonitors.Items, - Probes: probes.Items, - }) - } - - // Logs resources - for _, logsInst := range logsInstances.Items { - var ( - podLogs gragent.PodLogsList - ) - var children = []hierarchyResource{ - {List: &podLogs, Selector: 
logsInst.PodLogsSelector()}, - } - if err := search(children); err != nil { - return deployment, nil, err - } - - deployment.Logs = append(deployment.Logs, gragent.LogsDeployment{ - Instance: logsInst, - PodLogs: podLogs.Items, - }) - } - - // Integration resources - for _, integration := range integrations.Items { - deployment.Integrations = append(deployment.Integrations, gragent.IntegrationsDeployment{ - Instance: integration, - }) - } - - // Finally, find all referenced secrets - secrets, secretWatchers, err := buildSecrets(ctx, cli, deployment) - if err != nil { - return deployment, nil, fmt.Errorf("failed to discover secrets: %w", err) - } - deployment.Secrets = secrets - watchers = append(watchers, secretWatchers...) - - return deployment, watchers, nil -} - -type hierarchyResource struct { - List client.ObjectList // List to populate - Selector gragent.ObjectSelector // Raw selector to use for list -} - -func (hr *hierarchyResource) Find(ctx context.Context, cli client.Client) (hierarchy.Selector, error) { - sel, err := toSelector(hr.Selector) - if err != nil { - return nil, fmt.Errorf("failed to build selector: %w", err) - } - err = hierarchy.List(ctx, cli, hr.List, sel) - if err != nil { - return nil, fmt.Errorf("failed to list resources: %w", err) - } - return sel, nil -} - -func toSelector(os gragent.ObjectSelector) (hierarchy.Selector, error) { - var res hierarchy.LabelsSelector - res.NamespaceName = os.ParentNamespace - - if os.NamespaceSelector != nil { - sel, err := metav1.LabelSelectorAsSelector(os.NamespaceSelector) - if err != nil { - return nil, fmt.Errorf("invalid namespace selector: %w", err) - } - res.NamespaceLabels = sel - } - - sel, err := metav1.LabelSelectorAsSelector(os.Labels) - if err != nil { - return nil, fmt.Errorf("invalid label selector: %w", err) - } - res.Labels = sel - return &res, nil -} - -func filterServiceMonitors(l log.Logger, root *gragent.GrafanaAgent, list *prom.ServiceMonitorList) *prom.ServiceMonitorList { - items := make([]*prom.ServiceMonitor, 0, len(list.Items)) - -Item: - for _, item := range list.Items { - if root.Spec.Metrics.ArbitraryFSAccessThroughSMs.Deny { - for _, ep := range item.Spec.Endpoints { - err := testForArbitraryFSAccess(ep) - if err == nil { - continue - } - - level.Warn(l).Log( - "msg", "skipping service monitor", - "agent", client.ObjectKeyFromObject(root), - "servicemonitor", client.ObjectKeyFromObject(item), - "err", err, - ) - continue Item - } - } - items = append(items, item) - } - - return &prom.ServiceMonitorList{ - TypeMeta: list.TypeMeta, - ListMeta: *list.ListMeta.DeepCopy(), - Items: items, - } -} - -func testForArbitraryFSAccess(e prom.Endpoint) error { - if e.BearerTokenFile != "" { - return fmt.Errorf("it accesses file system via bearer token file which is disallowed via GrafanaAgent specification") - } - - if e.TLSConfig == nil { - return nil - } - - if e.TLSConfig.CAFile != "" || e.TLSConfig.CertFile != "" || e.TLSConfig.KeyFile != "" { - return fmt.Errorf("it accesses file system via TLS config which is disallowed via GrafanaAgent specification") - } - - return nil -} - -func buildSecrets(ctx context.Context, cli client.Client, deploy gragent.Deployment) (secrets assets.SecretStore, watchers []hierarchy.Watcher, err error) { - secrets = make(assets.SecretStore) - - // KeySelector caches to make sure we don't create duplicate watchers. 
- var ( - usedSecretSelectors = map[hierarchy.KeySelector]struct{}{} - usedConfigMapSelectors = map[hierarchy.KeySelector]struct{}{} - ) - - for _, ref := range config.AssetReferences(deploy) { - var ( - objectList client.ObjectList - sel hierarchy.KeySelector - ) - - switch { - case ref.Reference.Secret != nil: - objectList = &corev1.SecretList{} - sel = hierarchy.KeySelector{ - Namespace: ref.Namespace, - Name: ref.Reference.Secret.Name, - } - case ref.Reference.ConfigMap != nil: - objectList = &corev1.ConfigMapList{} - sel = hierarchy.KeySelector{ - Namespace: ref.Namespace, - Name: ref.Reference.ConfigMap.Name, - } - } - - gvk, _ := apiutil.GVKForObject(objectList, cli.Scheme()) - if err := hierarchy.List(ctx, cli, objectList, &sel); err != nil { - return nil, nil, fmt.Errorf("failed to find %q resource: %w", gvk.String(), err) - } - - err := meta.EachListItem(objectList, func(o runtime.Object) error { - var value string - - switch o := o.(type) { - case *corev1.Secret: - rawValue, ok := o.Data[ref.Reference.Secret.Key] - if !ok { - return fmt.Errorf("no key %s in Secret %s", ref.Reference.Secret.Key, o.Name) - } - value = string(rawValue) - case *corev1.ConfigMap: - var ( - dataValue, dataFound = o.Data[ref.Reference.ConfigMap.Key] - binaryValue, binaryFound = o.BinaryData[ref.Reference.ConfigMap.Key] - ) - - if dataFound { - value = dataValue - } else if binaryFound { - value = string(binaryValue) - } else { - return fmt.Errorf("no key %s in ConfigMap %s", ref.Reference.ConfigMap.Key, o.Name) - } - } - - secrets[assets.KeyForSelector(ref.Namespace, &ref.Reference)] = value - return nil - }) - if err != nil { - return nil, nil, fmt.Errorf("failed to iterate over %q list: %w", gvk.String(), err) - } - - switch { - case ref.Reference.Secret != nil: - if _, used := usedSecretSelectors[sel]; used { - continue - } - watchers = append(watchers, hierarchy.Watcher{ - Object: &corev1.Secret{}, - Owner: client.ObjectKeyFromObject(deploy.Agent), - Selector: &sel, - }) - usedSecretSelectors[sel] = struct{}{} - case ref.Reference.ConfigMap != nil: - if _, used := usedConfigMapSelectors[sel]; used { - continue - } - watchers = append(watchers, hierarchy.Watcher{ - Object: &corev1.ConfigMap{}, - Owner: client.ObjectKeyFromObject(deploy.Agent), - Selector: &sel, - }) - usedConfigMapSelectors[sel] = struct{}{} - } - } - - return secrets, watchers, nil -} diff --git a/internal/static/operator/build_hierarchy_test.go b/internal/static/operator/build_hierarchy_test.go deleted file mode 100644 index 735545f0c0..0000000000 --- a/internal/static/operator/build_hierarchy_test.go +++ /dev/null @@ -1,202 +0,0 @@ -//go:build !nonetwork && !nodocker && !race - -package operator - -import ( - "context" - "fmt" - "sync" - "testing" - "time" - - gragent "github.com/grafana/agent/internal/static/operator/apis/monitoring/v1alpha1" - "github.com/grafana/agent/internal/static/operator/hierarchy" - "github.com/grafana/agent/internal/util" - "github.com/grafana/agent/internal/util/k8s" - "github.com/grafana/agent/internal/util/structwalk" - prom "github.com/prometheus-operator/prometheus-operator/pkg/apis/monitoring/v1" - "github.com/stretchr/testify/require" - v1 "k8s.io/api/core/v1" - "k8s.io/apimachinery/pkg/labels" - controller "sigs.k8s.io/controller-runtime" - "sigs.k8s.io/controller-runtime/pkg/client" - "sigs.k8s.io/controller-runtime/pkg/client/apiutil" - "sigs.k8s.io/controller-runtime/pkg/manager" -) - -// Test_buildHierarchy checks that an entire resource hierarchy can be -// discovered. 
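
The selector conversion above (toSelector, via metav1.LabelSelectorAsSelector) has two edge cases worth spelling out, because they explain the labels.Everything() and labels.Nothing() values the test below expects. A small sketch:

package main

import (
	"fmt"

	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/apimachinery/pkg/labels"
)

func main() {
	// The declarative selector stored on a CRD spec must be converted to a
	// labels.Selector before it can match live objects.
	raw := &metav1.LabelSelector{MatchLabels: map[string]string{"instance": "primary"}}
	sel, err := metav1.LabelSelectorAsSelector(raw)
	if err != nil {
		panic(err)
	}
	fmt.Println(sel.Matches(labels.Set{"instance": "primary"})) // true

	// Edge cases: a nil LabelSelector converts to labels.Nothing(), which
	// matches no objects; an empty &metav1.LabelSelector{} converts to
	// labels.Everything(), which matches all of them.
	nothing, _ := metav1.LabelSelectorAsSelector(nil)
	everything, _ := metav1.LabelSelectorAsSelector(&metav1.LabelSelector{})
	fmt.Println(nothing.Matches(labels.Set{}), everything.Matches(labels.Set{})) // false true
}
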
-func Test_buildHierarchy(t *testing.T) { - // TODO: this is broken with go 1.20.6 - // waiting on https://github.com/testcontainers/testcontainers-go/issues/1359 - t.Skip() - var wg sync.WaitGroup - defer wg.Wait() - - ctx, cancel := context.WithTimeout(context.Background(), 3*time.Minute) - defer cancel() - - l := util.TestLogger(t) - cluster := NewTestCluster(ctx, t, l) - cli := newTestControllerClient(t, cluster) - - resources := k8s.NewResourceSet(l, cluster) - defer resources.Stop() - require.NoError(t, resources.AddFile(ctx, "./testdata/test-resource-hierarchy.yaml")) - - // Get root resource - var root gragent.GrafanaAgent - err := cli.Get(ctx, client.ObjectKey{Namespace: "default", Name: "grafana-agent-example"}, &root) - require.NoError(t, err) - - deployment, watchers, err := buildHierarchy(ctx, l, cli, &root) - require.NoError(t, err) - - // Check resources in hierarchy - { - expectedResources := []string{ - "GrafanaAgent/grafana-agent-example", - "MetricsInstance/primary", - "Integration/node-exporter", - "LogsInstance/primary", - "PodMonitor/grafana-agents", - "PodLogs/grafana-agents", - } - var gotResources []string - structwalk.Walk(&resourceWalker{ - onResource: func(c client.Object) { - gvk, _ := apiutil.GVKForObject(c, cli.Scheme()) - - key := fmt.Sprintf("%s/%s", gvk.Kind, c.GetName()) - gotResources = append(gotResources, key) - }, - }, deployment) - - require.ElementsMatch(t, expectedResources, gotResources) - } - - // Check secrets - { - expectedSecrets := []string{ - "/secrets/default/prometheus-fake-credentials/fakeUsername", - "/secrets/default/prometheus-fake-credentials/fakePassword", - } - var actualSecrets []string - for key := range deployment.Secrets { - actualSecrets = append(actualSecrets, string(key)) - } - - require.ElementsMatch(t, expectedSecrets, actualSecrets) - } - - // Check configured watchers - { - expectedWatchers := []hierarchy.Watcher{ - { - Object: &gragent.MetricsInstance{}, - Owner: client.ObjectKey{Namespace: "default", Name: "grafana-agent-example"}, - Selector: &hierarchy.LabelsSelector{ - NamespaceName: "default", - Labels: labels.SelectorFromSet(labels.Set{"agent": "grafana-agent-example"}), - }, - }, - { - Object: &gragent.LogsInstance{}, - Owner: client.ObjectKey{Namespace: "default", Name: "grafana-agent-example"}, - Selector: &hierarchy.LabelsSelector{ - NamespaceName: "default", - Labels: labels.SelectorFromSet(labels.Set{"agent": "grafana-agent-example"}), - }, - }, - { - Object: &gragent.Integration{}, - Owner: client.ObjectKey{Namespace: "default", Name: "grafana-agent-example"}, - Selector: &hierarchy.LabelsSelector{ - NamespaceName: "default", - Labels: labels.SelectorFromSet(labels.Set{"agent": "grafana-agent-example"}), - }, - }, - { - Object: &prom.ServiceMonitor{}, - Owner: client.ObjectKey{Namespace: "default", Name: "grafana-agent-example"}, - Selector: &hierarchy.LabelsSelector{ - NamespaceName: "default", - NamespaceLabels: labels.Everything(), - Labels: labels.SelectorFromSet(labels.Set{"instance": "primary"}), - }, - }, - { - Object: &prom.PodMonitor{}, - Owner: client.ObjectKey{Namespace: "default", Name: "grafana-agent-example"}, - Selector: &hierarchy.LabelsSelector{ - NamespaceName: "default", - NamespaceLabels: labels.Everything(), - Labels: labels.SelectorFromSet(labels.Set{"instance": "primary"}), - }, - }, - { - Object: &prom.Probe{}, - Owner: client.ObjectKey{Namespace: "default", Name: "grafana-agent-example"}, - Selector: &hierarchy.LabelsSelector{ - NamespaceName: "default", - Labels: labels.Nothing(), - 
}, - }, - { - Object: &gragent.PodLogs{}, - Owner: client.ObjectKey{Namespace: "default", Name: "grafana-agent-example"}, - Selector: &hierarchy.LabelsSelector{ - NamespaceName: "default", - NamespaceLabels: labels.Everything(), - Labels: labels.SelectorFromSet(labels.Set{"instance": "primary"}), - }, - }, - { - Object: &v1.Secret{}, - Owner: client.ObjectKey{Namespace: "default", Name: "grafana-agent-example"}, - Selector: &hierarchy.KeySelector{ - Namespace: "default", - Name: "prometheus-fake-credentials", - }, - }, - } - require.ElementsMatch(t, expectedWatchers, watchers) - } -} - -type resourceWalker struct { - onResource func(c client.Object) -} - -func (w *resourceWalker) Visit(v interface{}) (next structwalk.Visitor) { - if v == nil { - return nil - } - if obj, ok := v.(client.Object); ok { - w.onResource(obj) - } - return w -} - -// newTestControllerClient creates a Kubernetes client which uses a cache and -// index for retrieving objects. This more closely matches the behavior of the -// operator instead of using cluster.Client, which lacks a cache and always -// communicates directly with Kubernetes. -func newTestControllerClient(t *testing.T, cluster *k8s.Cluster) client.Client { - t.Helper() - - ctx, cancel := context.WithCancel(context.Background()) - t.Cleanup(cancel) - - mgr, err := controller.NewManager(cluster.GetConfig(), manager.Options{ - Scheme: cluster.Client().Scheme(), - }) - require.NoError(t, err) - - go func() { - require.NoError(t, mgr.Start(ctx)) - }() - require.True(t, mgr.GetCache().WaitForCacheSync(ctx)) - - return mgr.GetClient() -} diff --git a/internal/static/operator/clientutil/clientutil.go b/internal/static/operator/clientutil/clientutil.go deleted file mode 100644 index 88710a330b..0000000000 --- a/internal/static/operator/clientutil/clientutil.go +++ /dev/null @@ -1,268 +0,0 @@ -package clientutil - -import ( - "context" - "fmt" - "regexp" - "strings" - - "github.com/go-kit/log" - "github.com/go-kit/log/level" - apps_v1 "k8s.io/api/apps/v1" - v1 "k8s.io/api/core/v1" - k8s_errors "k8s.io/apimachinery/pkg/api/errors" - meta_v1 "k8s.io/apimachinery/pkg/apis/meta/v1" - "k8s.io/apimachinery/pkg/types" - "k8s.io/apimachinery/pkg/util/validation" - "sigs.k8s.io/controller-runtime/pkg/client" -) - -var invalidDNS1123Characters = regexp.MustCompile("[^-a-z0-9]+") - -// SanitizeVolumeName ensures that the given volume name is a valid DNS-1123 label -// accepted by Kubernetes. -// -// Copied from github.com/prometheus-operator/prometheus-operator/pkg/k8sutil. -func SanitizeVolumeName(name string) string { - name = strings.ToLower(name) - name = invalidDNS1123Characters.ReplaceAllString(name, "-") - if len(name) > validation.DNS1123LabelMaxLength { - name = name[0:validation.DNS1123LabelMaxLength] - } - return strings.Trim(name, "-") -} - -// CreateOrUpdateSecret applies the given secret against the client. 
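
SanitizeVolumeName, defined above, exists because Kubernetes rejects volume names that are not DNS-1123 labels (lowercase alphanumerics or '-', at most 63 characters), while Secret and ConfigMap names routinely contain '.' or uppercase. A quick usage sketch (the input name is invented):

package main

import (
	"fmt"

	"github.com/grafana/agent/internal/static/operator/clientutil"
)

func main() {
	// Lowercases, squashes each run of invalid characters to a single '-',
	// truncates to 63 characters, then trims leading/trailing '-'.
	fmt.Println(clientutil.SanitizeVolumeName("Secret.TLS_Assets"))
	// Output: secret-tls-assets
}
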
-func CreateOrUpdateSecret(ctx context.Context, c client.Client, s *v1.Secret) error {
-	var exist v1.Secret
-	err := c.Get(ctx, client.ObjectKeyFromObject(s), &exist)
-	if err != nil && !k8s_errors.IsNotFound(err) {
-		return fmt.Errorf("failed to retrieve existing secret: %w", err)
-	}
-
-	if k8s_errors.IsNotFound(err) {
-		err := c.Create(ctx, s)
-		if err != nil {
-			return fmt.Errorf("failed to create secret: %w", err)
-		}
-	} else {
-		s.ResourceVersion = exist.ResourceVersion
-		s.SetOwnerReferences(mergeOwnerReferences(s.GetOwnerReferences(), exist.GetOwnerReferences()))
-		s.SetLabels(mergeMaps(s.Labels, exist.Labels))
-		s.SetAnnotations(mergeMaps(s.Annotations, exist.Annotations))
-
-		err := c.Update(ctx, s)
-		if err != nil && !k8s_errors.IsNotFound(err) {
-			return fmt.Errorf("failed to update secret: %w", err)
-		}
-	}
-
-	return nil
-}
-
-// CreateOrUpdateService applies the given svc against the client.
-func CreateOrUpdateService(ctx context.Context, c client.Client, svc *v1.Service) error {
-	var exist v1.Service
-	err := c.Get(ctx, client.ObjectKeyFromObject(svc), &exist)
-	if err != nil && !k8s_errors.IsNotFound(err) {
-		return fmt.Errorf("failed to retrieve existing service: %w", err)
-	}
-
-	if k8s_errors.IsNotFound(err) {
-		err := c.Create(ctx, svc)
-		if err != nil {
-			return fmt.Errorf("failed to create service: %w", err)
-		}
-	} else {
-		svc.ResourceVersion = exist.ResourceVersion
-		svc.Spec.IPFamilies = exist.Spec.IPFamilies
-		svc.SetOwnerReferences(mergeOwnerReferences(svc.GetOwnerReferences(), exist.GetOwnerReferences()))
-		svc.SetLabels(mergeMaps(svc.Labels, exist.Labels))
-		svc.SetAnnotations(mergeMaps(svc.Annotations, exist.Annotations))
-
-		err := c.Update(ctx, svc)
-		if err != nil && !k8s_errors.IsNotFound(err) {
-			return fmt.Errorf("failed to update service: %w", err)
-		}
-	}
-
-	return nil
-}
-
-// CreateOrUpdateEndpoints applies the given eps against the client.
-func CreateOrUpdateEndpoints(ctx context.Context, c client.Client, eps *v1.Endpoints) error {
-	var exist v1.Endpoints
-	err := c.Get(ctx, client.ObjectKeyFromObject(eps), &exist)
-	if err != nil && !k8s_errors.IsNotFound(err) {
-		return fmt.Errorf("failed to retrieve existing endpoints: %w", err)
-	}
-
-	if k8s_errors.IsNotFound(err) {
-		err := c.Create(ctx, eps)
-		if err != nil {
-			return fmt.Errorf("failed to create endpoints: %w", err)
-		}
-	} else {
-		eps.ResourceVersion = exist.ResourceVersion
-		eps.SetOwnerReferences(mergeOwnerReferences(eps.GetOwnerReferences(), exist.GetOwnerReferences()))
-		eps.SetLabels(mergeMaps(eps.Labels, exist.Labels))
-		eps.SetAnnotations(mergeMaps(eps.Annotations, exist.Annotations))
-
-		err := c.Update(ctx, eps)
-		if err != nil && !k8s_errors.IsNotFound(err) {
-			return fmt.Errorf("failed to update endpoints: %w", err)
-		}
-	}
-
-	return nil
-}
-
-// CreateOrUpdateStatefulSet applies the given StatefulSet against the client.
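
All of the CreateOrUpdate helpers in this file share one shape; the non-obvious part is carrying the live object's ResourceVersion onto the desired object so the update passes the API server's optimistic-concurrency check. A condensed sketch of that shape, using a Service for concreteness (createOrUpdate is a hypothetical name, not part of the deleted package):

package example

import (
	"context"

	v1 "k8s.io/api/core/v1"
	k8s_errors "k8s.io/apimachinery/pkg/api/errors"
	"sigs.k8s.io/controller-runtime/pkg/client"
)

// createOrUpdate reads the live object, creates it if absent, and otherwise
// copies the live ResourceVersion onto the desired object before updating;
// without that, the API server rejects the write as a stale update.
func createOrUpdate(ctx context.Context, c client.Client, desired *v1.Service) error {
	var live v1.Service
	err := c.Get(ctx, client.ObjectKeyFromObject(desired), &live)
	switch {
	case k8s_errors.IsNotFound(err):
		return c.Create(ctx, desired)
	case err != nil:
		return err
	}
	desired.ResourceVersion = live.ResourceVersion
	return c.Update(ctx, desired)
}
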
-func CreateOrUpdateStatefulSet(ctx context.Context, c client.Client, ss *apps_v1.StatefulSet, l log.Logger) error {
-	var exist apps_v1.StatefulSet
-	err := c.Get(ctx, client.ObjectKeyFromObject(ss), &exist)
-	if err != nil && !k8s_errors.IsNotFound(err) {
-		return fmt.Errorf("failed to retrieve existing statefulset: %w", err)
-	}
-
-	if k8s_errors.IsNotFound(err) {
-		err := c.Create(ctx, ss)
-		if err != nil {
-			return fmt.Errorf("failed to create statefulset: %w", err)
-		}
-	} else {
-		ss.ResourceVersion = exist.ResourceVersion
-		ss.SetOwnerReferences(mergeOwnerReferences(ss.GetOwnerReferences(), exist.GetOwnerReferences()))
-		ss.SetLabels(mergeMaps(ss.Labels, exist.Labels))
-		ss.SetAnnotations(mergeMaps(ss.Annotations, exist.Annotations))
-
-		err := c.Update(ctx, ss)
-		// Statefulsets have a large number of fields that are immutable after creation,
-		// so we sometimes need to delete and recreate.
-		// We should be mindful when making changes to try and avoid this when possible.
-		if k8s_errors.IsNotAcceptable(err) || k8s_errors.IsInvalid(err) {
-			level.Error(l).Log("msg", "error updating StatefulSet. Attempting to recreate", "err", err.Error())
-			// Resource version should only be set when updating
-			ss.ResourceVersion = ""
-
-			// do a quicker deletion of the old statefulset to minimize downtime before we spin up new pods
-			err = c.Delete(ctx, ss, client.GracePeriodSeconds(5))
-			if err != nil {
-				return fmt.Errorf("failed to update statefulset when deleting old statefulset: %w", err)
-			}
-			err = c.Create(ctx, ss)
-			if err != nil {
-				return fmt.Errorf("failed to update statefulset when creating replacement statefulset: %w", err)
-			}
-		} else if err != nil {
-			return fmt.Errorf("failed to update statefulset: %w", err)
-		}
-	}
-
-	return nil
-}
-
-// CreateOrUpdateDaemonSet applies the given DaemonSet against the client.
-func CreateOrUpdateDaemonSet(ctx context.Context, c client.Client, ss *apps_v1.DaemonSet, l log.Logger) error {
-	var exist apps_v1.DaemonSet
-	err := c.Get(ctx, client.ObjectKeyFromObject(ss), &exist)
-	if err != nil && !k8s_errors.IsNotFound(err) {
-		return fmt.Errorf("failed to retrieve existing daemonset: %w", err)
-	}
-
-	if k8s_errors.IsNotFound(err) {
-		err := c.Create(ctx, ss)
-		if err != nil {
-			return fmt.Errorf("failed to create daemonset: %w", err)
-		}
-	} else {
-		ss.ResourceVersion = exist.ResourceVersion
-		ss.SetOwnerReferences(mergeOwnerReferences(ss.GetOwnerReferences(), exist.GetOwnerReferences()))
-		ss.SetLabels(mergeMaps(ss.Labels, exist.Labels))
-		ss.SetAnnotations(mergeMaps(ss.Annotations, exist.Annotations))
-
-		err := c.Update(ctx, ss)
-		if k8s_errors.IsNotAcceptable(err) || k8s_errors.IsInvalid(err) {
-			level.Error(l).Log("msg", "error updating DaemonSet. Attempting to recreate", "err", err.Error())
-			// Resource version should only be set when updating
-			ss.ResourceVersion = ""
-
-			err = c.Delete(ctx, ss)
-			if err != nil {
-				return fmt.Errorf("failed to update daemonset: deleting old daemonset: %w", err)
-			}
-			err = c.Create(ctx, ss)
-			if err != nil {
-				return fmt.Errorf("failed to update daemonset: creating new daemonset: %w", err)
-			}
-		} else if err != nil {
-			return fmt.Errorf("failed to update daemonset: %w", err)
-		}
-	}
-
-	return nil
-}
-
-// CreateOrUpdateDeployment applies the given Deployment against the client.
-func CreateOrUpdateDeployment(ctx context.Context, c client.Client, d *apps_v1.Deployment, l log.Logger) error { - var exist apps_v1.Deployment - err := c.Get(ctx, client.ObjectKeyFromObject(d), &exist) - if err != nil && !k8s_errors.IsNotFound(err) { - return fmt.Errorf("failed to retrieve existing Deployment: %w", err) - } - - if k8s_errors.IsNotFound(err) { - err := c.Create(ctx, d) - if err != nil { - return fmt.Errorf("failed to create Deployment: %w", err) - } - } else { - d.ResourceVersion = exist.ResourceVersion - d.SetOwnerReferences(mergeOwnerReferences(d.GetOwnerReferences(), exist.GetOwnerReferences())) - d.SetLabels(mergeMaps(d.Labels, exist.Labels)) - d.SetAnnotations(mergeMaps(d.Annotations, exist.Annotations)) - - err := c.Update(ctx, d) - if k8s_errors.IsNotAcceptable(err) || k8s_errors.IsInvalid(err) { - level.Error(l).Log("msg", "error updating Deployment. Attempting to recreate", "err", err.Error()) - // Resource version should only be set when updating - d.ResourceVersion = "" - - err = c.Delete(ctx, d) - if err != nil { - return fmt.Errorf("failed to update Deployment: deleting old Deployment: %w", err) - } - err = c.Create(ctx, d) - if err != nil { - return fmt.Errorf("failed to update Deployment: creating new Deployment: %w", err) - } - } else if err != nil { - return fmt.Errorf("failed to update Deployment: %w", err) - } - } - - return nil -} - -func mergeOwnerReferences(new, old []meta_v1.OwnerReference) []meta_v1.OwnerReference { - existing := make(map[types.UID]bool) - for _, ref := range old { - existing[ref.UID] = true - } - for _, ref := range new { - if _, ok := existing[ref.UID]; !ok { - old = append(old, ref) - } - } - return old -} - -func mergeMaps(new, old map[string]string) map[string]string { - if old == nil { - old = make(map[string]string, len(new)) - } - for k, v := range new { - old[k] = v - } - return old -} diff --git a/internal/static/operator/clientutil/merge.go b/internal/static/operator/clientutil/merge.go deleted file mode 100644 index 5e43e951a8..0000000000 --- a/internal/static/operator/clientutil/merge.go +++ /dev/null @@ -1,64 +0,0 @@ -package clientutil - -import ( - "encoding/json" - "fmt" - - v1 "k8s.io/api/core/v1" - "k8s.io/apimachinery/pkg/util/strategicpatch" -) - -// MergePatchContainers adds patches to base using a strategic merge patch and -// iterating by container name, failing on the first error. -// -// Copied from github.com/prometheus-operator/prometheus-operator/pkg/k8sutil. 
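Before the implementation, a self-contained sketch of what a single strategic merge of two containers does, assuming only k8s.io/api and k8s.io/apimachinery; the container names and images are invented for illustration:

package main // illustrative sketch only

import (
    "encoding/json"
    "fmt"

    v1 "k8s.io/api/core/v1"
    "k8s.io/apimachinery/pkg/util/strategicpatch"
)

func main() {
    base := v1.Container{Name: "agent", Image: "grafana/agent:old", Args: []string{"-config.file", "/etc/agent.yaml"}}
    patch := v1.Container{Name: "agent", Image: "grafana/agent:new"}

    baseJSON, _ := json.Marshal(base) // error handling elided for brevity
    patchJSON, _ := json.Marshal(patch)

    // The third argument supplies the struct whose patchStrategy/patchMergeKey
    // tags drive the merge, exactly as in MergePatchContainers below.
    merged, err := strategicpatch.StrategicMergePatch(baseJSON, patchJSON, v1.Container{})
    if err != nil {
        panic(err)
    }
    fmt.Println(string(merged)) // Image replaced; Name and Args kept from base
}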
-func MergePatchContainers(base, patches []v1.Container) ([]v1.Container, error) { - var out []v1.Container - - // map of containers that still need to be patched by name - containersToPatch := make(map[string]v1.Container) - for _, c := range patches { - containersToPatch[c.Name] = c - } - - for _, container := range base { - // If we have a patch result, iterate over each container and try and calculate the patch - if patchContainer, ok := containersToPatch[container.Name]; ok { - // Get the json for the container and the patch - containerBytes, err := json.Marshal(container) - if err != nil { - return nil, fmt.Errorf("failed to marshal json for container %s: %w", container.Name, err) - } - patchBytes, err := json.Marshal(patchContainer) - if err != nil { - return nil, fmt.Errorf("failed to marshal json for patch container %s: %w", container.Name, err) - } - - // Calculate the patch result - jsonResult, err := strategicpatch.StrategicMergePatch(containerBytes, patchBytes, v1.Container{}) - if err != nil { - return nil, fmt.Errorf("failed to generate merge patch for %s: %w", container.Name, err) - } - var patchResult v1.Container - if err := json.Unmarshal(jsonResult, &patchResult); err != nil { - return nil, fmt.Errorf("failed to unmarshal merged container %s: %w", container.Name, err) - } - - // Add the patch result and remove the corresponding key from the to do list - out = append(out, patchResult) - delete(containersToPatch, container.Name) - } else { - // This container didn't need to be patched - out = append(out, container) - } - } - - // Iterate over the patches and add all the containers that were not previously part of a patch result - for _, container := range patches { - if _, ok := containersToPatch[container.Name]; ok { - out = append(out, container) - } - } - - return out, nil -} diff --git a/internal/static/operator/config/config.go b/internal/static/operator/config/config.go deleted file mode 100644 index ea32ddaf0a..0000000000 --- a/internal/static/operator/config/config.go +++ /dev/null @@ -1,176 +0,0 @@ -// Package config generates Grafana Agent configuration based on Kubernetes -// resources. -package config - -import ( - "embed" - "encoding/json" - "fmt" - "io/fs" - "path" - - "github.com/fatih/structs" - jsonnet "github.com/google/go-jsonnet" - "github.com/google/go-jsonnet/ast" - gragent "github.com/grafana/agent/internal/static/operator/apis/monitoring/v1alpha1" - "github.com/grafana/agent/internal/static/operator/assets" - "gopkg.in/yaml.v3" -) - -// Type is the type of Agent deployment that a config is being generated -// for. -type Type int - -const ( - // MetricsType generates a configuration for metrics. - MetricsType Type = iota + 1 - // LogsType generates a configuration for logs. - LogsType - // IntegrationsType generates a configuration for integrations. - IntegrationsType -) - -// String returns the string form of Type. -func (t Type) String() string { - switch t { - case MetricsType: - return "metrics" - case LogsType: - return "logs" - case IntegrationsType: - return "integrations" - default: - return fmt.Sprintf("unknown (%d)", int(t)) - } -} - -//go:embed templates/* -var templates embed.FS - -// TODO(rfratto): the "Optional" field of secrets is currently ignored. - -// BuildConfig builds an Agent configuration file. 
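As a sketch of the Jsonnet plumbing BuildConfig relies on, the following minimal program binds a top-level argument the way BuildConfig binds ctx, assuming github.com/google/go-jsonnet; the inline snippet stands in for the real agent-*.libsonnet templates:

package main // illustrative sketch only

import (
    "fmt"

    jsonnet "github.com/google/go-jsonnet"
)

func main() {
    vm := jsonnet.MakeVM()
    vm.StringOutput = true // as in createVM: templates return YAML strings, not JSON

    // BuildConfig marshals the whole Deployment and binds it as the "ctx"
    // top-level argument; a stub object stands in for it here.
    vm.TLACode("ctx", `{ Agent: { metadata: { name: "agent" } } }`)

    out, err := vm.EvaluateAnonymousSnippet("example.jsonnet",
        `function(ctx) "name: " + ctx.Agent.metadata.name + "\n"`)
    if err != nil {
        panic(err)
    }
    fmt.Print(out) // prints "name: agent"
}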
-func BuildConfig(d *gragent.Deployment, ty Type) (string, error) {
-    vm, err := createVM(d.Secrets)
-    if err != nil {
-        return "", err
-    }
-
-    bb, err := jsonnetMarshal(d)
-    if err != nil {
-        return "", err
-    }
-
-    vm.TLACode("ctx", string(bb))
-
-    switch ty {
-    case MetricsType:
-        return vm.EvaluateFile("./agent-metrics.libsonnet")
-    case LogsType:
-        return vm.EvaluateFile("./agent-logs.libsonnet")
-    case IntegrationsType:
-        return vm.EvaluateFile("./agent-integrations.libsonnet")
-    default:
-        panic(fmt.Sprintf("unexpected config type %v", ty))
-    }
-}
-
-func createVM(secrets assets.SecretStore) (*jsonnet.VM, error) {
-    vm := jsonnet.MakeVM()
-    vm.StringOutput = true
-
-    templatesContents, err := fs.Sub(templates, "templates")
-    if err != nil {
-        return nil, err
-    }
-
-    vm.Importer(NewFSImporter(templatesContents, []string{"./"}))
-
-    vm.NativeFunction(&jsonnet.NativeFunction{
-        Name:   "marshalYAML",
-        Params: ast.Identifiers{"object"},
-        Func: func(i []interface{}) (interface{}, error) {
-            bb, err := yaml.Marshal(i[0])
-            if err != nil {
-                return nil, jsonnet.RuntimeError{Msg: err.Error()}
-            }
-            return string(bb), nil
-        },
-    })
-    vm.NativeFunction(&jsonnet.NativeFunction{
-        Name:   "unmarshalYAML",
-        Params: ast.Identifiers{"text"},
-        Func:   unmarshalYAML,
-    })
-    vm.NativeFunction(&jsonnet.NativeFunction{
-        Name:   "intoStages",
-        Params: ast.Identifiers{"text"},
-        Func:   intoStages,
-    })
-
-    vm.NativeFunction(&jsonnet.NativeFunction{
-        Name:   "trimOptional",
-        Params: ast.Identifiers{"value"},
-        Func: func(i []interface{}) (interface{}, error) {
-            m := i[0].(map[string]interface{})
-            trimMap(m)
-            return m, nil
-        },
-    })
-    vm.NativeFunction(&jsonnet.NativeFunction{
-        Name:   "secretLookup",
-        Params: ast.Identifiers{"key"},
-        Func: func(i []interface{}) (interface{}, error) {
-            if i[0] == nil {
-                return nil, nil
-            }
-
-            k := assets.Key(i[0].(string))
-            val, ok := secrets[k]
-            if !ok {
-                return nil, jsonnet.RuntimeError{Msg: fmt.Sprintf("key not provided: %s", k)}
-            }
-            return val, nil
-        },
-    })
-    vm.NativeFunction(&jsonnet.NativeFunction{
-        Name:   "secretPath",
-        Params: ast.Identifiers{"key"},
-        Func: func(i []interface{}) (interface{}, error) {
-            if i[0] == nil {
-                return nil, nil
-            }
-
-            key := SanitizeLabelName(i[0].(string))
-            return path.Join("/var/lib/grafana-agent/secrets", key), nil
-        },
-    })
-
-    vm.NativeFunction(&jsonnet.NativeFunction{
-        Name:   "sanitize",
-        Params: ast.Identifiers{"text"},
-        Func: func(i []interface{}) (interface{}, error) {
-            if len(i) != 1 {
-                return nil, jsonnet.RuntimeError{Msg: "inappropriate number of arguments"}
-            }
-            s, ok := i[0].(string)
-            if !ok {
-                return nil, jsonnet.RuntimeError{Msg: "text must be a string"}
-            }
-            return SanitizeLabelName(s), nil
-        },
-    })
-
-    return vm, nil
-}
-
-// jsonnetMarshal marshals a value for passing to Jsonnet. This marshals to a
-// JSON representation of the Go value, ignoring all json struct tags. Fields
-// must be accessed as they would be from Go, with the exception of embedded
-// fields, which must be accessed through the embedded type name
-// (a.Embedded.Field).
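A short sketch of the behavior that comment describes, assuming github.com/fatih/structs behaves as documented; the Outer and Inner types are invented for illustration:

package main // illustrative sketch only

import (
    "encoding/json"
    "fmt"

    "github.com/fatih/structs"
)

type Inner struct{ Field string }

type Outer struct {
    Inner        // embedded: kept nested under its type name, not flattened
    Name  string `json:"renamed"` // json tag ignored by structs.Map
}

func main() {
    v := Outer{Inner: Inner{Field: "f"}, Name: "n"}
    bb, _ := json.Marshal(structs.Map(v)) // error handling elided for brevity
    fmt.Println(string(bb))               // {"Inner":{"Field":"f"},"Name":"n"}
}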
-func jsonnetMarshal(v interface{}) ([]byte, error) { - if structs.IsStruct(v) { - return json.Marshal(structs.Map(v)) - } - return json.Marshal(v) -} diff --git a/internal/static/operator/config/config_references.go b/internal/static/operator/config/config_references.go deleted file mode 100644 index 91eee4b7d2..0000000000 --- a/internal/static/operator/config/config_references.go +++ /dev/null @@ -1,75 +0,0 @@ -package config - -import ( - "github.com/grafana/agent/internal/util/structwalk" - prom "github.com/prometheus-operator/prometheus-operator/pkg/apis/monitoring/v1" - corev1 "k8s.io/api/core/v1" - metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" -) - -// AssetReference is a namespaced Secret or ConfigMap selector. -type AssetReference struct { - Namespace string - Reference prom.SecretOrConfigMap -} - -// AssetReferences returns all secret or configmap selectors used throughout v. -func AssetReferences(v interface{}) []AssetReference { - var refs []AssetReference - w := assetReferencesWalker{ - addReference: func(ar AssetReference) { - refs = append(refs, ar) - }, - } - structwalk.Walk(&w, v) - return refs -} - -type assetReferencesWalker struct { - namespace string - addReference func(ar AssetReference) -} - -func (arw *assetReferencesWalker) Visit(v interface{}) (w structwalk.Visitor) { - if v == nil { - return nil - } - - // If we've come across a namespaced object, create a new visitor for that - // namespace. - if o, ok := v.(metav1.Object); ok { - return &assetReferencesWalker{ - namespace: o.GetNamespace(), - addReference: arw.addReference, - } - } - - switch sel := v.(type) { - case corev1.SecretKeySelector: - if sel.Key != "" && sel.Name != "" { - arw.addReference(AssetReference{ - Namespace: arw.namespace, - Reference: prom.SecretOrConfigMap{Secret: &sel}, - }) - } - case *corev1.SecretKeySelector: - arw.addReference(AssetReference{ - Namespace: arw.namespace, - Reference: prom.SecretOrConfigMap{Secret: sel}, - }) - case corev1.ConfigMapKeySelector: - if sel.Key != "" && sel.Name != "" { - arw.addReference(AssetReference{ - Namespace: arw.namespace, - Reference: prom.SecretOrConfigMap{ConfigMap: &sel}, - }) - } - case *corev1.ConfigMapKeySelector: - arw.addReference(AssetReference{ - Namespace: arw.namespace, - Reference: prom.SecretOrConfigMap{ConfigMap: sel}, - }) - } - - return arw -} diff --git a/internal/static/operator/config/config_references_test.go b/internal/static/operator/config/config_references_test.go deleted file mode 100644 index 515858a379..0000000000 --- a/internal/static/operator/config/config_references_test.go +++ /dev/null @@ -1,84 +0,0 @@ -package config - -import ( - "testing" - - gragent "github.com/grafana/agent/internal/static/operator/apis/monitoring/v1alpha1" - prom "github.com/prometheus-operator/prometheus-operator/pkg/apis/monitoring/v1" - "github.com/stretchr/testify/require" - corev1 "k8s.io/api/core/v1" - v1 "k8s.io/apimachinery/pkg/apis/meta/v1" -) - -func TestDeployment_AssetReferences(t *testing.T) { - deployment := gragent.Deployment{ - Agent: &gragent.GrafanaAgent{ - ObjectMeta: v1.ObjectMeta{ - Namespace: "agent", - }, - Spec: gragent.GrafanaAgentSpec{ - APIServerConfig: &prom.APIServerConfig{ - BasicAuth: &prom.BasicAuth{ - Username: corev1.SecretKeySelector{ - LocalObjectReference: corev1.LocalObjectReference{ - Name: "spec-apiserverconfig-basicauth-username", - }, - Key: "key", - }, - }, - }, - }, - }, - Metrics: []gragent.MetricsDeployment{{ - Instance: &gragent.MetricsInstance{ - ObjectMeta: v1.ObjectMeta{Namespace: 
"metrics-instance"}, - }, - PodMonitors: []*prom.PodMonitor{{ - ObjectMeta: v1.ObjectMeta{Namespace: "pmon"}, - }}, - Probes: []*prom.Probe{{ - ObjectMeta: v1.ObjectMeta{Namespace: "probe"}, - }}, - ServiceMonitors: []*prom.ServiceMonitor{{ - ObjectMeta: v1.ObjectMeta{ - Namespace: "smon", - }, - Spec: prom.ServiceMonitorSpec{ - Endpoints: []prom.Endpoint{{ - BearerTokenSecret: corev1.SecretKeySelector{ - LocalObjectReference: corev1.LocalObjectReference{ - Name: "prometheis-servicemonitors-spec-endpoints-bearertokensecret", - }, - Key: "key", - }, - }}, - }, - }}, - }}, - } - - require.Equal(t, []AssetReference{ - { - Namespace: "agent", - Reference: prom.SecretOrConfigMap{ - Secret: &corev1.SecretKeySelector{ - LocalObjectReference: corev1.LocalObjectReference{ - Name: "spec-apiserverconfig-basicauth-username", - }, - Key: "key", - }, - }, - }, - { - Namespace: "smon", - Reference: prom.SecretOrConfigMap{ - Secret: &corev1.SecretKeySelector{ - LocalObjectReference: corev1.LocalObjectReference{ - Name: "prometheis-servicemonitors-spec-endpoints-bearertokensecret", - }, - Key: "key", - }, - }, - }, - }, AssetReferences(deployment)) -} diff --git a/internal/static/operator/config/config_test.go b/internal/static/operator/config/config_test.go deleted file mode 100644 index 2d499134c5..0000000000 --- a/internal/static/operator/config/config_test.go +++ /dev/null @@ -1,492 +0,0 @@ -package config - -import ( - "fmt" - "testing" - - "github.com/stretchr/testify/assert" - "github.com/stretchr/testify/require" - v1 "k8s.io/api/core/v1" - meta_v1 "k8s.io/apimachinery/pkg/apis/meta/v1" - "k8s.io/utils/ptr" - k8s_yaml "sigs.k8s.io/yaml" - - "github.com/grafana/agent/internal/static/operator/assets" - "github.com/grafana/agent/internal/util" - "github.com/grafana/agent/internal/util/subset" - - gragent "github.com/grafana/agent/internal/static/operator/apis/monitoring/v1alpha1" -) - -func TestBuildConfigMetrics(t *testing.T) { - var store = make(assets.SecretStore) - - store[assets.Key("/secrets/default/example-secret/key")] = "somesecret" - store[assets.Key("/configMaps/default/example-cm/key")] = "somecm" - store[assets.Key("/secrets/default/client-id/client_id")] = "my-client-id" - store[assets.Key("/secrets/default/client-secret/client_secret")] = "somesecret-client-secret" - - tt := []struct { - input string - expect string - }{ - { - input: util.Untab(` - metadata: - name: example - namespace: default - spec: - logLevel: debug - metrics: - scrapeInterval: 15s - scrapeTimeout: 10s - externalLabels: - cluster: prod - foo: bar - remoteWrite: - - name: rw-1 - url: http://localhost:9090/api/v1/write - `), - expect: util.Untab(` - server: - log_level: debug - - metrics: - wal_directory: /var/lib/grafana-agent/data - global: - scrape_interval: 15s - scrape_timeout: 10s - external_labels: - cluster: prod - foo: bar - __replica__: replica-$(STATEFULSET_ORDINAL_NUMBER) - remote_write: - - name: rw-1 - url: http://localhost:9090/api/v1/write - `), - }, - { - input: util.Untab(` - metadata: - name: example - namespace: default - spec: - logLevel: debug - metrics: - scrapeInterval: 15s - scrapeTimeout: 10s - externalLabels: - cluster: prod - foo: bar - remoteWrite: - - url: http://localhost:9090/api/v1/write - basicAuth: - username: - name: example-secret - key: key - password: - name: example-secret - key: pword - tlsConfig: - ca: - configMap: - name: example-cm - key: key - cert: - secret: - name: example-secret - key: key - keySecret: - name: example-secret - key: key - `), - expect: util.Untab(` - 
server: - log_level: debug - - metrics: - wal_directory: /var/lib/grafana-agent/data - global: - scrape_interval: 15s - scrape_timeout: 10s - external_labels: - cluster: prod - foo: bar - __replica__: replica-$(STATEFULSET_ORDINAL_NUMBER) - remote_write: - - url: http://localhost:9090/api/v1/write - basic_auth: - username: somesecret - password_file: /var/lib/grafana-agent/secrets/_secrets_default_example_secret_pword - tls_config: - ca_file: /var/lib/grafana-agent/secrets/_configMaps_default_example_cm_key - cert_file: /var/lib/grafana-agent/secrets/_secrets_default_example_secret_key - key_file: /var/lib/grafana-agent/secrets/_secrets_default_example_secret_key - `), - }, - { - input: util.Untab(` -metadata: - name: example - namespace: default -spec: - logLevel: debug - metrics: - scrapeInterval: 15s - scrapeTimeout: 10s - externalLabels: - cluster: prod - foo: bar - remoteWrite: - - url: http://localhost:9090/api/v1/write - oauth2: - clientId: - secret: - key: client_id - name: client-id - clientSecret: - key: client_secret - name: my-client-secret - tokenUrl: https://auth.example.com/realms/master/protocol/openid-connect/token - - url: http://localhost:9090/api/v1/write - oauth2: - clientId: - secret: - key: client_id - name: client-id - clientSecret: - key: client_secret - name: my-client-secret - # test optional parameters endpointParams and scopes - endpointParams: - params-key0: params-value - params-key1: params-value - scopes: - - value0 - - value1 - tokenUrl: https://auth.example.com/realms/master/protocol/openid-connect/token - `), - expect: util.Untab(` -server: - log_level: debug - -metrics: - wal_directory: /var/lib/grafana-agent/data - global: - scrape_interval: 15s - scrape_timeout: 10s - external_labels: - cluster: prod - foo: bar - __replica__: replica-$(STATEFULSET_ORDINAL_NUMBER) - remote_write: - - url: http://localhost:9090/api/v1/write - oauth2: - client_id: my-client-id - client_secret_file: /var/lib/grafana-agent/secrets/_secrets_default_my_client_secret_client_secret - token_url: https://auth.example.com/realms/master/protocol/openid-connect/token - - url: http://localhost:9090/api/v1/write - oauth2: - client_id: my-client-id - client_secret_file: /var/lib/grafana-agent/secrets/_secrets_default_my_client_secret_client_secret - endpoint_params: - params-key0: params-value - params-key1: params-value - scopes: - - value0 - - value1 - token_url: https://auth.example.com/realms/master/protocol/openid-connect/token - `), - }, - } - - for i, tc := range tt { - t.Run(fmt.Sprintf("index_%d", i), func(t *testing.T) { - var spec gragent.GrafanaAgent - err := k8s_yaml.Unmarshal([]byte(tc.input), &spec) - require.NoError(t, err) - - d := gragent.Deployment{Agent: &spec, Secrets: store} - result, err := BuildConfig(&d, MetricsType) - require.NoError(t, err) - - if !assert.YAMLEq(t, tc.expect, result) { - fmt.Println(result) - } - }) - } -} - -func TestAdditionalScrapeConfigsMetrics(t *testing.T) { - var store = make(assets.SecretStore) - - additionalSelector := &v1.SecretKeySelector{ - LocalObjectReference: v1.LocalObjectReference{Name: "configs"}, - Key: "configs", - } - - input := gragent.Deployment{ - Agent: &gragent.GrafanaAgent{ - ObjectMeta: meta_v1.ObjectMeta{ - Namespace: "operator", - Name: "agent", - }, - Spec: gragent.GrafanaAgentSpec{ - Image: ptr.To("grafana/agent:latest"), - ServiceAccountName: "agent", - Metrics: gragent.MetricsSubsystemSpec{ - InstanceSelector: &meta_v1.LabelSelector{ - MatchLabels: map[string]string{"agent": "agent"}, - }, - }, - }, - }, - 
Metrics: []gragent.MetricsDeployment{{ - Instance: &gragent.MetricsInstance{ - ObjectMeta: meta_v1.ObjectMeta{ - Namespace: "operator", - Name: "primary", - }, - Spec: gragent.MetricsInstanceSpec{ - RemoteWrite: []gragent.RemoteWriteSpec{{ - URL: "http://cortex:80/api/prom/push", - }}, - AdditionalScrapeConfigs: additionalSelector, - }, - }, - }}, - - Secrets: store, - } - - store[assets.KeyForSecret("operator", additionalSelector)] = util.Untab(` - - job_name: job - kubernetes_sd_configs: - - role: node - - job_name: ec2 - ec2_sd_configs: - - region: eu-west-1 - port: 9100 - `) - - expect := util.Untab(` -server: {} - -metrics: - wal_directory: /var/lib/grafana-agent/data - global: - external_labels: - __replica__: replica-$(STATEFULSET_ORDINAL_NUMBER) - cluster: operator/agent - configs: - - name: operator/primary - remote_write: - - url: http://cortex:80/api/prom/push - scrape_configs: - - job_name: job - kubernetes_sd_configs: - - role: node - - job_name: ec2 - ec2_sd_configs: - - region: eu-west-1 - port: 9100 - `) - - result, err := BuildConfig(&input, MetricsType) - require.NoError(t, err) - - if !assert.YAMLEq(t, expect, result) { - fmt.Println(result) - } -} - -func TestBuildConfigLogs(t *testing.T) { - var store = make(assets.SecretStore) - - store[assets.Key("/secrets/default/example-secret/key")] = "somesecret" - store[assets.Key("/configMaps/default/example-cm/key")] = "somecm" - - tt := []struct { - input string - expect string - }{ - { - input: util.Untab(` - metadata: - name: example - namespace: default - spec: - logLevel: debug - `), - expect: util.Untab(` - server: - log_level: debug - logs: - positions_directory: /var/lib/grafana-agent/data - `), - }, - } - - for i, tc := range tt { - t.Run(fmt.Sprintf("index_%d", i), func(t *testing.T) { - var spec gragent.GrafanaAgent - err := k8s_yaml.Unmarshal([]byte(tc.input), &spec) - require.NoError(t, err) - - d := gragent.Deployment{Agent: &spec, Secrets: store} - result, err := BuildConfig(&d, LogsType) - require.NoError(t, err) - - if !assert.YAMLEq(t, tc.expect, result) { - fmt.Println(result) - } - }) - } -} - -func TestBuildConfigIntegrations(t *testing.T) { - in := util.Untab(` - Agent: - kind: GrafanaAgent - metadata: - name: test-agent - namespace: monitoring - Integrations: - - Instance: - kind: MetricsIntegration - metadata: - name: mysql-a - namespace: databases - spec: - name: mysqld_exporter - config: - data_source_names: root@(server-a:3306)/ - - Instance: - kind: MetricsIntegration - metadata: - name: node - namespace: kube-system - spec: - name: node_exporter - type: - allNodes: true - unique: true - config: - rootfs_path: /host/root - sysfs_path: /host/sys - procfs_path: /host/proc - - Instance: - metadata: - name: mysql-b - namespace: databases - spec: - name: mysqld_exporter - config: - data_source_names: root@(server-b:3306)/ - - Instance: - kind: MetricsIntegration - metadata: - name: redis-a - namespace: databases - spec: - name: redis_exporter - config: - redis_addr: redis-a:6379 - `) - - var h gragent.Deployment - err := k8s_yaml.UnmarshalStrict([]byte(in), &h) - require.NoError(t, err) - - expect := util.Untab(` - server: {} - logs: - positions_directory: /var/lib/grafana-agent/data - metrics: - global: - external_labels: - cluster: monitoring/test-agent - wal_directory: /var/lib/grafana-agent/data - integrations: - metrics: - autoscrape: - enable: false - mysqld_exporter_configs: - - data_source_names: root@(server-a:3306)/ - - data_source_names: root@(server-b:3306)/ - node_exporter_configs: - - 
rootfs_path: /host/root - sysfs_path: /host/sys - procfs_path: /host/proc - redis_exporter_configs: - - redis_addr: redis-a:6379 - `) - - result, err := BuildConfig(&h, IntegrationsType) - require.NoError(t, err) - - require.NoError(t, subset.YAMLAssert([]byte(expect), []byte(result)), "incomplete yaml\n%s", result) -} - -// TestBuildConfigIntegrations_Instances ensures that metrics and logs -// instances are injected into the resulting config so integrations can use -// them for sending telemetry data. -func TestBuildConfigIntegrations_Instances(t *testing.T) { - in := util.Untab(` - Agent: - kind: GrafanaAgent - metadata: - name: test-agent - namespace: monitoring - Metrics: - - Instance: - kind: MetricsInstance - metadata: - name: operator-metrics - namespace: primary - spec: - remoteWrite: - - url: http://cortex:80/api/prom/push - Logs: - - Instance: - kind: LogsInstance - metadata: - name: operator-logs - namespace: primary - spec: - clients: - - url: http://loki:80/loki/api/v1/push - `) - - var h gragent.Deployment - err := k8s_yaml.UnmarshalStrict([]byte(in), &h) - require.NoError(t, err) - - expect := util.Untab(` - server: {} - metrics: - global: - external_labels: - cluster: monitoring/test-agent - wal_directory: /var/lib/grafana-agent/data - configs: - - name: primary/operator-metrics - remote_write: - - url: http://cortex:80/api/prom/push - logs: - positions_directory: /var/lib/grafana-agent/data - configs: - - name: primary/operator-logs - clients: - - url: http://loki:80/loki/api/v1/push - integrations: - metrics: - autoscrape: - enable: false - `) - - result, err := BuildConfig(&h, IntegrationsType) - require.NoError(t, err) - - require.NoError(t, subset.YAMLAssert([]byte(expect), []byte(result)), "incomplete yaml\n%s", result) -} diff --git a/internal/static/operator/config/fs_importer.go b/internal/static/operator/config/fs_importer.go deleted file mode 100644 index 2a8044c0c9..0000000000 --- a/internal/static/operator/config/fs_importer.go +++ /dev/null @@ -1,71 +0,0 @@ -package config - -import ( - "bytes" - "fmt" - "io" - "io/fs" - "path" - "strings" - - jsonnet "github.com/google/go-jsonnet" -) - -// FSImporter implements jsonnet.Importer for a fs.FS. -type FSImporter struct { - fs fs.FS - - cache map[string]jsonnet.Contents - paths []string -} - -// NewFSImporter creates a new jsonnet VM Importer that uses the given fs. -func NewFSImporter(f fs.FS, paths []string) *FSImporter { - return &FSImporter{ - fs: f, - cache: make(map[string]jsonnet.Contents), - paths: paths, - } -} - -// Import implements jsonnet.Importer. -func (i *FSImporter) Import(importedFrom, importedPath string) (contents jsonnet.Contents, foundAt string, err error) { - tryPaths := append([]string{importedFrom}, i.paths...) - for _, p := range tryPaths { - cleanedPath := path.Clean( - path.Join(path.Dir(p), importedPath), - ) - cleanedPath = strings.TrimPrefix(cleanedPath, "./") - - c, fa, err := i.tryImport(cleanedPath) - if err == nil { - return c, fa, err - } - } - - return jsonnet.Contents{}, "", fmt.Errorf("no such file: %s", importedPath) -} - -func (i *FSImporter) tryImport(path string) (contents jsonnet.Contents, foundAt string, err error) { - // jsonnet expects the same "foundAt" to always return the same instance of - // contents, so we need to return a cache here. 
- if c, ok := i.cache[path]; ok { - return c, path, nil - } - - f, err := i.fs.Open(path) - if err != nil { - err = jsonnet.RuntimeError{Msg: err.Error()} - return - } - - var buf bytes.Buffer - if _, copyErr := io.Copy(&buf, f); copyErr != nil { - err = jsonnet.RuntimeError{Msg: copyErr.Error()} - return - } - - contents = jsonnet.MakeContents(buf.String()) - i.cache[path] = contents - return contents, path, nil -} diff --git a/internal/static/operator/config/integration_templates_test.go b/internal/static/operator/config/integration_templates_test.go deleted file mode 100644 index 61a9b54763..0000000000 --- a/internal/static/operator/config/integration_templates_test.go +++ /dev/null @@ -1,66 +0,0 @@ -package config - -import ( - "testing" - - gragent "github.com/grafana/agent/internal/static/operator/apis/monitoring/v1alpha1" - "github.com/grafana/agent/internal/util" - "github.com/grafana/agent/internal/util/subset" - "github.com/stretchr/testify/require" - apiext_v1 "k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1" - "sigs.k8s.io/yaml" -) - -func TestIntegration(t *testing.T) { - toJSON := func(in string) apiext_v1.JSON { - t.Helper() - out, err := yaml.YAMLToJSONStrict([]byte(in)) - require.NoError(t, err) - return apiext_v1.JSON{Raw: out} - } - - tt := []struct { - name string - input map[string]interface{} - expect string - }{ - { - name: "configured integration", - input: map[string]interface{}{ - "integration": &gragent.Integration{ - Spec: gragent.IntegrationSpec{ - Name: "mysqld_exporter", - Config: toJSON(` - data_source_names: root@(server-a:3306)/ - `), - }, - }, - }, - expect: util.Untab(` - data_source_names: root@(server-a:3306)/ - `), - }, - { - name: "integration no config", - input: map[string]interface{}{ - "integration": &gragent.Integration{ - Spec: gragent.IntegrationSpec{ - Name: "mysqld_exporter", - }, - }, - }, - expect: util.Untab(`{}`), - }, - } - - for _, tc := range tt { - t.Run(tc.name, func(t *testing.T) { - vm, err := createVM(testStore()) - require.NoError(t, err) - - actual, err := runSnippetTLA(t, vm, "./integrations.libsonnet", tc.input) - require.NoError(t, err) - require.NoError(t, subset.YAMLAssert([]byte(tc.expect), []byte(actual)), "incomplete yaml\n%s", actual) - }) - } -} diff --git a/internal/static/operator/config/logs_templates_test.go b/internal/static/operator/config/logs_templates_test.go deleted file mode 100644 index 5f38b801a2..0000000000 --- a/internal/static/operator/config/logs_templates_test.go +++ /dev/null @@ -1,762 +0,0 @@ -package config - -import ( - "fmt" - "strings" - "testing" - - jsonnet "github.com/google/go-jsonnet" - prom "github.com/prometheus-operator/prometheus-operator/pkg/apis/monitoring/v1" - prom_v1 "github.com/prometheus-operator/prometheus-operator/pkg/apis/monitoring/v1" - "github.com/stretchr/testify/require" - v1 "k8s.io/api/core/v1" - meta_v1 "k8s.io/apimachinery/pkg/apis/meta/v1" - metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" - - gragent "github.com/grafana/agent/internal/static/operator/apis/monitoring/v1alpha1" - "github.com/grafana/agent/internal/static/operator/assets" - "github.com/grafana/agent/internal/util" -) - -func TestLogsClientConfig(t *testing.T) { - agent := &gragent.GrafanaAgent{ - ObjectMeta: metav1.ObjectMeta{ - Namespace: "telemetry", - Name: "grafana-agent", - }, - } - - tt := []struct { - name string - input map[string]interface{} - expect string - }{ - { - name: "all-in-one URL", - input: map[string]interface{}{ - "agent": agent, - "namespace": "operator", - "spec": 
&gragent.LogsClientSpec{ - URL: "http://username:password@localhost:3100/loki/api/v1/push", - }, - }, - expect: util.Untab(` - url: http://username:password@localhost:3100/loki/api/v1/push - external_labels: - cluster: telemetry/grafana-agent - `), - }, - { - name: "full basic config", - input: map[string]interface{}{ - "agent": agent, - "namespace": "operator", - "spec": &gragent.LogsClientSpec{ - URL: "http://localhost:3100/loki/api/v1/push", - TenantID: "tenant", - BatchWait: "5m", - BatchSize: 500, - Timeout: "5m", - ExternalLabels: map[string]string{ - "foo": "bar", - "fizz": "buzz", - }, - ProxyURL: "http://proxy:3100/", - BackoffConfig: &gragent.LogsBackoffConfigSpec{ - MinPeriod: "500ms", - MaxPeriod: "5m", - MaxRetries: 100, - }, - }, - }, - expect: util.Untab(` - url: http://localhost:3100/loki/api/v1/push - tenant_id: tenant - batchwait: 5m - batchsize: 500 - proxy_url: http://proxy:3100/ - backoff_config: - min_period: 500ms - max_period: 5m - max_retries: 100 - external_labels: - cluster: telemetry/grafana-agent - foo: bar - fizz: buzz - timeout: 5m - `), - }, - { - name: "tls config", - input: map[string]interface{}{ - "agent": agent, - "namespace": "operator", - "spec": &gragent.LogsClientSpec{ - URL: "http://localhost:3100/loki/api/v1/push", - TLSConfig: &prom.TLSConfig{ - CAFile: "ca", - KeyFile: "key", - CertFile: "cert", - }, - }, - }, - expect: util.Untab(` - url: http://localhost:3100/loki/api/v1/push - tls_config: - ca_file: ca - key_file: key - cert_file: cert - external_labels: - cluster: telemetry/grafana-agent - `), - }, - { - name: "bearer tokens", - input: map[string]interface{}{ - "agent": agent, - "namespace": "operator", - "spec": &gragent.LogsClientSpec{ - URL: "http://localhost:3100/loki/api/v1/push", - BearerToken: "tok", - BearerTokenFile: "tokfile", - }, - }, - expect: util.Untab(` - url: http://localhost:3100/loki/api/v1/push - bearer_token: tok - bearer_token_file: tokfile - external_labels: - cluster: telemetry/grafana-agent - `), - }, - { - name: "basic auth", - input: map[string]interface{}{ - "agent": agent, - "namespace": "operator", - "spec": &gragent.LogsClientSpec{ - URL: "http://localhost:3100/loki/api/v1/push", - BasicAuth: &prom.BasicAuth{ - Username: v1.SecretKeySelector{ - LocalObjectReference: v1.LocalObjectReference{Name: "obj"}, - Key: "key", - }, - Password: v1.SecretKeySelector{ - LocalObjectReference: v1.LocalObjectReference{Name: "obj"}, - Key: "key", - }, - }, - }, - }, - expect: util.Untab(` - url: http://localhost:3100/loki/api/v1/push - basic_auth: - username: secretkey - password: secretkey - external_labels: - cluster: telemetry/grafana-agent - `), - }, - } - - for _, tc := range tt { - t.Run(tc.name, func(t *testing.T) { - vm, err := createVM(testStore()) - require.NoError(t, err) - - actual, err := runSnippetTLA(t, vm, "./component/logs/client.libsonnet", tc.input) - require.NoError(t, err) - require.YAMLEq(t, tc.expect, actual) - }) - } -} - -func TestLogsStages(t *testing.T) { - tt := []struct { - name string - input map[string]interface{} - expect string - }{ - { - name: "docker", - input: map[string]interface{}{"spec": &gragent.PipelineStageSpec{ - Docker: &gragent.DockerStageSpec{}, - }}, - expect: util.Untab(`docker: {}`), - }, - { - name: "cri", - input: map[string]interface{}{"spec": &gragent.PipelineStageSpec{ - CRI: &gragent.CRIStageSpec{}, - }}, - expect: util.Untab(`cri: {}`), - }, - { - name: "regex", - input: map[string]interface{}{"spec": &gragent.PipelineStageSpec{ - Regex: &gragent.RegexStageSpec{ - Source: 
"time", - Expression: "^(?P\\d+)", - }, - }}, - expect: util.Untab(` - regex: - expression: '^(?P\d+)' - source: time - `), - }, - { - name: "json", - input: map[string]interface{}{"spec": &gragent.PipelineStageSpec{ - JSON: &gragent.JSONStageSpec{ - Expressions: map[string]string{"user": ""}, - Source: "extra", - }, - }}, - expect: util.Untab(` - json: - expressions: - user: "" - source: extra - `), - }, - { - name: "labelallow", - input: map[string]interface{}{"spec": &gragent.PipelineStageSpec{ - LabelAllow: []string{"foo", "bar"}, - }}, - expect: util.Untab(` - labelallow: [foo, bar] - `), - }, - { - name: "labeldrop", - input: map[string]interface{}{"spec": &gragent.PipelineStageSpec{ - LabelDrop: []string{"foo", "bar"}, - }}, - expect: util.Untab(` - labeldrop: [foo, bar] - `), - }, - { - name: "labels", - input: map[string]interface{}{"spec": &gragent.PipelineStageSpec{ - Labels: map[string]string{ - "foo": "", - "fizz": "buzz", - }, - }}, - expect: util.Untab(` - labels: - foo: "" - fizz: buzz - `), - }, - { - name: "limit", - input: map[string]interface{}{"spec": &gragent.PipelineStageSpec{ - Limit: &gragent.LimitStageSpec{ - Rate: 10, - Burst: 20, - Drop: false, - }, - }}, - expect: util.Untab(` - limit: - rate: 10 - burst: 20 - drop: false - `), - }, - { - name: "match", - input: map[string]interface{}{"spec": &gragent.PipelineStageSpec{ - Match: &gragent.MatchStageSpec{ - PipelineName: "app2", - Selector: `{app="pokey"}`, - Action: "keep", - DropCounterReason: "no_pokey", - Stages: util.Untab(` - - json: - expressions: - msg: msg - `), - }, - }}, - expect: util.Untab(` - match: - pipeline_name: app2 - selector: '{app="pokey"}' - action: keep - drop_counter_reason: no_pokey - stages: - - json: - expressions: - msg: msg - `), - }, - { - name: "metrics", - input: map[string]interface{}{"spec": &gragent.PipelineStageSpec{ - Metrics: map[string]gragent.MetricsStageSpec{ - "logs_line_total": { - Type: "counter", - Description: "total number of log lines", - Prefix: "my_promtail_custom_", - MaxIdleDuration: "24h", - MatchAll: boolPtr(true), - Action: "inc", - }, - "queue_elements": { - Type: "gauge", - Description: "elements in queue", - Action: "add", - }, - "http_response_time_seconds": { - Type: "histogram", - Source: "response_time", - Action: "inc", - Buckets: []string{"0.001", "0.0025", "0.050"}, - }, - }, - }}, - expect: util.Untab(` - metrics: - logs_line_total: - type: Counter - description: total number of log lines - prefix: my_promtail_custom_ - max_idle_duration: 24h - config: - match_all: true - action: inc - queue_elements: - type: Gauge - description: elements in queue - config: - action: add - http_response_time_seconds: - type: Histogram - source: response_time - config: - action: inc - buckets: [0.001, 0.0025, 0.050] - `), - }, - { - name: "multiline", - input: map[string]interface{}{"spec": &gragent.PipelineStageSpec{ - Multiline: &gragent.MultilineStageSpec{ - FirstLine: "first", - MaxWaitTime: "5m", - MaxLines: 5, - }, - }}, - expect: util.Untab(` - multiline: - firstline: first - max_wait_time: 5m - max_lines: 5 - `), - }, - { - name: "output", - input: map[string]interface{}{"spec": &gragent.PipelineStageSpec{ - Output: &gragent.OutputStageSpec{Source: "message"}, - }}, - expect: util.Untab(` - output: - source: message - `), - }, - { - name: "pack", - input: map[string]interface{}{"spec": &gragent.PipelineStageSpec{ - Pack: &gragent.PackStageSpec{ - Labels: []string{"foo", "bar"}, - IngestTimestamp: true, - }, - }}, - expect: util.Untab(` - pack: - labels: 
[foo, bar] - ingest_timestamp: true - `), - }, - { - name: "regex", - input: map[string]interface{}{"spec": &gragent.PipelineStageSpec{ - Regex: &gragent.RegexStageSpec{ - Source: "msg", - Expression: "some regex", - }, - }}, - expect: util.Untab(` - regex: - source: msg - expression: some regex - `), - }, - { - name: "replace", - input: map[string]interface{}{"spec": &gragent.PipelineStageSpec{ - Replace: &gragent.ReplaceStageSpec{ - Expression: "password (\\S+)", - Replace: "****", - Source: "msg", - }, - }}, - expect: util.Untab(` - replace: - expression: 'password (\S+)' - replace: '****' - source: msg - `), - }, - { - name: "template", - input: map[string]interface{}{"spec": &gragent.PipelineStageSpec{ - Template: &gragent.TemplateStageSpec{ - Source: "new_key", - Template: "hello world!", - }, - }}, - expect: util.Untab(` - template: - source: new_key - template: "hello world!" - `), - }, - { - name: "tenant", - input: map[string]interface{}{"spec": &gragent.PipelineStageSpec{ - Tenant: &gragent.TenantStageSpec{ - Label: "__meta_kubernetes_pod_label_fake", - Source: "customer_id", - Value: "fake", - }, - }}, - expect: util.Untab(` - tenant: - label: __meta_kubernetes_pod_label_fake - source: customer_id - value: fake - `), - }, - { - name: "timestamp", - input: map[string]interface{}{"spec": &gragent.PipelineStageSpec{ - Timestamp: &gragent.TimestampStageSpec{ - Source: "time", - Format: "RFC3339Nano", - FallbackFormats: []string{"UnixNs"}, - Location: "America/New_York", - ActionOnFailure: "fudge", - }, - }}, - expect: util.Untab(` - timestamp: - source: time - format: RFC3339Nano - fallback_formats: [UnixNs] - location: America/New_York - action_on_failure: fudge - `), - }, - } - - for _, tc := range tt { - t.Run(tc.name, func(t *testing.T) { - vm, err := createVM(testStore()) - require.NoError(t, err) - - actual, err := runSnippetTLA(t, vm, "./component/logs/stages.libsonnet", tc.input) - require.NoError(t, err) - require.YAMLEq(t, tc.expect, actual) - }) - } -} - -func TestPodLogsConfig(t *testing.T) { - tt := []struct { - name string - input map[string]interface{} - expect string - }{ - { - name: "default", - input: map[string]interface{}{ - "agentNamespace": "operator", - "podLogs": gragent.PodLogs{ - ObjectMeta: meta_v1.ObjectMeta{ - Namespace: "operator", - Name: "podlogs", - }, - Spec: gragent.PodLogsSpec{ - RelabelConfigs: []*prom_v1.RelabelConfig{{ - SourceLabels: []prom.LabelName{"input_a", "input_b"}, - Separator: ";", - TargetLabel: "target_a", - Regex: "regex", - Modulus: 1234, - Replacement: "foobar", - Action: "replace", - }}, - }, - }, - "apiServer": prom_v1.APIServerConfig{}, - "ignoreNamespaceSelectors": false, - "enforcedNamespaceLabel": "", - }, - expect: util.Untab(` - job_name: podLogs/operator/podlogs - kubernetes_sd_configs: - - role: pod - namespaces: - names: [operator] - relabel_configs: - - source_labels: [job] - target_label: __tmp_prometheus_job_name - - source_labels: [__meta_kubernetes_namespace] - target_label: namespace - - source_labels: [__meta_kubernetes_service_name] - target_label: service - - source_labels: [__meta_kubernetes_pod_name] - target_label: pod - - source_labels: [__meta_kubernetes_pod_container_name] - target_label: container - - target_label: job - replacement: operator/podlogs - - source_labels: ['__meta_kubernetes_pod_uid', '__meta_kubernetes_pod_container_name'] - target_label: __path__ - separator: / - replacement: /var/log/pods/*$1/*.log - - source_labels: ["input_a", "input_b"] - separator: ";" - target_label: "target_a" - 
regex: regex - modulus: 1234 - replacement: foobar - action: replace - `), - }, - } - - for _, tc := range tt { - t.Run(tc.name, func(t *testing.T) { - vm, err := createVM(testStore()) - require.NoError(t, err) - - actual, err := runSnippetTLA(t, vm, "./component/logs/pod_logs.libsonnet", tc.input) - require.NoError(t, err) - require.YAMLEq(t, tc.expect, actual) - }) - } -} - -func TestLogsConfig(t *testing.T) { - agent := &gragent.GrafanaAgent{ - ObjectMeta: metav1.ObjectMeta{ - Namespace: "operator", - Name: "grafana-agent", - }, - } - - tt := []struct { - name string - input map[string]interface{} - expect string - }{ - { - name: "global clients", - input: map[string]interface{}{ - "agent": agent, - "global": &gragent.LogsSubsystemSpec{ - Clients: []gragent.LogsClientSpec{{URL: "global"}}, - }, - "instance": &gragent.LogsDeployment{ - Instance: &gragent.LogsInstance{ - ObjectMeta: metav1.ObjectMeta{ - Namespace: "inst", - Name: "default", - }, - Spec: gragent.LogsInstanceSpec{}, - }, - }, - "apiServer": &prom.APIServerConfig{}, - - "ignoreNamespaceSelectors": false, - "enforcedNamespaceLabel": "", - }, - expect: util.Untab(` - name: inst/default - clients: - - url: global - external_labels: - cluster: operator/grafana-agent - `), - }, - { - name: "local clients", - input: map[string]interface{}{ - "agent": agent, - "global": &gragent.LogsSubsystemSpec{ - Clients: []gragent.LogsClientSpec{{URL: "global"}}, - }, - "instance": &gragent.LogsDeployment{ - Instance: &gragent.LogsInstance{ - ObjectMeta: metav1.ObjectMeta{ - Namespace: "inst", - Name: "default", - }, - Spec: gragent.LogsInstanceSpec{ - Clients: []gragent.LogsClientSpec{{URL: "local"}}, - }, - }, - }, - "apiServer": &prom.APIServerConfig{}, - - "ignoreNamespaceSelectors": false, - "enforcedNamespaceLabel": "", - }, - expect: util.Untab(` - name: inst/default - clients: - - url: local - external_labels: - cluster: operator/grafana-agent - `), - }, - { - name: "pod logs", - input: map[string]interface{}{ - "agent": agent, - "global": &gragent.LogsSubsystemSpec{}, - "instance": &gragent.LogsDeployment{ - Instance: &gragent.LogsInstance{ - ObjectMeta: metav1.ObjectMeta{Namespace: "inst", Name: "default"}, - }, - PodLogs: []*gragent.PodLogs{{ - ObjectMeta: metav1.ObjectMeta{Namespace: "app", Name: "pod"}, - }}, - }, - "apiServer": &prom.APIServerConfig{}, - - "ignoreNamespaceSelectors": false, - "enforcedNamespaceLabel": "", - }, - expect: util.Untab(` - name: inst/default - scrape_configs: - - job_name: podLogs/app/pod - kubernetes_sd_configs: - - namespaces: - names: - - app - role: pod - relabel_configs: - - source_labels: - - job - target_label: __tmp_prometheus_job_name - - source_labels: - - __meta_kubernetes_namespace - target_label: namespace - - source_labels: - - __meta_kubernetes_service_name - target_label: service - - source_labels: - - __meta_kubernetes_pod_name - target_label: pod - - source_labels: - - __meta_kubernetes_pod_container_name - target_label: container - - replacement: app/pod - target_label: job - - source_labels: ['__meta_kubernetes_pod_uid', '__meta_kubernetes_pod_container_name'] - target_label: __path__ - separator: / - replacement: /var/log/pods/*$1/*.log - `), - }, - { - name: "additional scrape configs", - input: map[string]interface{}{ - "agent": agent, - "global": &gragent.LogsSubsystemSpec{}, - "instance": &gragent.LogsDeployment{ - Instance: &gragent.LogsInstance{ - ObjectMeta: metav1.ObjectMeta{Namespace: "inst", Name: "default"}, - Spec: gragent.LogsInstanceSpec{ - AdditionalScrapeConfigs: 
&v1.SecretKeySelector{ - LocalObjectReference: v1.LocalObjectReference{Name: "additional"}, - Key: "configs", - }, - }, - }, - }, - "apiServer": &prom.APIServerConfig{}, - - "ignoreNamespaceSelectors": false, - "enforcedNamespaceLabel": "", - }, - expect: util.Untab(` - name: inst/default - scrape_configs: - - job_name: extra - `), - }, - } - - for _, tc := range tt { - t.Run(tc.name, func(t *testing.T) { - s := testStore() - - s[assets.KeyForSecret("inst", &v1.SecretKeySelector{ - LocalObjectReference: v1.LocalObjectReference{ - Name: "additional", - }, - Key: "configs", - })] = `[{ "job_name": "extra" }]` - - vm, err := createVM(s) - require.NoError(t, err) - - actual, err := runSnippetTLA(t, vm, "./logs.libsonnet", tc.input) - require.NoError(t, err) - require.YAMLEq(t, tc.expect, actual) - }) - } -} - -func runSnippetTLA(t *testing.T, vm *jsonnet.VM, filename string, tla map[string]interface{}) (string, error) { - t.Helper() - - args := make([]string, 0, len(tla)) - for arg := range tla { - args = append(args, arg) - } - - boundArgs := make([]string, len(args)) - for i := range args { - boundArgs[i] = fmt.Sprintf("%[1]s=%[1]s", args[i]) - } - - // Bind argument to TLA. - for arg, value := range tla { - bb, err := jsonnetMarshal(value) - require.NoError(t, err) - vm.TLACode(arg, string(bb)) - } - - return vm.EvaluateAnonymousSnippet( - filename, - fmt.Sprintf(` - local marshal = import './ext/marshal.libsonnet'; - local optionals = import './ext/optionals.libsonnet'; - local eval = import '%s'; - function(%s) marshal.YAML(optionals.trim(eval(%s))) - `, filename, strings.Join(args, ","), strings.Join(boundArgs, ",")), - ) -} - -func boolPtr(v bool) *bool { return &v } diff --git a/internal/static/operator/config/metrics_templates_test.go b/internal/static/operator/config/metrics_templates_test.go deleted file mode 100644 index e750ee8aa0..0000000000 --- a/internal/static/operator/config/metrics_templates_test.go +++ /dev/null @@ -1,1161 +0,0 @@ -package config - -import ( - "fmt" - "os" - "strings" - "testing" - - "github.com/google/go-jsonnet" - prom_v1 "github.com/prometheus-operator/prometheus-operator/pkg/apis/monitoring/v1" - "github.com/stretchr/testify/assert" - "github.com/stretchr/testify/require" - v1 "k8s.io/api/core/v1" - meta_v1 "k8s.io/apimachinery/pkg/apis/meta/v1" - "k8s.io/utils/ptr" - - gragent "github.com/grafana/agent/internal/static/operator/apis/monitoring/v1alpha1" - "github.com/grafana/agent/internal/static/operator/assets" - "github.com/grafana/agent/internal/util" -) - -func TestExternalLabels(t *testing.T) { - tt := []struct { - name string - input interface{} - addReplica bool - expect string - }{ - { - name: "no replica", - addReplica: false, - input: gragent.Deployment{ - Agent: &gragent.GrafanaAgent{ - ObjectMeta: meta_v1.ObjectMeta{ - Namespace: "operator", - Name: "agent", - }, - }, - }, - expect: util.Untab(` - cluster: operator/agent - `), - }, - { - name: "defaults", - addReplica: true, - input: gragent.Deployment{ - Agent: &gragent.GrafanaAgent{ - ObjectMeta: meta_v1.ObjectMeta{ - Namespace: "operator", - Name: "agent", - }, - }, - }, - expect: util.Untab(` - cluster: operator/agent - __replica__: replica-$(STATEFULSET_ORDINAL_NUMBER) - `), - }, - { - name: "external_labels", - addReplica: true, - input: gragent.Deployment{ - Agent: &gragent.GrafanaAgent{ - ObjectMeta: meta_v1.ObjectMeta{ - Namespace: "operator", - Name: "agent", - }, - Spec: gragent.GrafanaAgentSpec{ - Metrics: gragent.MetricsSubsystemSpec{ - ExternalLabels: map[string]string{"foo": 
"bar"}, - }, - }, - }, - }, - expect: util.Untab(` - cluster: operator/agent - foo: bar - __replica__: replica-$(STATEFULSET_ORDINAL_NUMBER) - `), - }, - { - name: "custom labels", - addReplica: true, - input: gragent.Deployment{ - Agent: &gragent.GrafanaAgent{ - ObjectMeta: meta_v1.ObjectMeta{ - Namespace: "operator", - Name: "agent", - }, - Spec: gragent.GrafanaAgentSpec{ - Metrics: gragent.MetricsSubsystemSpec{ - MetricsExternalLabelName: ptr.To("deployment"), - ReplicaExternalLabelName: ptr.To("replica"), - ExternalLabels: map[string]string{"foo": "bar"}, - }, - }, - }, - }, - expect: util.Untab(` - deployment: operator/agent - foo: bar - replica: replica-$(STATEFULSET_ORDINAL_NUMBER) - `), - }, - } - - for _, tc := range tt { - t.Run(tc.name, func(t *testing.T) { - vm, err := createVM(nil) - require.NoError(t, err) - bb, err := jsonnetMarshal(tc.input) - require.NoError(t, err) - - vm.TLACode("ctx", string(bb)) - vm.TLACode("addReplica", fmt.Sprintf("%v", tc.addReplica)) - actual, err := runSnippet(vm, "./component/metrics/external_labels.libsonnet", "ctx", "addReplica") - require.NoError(t, err) - require.YAMLEq(t, tc.expect, actual) - }) - } -} - -func TestKubeSDConfig(t *testing.T) { - tt := []struct { - name string - input map[string]interface{} - expect string - }{ - { - name: "defaults", - input: map[string]interface{}{ - "namespace": "operator", - "role": "pod", - }, - expect: util.Untab(` - role: pod - `), - }, - { - name: "defaults", - input: map[string]interface{}{ - "namespace": "operator", - "namespaces": []string{"operator"}, - "role": "pod", - }, - expect: util.Untab(` - role: pod - namespaces: - names: [operator] - `), - }, - { - name: "host", - input: map[string]interface{}{ - "namespace": "operator", - "apiServer": &prom_v1.APIServerConfig{Host: "host"}, - "role": "pod", - }, - expect: util.Untab(` - role: pod - api_server: host - `), - }, - { - name: "basic auth", - input: map[string]interface{}{ - "namespace": "operator", - "apiServer": &prom_v1.APIServerConfig{ - BasicAuth: &prom_v1.BasicAuth{ - Username: v1.SecretKeySelector{ - LocalObjectReference: v1.LocalObjectReference{Name: "obj"}, - Key: "key", - }, - Password: v1.SecretKeySelector{ - LocalObjectReference: v1.LocalObjectReference{Name: "obj"}, - Key: "key", - }, - }, - }, - "role": "pod", - }, - expect: util.Untab(` - role: pod - basic_auth: - username: secretkey - password: secretkey - `), - }, - { - name: "bearer auth", - input: map[string]interface{}{ - "namespace": "operator", - "apiServer": &prom_v1.APIServerConfig{ - BearerToken: "bearer", - BearerTokenFile: "file", - }, - "role": "pod", - }, - expect: util.Untab(` - role: pod - authorization: - type: Bearer - credentials: bearer - credentials_file: file - `), - }, - { - name: "tls_config", - input: map[string]interface{}{ - "namespace": "operator", - "apiServer": &prom_v1.APIServerConfig{ - TLSConfig: &prom_v1.TLSConfig{ - CAFile: "ca", - }, - }, - "role": "pod", - }, - expect: util.Untab(` - role: pod - tls_config: - ca_file: ca - `), - }, - } - - for _, tc := range tt { - t.Run(tc.name, func(t *testing.T) { - vm, err := createVM(testStore()) - require.NoError(t, err) - - args := []string{"namespace", "namespaces", "apiServer", "role"} - for _, arg := range args { - bb, err := jsonnetMarshal(tc.input[arg]) - require.NoError(t, err) - vm.TLACode(arg, string(bb)) - } - - actual, err := runSnippet(vm, "./component/metrics/kube_sd_config.libsonnet", args...) 
- require.NoError(t, err) - require.YAMLEq(t, tc.expect, actual) - }) - } -} - -func TestPodMonitor(t *testing.T) { - var falseVal = false - var trueVal = true - tt := []struct { - name string - input map[string]interface{} - expect string - }{ - { - name: "default", - input: map[string]interface{}{ - "agentNamespace": "operator", - "monitor": prom_v1.PodMonitor{ - ObjectMeta: meta_v1.ObjectMeta{ - Namespace: "operator", - Name: "podmonitor", - }, - }, - "endpoint": prom_v1.PodMetricsEndpoint{ - Port: "metrics", - EnableHttp2: &falseVal, - FilterRunning: &trueVal, - }, - "index": 0, - "apiServer": prom_v1.APIServerConfig{}, - "overrideHonorLabels": false, - "overrideHonorTimestamps": false, - "ignoreNamespaceSelectors": false, - "enforcedNamespaceLabel": "", - "enforcedSampleLimit": nil, - "enforcedTargetLimit": nil, - "shards": 1, - }, - expect: util.Untab(` - job_name: podMonitor/operator/podmonitor/0 - enable_http2: false - honor_labels: false - kubernetes_sd_configs: - - role: pod - namespaces: - names: [operator] - relabel_configs: - - source_labels: [job] - target_label: __tmp_prometheus_job_name - - source_labels: [__meta_kubernetes_pod_phase] - regex: (Failed|Succeeded) - action: drop - - source_labels: [__meta_kubernetes_pod_container_port_name] - regex: metrics - action: keep - - source_labels: [__meta_kubernetes_namespace] - target_label: namespace - - source_labels: [__meta_kubernetes_service_name] - target_label: service - - source_labels: [__meta_kubernetes_pod_name] - target_label: pod - - source_labels: [__meta_kubernetes_pod_container_name] - target_label: container - - target_label: job - replacement: operator/podmonitor - - target_label: endpoint - replacement: metrics - - source_labels: [__address__] - target_label: __tmp_hash - action: hashmod - modulus: 1 - - source_labels: [__tmp_hash] - action: keep - regex: $(SHARD) - `), - }, - } - - for _, tc := range tt { - t.Run(tc.name, func(t *testing.T) { - vm, err := createVM(testStore()) - require.NoError(t, err) - - args := []string{ - "agentNamespace", "monitor", "endpoint", "index", "apiServer", "overrideHonorLabels", - "overrideHonorTimestamps", "ignoreNamespaceSelectors", "enforcedNamespaceLabel", - "enforcedSampleLimit", "enforcedTargetLimit", "shards", - } - for _, arg := range args { - bb, err := jsonnetMarshal(tc.input[arg]) - require.NoError(t, err) - vm.TLACode(arg, string(bb)) - } - - actual, err := runSnippet(vm, "./component/metrics/pod_monitor.libsonnet", args...) 
- require.NoError(t, err) - if !assert.YAMLEq(t, tc.expect, actual) { - fmt.Fprintln(os.Stderr, actual) - } - }) - } -} - -func TestProbe(t *testing.T) { - tt := []struct { - name string - input map[string]interface{} - expect string - }{ - { - name: "default", - input: map[string]interface{}{ - "agentNamespace": "operator", - "probe": prom_v1.Probe{ - ObjectMeta: meta_v1.ObjectMeta{ - Namespace: "operator", - Name: "probe", - }, - Spec: prom_v1.ProbeSpec{ - Module: "mod", - Targets: prom_v1.ProbeTargets{ - Ingress: &prom_v1.ProbeTargetIngress{ - Selector: meta_v1.LabelSelector{ - MatchLabels: map[string]string{"foo": "bar"}, - }, - }, - }, - TLSConfig: &prom_v1.ProbeTLSConfig{ - SafeTLSConfig: prom_v1.SafeTLSConfig{ - InsecureSkipVerify: true, - }, - }, - }, - }, - "apiServer": prom_v1.APIServerConfig{}, - "overrideHonorTimestamps": false, - "ignoreNamespaceSelectors": false, - "enforcedNamespaceLabel": "", - "enforcedSampleLimit": nil, - "enforcedTargetLimit": nil, - "shards": 1, - }, - expect: util.Untab(` - job_name: probe/operator/probe - honor_timestamps: true - kubernetes_sd_configs: - - role: ingress - namespaces: - names: [operator] - metrics_path: /probe - params: - module: ["mod"] - relabel_configs: - - source_labels: [job] - target_label: __tmp_prometheus_job_name - - action: keep - regex: bar - source_labels: [__meta_kubernetes_ingress_label_foo] - - action: replace - regex: (.+);(.+);(.+) - replacement: $1://$2$3 - separator: ; - source_labels: - - __meta_kubernetes_ingress_scheme - - __address__ - - __meta_kubernetes_ingress_path - target_label: __param_target - - source_labels: [__meta_kubernetes_namespace] - target_label: namespace - - source_labels: [__meta_kubernetes_ingress_name] - target_label: ingress - - source_labels: [__param_target] - target_label: instance - - replacement: "" - target_label: __address__ - tls_config: - insecure_skip_verify: true - `), - }, - } - - for _, tc := range tt { - t.Run(tc.name, func(t *testing.T) { - vm, err := createVM(testStore()) - require.NoError(t, err) - - args := []string{ - "agentNamespace", "probe", "apiServer", "overrideHonorTimestamps", - "ignoreNamespaceSelectors", "enforcedNamespaceLabel", - "enforcedSampleLimit", "enforcedTargetLimit", "shards", - } - for _, arg := range args { - bb, err := jsonnetMarshal(tc.input[arg]) - require.NoError(t, err) - vm.TLACode(arg, string(bb)) - } - - actual, err := runSnippet(vm, "./component/metrics/probe.libsonnet", args...) 
- require.NoError(t, err) - if !assert.YAMLEq(t, tc.expect, actual) { - fmt.Fprintln(os.Stderr, actual) - } - }) - } -} - -func TestRelabelConfig(t *testing.T) { - tt := []struct { - name string - input interface{} - expect string - }{ - { - name: "full", - input: prom_v1.RelabelConfig{ - SourceLabels: []prom_v1.LabelName{"input_a", "input_b"}, - Separator: ";", - TargetLabel: "target_a", - Regex: "regex", - Modulus: 1234, - Replacement: "foobar", - Action: "replace", - }, - expect: util.Untab(` - source_labels: ["input_a", "input_b"] - separator: ";" - target_label: "target_a" - regex: regex - modulus: 1234 - replacement: foobar - action: replace - `), - }, - } - - for _, tc := range tt { - t.Run(tc.name, func(t *testing.T) { - vm, err := createVM(nil) - require.NoError(t, err) - bb, err := jsonnetMarshal(tc.input) - require.NoError(t, err) - - vm.TLACode("cfg", string(bb)) - actual, err := runSnippet(vm, "./component/metrics/relabel_config.libsonnet", "cfg") - require.NoError(t, err) - require.YAMLEq(t, tc.expect, actual) - }) - } -} - -func TestRemoteWrite(t *testing.T) { - tt := []struct { - name string - input map[string]interface{} - expect string - }{ - { - name: "bare", - input: map[string]interface{}{ - "namespace": "operator", - "rw": gragent.RemoteWriteSpec{ - URL: "http://cortex/api/prom/push", - }, - }, - expect: util.Untab(` - url: http://cortex/api/prom/push - `), - }, - { - name: "base configs", - input: map[string]interface{}{ - "namespace": "operator", - "rw": gragent.RemoteWriteSpec{ - Name: "cortex", - URL: "http://cortex/api/prom/push", - RemoteTimeout: "5m", - Headers: map[string]string{"foo": "bar"}, - }, - }, - expect: util.Untab(` - name: cortex - url: http://cortex/api/prom/push - remote_timeout: 5m - headers: - foo: bar - `), - }, - { - name: "write_relabel_configs", - input: map[string]interface{}{ - "namespace": "operator", - "rw": gragent.RemoteWriteSpec{ - URL: "http://cortex/api/prom/push", - WriteRelabelConfigs: []prom_v1.RelabelConfig{{ - SourceLabels: []prom_v1.LabelName{"__name__"}, - Action: "drop", - }}, - }, - }, - expect: util.Untab(` - url: http://cortex/api/prom/push - write_relabel_configs: - - source_labels: [__name__] - action: drop - `), - }, - { - name: "tls_config", - input: map[string]interface{}{ - "namespace": "operator", - "rw": gragent.RemoteWriteSpec{ - URL: "http://cortex/api/prom/push", - TLSConfig: &prom_v1.TLSConfig{ - CAFile: "ca", - CertFile: "cert", - }, - }, - }, - expect: util.Untab(` - url: http://cortex/api/prom/push - tls_config: - ca_file: ca - cert_file: cert - `), - }, - { - name: "basic_auth", - input: map[string]interface{}{ - "namespace": "operator", - "rw": gragent.RemoteWriteSpec{ - URL: "http://cortex/api/prom/push", - BasicAuth: &prom_v1.BasicAuth{ - Username: v1.SecretKeySelector{ - LocalObjectReference: v1.LocalObjectReference{Name: "obj"}, - Key: "key", - }, - Password: v1.SecretKeySelector{ - LocalObjectReference: v1.LocalObjectReference{Name: "obj"}, - Key: "key", - }, - }, - }, - }, - expect: util.Untab(` - url: http://cortex/api/prom/push - basic_auth: - username: secretkey - password_file: /var/lib/grafana-agent/secrets/_secrets_operator_obj_key - `), - }, - { - name: "bearer_token", - input: map[string]interface{}{ - "namespace": "operator", - "rw": gragent.RemoteWriteSpec{ - URL: "http://cortex/api/prom/push", - BearerToken: "my-token", - }, - }, - expect: util.Untab(` - url: http://cortex/api/prom/push - authorization: - type: Bearer - credentials: my-token - `), - }, - { - name: "bearer_token_file", - 
input: map[string]interface{}{ - "namespace": "operator", - "rw": gragent.RemoteWriteSpec{ - URL: "http://cortex/api/prom/push", - BearerTokenFile: "/path/to/file", - }, - }, - expect: util.Untab(` - url: http://cortex/api/prom/push - authorization: - type: Bearer - credentials_file: /path/to/file - `), - }, - { - name: "sigv4", - input: map[string]interface{}{ - "namespace": "operator", - "rw": gragent.RemoteWriteSpec{ - URL: "http://cortex/api/prom/push", - SigV4: &gragent.SigV4Config{ - Region: "region", - AccessKey: &v1.SecretKeySelector{ - LocalObjectReference: v1.LocalObjectReference{Name: "obj"}, - Key: "key", - }, - SecretKey: &v1.SecretKeySelector{ - LocalObjectReference: v1.LocalObjectReference{Name: "obj"}, - Key: "key", - }, - Profile: "profile", - RoleARN: "arn", - }, - }, - }, - expect: util.Untab(` - url: http://cortex/api/prom/push - sigv4: - region: region - access_key: secretkey - secret_key: secretkey - profile: profile - role_arn: arn - `), - }, - { - name: "queue_config", - input: map[string]interface{}{ - "namespace": "operator", - "rw": gragent.RemoteWriteSpec{ - URL: "http://cortex/api/prom/push", - QueueConfig: &gragent.QueueConfig{ - Capacity: 1000, - MinShards: 1, - MaxShards: 100, - MaxSamplesPerSend: 500, - BatchSendDeadline: "5m", - MinBackoff: "1m", - MaxBackoff: "5m", - }, - }, - }, - expect: util.Untab(` - url: http://cortex/api/prom/push - queue_config: - capacity: 1000 - min_shards: 1 - max_shards: 100 - max_samples_per_send: 500 - batch_send_deadline: 5m - min_backoff: 1m - max_backoff: 5m - `), - }, - { - name: "metadata_config", - input: map[string]interface{}{ - "namespace": "operator", - "rw": gragent.RemoteWriteSpec{ - URL: "http://cortex/api/prom/push", - MetadataConfig: &gragent.MetadataConfig{ - Send: true, - SendInterval: "5m", - }, - }, - }, - expect: util.Untab(` - url: http://cortex/api/prom/push - metadata_config: - send: true - send_interval: 5m - `), - }, - { - name: "proxy_url", - input: map[string]interface{}{ - "namespace": "operator", - "rw": gragent.RemoteWriteSpec{ - URL: "http://cortex/api/prom/push", - ProxyURL: "http://proxy", - }, - }, - expect: util.Untab(` - url: http://cortex/api/prom/push - proxy_url: http://proxy - `), - }, - } - - for _, tc := range tt { - t.Run(tc.name, func(t *testing.T) { - vm, err := createVM(testStore()) - require.NoError(t, err) - - args := []string{"namespace", "rw"} - for _, arg := range args { - bb, err := jsonnetMarshal(tc.input[arg]) - require.NoError(t, err) - vm.TLACode(arg, string(bb)) - } - - actual, err := runSnippet(vm, "./component/metrics/remote_write.libsonnet", args...) 
- require.NoError(t, err) - require.YAMLEq(t, tc.expect, actual) - }) - } -} - -func TestSafeTLSConfig(t *testing.T) { - tt := []struct { - name string - input map[string]interface{} - expect string - }{ - { - name: "configmap", - input: map[string]interface{}{ - "namespace": "operator", - "config": prom_v1.SafeTLSConfig{ - ServerName: "server", - InsecureSkipVerify: true, - CA: prom_v1.SecretOrConfigMap{ - ConfigMap: &v1.ConfigMapKeySelector{ - LocalObjectReference: v1.LocalObjectReference{Name: "obj"}, - Key: "key", - }, - }, - Cert: prom_v1.SecretOrConfigMap{ - ConfigMap: &v1.ConfigMapKeySelector{ - LocalObjectReference: v1.LocalObjectReference{Name: "obj"}, - Key: "key", - }, - }, - KeySecret: &v1.SecretKeySelector{ - LocalObjectReference: v1.LocalObjectReference{Name: "obj"}, - Key: "key", - }, - }, - }, - expect: util.Untab(` - ca_file: /var/lib/grafana-agent/secrets/_configMaps_operator_obj_key - cert_file: /var/lib/grafana-agent/secrets/_configMaps_operator_obj_key - key_file: /var/lib/grafana-agent/secrets/_secrets_operator_obj_key - server_name: server - insecure_skip_verify: true - `), - }, - { - name: "secrets", - input: map[string]interface{}{ - "namespace": "operator", - "config": prom_v1.SafeTLSConfig{ - ServerName: "server", - InsecureSkipVerify: true, - CA: prom_v1.SecretOrConfigMap{ - Secret: &v1.SecretKeySelector{ - LocalObjectReference: v1.LocalObjectReference{Name: "obj"}, - Key: "key", - }, - }, - Cert: prom_v1.SecretOrConfigMap{ - Secret: &v1.SecretKeySelector{ - LocalObjectReference: v1.LocalObjectReference{Name: "obj"}, - Key: "key", - }, - }, - KeySecret: &v1.SecretKeySelector{ - LocalObjectReference: v1.LocalObjectReference{Name: "obj"}, - Key: "key", - }, - }, - }, - expect: util.Untab(` - ca_file: /var/lib/grafana-agent/secrets/_secrets_operator_obj_key - cert_file: /var/lib/grafana-agent/secrets/_secrets_operator_obj_key - key_file: /var/lib/grafana-agent/secrets/_secrets_operator_obj_key - server_name: server - insecure_skip_verify: true - `), - }, - } - - for _, tc := range tt { - t.Run(tc.name, func(t *testing.T) { - vm, err := createVM(testStore()) - require.NoError(t, err) - - args := []string{"namespace", "config"} - for _, arg := range args { - bb, err := jsonnetMarshal(tc.input[arg]) - require.NoError(t, err) - vm.TLACode(arg, string(bb)) - } - - actual, err := runSnippet(vm, "./component/metrics/safe_tls_config.libsonnet", args...) 
- require.NoError(t, err) - require.YAMLEq(t, tc.expect, actual) - }) - } -} - -func TestServiceMonitor(t *testing.T) { - trueVal := true - tt := []struct { - name string - input map[string]interface{} - expect string - }{ - { - name: "default", - input: map[string]interface{}{ - "agentNamespace": "operator", - "monitor": prom_v1.ServiceMonitor{ - ObjectMeta: meta_v1.ObjectMeta{ - Namespace: "operator", - Name: "servicemonitor", - }, - }, - "endpoint": prom_v1.Endpoint{ - Port: "metrics", - FilterRunning: &trueVal, - }, - "index": 0, - "apiServer": prom_v1.APIServerConfig{}, - "overrideHonorLabels": false, - "overrideHonorTimestamps": false, - "ignoreNamespaceSelectors": false, - "enforcedNamespaceLabel": "", - "enforcedSampleLimit": nil, - "enforcedTargetLimit": nil, - "shards": 1, - }, - expect: util.Untab(` - job_name: serviceMonitor/operator/servicemonitor/0 - honor_labels: false - kubernetes_sd_configs: - - role: endpoints - namespaces: - names: [operator] - relabel_configs: - - source_labels: - - job - target_label: __tmp_prometheus_job_name - - action: keep - regex: metrics - source_labels: - - __meta_kubernetes_endpoint_port_name - - regex: Node;(.*) - replacement: $1 - separator: ; - source_labels: - - __meta_kubernetes_endpoint_address_target_kind - - __meta_kubernetes_endpoint_address_target_name - target_label: node - - regex: Pod;(.*) - replacement: $1 - separator: ; - source_labels: - - __meta_kubernetes_endpoint_address_target_kind - - __meta_kubernetes_endpoint_address_target_name - target_label: pod - - source_labels: - - __meta_kubernetes_namespace - target_label: namespace - - source_labels: - - __meta_kubernetes_service_name - target_label: service - - source_labels: - - __meta_kubernetes_pod_name - target_label: pod - - source_labels: - - __meta_kubernetes_pod_container_name - target_label: container - - source_labels: [__meta_kubernetes_pod_phase] - regex: (Failed|Succeeded) - action: drop - - replacement: $1 - source_labels: - - __meta_kubernetes_service_name - target_label: job - - replacement: metrics - target_label: endpoint - - action: hashmod - modulus: 1 - source_labels: - - __address__ - target_label: __tmp_hash - - action: keep - regex: $(SHARD) - source_labels: - - __tmp_hash - `), - }, - { - name: "no_filter_running", - input: map[string]interface{}{ - "agentNamespace": "operator", - "monitor": prom_v1.ServiceMonitor{ - ObjectMeta: meta_v1.ObjectMeta{ - Namespace: "operator", - Name: "servicemonitor", - }, - }, - "endpoint": prom_v1.Endpoint{ - Port: "metrics", - }, - "index": 0, - "apiServer": prom_v1.APIServerConfig{}, - "overrideHonorLabels": false, - "overrideHonorTimestamps": false, - "ignoreNamespaceSelectors": false, - "enforcedNamespaceLabel": "", - "enforcedSampleLimit": nil, - "enforcedTargetLimit": nil, - "shards": 1, - }, - expect: util.Untab(` - job_name: serviceMonitor/operator/servicemonitor/0 - honor_labels: false - kubernetes_sd_configs: - - role: endpoints - namespaces: - names: [operator] - relabel_configs: - - source_labels: - - job - target_label: __tmp_prometheus_job_name - - action: keep - regex: metrics - source_labels: - - __meta_kubernetes_endpoint_port_name - - regex: Node;(.*) - replacement: $1 - separator: ; - source_labels: - - __meta_kubernetes_endpoint_address_target_kind - - __meta_kubernetes_endpoint_address_target_name - target_label: node - - regex: Pod;(.*) - replacement: $1 - separator: ; - source_labels: - - __meta_kubernetes_endpoint_address_target_kind - - __meta_kubernetes_endpoint_address_target_name - 
target_label: pod - - source_labels: - - __meta_kubernetes_namespace - target_label: namespace - - source_labels: - - __meta_kubernetes_service_name - target_label: service - - source_labels: - - __meta_kubernetes_pod_name - target_label: pod - - source_labels: - - __meta_kubernetes_pod_container_name - target_label: container - - replacement: $1 - source_labels: - - __meta_kubernetes_service_name - target_label: job - - replacement: metrics - target_label: endpoint - - action: hashmod - modulus: 1 - source_labels: - - __address__ - target_label: __tmp_hash - - action: keep - regex: $(SHARD) - source_labels: - - __tmp_hash - `), - }, - } - - for _, tc := range tt { - t.Run(tc.name, func(t *testing.T) { - vm, err := createVM(testStore()) - require.NoError(t, err) - - args := []string{ - "agentNamespace", "monitor", "endpoint", "index", "apiServer", "overrideHonorLabels", - "overrideHonorTimestamps", "ignoreNamespaceSelectors", "enforcedNamespaceLabel", - "enforcedSampleLimit", "enforcedTargetLimit", "shards", - } - for _, arg := range args { - bb, err := jsonnetMarshal(tc.input[arg]) - require.NoError(t, err) - vm.TLACode(arg, string(bb)) - } - - actual, err := runSnippet(vm, "./component/metrics/service_monitor.libsonnet", args...) - require.NoError(t, err) - if !assert.YAMLEq(t, tc.expect, actual) { - fmt.Fprintln(os.Stderr, actual) - } - }) - } -} - -func TestTLSConfig(t *testing.T) { - tt := []struct { - name string - input map[string]interface{} - expect string - }{ - { - name: "passthrough", - input: map[string]interface{}{ - "namespace": "operator", - "config": prom_v1.TLSConfig{ - SafeTLSConfig: prom_v1.SafeTLSConfig{ - ServerName: "server", - InsecureSkipVerify: true, - CA: prom_v1.SecretOrConfigMap{ - Secret: &v1.SecretKeySelector{ - LocalObjectReference: v1.LocalObjectReference{Name: "obj"}, - Key: "key", - }, - }, - Cert: prom_v1.SecretOrConfigMap{ - Secret: &v1.SecretKeySelector{ - LocalObjectReference: v1.LocalObjectReference{Name: "obj"}, - Key: "key", - }, - }, - KeySecret: &v1.SecretKeySelector{ - LocalObjectReference: v1.LocalObjectReference{Name: "obj"}, - Key: "key", - }, - }, - }, - }, - expect: util.Untab(` - ca_file: /var/lib/grafana-agent/secrets/_secrets_operator_obj_key - cert_file: /var/lib/grafana-agent/secrets/_secrets_operator_obj_key - key_file: /var/lib/grafana-agent/secrets/_secrets_operator_obj_key - server_name: server - insecure_skip_verify: true - `), - }, - { - name: "overrides", - input: map[string]interface{}{ - "namespace": "operator", - "config": prom_v1.TLSConfig{ - SafeTLSConfig: prom_v1.SafeTLSConfig{ - ServerName: "server", - InsecureSkipVerify: true, - CA: prom_v1.SecretOrConfigMap{ - Secret: &v1.SecretKeySelector{ - LocalObjectReference: v1.LocalObjectReference{Name: "obj"}, - Key: "key", - }, - }, - Cert: prom_v1.SecretOrConfigMap{ - Secret: &v1.SecretKeySelector{ - LocalObjectReference: v1.LocalObjectReference{Name: "obj"}, - Key: "key", - }, - }, - KeySecret: &v1.SecretKeySelector{ - LocalObjectReference: v1.LocalObjectReference{Name: "obj"}, - Key: "key", - }, - }, - CAFile: "ca", - CertFile: "cert", - KeyFile: "key", - }, - }, - expect: util.Untab(` - ca_file: ca - cert_file: cert - key_file: key - server_name: server - insecure_skip_verify: true - `), - }, - } - - for _, tc := range tt { - t.Run(tc.name, func(t *testing.T) { - vm, err := createVM(testStore()) - require.NoError(t, err) - - args := []string{"namespace", "config"} - for _, arg := range args { - bb, err := jsonnetMarshal(tc.input[arg]) - require.NoError(t, err) - 
vm.TLACode(arg, string(bb))
- }
-
- actual, err := runSnippet(vm, "./component/metrics/tls_config.libsonnet", args...)
- require.NoError(t, err)
- require.YAMLEq(t, tc.expect, actual)
- })
- }
-}
-
-func runSnippet(vm *jsonnet.VM, filename string, args ...string) (string, error) {
- boundArgs := make([]string, len(args))
- for i := range args {
- boundArgs[i] = fmt.Sprintf("%[1]s=%[1]s", args[i])
- }
-
- return vm.EvaluateAnonymousSnippet(
- filename,
- fmt.Sprintf(`
- local marshal = import './ext/marshal.libsonnet';
- local optionals = import './ext/optionals.libsonnet';
- local eval = import '%s';
- function(%s) marshal.YAML(optionals.trim(eval(%s)))
- `, filename, strings.Join(args, ","), strings.Join(boundArgs, ",")),
- )
-}
-
-func testStore() assets.SecretStore {
- store := make(assets.SecretStore)
-
- store[assets.KeyForConfigMap("operator", &v1.ConfigMapKeySelector{
- LocalObjectReference: v1.LocalObjectReference{Name: "obj"},
- Key: "key",
- })] = "secretcm"
-
- store[assets.KeyForSecret("operator", &v1.SecretKeySelector{
- LocalObjectReference: v1.LocalObjectReference{Name: "obj"},
- Key: "key",
- })] = "secretkey"
-
- return store
-}
diff --git a/internal/static/operator/config/templates/agent-integrations.libsonnet b/internal/static/operator/config/templates/agent-integrations.libsonnet
deleted file mode 100644
index 304a0c6967..0000000000
--- a/internal/static/operator/config/templates/agent-integrations.libsonnet
+++ /dev/null
@@ -1,135 +0,0 @@
-// agent-integrations.libsonnet is the entrypoint for rendering a Grafana Agent
-// config file for integrations based on the Operator custom resources.
-//
-// When writing an object, any field with a null value will be removed from the
-// final YAML. This is useful as we don't want to always translate unfilled
-// values from the custom resources to a field in the YAML.
-//
-// A series of helper methods to convert default values into null (so they can
-// be trimmed) lives in ./ext/optionals.libsonnet.
-//
-// When writing a new function, please document the expected types of the
-// arguments.
-
-local marshal = import 'ext/marshal.libsonnet';
-local optionals = import 'ext/optionals.libsonnet';
-
-local new_integration = import './integrations.libsonnet';
-local new_logs_instance = import './logs.libsonnet';
-local new_metrics_instance = import './metrics.libsonnet';
-local new_external_labels = import 'component/metrics/external_labels.libsonnet';
-local new_remote_write = import 'component/metrics/remote_write.libsonnet';
-
-local calculateShards(requested) =
- if requested == null then 1
- else if requested > 1 then requested
- else 1;
-
-// Renders a new config for integrations. The ctx should have all
-// MetricsInstances and LogsInstances so integrations can self-collect
-// telemetry data, but *Monitor-like resources are ignored.
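The null-trimming behaviour these headers describe can be sketched standalone. The trim below is a hypothetical, shallow stand-in for optionals.trim from ./ext/optionals.libsonnet (the real helper also handles nested values); it only assumes a plain jsonnet interpreter:

    // Shallow sketch of null-trimming: drop any field whose value is null,
    // so unset optionals never reach the rendered YAML.
    local trim(obj) = {
      [k]: obj[k]
      for k in std.objectFields(obj)
      if obj[k] != null
    };

    trim({
      url: 'http://cortex/api/prom/push',
      name: null,            // unset optional: removed
      remote_timeout: null,  // unset optional: removed
    })
    // evaluates to { "url": "http://cortex/api/prom/push" }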
-//
-// @param {config.Deployment} ctx
-function(ctx) marshal.YAML(optionals.trim({
- local spec = ctx.Agent.Spec,
- local metrics = spec.Metrics,
- local logs = spec.Logs,
- local namespace = ctx.Agent.ObjectMeta.Namespace,
-
- server: {
- log_level: optionals.string(spec.LogLevel),
- log_format: optionals.string(spec.LogFormat),
- },
-
- metrics: {
- local scrubbed_instances = std.map(
- function(inst) {
- Instance: inst.Instance,
- ServiceMonitors: [],
- PodMonitors: [],
- Probes: [],
- },
- ctx.Metrics,
- ),
-
- wal_directory: '/var/lib/grafana-agent/data',
- global: {
- // NOTE(rfratto): we don't want to add the replica label here, since
- // there will never be more than one HA replica for a running
- // integration. Adding a replica label will cause it to be subject to
- // HA dedupe and risk being discarded depending on what the active
- // replica is server-side.
- external_labels: optionals.object(new_external_labels(ctx, false)),
- scrape_interval: optionals.string(metrics.ScrapeInterval),
- scrape_timeout: optionals.string(metrics.ScrapeTimeout),
- remote_write: optionals.array(std.map(
- function(rw) new_remote_write(ctx.Agent.ObjectMeta.Namespace, rw),
- metrics.RemoteWrite,
- )),
- },
- configs: optionals.array(std.map(
- function(inst) new_metrics_instance(
- agentNamespace=ctx.Agent.ObjectMeta.Namespace,
- instance=inst,
- apiServer=spec.APIServerConfig,
- overrideHonorLabels=metrics.OverrideHonorLabels,
- overrideHonorTimestamps=metrics.OverrideHonorTimestamps,
- ignoreNamespaceSelectors=metrics.IgnoreNamespaceSelectors,
- enforcedNamespaceLabel=metrics.EnforcedNamespaceLabel,
- enforcedSampleLimit=metrics.EnforcedSampleLimit,
- enforcedTargetLimit=metrics.EnforcedTargetLimit,
- shards=calculateShards(metrics.Shards),
- ),
- scrubbed_instances,
- )),
- },
-
- logs: {
- local scrubbed_instances = std.map(
- function(inst) {
- Instance: inst.Instance,
- PodLogs: [],
- },
- ctx.Logs,
- ),
-
- positions_directory: '/var/lib/grafana-agent/data',
- configs: optionals.array(std.map(
- function(logs_inst) new_logs_instance(
- agent=ctx.Agent,
- global=logs,
- instance=logs_inst,
- apiServer=spec.APIServerConfig,
- ignoreNamespaceSelectors=logs.IgnoreNamespaceSelectors,
- enforcedNamespaceLabel=logs.EnforcedNamespaceLabel,
- ),
- scrubbed_instances,
- )),
- },
-
- integrations: {
- // Integrations should opt in to autoscrape.
- metrics: {
- autoscrape: {
- enable: false,
- },
- },
- } + (
- // Iterate over our Integration CRs and map them to an object. All
- // integrations are stored in a _configs array, even if they're
- // unique.
- std.foldl(
- function(acc, element) acc {
- [element.Instance.Spec.Name + '_configs']: (
- local key = element.Instance.Spec.Name + '_configs';
- local entry = new_integration(element.Instance);
-
- if std.objectHas(acc, key) then acc[key] + [entry]
- else [entry]
- ),
- },
- ctx.Integrations,
- {},
- )
- ),
-}))
diff --git a/internal/static/operator/config/templates/agent-logs.libsonnet b/internal/static/operator/config/templates/agent-logs.libsonnet
deleted file mode 100644
index 6292480d08..0000000000
--- a/internal/static/operator/config/templates/agent-logs.libsonnet
+++ /dev/null
@@ -1,44 +0,0 @@
-// agent-logs.libsonnet is the entrypoint for rendering a Grafana Agent
-// config file for logs based on the Operator custom resources.
-//
-// When writing an object, any field with a null value will be removed from the
-// final YAML. This is useful as we don't want to always translate unfilled
-// values from the custom resources to a field in the YAML.
-//
-// A series of helper methods to convert default values into null (so they can
-// be trimmed) lives in ./ext/optionals.libsonnet.
-//
-// When writing a new function, please document the expected types of the
-// arguments.
-
-local marshal = import 'ext/marshal.libsonnet';
-local optionals = import 'ext/optionals.libsonnet';
-
-local new_logs_instance = import './logs.libsonnet';
-
-// @param {config.Deployment} ctx
-function(ctx) marshal.YAML(optionals.trim({
- local spec = ctx.Agent.Spec,
- local logs = spec.Logs,
- local namespace = ctx.Agent.ObjectMeta.Namespace,
-
- server: {
- log_level: optionals.string(spec.LogLevel),
- log_format: optionals.string(spec.LogFormat),
- },
-
- logs: {
- positions_directory: '/var/lib/grafana-agent/data',
- configs: optionals.array(std.map(
- function(logs_inst) new_logs_instance(
- agent=ctx.Agent,
- global=logs,
- instance=logs_inst,
- apiServer=spec.APIServerConfig,
- ignoreNamespaceSelectors=logs.IgnoreNamespaceSelectors,
- enforcedNamespaceLabel=logs.EnforcedNamespaceLabel,
- ),
- ctx.Logs,
- )),
- },
-}))
diff --git a/internal/static/operator/config/templates/agent-metrics.libsonnet b/internal/static/operator/config/templates/agent-metrics.libsonnet
deleted file mode 100644
index 6effebb0dc..0000000000
--- a/internal/static/operator/config/templates/agent-metrics.libsonnet
+++ /dev/null
@@ -1,64 +0,0 @@
-// agent-metrics.libsonnet is the entrypoint for rendering a Grafana Agent
-// config file for metrics based on the Operator custom resources.
-//
-// When writing an object, any field with a null value will be removed from the
-// final YAML. This is useful as we don't want to always translate unfilled
-// values from the custom resources to a field in the YAML.
-//
-// A series of helper methods to convert default values into null (so they can
-// be trimmed) lives in ./ext/optionals.libsonnet.
-//
-// When writing a new function, please document the expected types of the
-// arguments.
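The calculateShards helper repeated in these entrypoints (agent-integrations above, agent-metrics just below) clamps the requested shard count. A quick standalone check of the same logic, evaluated directly:

    local calculateShards(requested) =
      if requested == null then 1
      else if requested > 1 then requested
      else 1;

    {
      unset: calculateShards(null),  // 1: CR sets no Shards field
      zero: calculateShards(0),      // 1: sub-1 requests are clamped
      many: calculateShards(16),     // 16: valid counts pass through
    }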
- -local marshal = import 'ext/marshal.libsonnet'; -local optionals = import 'ext/optionals.libsonnet'; - -local new_metrics_instance = import './metrics.libsonnet'; -local new_external_labels = import 'component/metrics/external_labels.libsonnet'; -local new_remote_write = import 'component/metrics/remote_write.libsonnet'; - -local calculateShards(requested) = - if requested == null then 1 - else if requested > 1 then requested - else 1; - -// @param {config.Deployment} ctx -function(ctx) marshal.YAML(optionals.trim({ - local spec = ctx.Agent.Spec, - local metrics = spec.Metrics, - local namespace = ctx.Agent.ObjectMeta.Namespace, - - server: { - log_level: optionals.string(spec.LogLevel), - log_format: optionals.string(spec.LogFormat), - }, - - metrics: { - wal_directory: '/var/lib/grafana-agent/data', - global: { - external_labels: optionals.object(new_external_labels(ctx, true)), - scrape_interval: optionals.string(metrics.ScrapeInterval), - scrape_timeout: optionals.string(metrics.ScrapeTimeout), - remote_write: optionals.array(std.map( - function(rw) new_remote_write(ctx.Agent.ObjectMeta.Namespace, rw), - metrics.RemoteWrite, - )), - }, - configs: optionals.array(std.map( - function(inst) new_metrics_instance( - agentNamespace=ctx.Agent.ObjectMeta.Namespace, - instance=inst, - apiServer=spec.APIServerConfig, - overrideHonorLabels=metrics.OverrideHonorLabels, - overrideHonorTimestamps=metrics.OverrideHonorTimestamps, - ignoreNamespaceSelectors=metrics.IgnoreNamespaceSelectors, - enforcedNamespaceLabel=metrics.EnforcedNamespaceLabel, - enforcedSampleLimit=metrics.EnforcedSampleLimit, - enforcedTargetLimit=metrics.EnforcedTargetLimit, - shards=calculateShards(metrics.Shards), - ), - ctx.Metrics, - )), - }, -})) diff --git a/internal/static/operator/config/templates/component/logs/client.libsonnet b/internal/static/operator/config/templates/component/logs/client.libsonnet deleted file mode 100644 index 295f531593..0000000000 --- a/internal/static/operator/config/templates/component/logs/client.libsonnet +++ /dev/null @@ -1,51 +0,0 @@ -local optionals = import 'ext/optionals.libsonnet'; -local secrets = import 'ext/secrets.libsonnet'; - -local new_external_labels = import './external_labels.libsonnet'; -local new_tls_config = import 'component/metrics/tls_config.libsonnet'; - -// Generates the content of a client object to send logs to Loki. -// -// @param {GrafanaAgent} agent -// @param {string} namespace - namespace of spec. 
-// @param {LogsClientSpec} spec
-function(agent, namespace, spec) {
- url: spec.URL,
- tls_config:
- if spec.TLSConfig != null then new_tls_config(namespace, spec.TLSConfig),
- proxy_url: optionals.string(spec.ProxyURL),
-
- tenant_id: optionals.string(spec.TenantID),
-
- timeout: optionals.string(spec.Timeout),
- batchwait: optionals.string(spec.BatchWait),
- batchsize: optionals.number(spec.BatchSize),
-
- basic_auth: if spec.BasicAuth != null then {
- username: secrets.valueForSecret(namespace, spec.BasicAuth.Username),
- password: secrets.valueForSecret(namespace, spec.BasicAuth.Password),
- },
- oauth2: (
- if spec.OAuth2 != null then {
- // TODO: client_id can also be stored in a config map:
- // secrets.valueForConfigMap(namespace, spec.OAuth2.ClientID.ConfigMap),
- local client_id = secrets.valueForSecret(namespace, spec.OAuth2.ClientID.Secret),
-
- client_id: client_id,
- client_secret_file: secrets.pathForSecret(namespace, spec.OAuth2.ClientSecret),
- endpoint_params: spec.OAuth2.EndpointParams,
- scopes: spec.OAuth2.Scopes,
- token_url: spec.OAuth2.TokenURL,
- }
- ),
- bearer_token: optionals.string(spec.BearerToken),
- bearer_token_file: optionals.string(spec.BearerTokenFile),
-
- backoff_config: if spec.BackoffConfig != null then {
- min_period: optionals.string(spec.BackoffConfig.MinPeriod),
- max_period: optionals.string(spec.BackoffConfig.MaxPeriod),
- max_retries: optionals.number(spec.BackoffConfig.MaxRetries),
- },
-
- external_labels: optionals.object(new_external_labels(agent, spec)),
-}
diff --git a/internal/static/operator/config/templates/component/logs/external_labels.libsonnet b/internal/static/operator/config/templates/component/logs/external_labels.libsonnet
deleted file mode 100644
index 563e899552..0000000000
--- a/internal/static/operator/config/templates/component/logs/external_labels.libsonnet
+++ /dev/null
@@ -1,27 +0,0 @@
-// Generates an external_label mapping. This includes the user-provided labels
-// as well as the injected cluster label.
-//
-// @param {GrafanaAgent} agent
-// @param {LogsClientSpec} client
-function(agent, client) (
- local meta = agent.ObjectMeta;
- local logs = agent.Spec.Logs;
-
- // Provide the cluster label first. Doing it this way allows the user to
- // override with a value they choose.
- (
- local clusterValue = '%s/%s' % [meta.Namespace, meta.Name];
- local clusterLabel = logs.LogsExternalLabelName;
-
- if clusterLabel == null then { cluster: clusterValue }
- else if clusterLabel != '' then { [clusterLabel]: clusterValue }
- else {}
- ) +
-
- // Finally, add in any user-configured labels.
- (
- if client.ExternalLabels != null
- then client.ExternalLabels
- else {}
- )
-)
diff --git a/internal/static/operator/config/templates/component/logs/pod_logs.libsonnet b/internal/static/operator/config/templates/component/logs/pod_logs.libsonnet
deleted file mode 100644
index 7ddb5c7e26..0000000000
--- a/internal/static/operator/config/templates/component/logs/pod_logs.libsonnet
+++ /dev/null
@@ -1,152 +0,0 @@
-local optionals = import 'ext/optionals.libsonnet';
-local secrets = import 'ext/secrets.libsonnet';
-local k8s = import 'utils/k8s.libsonnet';
-
-local new_relabel_config = import './relabel_config.libsonnet';
-local new_safe_tls_config = import './safe_tls_config.libsonnet';
-local new_pipeline_stage = import './stages.libsonnet';
-local new_kube_sd_config = import 'component/metrics/kube_sd_config.libsonnet';
-
-// Generates a scrape_config from a PodLogs.
-//
-// @param {string} agentNamespace - Namespace the GrafanaAgent CR is in.
-// @param {PodLogs} podLogs
-// @param {APIServerConfig} apiServer
-// @param {boolean} ignoreNamespaceSelectors
-// @param {string} enforcedNamespaceLabel
-function(
- agentNamespace,
- podLogs,
- apiServer,
- ignoreNamespaceSelectors,
- enforcedNamespaceLabel,
-) {
- local meta = podLogs.ObjectMeta,
-
- job_name: 'podLogs/%s/%s' % [meta.Namespace, meta.Name],
-
- kubernetes_sd_configs: [
- new_kube_sd_config(
- namespace=agentNamespace,
- namespaces=k8s.namespacesFromSelector(
- podLogs.Spec.NamespaceSelector,
- meta.Namespace,
- ignoreNamespaceSelectors,
- ),
- apiServer=apiServer,
- role='pod',
- ),
- ],
-
- pipeline_stages: optionals.array(std.map(
- function(pipeline) new_pipeline_stage(pipeline),
- podLogs.Spec.PipelineStages,
- )),
-
- relabel_configs: (
- [{ source_labels: ['job'], target_label: '__tmp_prometheus_job_name' }] +
-
- // Match on pod labels.
- std.map(
- function(k) {
- source_labels: ['__meta_kubernetes_pod_label_' + k8s.sanitize(k)],
- regex: podLogs.Spec.Selector.MatchLabels[k],
- action: 'keep',
- },
- // Keep the output consistent by sorting the keys first.
- std.sort(std.objectFields(
- if podLogs.Spec.Selector.MatchLabels != null
- then podLogs.Spec.Selector.MatchLabels
- else {}
- )),
- ) +
-
- // Set-based label matching. We have to map the valid relations
- // `In`, `NotIn`, `Exists`, and `DoesNotExist` into relabeling rules.
- std.map(
- function(exp) (
- if exp.Operator == 'In' then {
- source_labels: ['__meta_kubernetes_pod_label_' + k8s.sanitize(exp.Key)],
- regex: std.join('|', exp.Values),
- action: 'keep',
- } else if exp.Operator == 'NotIn' then {
- source_labels: ['__meta_kubernetes_pod_label_' + k8s.sanitize(exp.Key)],
- regex: std.join('|', exp.Values),
- action: 'drop',
- } else if exp.Operator == 'Exists' then {
- source_labels: ['__meta_kubernetes_pod_labelpresent_' + k8s.sanitize(exp.Key)],
- regex: 'true',
- action: 'keep',
- } else if exp.Operator == 'DoesNotExist' then {
- source_labels: ['__meta_kubernetes_pod_labelpresent_' + k8s.sanitize(exp.Key)],
- regex: 'true',
- action: 'drop',
- }
- ),
- k8s.array(podLogs.Spec.Selector.MatchExpressions),
- ) +
-
- // Relabel namespace, pod, and service metalabels into proper labels.
- [{
- source_labels: ['__meta_kubernetes_namespace'],
- target_label: 'namespace',
- }, {
- source_labels: ['__meta_kubernetes_service_name'],
- target_label: 'service',
- }, {
- source_labels: ['__meta_kubernetes_pod_name'],
- target_label: 'pod',
- }, {
- source_labels: ['__meta_kubernetes_pod_container_name'],
- target_label: 'container',
- }] +
-
- // Relabel targetLabels from the pod onto the target.
- std.map(
- function(l) {
- source_labels: ['__meta_kubernetes_pod_label_' + k8s.sanitize(l)],
- target_label: k8s.sanitize(l),
- regex: '(.+)',
- replacement: '$1',
- },
- k8s.array(podLogs.Spec.PodTargetLabels)
- ) +
-
- // By default, generate a safe job name from the service name.
- std.filter(function(e) e != null, [
- {
- target_label: 'job',
- replacement: '%s/%s' % [meta.Namespace, meta.Name],
- },
- if podLogs.Spec.JobLabel != '' then {
- source_labels: ['__meta_kubernetes_pod_label_' + k8s.sanitize(podLogs.Spec.JobLabel)],
- target_label: 'job',
- regex: '(.+)',
- replacement: '$1',
- },
- ]) +
-
- // Kubernetes puts logs under subdirectories keyed by pod UID and container_name.
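To make the resulting glob concrete: a quick simulation of the __path__ rule that follows, assuming standard Prometheus relabel semantics (the two source labels joined by the '/' separator, captured by the default regex, then substituted for $1 in the replacement). The UID and container name here are hypothetical:

    // Simulate the __path__ rule: pod UID and container name are joined
    // with '/' and substituted for $1 in the replacement string.
    local pathFor(uid, container) =
      std.strReplace('/var/log/pods/*$1/*.log', '$1', uid + '/' + container);

    pathFor('8d9b1a2c-uid', 'app')
    // => '/var/log/pods/*8d9b1a2c-uid/app/*.log'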
- [{
- source_labels: ['__meta_kubernetes_pod_uid', '__meta_kubernetes_pod_container_name'],
- target_label: '__path__',
- separator: '/',
- replacement: '/var/log/pods/*$1/*.log',
- }] +
-
- std.map(
- function(c) new_relabel_config(c),
- k8s.array(podLogs.Spec.RelabelConfigs),
- ) +
-
- // Because of security risks, whenever enforcedNamespaceLabel is set,
- // we want to append it to the relabel_configs as the last relabeling to
- // ensure it overrides all other relabelings.
- std.filter(function(e) e != null, [
- if enforcedNamespaceLabel != '' then {
- target_label: enforcedNamespaceLabel,
- replacement: podLogs.ObjectMeta.Namespace,
- },
- ])
- ),
-}
diff --git a/internal/static/operator/config/templates/component/logs/relabel_config.libsonnet b/internal/static/operator/config/templates/component/logs/relabel_config.libsonnet
deleted file mode 100644
index ff82d9f870..0000000000
--- a/internal/static/operator/config/templates/component/logs/relabel_config.libsonnet
+++ /dev/null
@@ -1,12 +0,0 @@
-local optionals = import 'ext/optionals.libsonnet';
-
-// @param {RelabelConfig} cfg
-function(cfg) {
- source_labels: optionals.array(cfg.SourceLabels),
- separator: optionals.string(cfg.Separator),
- regex: optionals.string(cfg.Regex),
- modulus: optionals.number(cfg.Modulus),
- target_label: optionals.string(cfg.TargetLabel),
- replacement: optionals.string(cfg.Replacement),
- action: optionals.string(cfg.Action),
-}
diff --git a/internal/static/operator/config/templates/component/logs/stages.libsonnet b/internal/static/operator/config/templates/component/logs/stages.libsonnet
deleted file mode 100644
index c0ab0990e1..0000000000
--- a/internal/static/operator/config/templates/component/logs/stages.libsonnet
+++ /dev/null
@@ -1,145 +0,0 @@
-local marshal = import 'ext/marshal.libsonnet';
-local optionals = import 'ext/optionals.libsonnet';
-
-// Creates a new stage.
-//
-// @param {PipelineStageSpec} spec
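One detail of the function below worth calling out: CR fields arrive with arbitrary casing, so the metrics stage normalizes them to promtail's canonical type names and otherwise passes the value through for promtail to reject. The same branch, evaluated standalone:

    // Sketch of the metric-type normalization used in the metrics stage:
    // case-insensitive input in, canonical promtail casing out, unknown
    // values passed through unchanged.
    local canonicalType(t) =
      local lower = std.asciiLower(t);
      if lower == 'counter' then 'Counter'
      else if lower == 'gauge' then 'Gauge'
      else if lower == 'histogram' then 'Histogram'
      else t;

    [canonicalType('COUNTER'), canonicalType('gauge'), canonicalType('weird')]
    // => ["Counter", "Gauge", "weird"]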
-local new_stage = function(spec) { - // spec.Docker :: *DockerStageSpec - docker: if spec.Docker != null then {}, - - // spec.CRI :: *CRIStageSpec - cri: if spec.CRI != null then {}, - - // spec.Regex :: *RegexStageSpec - regex: if spec.Regex != null then { - expression: spec.Regex.Expression, - source: optionals.string(spec.Regex.Source), - }, - - // spec.JSON :: *JSONStageSpec - json: if spec.JSON != null then { - expressions: spec.JSON.Expressions, - source: optionals.string(spec.JSON.Source), - }, - - // spec.Replace :: *ReplaceStageSpec - replace: if spec.Replace != null then { - expression: spec.Replace.Expression, - source: optionals.string(spec.Replace.Source), - replace: optionals.string(spec.Replace.Replace), - }, - - // spec.Template :: *TemplateStageSpec - template: if spec.Template != null then { - source: spec.Template.Source, - template: spec.Template.Template, - }, - - // spec.Pack :: *PackStageSpec - pack: if spec.Pack != null then { - labels: optionals.array(spec.Pack.Labels), - ingest_timestamp: optionals.bool(spec.Pack.IngestTimestamp), - }, - - // spec.Timestamp :: *TimestampStageSpec - timestamp: if spec.Timestamp != null then { - source: spec.Timestamp.Source, - format: spec.Timestamp.Format, - fallback_formats: optionals.array(spec.Timestamp.FallbackFormats), - location: optionals.string(spec.Timestamp.Location), - action_on_failure: optionals.string(spec.Timestamp.ActionOnFailure), - }, - - // spec.Output :: *OutputStageSpec - output: if spec.Output != null then { - source: spec.Output.Source, - }, - - // spec.LabelDrop :: []string - labeldrop: optionals.array(spec.LabelDrop), - - // spec.LabelAllow :: []string - labelallow: optionals.array(spec.LabelAllow), - - // spec.Labels :: map[string]*string - labels: optionals.object(spec.Labels), - - // spec.Limit :: *LimitStageSpec - limit: if spec.Limit != null then { - rate: optionals.number(spec.Limit.Rate), - burst: optionals.number(spec.Limit.Burst), - drop: if spec.Limit.Drop != null then spec.Limit.Drop else false - }, - - // spec.Metrics :: map[string]MetricsStageSpec - metrics: if spec.Metrics != null then optionals.object(std.mapWithKey( - function(key, value) { - local metricType = std.asciiLower(value.Type), - type: - if metricType == 'counter' then 'Counter' - else if metricType == 'gauge' then 'Gauge' - else if metricType == 'histogram' then 'Histogram' - else value.Type, // Promtail will complain but for now it's better to do this than crash - - description: optionals.string(value.Description), - prefix: optionals.string(value.Prefix), - source: optionals.string(value.Source), - max_idle_duration: optionals.string(value.MaxIdleDuration), - - config: { - match_all: optionals.bool(value.MatchAll), - count_entry_bytes: optionals.bool(value.CountEntryBytes), - value: optionals.string(value.Value), - action: value.Action, - buckets: if value.Buckets != null then optionals.array(std.map( - function(bucket) - local val = std.parseJson(bucket); - assert std.isNumber(val) : 'bucket must be convertible to float'; - val, - value.Buckets, - )), - }, - }, - spec.Metrics, - )), - - multiline: if spec.Multiline != null then { - firstline: spec.Multiline.FirstLine, - max_wait_time: optionals.string(spec.Multiline.MaxWaitTime), - max_lines: optionals.number(spec.Multiline.MaxLines), - }, - - // spec.Tenant :: *TenantStageSpec - tenant: if spec.Tenant != null then { - label: optionals.string(spec.Tenant.Label), - source: optionals.string(spec.Tenant.Source), - value: optionals.string(spec.Tenant.Value), - }, - - // 
spec.Match :: *MatchStageSpec - match: if spec.Match != null then { - selector: spec.Match.Selector, - pipeline_name: optionals.string(spec.Match.PipelineName), - action: optionals.string(spec.Match.Action), - drop_counter_reason: optionals.string(spec.Match.DropCounterReason), - stages: if spec.Match.Stages != '' then ( - std.map( - function(stage) new_stage(stage), - marshal.intoStages(spec.Match.Stages), - ) - ), - }, - - // spec.Drop :: *DropStageSpec - drop: if spec.Drop != null then { - source: optionals.string(spec.Drop.Source), - expression: optionals.string(spec.Drop.Expression), - value: optionals.string(spec.Drop.Value), - older_than: optionals.string(spec.Drop.OlderThan), - longer_than: optionals.string(spec.Drop.LongerThan), - drop_counter_reason: optionals.string(spec.Drop.DropCounterReason), - }, -}; - -new_stage diff --git a/internal/static/operator/config/templates/component/metrics/external_labels.libsonnet b/internal/static/operator/config/templates/component/metrics/external_labels.libsonnet deleted file mode 100644 index 9a5d3cbc50..0000000000 --- a/internal/static/operator/config/templates/component/metrics/external_labels.libsonnet +++ /dev/null @@ -1,38 +0,0 @@ -// Generates an external_label mapping. This includes the -// user-provided labels as well as the injected cluster and -// replica labels. -// -// @param {config.Deployment} ctx -// @param {bool} addReplica -function(ctx, addReplica) ( - local meta = ctx.Agent.ObjectMeta; - local metrics = ctx.Agent.Spec.Metrics; - - // Provide the cluster label first. Doing it this way allows the user to - // override with a value they choose. - ( - local clusterValue = '%s/%s' % [meta.Namespace, meta.Name]; - local clusterLabel = metrics.MetricsExternalLabelName; - - if clusterLabel == null then { cluster: clusterValue } - else if clusterLabel != '' then { [clusterLabel]: clusterValue } - else {} - ) + - - // Then add in any user-configured labels. - ( - if metrics.ExternalLabels == null then {} - else metrics.ExternalLabels - ) + - - // Finally, add the replica label. We don't want the user to overwrite the - // replica label since it can cause duplicate sample problems. - if !addReplica then {} else ( - local replicaValue = 'replica-$(STATEFULSET_ORDINAL_NUMBER)'; - local replicaLabel = metrics.ReplicaExternalLabelName; - - if replicaLabel == null then { __replica__: replicaValue } - else if replicaLabel != '' then { [replicaLabel]: replicaValue } - else {} - ) -) diff --git a/internal/static/operator/config/templates/component/metrics/kube_sd_config.libsonnet b/internal/static/operator/config/templates/component/metrics/kube_sd_config.libsonnet deleted file mode 100644 index 9220e0b1ba..0000000000 --- a/internal/static/operator/config/templates/component/metrics/kube_sd_config.libsonnet +++ /dev/null @@ -1,42 +0,0 @@ -local optionals = import 'ext/optionals.libsonnet'; -local secrets = import 'ext/secrets.libsonnet'; -local k8s = import 'utils/k8s.libsonnet'; - -local new_tls_config = import './tls_config.libsonnet'; - -// kubernetes_sd_config returns a kubernetes_sd_config entry. -// -// @param {string} namespace - Namespace of GrafanaAgent resource -// @param {string[]} namespaces - Namespaces to discover resources in -// @param {APIServerConfig} apiServer - config to use for k8s discovery. -// @param {string} role - role of k8s resources to discover. 
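A reduced sketch of the namespace-scoping rule the function below applies: the namespaces block is only emitted when there is something to scope to, since an empty list means cluster-wide discovery. std.prune stands in here for the trim pass that strips the resulting null field in the real templates:

    // Only emit a namespaces block when the discovery should be scoped.
    local kube_sd(role, namespaces) = std.prune({
      role: role,
      namespaces: if std.length(namespaces) > 0 then { names: namespaces },
    });

    {
      scoped: kube_sd('pod', ['operator']),
      clusterWide: kube_sd('pod', []),  // => { "role": "pod" }
    }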
-function(
- namespace,
- namespaces,
- apiServer,
- role,
-) {
- role: role,
- namespaces: if std.length(k8s.array(namespaces)) > 0 then {
- names: namespaces,
- },
-
- api_server: if apiServer != null then optionals.string(apiServer.Host),
-
- basic_auth: if apiServer != null && apiServer.BasicAuth != null then {
- username: secrets.valueForSecret(namespace, apiServer.BasicAuth.Username),
- password: secrets.valueForSecret(namespace, apiServer.BasicAuth.Password),
- },
-
- local bearerToken = if apiServer != null then optionals.string(apiServer.BearerToken),
- local bearerTokenFile = if apiServer != null then optionals.string(apiServer.BearerTokenFile),
-
- authorization: if bearerToken != null || bearerTokenFile != null then {
- type: 'Bearer',
- credentials: bearerToken,
- credentials_file: bearerTokenFile,
- },
-
- tls_config: if apiServer != null && apiServer.TLSConfig != null then
- new_tls_config(namespace, apiServer.TLSConfig),
-}
diff --git a/internal/static/operator/config/templates/component/metrics/pod_monitor.libsonnet b/internal/static/operator/config/templates/component/metrics/pod_monitor.libsonnet
deleted file mode 100644
index 862faba1a6..0000000000
--- a/internal/static/operator/config/templates/component/metrics/pod_monitor.libsonnet
+++ /dev/null
@@ -1,248 +0,0 @@
-local optionals = import 'ext/optionals.libsonnet';
-local secrets = import 'ext/secrets.libsonnet';
-local k8s = import 'utils/k8s.libsonnet';
-
-local new_kube_sd_config = import './kube_sd_config.libsonnet';
-local new_relabel_config = import './relabel_config.libsonnet';
-local new_safe_tls_config = import './safe_tls_config.libsonnet';
-
-// Generates a scrape_config from a PodMonitor.
-//
-// @param {string} agentNamespace - Namespace the GrafanaAgent CR is in.
-// @param {PodMonitor} monitor
-// @param {PodMetricsEndpoint} endpoint - endpoint within the monitor
-// @param {number} index - index of the endpoint
-// @param {APIServerConfig} apiServer
-// @param {boolean} overrideHonorLabels
-// @param {boolean} overrideHonorTimestamps
-// @param {boolean} ignoreNamespaceSelectors
-// @param {string} enforcedNamespaceLabel
-// @param {*number} enforcedSampleLimit
-// @param {*number} enforcedTargetLimit
-// @param {number} shards
-function(
- agentNamespace,
- monitor,
- endpoint,
- index,
- apiServer,
- overrideHonorLabels,
- overrideHonorTimestamps,
- ignoreNamespaceSelectors,
- enforcedNamespaceLabel,
- enforcedSampleLimit,
- enforcedTargetLimit,
- shards,
-) {
- local meta = monitor.ObjectMeta,
-
- job_name: 'podMonitor/%s/%s/%d' % [meta.Namespace, meta.Name, index],
- honor_labels: k8s.honorLabels(endpoint.HonorLabels, overrideHonorLabels),
-
- // We only want to provide honorTimestamps in the file when it's not null.
- honor_timestamps:
- local honor = k8s.honorTimestamps(endpoint.HonorTimestamps, overrideHonorTimestamps);
- if honor != null then honor,
-
- kubernetes_sd_configs: [
- new_kube_sd_config(
- namespace=agentNamespace,
- namespaces=k8s.namespacesFromSelector(
- monitor.Spec.NamespaceSelector,
- meta.Namespace,
- ignoreNamespaceSelectors,
- ),
- apiServer=apiServer,
- role='pod',
- ),
- ],
-
- scrape_interval: optionals.string(endpoint.Interval),
- scrape_timeout: optionals.string(endpoint.ScrapeTimeout),
- metrics_path: optionals.string(endpoint.Path),
- proxy_url: optionals.string(endpoint.ProxyURL),
- params: optionals.object(endpoint.Params),
- scheme: optionals.string(endpoint.Scheme),
- enable_http2: optionals.bool(endpoint.EnableHttp2, true),
-
- // NOTE(rfratto): unlike ServiceMonitor, pod monitors explicitly use
- // SafeTLSConfig.
- tls_config:
- if endpoint.TLSConfig != null then new_safe_tls_config(meta.Namespace, endpoint.TLSConfig.SafeTLSConfig),
- bearer_token:
- if endpoint.BearerTokenSecret.LocalObjectReference.Name != ''
- then secrets.valueForSecret(meta.Namespace, endpoint.BearerTokenSecret),
-
- basic_auth: if endpoint.BasicAuth != null then {
- username: secrets.valueForSecret(meta.Namespace, endpoint.BasicAuth.Username),
- password: secrets.valueForSecret(meta.Namespace, endpoint.BasicAuth.Password),
- },
-
- relabel_configs: (
- [{ source_labels: ['job'], target_label: '__tmp_prometheus_job_name' }] +
-
- (if endpoint.FilterRunning != null && endpoint.FilterRunning then [{
- source_labels: ['__meta_kubernetes_pod_phase'],
- regex: '(Failed|Succeeded)',
- action: 'drop',
- }] else [] ) +
-
- // Match on pod labels.
- std.map(
- function(k) {
- source_labels: ['__meta_kubernetes_pod_label_' + k8s.sanitize(k)],
- regex: monitor.Spec.Selector.MatchLabels[k],
- action: 'keep',
- },
- // Keep the output consistent by sorting the keys first.
- std.sort(std.objectFields(
- if monitor.Spec.Selector.MatchLabels != null
- then monitor.Spec.Selector.MatchLabels
- else {}
- )),
- ) +
-
- // Set-based label matching. We have to map the valid relations
- // `In`, `NotIn`, `Exists`, and `DoesNotExist` into relabeling rules.
- std.map(
- function(exp) (
- if exp.Operator == 'In' then {
- source_labels: ['__meta_kubernetes_pod_label_' + k8s.sanitize(exp.Key)],
- regex: std.join('|', exp.Values),
- action: 'keep',
- } else if exp.Operator == 'NotIn' then {
- source_labels: ['__meta_kubernetes_pod_label_' + k8s.sanitize(exp.Key)],
- regex: std.join('|', exp.Values),
- action: 'drop',
- } else if exp.Operator == 'Exists' then {
- source_labels: ['__meta_kubernetes_pod_labelpresent_' + k8s.sanitize(exp.Key)],
- regex: 'true',
- action: 'keep',
- } else if exp.Operator == 'DoesNotExist' then {
- source_labels: ['__meta_kubernetes_pod_labelpresent_' + k8s.sanitize(exp.Key)],
- regex: 'true',
- action: 'drop',
- }
- ),
- k8s.array(monitor.Spec.Selector.MatchExpressions),
- ) +
-
- // Filter targets based on the correct port for the endpoint. If ep.Port,
- // ep.TargetPort.StrVal, or ep.TargetPort.IntVal aren't set, then
- // we'll have a null relabel_configs, which will be filtered out.
- std.filter(function(element) element != null, [
- if endpoint.Port != '' then {
- source_labels: ['__meta_kubernetes_pod_container_port_name'],
- regex: endpoint.Port,
- action: 'keep',
- } else if endpoint.TargetPort != null then (
- if endpoint.TargetPort.StrVal != '' then {
- source_labels: ['__meta_kubernetes_pod_container_port_name'],
- regex: endpoint.TargetPort.StrVal,
- action: 'keep',
- } else if endpoint.TargetPort.IntVal != 0 then {
- source_labels: ['__meta_kubernetes_pod_container_port_number'],
- regex: std.toString(endpoint.TargetPort.IntVal),
- action: 'keep',
- }
- ),
- ]) +
-
- // Relabel namespace, pod, and service metalabels into proper labels.
- [{
- source_labels: ['__meta_kubernetes_namespace'],
- target_label: 'namespace',
- }, {
- source_labels: ['__meta_kubernetes_service_name'],
- target_label: 'service',
- }, {
- source_labels: ['__meta_kubernetes_pod_name'],
- target_label: 'pod',
- }, {
- source_labels: ['__meta_kubernetes_pod_container_name'],
- target_label: 'container',
- }] +
-
- // Relabel targetLabels from the pod onto the target.
- std.map(
- function(l) {
- source_labels: ['__meta_kubernetes_pod_label_' + k8s.sanitize(l)],
- target_label: k8s.sanitize(l),
- regex: '(.+)',
- replacement: '$1',
- },
- k8s.array(monitor.Spec.PodTargetLabels)
- ) +
-
- // By default, generate a safe job name from the service name. We also keep
- // this around if a jobLabel is set just in case targets don't actually have
- // a value for it. A single service may potentially have multiple metrics
- // endpoints, therefore the endpoint label is filled with the port's name
- // or (as a fallback) the port number.
- std.filter(function(e) e != null, [
- {
- target_label: 'job',
- replacement: '%s/%s' % [meta.Namespace, meta.Name],
- },
- if monitor.Spec.JobLabel != '' then {
- source_labels: ['__meta_kubernetes_pod_label_' + k8s.sanitize(monitor.Spec.JobLabel)],
- target_label: 'job',
- regex: '(.+)',
- replacement: '$1',
- },
- ]) +
-
- std.filter(function(e) e != null, [
- if endpoint.Port != '' then {
- target_label: 'endpoint',
- replacement: endpoint.Port,
- } else if k8s.intOrString(endpoint.TargetPort) != '' then {
- target_label: 'endpoint',
- replacement: k8s.intOrString(endpoint.TargetPort),
- },
- ]) +
-
- std.map(
- function(c) new_relabel_config(c),
- k8s.array(endpoint.RelabelConfigs),
- ) +
-
- // Because of security risks, whenever enforcedNamespaceLabel is set,
- // we want to append it to the relabel_configs as the last relabeling to
- // ensure it overrides all other relabelings.
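The ordering in the filter that follows matters: Prometheus applies relabelings sequentially, so appending the enforced-namespace rule after all user-supplied rules lets it overwrite anything set earlier. A standalone sketch of that append, with hypothetical inputs:

    // Append the enforced-namespace rule last so it always wins; an empty
    // label name disables enforcement entirely.
    local enforceNamespace(relabels, label, ns) =
      relabels +
      (if label != '' then [{ target_label: label, replacement: ns }] else []);

    enforceNamespace(
      [{ source_labels: ['team'], target_label: 'ns', replacement: 'spoofed' }],
      'ns',
      'operator',
    )
    // The appended rule rewrites 'ns' to 'operator', overriding the
    // spoofed value from the user-supplied relabeling before it.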
- std.filter(function(e) e != null, [
- if enforcedNamespaceLabel != '' then {
- target_label: enforcedNamespaceLabel,
- replacement: monitor.ObjectMeta.Namespace,
- },
-
- // Shard rules
- {
- source_labels: ['__address__'],
- target_label: '__tmp_hash',
- modulus: shards,
- action: 'hashmod',
- },
- {
- source_labels: ['__tmp_hash'],
- regex: '$(SHARD)',
- action: 'keep',
- },
- ])
- ),
-
- metric_relabel_configs: if endpoint.MetricRelabelConfigs != null then optionals.array(
- std.filterMap(
- function(c) !(c.TargetLabel != '' && enforcedNamespaceLabel != '' && c.TargetLabel == enforcedNamespaceLabel),
- function(c) new_relabel_config(c),
- k8s.array(endpoint.MetricRelabelConfigs),
- )
- ),
-
- sample_limit:
- if monitor.Spec.SampleLimit > 0 || enforcedSampleLimit != null
- then k8s.limit(monitor.Spec.SampleLimit, enforcedSampleLimit),
- target_limit:
- if monitor.Spec.TargetLimit > 0 || enforcedTargetLimit != null
- then k8s.limit(monitor.Spec.TargetLimit, enforcedTargetLimit),
-}
diff --git a/internal/static/operator/config/templates/component/metrics/probe.libsonnet b/internal/static/operator/config/templates/component/metrics/probe.libsonnet
deleted file mode 100644
index 833b3c55ef..0000000000
--- a/internal/static/operator/config/templates/component/metrics/probe.libsonnet
+++ /dev/null
@@ -1,216 +0,0 @@
-local optionals = import 'ext/optionals.libsonnet';
-local secrets = import 'ext/secrets.libsonnet';
-local k8s = import 'utils/k8s.libsonnet';
-
-local new_kube_sd_config = import './kube_sd_config.libsonnet';
-local new_relabel_config = import './relabel_config.libsonnet';
-local new_tls_config = import './tls_config.libsonnet';
-
-// Generates a scrape_config from a Probe.
-//
-// @param {string} agentNamespace - Namespace the GrafanaAgent CR is in.
-// @param {Probe} probe
-// @param {APIServerConfig} apiServer
-// @param {boolean} overrideHonorTimestamps
-// @param {boolean} ignoreNamespaceSelectors
-// @param {string} enforcedNamespaceLabel
-// @param {*number} enforcedSampleLimit
-// @param {*number} enforcedTargetLimit
-// @param {number} shards
-function(
- agentNamespace,
- probe,
- apiServer,
- overrideHonorTimestamps,
- ignoreNamespaceSelectors,
- enforcedNamespaceLabel,
- enforcedSampleLimit,
- enforcedTargetLimit,
- shards,
-) {
- local meta = probe.ObjectMeta,
-
- job_name: 'probe/%s/%s' % [meta.Namespace, meta.Name],
-
- honor_timestamps:
- local honor = k8s.honorTimestamps(true, overrideHonorTimestamps);
- if honor != null then honor,
-
- local path =
- if probe.Spec.ProberSpec.Path == ''
- then '/probe'
- else probe.Spec.ProberSpec.Path,
- metrics_path: path,
-
- scrape_interval: optionals.string(probe.Spec.Interval),
- scrape_timeout: optionals.string(probe.Spec.ScrapeTimeout),
- scheme: optionals.string(probe.Spec.ProberSpec.Scheme),
- params: {
- module: [probe.Spec.Module],
- },
-
- tls_config:
- if probe.Spec.TLSConfig != null then new_tls_config(meta.Namespace, probe.Spec.TLSConfig),
- bearer_token:
- if probe.Spec.BearerTokenSecret.LocalObjectReference.Name != ''
- then secrets.valueForSecret(meta.Namespace, probe.Spec.BearerTokenSecret),
-
- basic_auth: if probe.Spec.BasicAuth != null then {
- username: secrets.valueForSecret(meta.Namespace, probe.Spec.BasicAuth.Username),
- password: secrets.valueForSecret(meta.Namespace, probe.Spec.BasicAuth.Password),
- },
-
- // Generate static_configs section if StaticConfig is provided.
- static_configs: optionals.array(if probe.Spec.Targets.StaticConfig != null then [{
- targets: probe.Spec.Targets.StaticConfig.Targets,
- labels: (
- if probe.Spec.Targets.StaticConfig.Labels != null
- then probe.Spec.Targets.StaticConfig.Labels {
- namespace: meta.Namespace,
- }
- else { namespace: meta.Namespace }
- ),
- }]),
-
- // Generate kubernetes_sd_configs section if StaticConfig is *not* provided.
- kubernetes_sd_configs: optionals.array(if probe.Spec.Targets.StaticConfig == null then [
- new_kube_sd_config(
- namespace=agentNamespace,
- namespaces=k8s.namespacesFromSelector(
- probe.Spec.Targets.Ingress.NamespaceSelector,
- meta.Namespace,
- ignoreNamespaceSelectors,
- ),
- apiServer=apiServer,
- role='ingress',
- ),
- ]),
-
- relabel_configs: (
- [{ source_labels: ['job'], target_label: '__tmp_prometheus_job_name' }] +
-
- std.filter(function(e) e != null, [
- if probe.Spec.JobName != '' then {
- target_label: 'job',
- replacement: probe.Spec.JobName,
- },
- ]) +
-
- // Relabelings for static_config.
- k8s.array(
- if probe.Spec.Targets.StaticConfig != null then
- [{
- source_labels: ['__address__'],
- target_label: '__param_target',
- }, {
- source_labels: ['__param_target'],
- target_label: 'instance',
- }, {
- target_label: '__address__',
- replacement: probe.Spec.ProberSpec.URL,
- }] +
-
- // Add configured relabelings
- std.map(
- function(r) new_relabel_config(r),
- k8s.array(probe.Spec.Targets.StaticConfig.RelabelConfigs),
- )
- ) +
-
- // Relabelings for kubernetes_sd_config.
- k8s.array(
- if probe.Spec.Targets.StaticConfig == null then
- // Match on ingress labels.
- std.map(
- function(k) {
- source_labels: ['__meta_kubernetes_ingress_label_' + k8s.sanitize(k)],
- regex: probe.Spec.Targets.Ingress.Selector.MatchLabels[k],
- action: 'keep',
- },
- // Keep the output consistent by sorting the keys first.
- std.sort(std.objectFields(
- if probe.Spec.Targets.Ingress.Selector.MatchLabels != null
- then probe.Spec.Targets.Ingress.Selector.MatchLabels
- else {}
- )),
- ) +
-
- // Set-based label matching. We have to map the valid relations
- // `In`, `NotIn`, `Exists`, and `DoesNotExist` into relabeling rules.
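The four-way branch that follows is the same pattern every monitor template in this directory uses; condensed into a standalone sketch (ingress meta labels here, and with the k8s.sanitize key-mangling elided for brevity), it evaluates to one relabel rule per match expression:

    // In/NotIn test the label value; Exists/DoesNotExist test the
    // *labelpresent* meta label Kubernetes SD exposes for every label key.
    local exprRule(exp) =
      local label = '__meta_kubernetes_ingress_label_' + exp.Key;
      local present = '__meta_kubernetes_ingress_labelpresent_' + exp.Key;
      if exp.Operator == 'In' then
        { source_labels: [label], regex: std.join('|', exp.Values), action: 'keep' }
      else if exp.Operator == 'NotIn' then
        { source_labels: [label], regex: std.join('|', exp.Values), action: 'drop' }
      else if exp.Operator == 'Exists' then
        { source_labels: [present], regex: 'true', action: 'keep' }
      else
        { source_labels: [present], regex: 'true', action: 'drop' };

    std.map(exprRule, [
      { Operator: 'In', Key: 'env', Values: ['prod', 'canary'] },
      { Operator: 'DoesNotExist', Key: 'skip', Values: [] },
    ])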
- std.map(
- function(exp) (
- if exp.Operator == 'In' then {
- source_labels: ['__meta_kubernetes_ingress_label_' + k8s.sanitize(exp.Key)],
- regex: std.join('|', exp.Values),
- action: 'keep',
- } else if exp.Operator == 'NotIn' then {
- source_labels: ['__meta_kubernetes_ingress_label_' + k8s.sanitize(exp.Key)],
- regex: std.join('|', exp.Values),
- action: 'drop',
- } else if exp.Operator == 'Exists' then {
- source_labels: ['__meta_kubernetes_ingress_labelpresent_' + k8s.sanitize(exp.Key)],
- regex: 'true',
- action: 'keep',
- } else if exp.Operator == 'DoesNotExist' then {
- source_labels: ['__meta_kubernetes_ingress_labelpresent_' + k8s.sanitize(exp.Key)],
- regex: 'true',
- action: 'drop',
- }
- ),
- k8s.array(probe.Spec.Targets.Ingress.Selector.MatchExpressions),
- ) +
-
- // Relabelings for ingress SD
- [
- {
- source_labels: [
- '__meta_kubernetes_ingress_scheme',
- '__address__',
- '__meta_kubernetes_ingress_path',
- ],
- separator: ';',
- regex: '(.+);(.+);(.+)',
- target_label: '__param_target',
- replacement: '$1://$2$3',
- action: 'replace',
- },
- {
- source_labels: ['__meta_kubernetes_namespace'],
- target_label: 'namespace',
- },
- {
- source_labels: ['__meta_kubernetes_ingress_name'],
- target_label: 'ingress',
- },
- ] +
-
- // Relabelings for the prober
- [
- {
- source_labels: ['__param_target'],
- target_label: 'instance',
- },
- {
- target_label: '__address__',
- replacement: probe.Spec.ProberSpec.URL,
- },
- ] +
-
- // Add configured relabelings.
- std.map(
- function(r) new_relabel_config(r),
- k8s.array(probe.Spec.Targets.Ingress.RelabelConfigs),
- )
- ) +
-
- // Because of security risks, whenever enforcedNamespaceLabel is set,
- // we want to append it to the relabel_configs as the last relabeling to
- // ensure it overrides all other relabelings.
- std.filter(function(e) e != null, [
- if enforcedNamespaceLabel != '' then {
- target_label: enforcedNamespaceLabel,
- replacement: probe.ObjectMeta.Namespace,
- },
- ])
- ),
-}
diff --git a/internal/static/operator/config/templates/component/metrics/relabel_config.libsonnet b/internal/static/operator/config/templates/component/metrics/relabel_config.libsonnet
deleted file mode 100644
index ff82d9f870..0000000000
--- a/internal/static/operator/config/templates/component/metrics/relabel_config.libsonnet
+++ /dev/null
@@ -1,12 +0,0 @@
-local optionals = import 'ext/optionals.libsonnet';
-
-// @param {RelabelConfig} cfg
-function(cfg) {
- source_labels: optionals.array(cfg.SourceLabels),
- separator: optionals.string(cfg.Separator),
- regex: optionals.string(cfg.Regex),
- modulus: optionals.number(cfg.Modulus),
- target_label: optionals.string(cfg.TargetLabel),
- replacement: optionals.string(cfg.Replacement),
- action: optionals.string(cfg.Action),
-}
diff --git a/internal/static/operator/config/templates/component/metrics/remote_write.libsonnet b/internal/static/operator/config/templates/component/metrics/remote_write.libsonnet
deleted file mode 100644
index 6a1bf9453b..0000000000
--- a/internal/static/operator/config/templates/component/metrics/remote_write.libsonnet
+++ /dev/null
@@ -1,87 +0,0 @@
-local optionals = import 'ext/optionals.libsonnet';
-local secrets = import 'ext/secrets.libsonnet';
-
-local new_relabel_config = import './relabel_config.libsonnet';
-local new_tls_config = import './tls_config.libsonnet';
-
-// Generates the contents of a remote_write object.
-//
-// @param {string} namespace - namespace of the RemoteWriteSpec.
-// @param {RemoteWriteSpec} rw -function(namespace, rw) { - // TODO(rfratto): follow_redirects - // TODO(rfratto): retry_on_http_429, currently experimental - - url: rw.URL, - name: optionals.string(rw.Name), - remote_timeout: optionals.string(rw.RemoteTimeout), - headers: optionals.object(rw.Headers), - proxy_url: optionals.string(rw.ProxyURL), - - write_relabel_configs: optionals.array(std.map( - new_relabel_config, - rw.WriteRelabelConfigs, - )), - - tls_config: ( - if rw.TLSConfig != null then - new_tls_config(namespace, rw.TLSConfig) - ), - - basic_auth: ( - if rw.BasicAuth != null then { - username: secrets.valueForSecret(namespace, rw.BasicAuth.Username), - password_file: secrets.pathForSecret(namespace, rw.BasicAuth.Password), - } - ), - oauth2: ( - if rw.OAuth2 != null then { - // TODO: client_id can also be stored in a config map: - // secrets.valueForConfigMap(namespace, rw.OAuth2.ClientID.ConfigMap), - local client_id = secrets.valueForSecret(namespace, rw.OAuth2.ClientID.Secret), - - client_id: client_id, - client_secret_file: secrets.pathForSecret(namespace, rw.OAuth2.ClientSecret), - endpoint_params: rw.OAuth2.EndpointParams, - scopes: rw.OAuth2.Scopes, - token_url: rw.OAuth2.TokenURL, - } - ), - local bearerToken = optionals.string(rw.BearerToken), - local bearerTokenFile = optionals.string(rw.BearerTokenFile), - - authorization: if bearerToken != null || bearerTokenFile != null then { - type: 'Bearer', - credentials: bearerToken, - credentials_file: bearerTokenFile, - }, - - sigv4: ( - if rw.SigV4 != null then { - region: optionals.string(rw.SigV4.Region), - profile: optionals.string(rw.SigV4.Profile), - role_arn: optionals.string(rw.SigV4.RoleARN), - access_key: secrets.valueForSecret(namespace, rw.SigV4.AccessKey), - secret_key: secrets.valueForSecret(namespace, rw.SigV4.SecretKey), - } - ), - - queue_config: ( - if rw.QueueConfig != null then { - capacity: optionals.number(rw.QueueConfig.Capacity), - max_shards: optionals.number(rw.QueueConfig.MaxShards), - min_shards: optionals.number(rw.QueueConfig.MinShards), - max_samples_per_send: optionals.number(rw.QueueConfig.MaxSamplesPerSend), - batch_send_deadline: optionals.string(rw.QueueConfig.BatchSendDeadline), - min_backoff: optionals.string(rw.QueueConfig.MinBackoff), - max_backoff: optionals.string(rw.QueueConfig.MaxBackoff), - } - ), - - metadata_config: ( - if rw.MetadataConfig != null then { - send: rw.MetadataConfig.Send, - send_interval: optionals.string(rw.MetadataConfig.SendInterval), - } - ), -} diff --git a/internal/static/operator/config/templates/component/metrics/safe_tls_config.libsonnet b/internal/static/operator/config/templates/component/metrics/safe_tls_config.libsonnet deleted file mode 100644 index 05e6eb9ce8..0000000000 --- a/internal/static/operator/config/templates/component/metrics/safe_tls_config.libsonnet +++ /dev/null @@ -1,13 +0,0 @@ -local optionals = import 'ext/optionals.libsonnet'; -local secrets = import 'ext/secrets.libsonnet'; - -// @param {string} namespace -// @param {SafeTLSConfig} config -function(namespace, config) { - ca_file: secrets.pathForSelector(namespace, config.CA), - cert_file: secrets.pathForSelector(namespace, config.Cert), - key_file: secrets.pathForSecret(namespace, config.KeySecret), - - server_name: optionals.string(config.ServerName), - insecure_skip_verify: optionals.bool(config.InsecureSkipVerify), -} diff --git a/internal/static/operator/config/templates/component/metrics/service_monitor.libsonnet 
b/internal/static/operator/config/templates/component/metrics/service_monitor.libsonnet
deleted file mode 100644
index c5c4966471..0000000000
--- a/internal/static/operator/config/templates/component/metrics/service_monitor.libsonnet
+++ /dev/null
@@ -1,277 +0,0 @@
-local optionals = import 'ext/optionals.libsonnet';
-local secrets = import 'ext/secrets.libsonnet';
-local k8s = import 'utils/k8s.libsonnet';
-
-local new_kube_sd_config = import './kube_sd_config.libsonnet';
-local new_relabel_config = import './relabel_config.libsonnet';
-local new_tls_config = import './tls_config.libsonnet';
-
-// Generates a scrape_config from a ServiceMonitor.
-//
-// @param {string} agentNamespace - Namespace the GrafanaAgent CR is in.
-// @param {ServiceMonitor} monitor
-// @param {Endpoint} endpoint - endpoint within the monitor
-// @param {number} index - index of the endpoint
-// @param {APIServerConfig} apiServer
-// @param {boolean} overrideHonorLabels
-// @param {boolean} overrideHonorTimestamps
-// @param {boolean} ignoreNamespaceSelectors
-// @param {string} enforcedNamespaceLabel
-// @param {*number} enforcedSampleLimit
-// @param {*number} enforcedTargetLimit
-// @param {number} shards
-function(
-  agentNamespace,
-  monitor,
-  endpoint,
-  index,
-  apiServer,
-  overrideHonorLabels,
-  overrideHonorTimestamps,
-  ignoreNamespaceSelectors,
-  enforcedNamespaceLabel,
-  enforcedSampleLimit,
-  enforcedTargetLimit,
-  shards,
-) {
-  local meta = monitor.ObjectMeta,
-
-  job_name: 'serviceMonitor/%s/%s/%d' % [meta.Namespace, meta.Name, index],
-  honor_labels: k8s.honorLabels(endpoint.HonorLabels, overrideHonorLabels),
-
-  // We only want to provide honorTimestamps in the file when it's not null.
-  honor_timestamps:
-    local honor = k8s.honorTimestamps(endpoint.HonorTimestamps, overrideHonorTimestamps);
-    if honor != null then honor,
-
-  kubernetes_sd_configs: [
-    new_kube_sd_config(
-      namespace=agentNamespace,
-      namespaces=k8s.namespacesFromSelector(
-        monitor.Spec.NamespaceSelector,
-        meta.Namespace,
-        ignoreNamespaceSelectors,
-      ),
-      apiServer=apiServer,
-      role='endpoints',
-    ),
-  ],
-
-  scrape_interval: optionals.string(endpoint.Interval),
-  scrape_timeout: optionals.string(endpoint.ScrapeTimeout),
-  metrics_path: optionals.string(endpoint.Path),
-  proxy_url: optionals.string(endpoint.ProxyURL),
-  params: optionals.object(endpoint.Params),
-  scheme: optionals.string(endpoint.Scheme),
-  enable_http2: optionals.bool(endpoint.EnableHttp2, true),
-
-  tls_config:
-    if endpoint.TLSConfig != null then new_tls_config(meta.Namespace, endpoint.TLSConfig),
-  bearer_token_file: optionals.string(endpoint.BearerTokenFile),
-  bearer_token:
-    if endpoint.BearerTokenSecret.LocalObjectReference.Name != ''
-    then secrets.valueForSecret(meta.Namespace, endpoint.BearerTokenSecret),
-
-  basic_auth: if endpoint.BasicAuth != null then {
-    username: secrets.valueForSecret(meta.Namespace, endpoint.BasicAuth.Username),
-    password: secrets.valueForSecret(meta.Namespace, endpoint.BasicAuth.Password),
-  },
-
-  relabel_configs: (
-    [{ source_labels: ['job'], target_label: '__tmp_prometheus_job_name' }] +
-
-    // Match on service labels.
-    std.map(
-      function(k) {
-        source_labels: ['__meta_kubernetes_service_label_' + k8s.sanitize(k)],
-        regex: monitor.Spec.Selector.MatchLabels[k],
-        action: 'keep',
-      },
-      // Keep the output consistent by sorting the keys first.
-      std.sort(std.objectFields(
-        if monitor.Spec.Selector.MatchLabels != null
-        then monitor.Spec.Selector.MatchLabels
-        else {}
-      )),
-    ) +
-
-    // Set-based label matching. We have to map the valid relations
-    // `In`, `NotIn`, `Exists`, and `DoesNotExist` into relabeling rules.
-    std.map(
-      function(exp) (
-        if exp.Operator == 'In' then {
-          source_labels: ['__meta_kubernetes_service_label_' + k8s.sanitize(exp.Key)],
-          regex: std.join('|', exp.Values),
-          action: 'keep',
-        } else if exp.Operator == 'NotIn' then {
-          source_labels: ['__meta_kubernetes_service_label_' + k8s.sanitize(exp.Key)],
-          regex: std.join('|', exp.Values),
-          action: 'drop',
-        } else if exp.Operator == 'Exists' then {
-          source_labels: ['__meta_kubernetes_service_labelpresent_' + k8s.sanitize(exp.Key)],
-          regex: 'true',
-          action: 'keep',
-        } else if exp.Operator == 'DoesNotExist' then {
-          source_labels: ['__meta_kubernetes_service_labelpresent_' + k8s.sanitize(exp.Key)],
-          regex: 'true',
-          action: 'drop',
-        }
-      ),
-      k8s.array(monitor.Spec.Selector.MatchExpressions),
-    ) +
-
-    // Filter targets based on correct port for the endpoint. If ep.Port,
-    // ep.TargetPort.StrVal, or ep.TargetPort.IntVal aren't set, then
-    // we'll have a null relabel_configs, which will be filtered out.
-    //
-    // We do this to avoid having an array with a null element inside of it.
-    std.filter(function(element) element != null, [
-      if endpoint.Port != '' then {
-        source_labels: ['__meta_kubernetes_endpoint_port_name'],
-        regex: endpoint.Port,
-        action: 'keep',
-      } else if endpoint.TargetPort != null then (
-        if endpoint.TargetPort.StrVal != '' then {
-          source_labels: ['__meta_kubernetes_pod_container_port_name'],
-          regex: endpoint.TargetPort.StrVal,
-          action: 'keep',
-        } else if endpoint.TargetPort.IntVal != 0 then {
-          source_labels: ['__meta_kubernetes_pod_container_port_number'],
-          regex: std.toString(endpoint.TargetPort.IntVal),
-          action: 'keep',
-        }
-      ),
-    ]) +
-
-    // Relabel namespace, pod, and service metalabels into proper labels.
-    [{
-      source_labels: [
-        '__meta_kubernetes_endpoint_address_target_kind',
-        '__meta_kubernetes_endpoint_address_target_name',
-      ],
-      target_label: 'node',
-      separator: ';',
-      regex: 'Node;(.*)',
-      replacement: '$1',
-    }, {
-      source_labels: [
-        '__meta_kubernetes_endpoint_address_target_kind',
-        '__meta_kubernetes_endpoint_address_target_name',
-      ],
-      target_label: 'pod',
-      separator: ';',
-      regex: 'Pod;(.*)',
-      replacement: '$1',
-    }, {
-      source_labels: ['__meta_kubernetes_namespace'],
-      target_label: 'namespace',
    }, {
-      source_labels: ['__meta_kubernetes_service_name'],
-      target_label: 'service',
-    }, {
-      source_labels: ['__meta_kubernetes_pod_name'],
-      target_label: 'pod',
-    }, {
-      source_labels: ['__meta_kubernetes_pod_container_name'],
-      target_label: 'container',
-    }] +
-
-    (if endpoint.FilterRunning != null && endpoint.FilterRunning then [{
-      source_labels: ['__meta_kubernetes_pod_phase'],
-      regex: '(Failed|Succeeded)',
-      action: 'drop',
-    }] else []) +
-
-    // Relabel targetLabels from the service onto the target.
-    std.map(
-      function(l) {
-        source_labels: ['__meta_kubernetes_service_label_' + k8s.sanitize(l)],
-        target_label: k8s.sanitize(l),
-        regex: '(.+)',
-        replacement: '$1',
-      },
-      k8s.array(monitor.Spec.TargetLabels)
-    ) +
-    std.map(
-      function(l) {
-        source_labels: ['__meta_kubernetes_pod_label_' + k8s.sanitize(l)],
-        target_label: k8s.sanitize(l),
-        regex: '(.+)',
-        replacement: '$1',
-      },
-      k8s.array(monitor.Spec.PodTargetLabels)
-    ) +
-
-    // By default, generate a safe job name from the service name. We also keep
-    // this around if a jobLabel is set just in case targets don't actually have
-    // a value for it. A single service may potentially have multiple metrics
-    // endpoints, therefore the endpoint label is filled with the port's name
-    // or (as a fallback) the port number.
-    std.filter(function(e) e != null, [
-      {
-        source_labels: ['__meta_kubernetes_service_name'],
-        target_label: 'job',
-        replacement: '$1',
-      },
-      if monitor.Spec.JobLabel != '' then {
-        source_labels: ['__meta_kubernetes_service_label_' + k8s.sanitize(monitor.Spec.JobLabel)],
-        target_label: 'job',
-        regex: '(.+)',
-        replacement: '$1',
-      },
-    ]) +
-
-    std.filter(function(e) e != null, [
-      if endpoint.Port != '' then {
-        target_label: 'endpoint',
-        replacement: endpoint.Port,
-      } else if k8s.intOrString(endpoint.TargetPort) != '' then {
-        target_label: 'endpoint',
-        replacement: k8s.intOrString(endpoint.TargetPort),
-      },
-    ]) +
-
-    std.map(
-      function(c) new_relabel_config(c),
-      k8s.array(endpoint.RelabelConfigs),
-    ) +
-
-    // Because of security risks, whenever enforcedNamespaceLabel is set,
-    // we want to append it to the relabel_configs as the last relabeling to
-    // ensure it overrides all other relabelings.
-    std.filter(function(e) e != null, [
-      if enforcedNamespaceLabel != '' then {
-        target_label: enforcedNamespaceLabel,
-        replacement: monitor.ObjectMeta.Namespace,
-      },
-
-      // Shard rules
-      {
-        source_labels: ['__address__'],
-        target_label: '__tmp_hash',
-        modulus: shards,
-        action: 'hashmod',
-      },
-      {
-        source_labels: ['__tmp_hash'],
-        regex: '$(SHARD)',
-        action: 'keep',
-      },
-    ])
-  ),
-
-  metric_relabel_configs: if endpoint.MetricRelabelConfigs != null then optionals.array(
-    std.filterMap(
-      function(c) !(c.TargetLabel != '' && enforcedNamespaceLabel != '' && c.TargetLabel == enforcedNamespaceLabel),
-      function(c) new_relabel_config(c),
-      k8s.array(endpoint.MetricRelabelConfigs),
-    )
-  ),
-
-  sample_limit:
-    if monitor.Spec.SampleLimit > 0 || enforcedSampleLimit != null
-    then k8s.limit(monitor.Spec.SampleLimit, enforcedSampleLimit),
-  target_limit:
-    if monitor.Spec.TargetLimit > 0 || enforcedTargetLimit != null
-    then k8s.limit(monitor.Spec.TargetLimit, enforcedTargetLimit),
-}
diff --git a/internal/static/operator/config/templates/component/metrics/tls_config.libsonnet b/internal/static/operator/config/templates/component/metrics/tls_config.libsonnet
deleted file mode 100644
index e8f5ff09b2..0000000000
--- a/internal/static/operator/config/templates/component/metrics/tls_config.libsonnet
+++ /dev/null
@@ -1,28 +0,0 @@
-local optionals = import 'ext/optionals.libsonnet';
-local secrets = import 'ext/secrets.libsonnet';
-
-local new_safe_tls_config = import './safe_tls_config.libsonnet';
-
-// @param {string} namespace
-// @param {TLSConfig} config
-function(namespace, config) new_safe_tls_config(namespace, config.SafeTLSConfig) + {
-  // Local configurations for ca_file, cert_file, and key_file take precedence
-  // over the SafeTLSConfig. Check local settings first and then fall back
-  // to the safe settings.
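-  //
-  // A minimal sketch of the merge (hypothetical values, not from this file):
-  // if the SafeTLSConfig-derived object sets ca_file: '/safe/ca' and
-  // config.CAFile is '/local/ca', the ca_file field below resolves to
-  // '/local/ca', because the unsafe value wins over super.ca_file.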
- - local has_ca_file = std.objectHasAll(config, 'CAFile'), - local has_cert_file = std.objectHasAll(config, 'CertFile'), - local has_key_file = std.objectHasAll(config, 'KeyFile'), - - ca_file: - local unsafe = if has_ca_file then optionals.string(config.CAFile) else null; - if unsafe == null then super.ca_file else unsafe, - - cert_file: - local unsafe = if has_cert_file then optionals.string(config.CertFile) else null; - if unsafe == null then super.cert_file else unsafe, - - key_file: - local unsafe = if has_key_file then optionals.string(config.KeyFile) else null; - if unsafe == null then super.key_file else unsafe, -} diff --git a/internal/static/operator/config/templates/ext/marshal.libsonnet b/internal/static/operator/config/templates/ext/marshal.libsonnet deleted file mode 100644 index 3ad5106b6f..0000000000 --- a/internal/static/operator/config/templates/ext/marshal.libsonnet +++ /dev/null @@ -1,12 +0,0 @@ -{ - // YAML marshals object to YAML. - YAML(object):: std.native('marshalYAML')(object), - - // fromYAML unmarshals YAML text into an object. - fromYAML(text):: std.native('unmarshalYAML')(text), - - // intoStages unmarshals YAML text into []*PipelineStageSpec. - // This is required because the "match" stage from Promtail is - // recursive and you can't define recursive types in CRDs. - intoStages(text):: std.native('intoStages')(text), -} diff --git a/internal/static/operator/config/templates/ext/optionals.libsonnet b/internal/static/operator/config/templates/ext/optionals.libsonnet deleted file mode 100644 index 653f5e7c33..0000000000 --- a/internal/static/operator/config/templates/ext/optionals.libsonnet +++ /dev/null @@ -1,41 +0,0 @@ -{ - // string returns null if value is an empty length string, otherwise - // returns the input. - string(value):: - if value == null then null - else - assert std.isString(value) : 'string must only be called with strings'; - if std.length(value) == 0 then null else value, - - // number returns null if value is 0, otherwise returns value. - number(value):: - if value == null then null - else - assert std.isNumber(value) : 'number must only be called with numbers'; - if value == 0 then null else value, - - // bool returns a value only if the value is present, and not equal to the default. otherwise returns null. - bool(value, defaultValue = false):: - if value == null then null - else - assert std.isBoolean(value) : 'bool must only be called with booleans'; - if value == defaultValue then null else value, - - // object returns null if there are no keys in the object. - object(value):: - if value == null then null - else - assert std.isObject(value) : 'object must only be called with objects'; - if std.length(value) == 0 then null else value, - - // array returns null if there are no elements in the array. - array(value):: - if value == null then null - else - assert std.isArray(value) : 'array must only be called with arrays'; - if std.length(value) == 0 then null else value, - - // trim will recursively traverse through value and remove all fields - // from value that have a value of null. 
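-  // For example (a sketch, assuming trimOptional mirrors the Go trimMap
-  // helper shown later in this patch):
-  //   trim({ a: null, b: { c: null, d: 1 } }) == { b: { d: 1 } }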
-  trim(value):: std.native('trimOptional')(value),
-}
diff --git a/internal/static/operator/config/templates/ext/secrets.libsonnet b/internal/static/operator/config/templates/ext/secrets.libsonnet
deleted file mode 100644
index f3addad4a3..0000000000
--- a/internal/static/operator/config/templates/ext/secrets.libsonnet
+++ /dev/null
@@ -1,65 +0,0 @@
-// secrets.libsonnet provides utilities for interacting with secrets that are
-// both loaded into memory when building the configuration (for configuring
-// values that otherwise cannot be read from a file) and mounted into pods (for
-// values that *can* be read from a file.)
-//
-// Since type information is lost in the conversion to Jsonnet, we have to
-// specify if a selector is specifically for a secret, config map, or either.
-
-local keyValue(key) =
-  if key == null then null
-  else std.native('secretLookup')(key);
-
-local keyPath(key) =
-  if key == null then null
-  else std.native('secretPath')(key);
-
-// functions to get the key for a given selector.
-local keys = {
-  forSecret(namespace, selector)::
-    if selector == null then null
-    else '/secrets/%s/%s/%s' % [
-      namespace,
-      selector.LocalObjectReference.Name,
-      selector.Key,
-    ],
-
-  forConfigMap(namespace, selector)::
-    if selector == null then null
-    else '/configMaps/%s/%s/%s' % [
-      namespace,
-      selector.LocalObjectReference.Name,
-      selector.Key,
-    ],
-
-  forSelector(namespace, selector)::
-    if selector == null then null
-    else if selector.Secret != null then $.forSecret(namespace, selector.Secret)
-    else if selector.ConfigMap != null then $.forConfigMap(namespace, selector.ConfigMap),
-};
-
-{
-  // valueForSecret gets the cached value of a SecretKeySelector.
-  valueForSecret(namespace, selector)::
-    keyValue(keys.forSecret(namespace, selector)),
-
-  // valueForConfigMap gets the cached value of a ConfigMapKeySelector.
-  valueForConfigMap(namespace, selector)::
-    keyValue(keys.forConfigMap(namespace, selector)),
-
-  // valueForSelector gets the cached value of a SecretOrConfigMap.
-  valueForSelector(namespace, selector)::
-    keyValue(keys.forSelector(namespace, selector)),
-
-  // pathForSecret gets the path on disk for a SecretKeySelector.
-  pathForSecret(namespace, selector)::
-    keyPath(keys.forSecret(namespace, selector)),
-
-  // pathForConfigMap gets the path on disk for a ConfigMapKeySelector.
-  pathForConfigMap(namespace, selector)::
-    keyPath(keys.forConfigMap(namespace, selector)),
-
-  // pathForSelector gets the path on disk for a SecretOrConfigMap.
-  pathForSelector(namespace, selector)::
-    keyPath(keys.forSelector(namespace, selector)),
-}
diff --git a/internal/static/operator/config/templates/integrations.libsonnet b/internal/static/operator/config/templates/integrations.libsonnet
deleted file mode 100644
index e7cd060bab..0000000000
--- a/internal/static/operator/config/templates/integrations.libsonnet
+++ /dev/null
@@ -1,9 +0,0 @@
-// Generates an individual integration.
-//
-// @param {Integration} integration
-function(integration)
-  // integration.Spec.Config.Raw is a base64 JSON string holding the raw config
-  // for the integration.
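-  //
-  // Decode sketch with a hypothetical value: if Raw is
-  // std.base64('{"api_url": "http://localhost"}'), this function evaluates
-  // to { api_url: 'http://localhost' }.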
-  local raw = integration.Spec.Config.Raw;
-  if raw == null || std.length(raw) == 0 then {}
-  else std.parseJson(std.base64Decode(raw))
diff --git a/internal/static/operator/config/templates/logs.libsonnet b/internal/static/operator/config/templates/logs.libsonnet
deleted file mode 100644
index cb4e78a988..0000000000
--- a/internal/static/operator/config/templates/logs.libsonnet
+++ /dev/null
@@ -1,74 +0,0 @@
-local marshal = import 'ext/marshal.libsonnet';
-local optionals = import 'ext/optionals.libsonnet';
-local secrets = import 'ext/secrets.libsonnet';
-local k8s = import 'utils/k8s.libsonnet';
-
-local new_client = import 'component/logs/client.libsonnet';
-local new_pod_logs = import 'component/logs/pod_logs.libsonnet';
-
-// Generates a logs_instance.
-//
-// @param {GrafanaAgent} agent
-// @param {LogsSubsystemSpec} global - global logs settings & defaults
-// @param {LogsInstance} instance
-// @param {APIServerConfig} apiServer
-// @param {boolean} ignoreNamespaceSelectors
-// @param {string} enforcedNamespaceLabel
-function(
-  agent,
-  global,
-  instance,
-  apiServer,
-  ignoreNamespaceSelectors,
-  enforcedNamespaceLabel,
-) {
-  local agentNamespace = agent.ObjectMeta.Namespace,
-  local meta = instance.Instance.ObjectMeta,
-  local spec = instance.Instance.Spec,
-
-  name: '%s/%s' % [meta.Namespace, meta.Name],
-
-  // Figure out what set of clients to use and what namespace they're in.
-  // We'll only use the global set of clients if the local LogsInstance doesn't
-  // have a set of clients defined.
-  //
-  // Local clients come from the namespace of the LogsInstance and global
-  // clients from the Agent's namespace.
-  local clients =
-    if std.length(spec.Clients) != 0
-    then { ns: meta.Namespace, list: spec.Clients }
-    else { ns: agentNamespace, list: global.Clients },
-
-  clients: optionals.array(std.map(
-    function(spec) new_client(agent, clients.ns, spec),
-    clients.list,
-  )),
-
-  scrape_configs: optionals.array(
-    // Iterate over PodLogs. Each PodLogs resource converts into a
-    // single scrape_config.
-    std.map(
-      function(podLogs) new_pod_logs(
-        agentNamespace=agentNamespace,
-        podLogs=podLogs,
-        apiServer=apiServer,
-        ignoreNamespaceSelectors=ignoreNamespaceSelectors,
-        enforcedNamespaceLabel=enforcedNamespaceLabel,
-      ),
-      k8s.array(instance.PodLogs)
-    ) +
-
-    // If the user specified additional scrape configs, we need to extract
-    // their value from the secret and then unmarshal them into the array.
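-    //
-    // Sketch with a hypothetical secret value: if the referenced secret holds
-    //   "- job_name: extra\n  static_configs:\n  - targets: ['localhost:9090']"
-    // then fromYAML yields one extra scrape_config appended to the list.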
-    k8s.array(
-      if spec.AdditionalScrapeConfigs != null then (
-        local rawYAML = secrets.valueForSecret(meta.Namespace, spec.AdditionalScrapeConfigs);
-        marshal.fromYAML(rawYAML)
-      )
-    ),
-  ),
-
-  target_config: if spec.TargetConfig != null then {
-    sync_period: optionals.string(spec.TargetConfig.SyncPeriod),
-  },
-}
diff --git a/internal/static/operator/config/templates/metrics.libsonnet b/internal/static/operator/config/templates/metrics.libsonnet
deleted file mode 100644
index 424d60a7e4..0000000000
--- a/internal/static/operator/config/templates/metrics.libsonnet
+++ /dev/null
@@ -1,133 +0,0 @@
-local marshal = import 'ext/marshal.libsonnet';
-local optionals = import 'ext/optionals.libsonnet';
-local secrets = import 'ext/secrets.libsonnet';
-local k8s = import 'utils/k8s.libsonnet';
-
-local new_pod_monitor = import 'component/metrics/pod_monitor.libsonnet';
-local new_probe = import 'component/metrics/probe.libsonnet';
-local new_remote_write = import 'component/metrics/remote_write.libsonnet';
-local new_service_monitor = import 'component/metrics/service_monitor.libsonnet';
-
-// Generates a metrics_instance.
-//
-// @param {string} agentNamespace - namespace of the GrafanaAgent
-// @param {MetricsInstance} instance
-// @param {APIServerConfig} apiServer
-// @param {boolean} overrideHonorLabels
-// @param {boolean} overrideHonorTimestamps
-// @param {boolean} ignoreNamespaceSelectors
-// @param {string} enforcedNamespaceLabel
-// @param {*number} enforcedSampleLimit
-// @param {*number} enforcedTargetLimit
-// @param {number} shards
-function(
-  agentNamespace,
-  instance,
-  apiServer,
-  overrideHonorLabels,
-  overrideHonorTimestamps,
-  ignoreNamespaceSelectors,
-  enforcedNamespaceLabel,
-  enforcedSampleLimit,
-  enforcedTargetLimit,
-  shards,
-) {
-  local namespace = instance.Instance.ObjectMeta.Namespace,
-  local spec = instance.Instance.Spec,
-
-  name: '%s/%s' % [namespace, instance.Instance.ObjectMeta.Name],
-  wal_truncate_frequency: optionals.string(spec.WALTruncateFrequency),
-  min_wal_time: optionals.string(spec.MinWALTime),
-  max_wal_time: optionals.string(spec.MaxWALTime),
-  remote_flush_deadline: optionals.string(spec.RemoteFlushDeadline),
-
-  // WriteStaleOnShutdown is a *bool in the code. We need to check for null-ness here.
-  write_stale_on_shutdown:
-    if spec.WriteStaleOnShutdown != null then optionals.bool(spec.WriteStaleOnShutdown),
-
-  remote_write: optionals.array(std.map(
-    function(rw) new_remote_write(namespace, rw),
-    spec.RemoteWrite,
-  )),
-
-  // This is probably the most complicated code fragment in the whole Jsonnet
-  // codebase.
-  //
-  // We've pulled a set of ServiceMonitors, PodMonitors, and Probes.
-  // We need to iterate over all of these and convert them into scrape_configs.
-  scrape_configs: optionals.array(
-    // Iterate over ServiceMonitors. ServiceMonitors have a set of Endpoints,
-    // each of which should be its own scrape_config, so we have to do a nested
-    // iteration here.
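-    //
-    // Shape sketch (hypothetical ServiceMonitor 'ns/sm' with two endpoints):
-    // the flatMap below emits two scrape_configs whose job_name values are
-    // 'serviceMonitor/ns/sm/0' and 'serviceMonitor/ns/sm/1'.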
- std.flatMap( - function(sMon) std.mapWithIndex( - function(i, ep) new_service_monitor( - agentNamespace=agentNamespace, - monitor=sMon, - endpoint=ep, - index=i, - apiServer=apiServer, - overrideHonorLabels=overrideHonorLabels, - overrideHonorTimestamps=overrideHonorTimestamps, - ignoreNamespaceSelectors=ignoreNamespaceSelectors, - enforcedNamespaceLabel=enforcedNamespaceLabel, - enforcedSampleLimit=enforcedSampleLimit, - enforcedTargetLimit=enforcedTargetLimit, - shards=shards, - ), - k8s.array(sMon.Spec.Endpoints), - ), - k8s.array(instance.ServiceMonitors), - ) + - - // Iterate over PodMonitors. PodMonitors have a set of PodMetricsEndpoints, - // each of which should be its own scrape_configs, so we have to do a - // nested iteration here. - std.flatMap( - function(pMon) std.mapWithIndex( - function(i, ep) new_pod_monitor( - agentNamespace=agentNamespace, - monitor=pMon, - endpoint=ep, - index=i, - apiServer=apiServer, - overrideHonorLabels=overrideHonorLabels, - overrideHonorTimestamps=overrideHonorTimestamps, - ignoreNamespaceSelectors=ignoreNamespaceSelectors, - enforcedNamespaceLabel=enforcedNamespaceLabel, - enforcedSampleLimit=enforcedSampleLimit, - enforcedTargetLimit=enforcedTargetLimit, - shards=shards, - ), - k8s.array(pMon.Spec.PodMetricsEndpoints), - ), - k8s.array(instance.PodMonitors), - ) + - - // Iterate over Probes. Each probe only converts into one scrape_config. - std.map( - function(probe) new_probe( - agentNamespace=agentNamespace, - probe=probe, - apiServer=apiServer, - overrideHonorTimestamps=overrideHonorTimestamps, - ignoreNamespaceSelectors=ignoreNamespaceSelectors, - enforcedNamespaceLabel=enforcedNamespaceLabel, - enforcedSampleLimit=enforcedSampleLimit, - enforcedTargetLimit=enforcedTargetLimit, - shards=shards, - ), - k8s.array(instance.Probes), - ) + - - // Finally, if the user specified additional scrape configs, we need to - // extract their value from the secret and then unmarshal them into the - // array. - k8s.array( - if spec.AdditionalScrapeConfigs != null then ( - local rawYAML = secrets.valueForSecret(namespace, spec.AdditionalScrapeConfigs); - marshal.fromYAML(rawYAML) - ) - ), - ), -} diff --git a/internal/static/operator/config/templates/utils/k8s.libsonnet b/internal/static/operator/config/templates/utils/k8s.libsonnet deleted file mode 100644 index aa71d843ae..0000000000 --- a/internal/static/operator/config/templates/utils/k8s.libsonnet +++ /dev/null @@ -1,49 +0,0 @@ -{ - // honorLabels calculates the value for honor_labels based on the value - // for honor and override, both of which should be bools. - honorLabels(honor, override):: if honor && override then false else honor, - - // honorTimestamps returns a bool or a null based on the value of honor - // and override. honor should be either a bool or a null. override should be - // a bool. - honorTimestamps(honor, override):: - if honor == null && !override then null - else ( - local shouldHonor = if honor != null then honor else false; - shouldHonor && !override - ), - - // limit calculates a limit based on the user-provided limit and an optional - // enforced limit, which may be null. - limit(user, enforced):: - if enforced == null then user else ( - if (user < enforced) && (user != 0) && (enforced == 0) - then user - else enforced - ), - - // namespacesFromSelector returns a list of namespaces to select in - // kubernetes_sd_config based on the given NamespaceSelector selector, - // string namespace, and whether selectors should be ignored. 
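-  //
-  // For example, namespacesFromSelector({ Any: false, MatchNames: null }, 'team-a', false)
-  // yields ['team-a'], while a selector with Any == true yields [] (meaning
-  // all namespaces). Inputs here are hypothetical, shown only to illustrate.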
-  namespacesFromSelector(selector, namespace, ignoreSelectors)::
-    if ignoreSelectors then [namespace]
-    else if selector.Any == true then []
-    else if std.length($.array(selector.MatchNames)) == 0 then
-      // If no names are manually provided, then the default behavior is to only
-      // look in the current namespace.
-      [namespace]
-    else $.array(selector.MatchNames),
-
-  // sanitize sanitizes text for label safety.
-  sanitize(text):: std.native('sanitize')(text),
-
-  // intOrString returns the string value of *intstr.IntOrString.
-  intOrString(obj)::
-    if obj == null then ''
-    else if obj.StrVal != '' then obj.StrVal
-    else if obj.IntVal != 0 then std.toString(obj.IntVal)
-    else '',
-
-  // array treats val as a Go slice, where null is the same as an empty array.
-  array(val):: if val != null then val else [],
-}
diff --git a/internal/static/operator/config/utils.go b/internal/static/operator/config/utils.go
deleted file mode 100644
index 3d53474f89..0000000000
--- a/internal/static/operator/config/utils.go
+++ /dev/null
@@ -1,132 +0,0 @@
-package config
-
-import (
-    "encoding/json"
-    "fmt"
-    "regexp"
-
-    "github.com/fatih/structs"
-    jsonnet "github.com/google/go-jsonnet"
-    gragent "github.com/grafana/agent/internal/static/operator/apis/monitoring/v1alpha1"
-    "sigs.k8s.io/yaml"
-)
-
-func unmarshalYAML(i []interface{}) (interface{}, error) {
-    text, ok := i[0].(string)
-    if !ok {
-        return nil, jsonnet.RuntimeError{Msg: "unmarshalYAML text argument must be a string"}
-    }
-    var v interface{}
-    err := yaml.Unmarshal([]byte(text), &v)
-    if err != nil {
-        return nil, jsonnet.RuntimeError{Msg: err.Error()}
-    }
-    return v, nil
-}
-
-// trimMap recursively deletes fields from m whose value is nil.
-func trimMap(m map[string]interface{}) {
-    for k, v := range m {
-        if v == nil {
-            delete(m, k)
-            continue
-        }
-
-        if next, ok := v.(map[string]interface{}); ok {
-            trimMap(next)
-        }
-
-        if arr, ok := v.([]interface{}); ok {
-            m[k] = trimSlice(arr)
-        }
-    }
-}
-
-func trimSlice(s []interface{}) []interface{} {
-    res := make([]interface{}, 0, len(s))
-
-    for _, e := range s {
-        if e == nil {
-            continue
-        }
-
-        if next, ok := e.([]interface{}); ok {
-            e = trimSlice(next)
-        }
-
-        if next, ok := e.(map[string]interface{}); ok {
-            trimMap(next)
-        }
-
-        res = append(res, e)
-    }
-
-    return res
-}
-
-// intoStages converts a yaml slice of stages into a Jsonnet array.
-func intoStages(i []interface{}) (interface{}, error) {
-    text, ok := i[0].(string)
-    if !ok {
-        return nil, jsonnet.RuntimeError{Msg: "text argument not string"}
-    }
-
-    // The way this works is really, really gross. We only need any of this
-    // because Kubernetes CRDs can't recursively define types, which we need
-    // for the match stage.
-    //
-    // 1. Convert YAML -> map[string]interface{}
-    // 2. Convert map[string]interface{} -> JSON
-    // 3. Convert JSON -> []*grafana.PipelineStageSpec
-    // 4. Convert []*grafana.PipelineStageSpec into []interface{}, where
-    //    each interface{} has the type information lost so marshaling it
-    //    again to JSON doesn't break anything.
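-    //
-    // Rough shape of the round trip for a hypothetical one-stage input:
-    //
-    //	"- docker: {}" -> []interface{}{...} -> []*PipelineStageSpec -> []interface{}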
- var raw interface{} - if err := yaml.Unmarshal([]byte(text), &raw); err != nil { - return nil, jsonnet.RuntimeError{ - Msg: fmt.Sprintf("failed to unmarshal stages: %s", err.Error()), - } - } - - bb, err := json.Marshal(raw) - if err != nil { - return nil, jsonnet.RuntimeError{ - Msg: fmt.Sprintf("failed to unmarshal stages: %s", err.Error()), - } - } - - var ps []*gragent.PipelineStageSpec - if err := json.Unmarshal(bb, &ps); err != nil { - return nil, jsonnet.RuntimeError{ - Msg: fmt.Sprintf("failed to unmarshal stages: %s", err.Error()), - } - } - - // Then we need to convert each into their raw types. - rawPS := make([]interface{}, 0, len(ps)) - for _, stage := range ps { - bb, err := json.Marshal(structs.Map(stage)) - if err != nil { - return nil, jsonnet.RuntimeError{ - Msg: fmt.Sprintf("failed to unmarshal stages: %s", err.Error()), - } - } - - var v interface{} - if err := json.Unmarshal(bb, &v); err != nil { - return nil, jsonnet.RuntimeError{ - Msg: fmt.Sprintf("failed to unmarshal stages: %s", err.Error()), - } - } - - rawPS = append(rawPS, v) - } - return rawPS, nil -} - -var invalidLabelCharRE = regexp.MustCompile(`[^a-zA-Z0-9_]`) - -// SanitizeLabelName sanitizes a label name for Prometheus. -func SanitizeLabelName(name string) string { - return invalidLabelCharRE.ReplaceAllString(name, "_") -} diff --git a/internal/static/operator/config/utils_test.go b/internal/static/operator/config/utils_test.go deleted file mode 100644 index a514dc2d78..0000000000 --- a/internal/static/operator/config/utils_test.go +++ /dev/null @@ -1,22 +0,0 @@ -package config - -import ( - "testing" - - "github.com/stretchr/testify/require" - "gopkg.in/yaml.v2" -) - -func Test_unmarshalYAML(t *testing.T) { - in := ` -- a: 5 -` - - out, err := unmarshalYAML([]interface{}{in}) - require.NoError(t, err) - - bb, err := yaml.Marshal(out) - require.NoError(t, err) - - require.YAMLEq(t, in, string(bb)) -} diff --git a/internal/static/operator/defaults.go b/internal/static/operator/defaults.go deleted file mode 100644 index d62a46a1ec..0000000000 --- a/internal/static/operator/defaults.go +++ /dev/null @@ -1,15 +0,0 @@ -package operator - -// Supported versions of the Grafana Agent. -var ( - DefaultAgentVersion = "v0.40.0" - DefaultAgentBaseImage = "grafana/agent" - DefaultAgentImage = DefaultAgentBaseImage + ":" + DefaultAgentVersion -) - -// Defaults for Prometheus Config Reloader. -var ( - DefaultConfigReloaderVersion = "v0.67.1" - DefaultConfigReloaderBaseImage = "quay.io/prometheus-operator/prometheus-config-reloader" - DefaultConfigReloaderImage = DefaultConfigReloaderBaseImage + ":" + DefaultConfigReloaderVersion -) diff --git a/internal/static/operator/defaults.go.t b/internal/static/operator/defaults.go.t deleted file mode 100644 index fe5c2b2b70..0000000000 --- a/internal/static/operator/defaults.go.t +++ /dev/null @@ -1,15 +0,0 @@ -package operator - -// Supported versions of the Grafana Agent. -var ( - DefaultAgentVersion = "$AGENT_VERSION" - DefaultAgentBaseImage = "grafana/agent" - DefaultAgentImage = DefaultAgentBaseImage + ":" + DefaultAgentVersion -) - -// Defaults for Prometheus Config Reloader. 
-var (
-    DefaultConfigReloaderVersion   = "v0.67.1"
-    DefaultConfigReloaderBaseImage = "quay.io/prometheus-operator/prometheus-config-reloader"
-    DefaultConfigReloaderImage     = DefaultConfigReloaderBaseImage + ":" + DefaultConfigReloaderVersion
-)
diff --git a/internal/static/operator/hierarchy/hierarchy.go b/internal/static/operator/hierarchy/hierarchy.go
deleted file mode 100644
index 24019f24d0..0000000000
--- a/internal/static/operator/hierarchy/hierarchy.go
+++ /dev/null
@@ -1,149 +0,0 @@
-// Package hierarchy provides tools to discover a resource hierarchy. A
-// resource hierarchy is made when a resource has a set of rules to discover
-// other resources.
-package hierarchy
-
-import (
-    "context"
-    "fmt"
-    "sync"
-    "time"
-
-    "github.com/go-kit/log"
-    "github.com/go-kit/log/level"
-    "k8s.io/apimachinery/pkg/runtime/schema"
-    "k8s.io/client-go/util/workqueue"
-    "sigs.k8s.io/controller-runtime/pkg/client"
-    "sigs.k8s.io/controller-runtime/pkg/client/apiutil"
-    "sigs.k8s.io/controller-runtime/pkg/event"
-    "sigs.k8s.io/controller-runtime/pkg/handler"
-    "sigs.k8s.io/controller-runtime/pkg/reconcile"
-)
-
-// Notifier can be attached to a controller and generate reconciles when
-// objects inside of a resource hierarchy change.
-type Notifier struct {
-    log    log.Logger
-    client client.Client
-
-    watchersMut sync.RWMutex
-    watchers    map[schema.GroupVersionKind][]Watcher
-}
-
-// Watcher is something watching for changes to a resource.
-type Watcher struct {
-    Object   client.Object    // Object to watch for events against.
-    Owner    client.ObjectKey // Owner to receive a reconcile for.
-    Selector Selector         // Selector to use to match changed objects.
-}
-
-// NewNotifier creates a new Notifier which uses the provided client for
-// performing hierarchy lookups.
-func NewNotifier(l log.Logger, cli client.Client) *Notifier {
-    return &Notifier{
-        log:      l,
-        client:   cli,
-        watchers: make(map[schema.GroupVersionKind][]Watcher),
-    }
-}
-
-// EventHandler returns an event handler that can be given to
-// controller.Watches.
-//
-// controller.Watches should be called once per type in the resource hierarchy.
-// Each call to controller.Watches should use the same Notifier.
-func (n *Notifier) EventHandler() handler.EventHandler {
-    // TODO(rfratto): It's possible to create a custom implementation of
-    // source.Source so we wouldn't have to call controller.Watches a bunch of
-    // times. I played around a little with an implementation but it was going to
-    // be a lot of work to dynamically spin up/down informers, so I put it aside
-    // for now. Maybe it's an improvement for the future.
-    return &notifierEventHandler{Notifier: n}
-}
-
-// Notify configures reconciles to be generated for a set of watchers when
-// watched resources change.
-//
-// Notify appends to the list of watchers. To remove notifications for a
-// specific owner, call StopNotify.
-func (n *Notifier) Notify(watchers ...Watcher) error {
-    n.watchersMut.Lock()
-    defer n.watchersMut.Unlock()
-
-    for _, w := range watchers {
-        gvk, err := apiutil.GVKForObject(w.Object, n.client.Scheme())
-        if err != nil {
-            return fmt.Errorf("could not get GVK: %w", err)
-        }
-
-        n.watchers[gvk] = append(n.watchers[gvk], w)
-    }
-
-    return nil
-}
-
-// StopNotify removes all watches for a specific owner.
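-//
-// A minimal usage sketch (ownerKey and sel are hypothetical):
-//
-//	_ = n.Notify(Watcher{Object: &core_v1.Secret{}, Owner: ownerKey, Selector: sel})
-//	// ... later, when ownerKey's resource goes away:
-//	n.StopNotify(ownerKey)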
-func (n *Notifier) StopNotify(owner client.ObjectKey) { - n.watchersMut.Lock() - defer n.watchersMut.Unlock() - - for key, watchers := range n.watchers { - rem := make([]Watcher, 0, len(watchers)) - for _, w := range watchers { - if w.Owner != owner { - rem = append(rem, w) - } - } - n.watchers[key] = rem - } -} - -type notifierEventHandler struct { - *Notifier -} - -var _ handler.EventHandler = (*notifierEventHandler)(nil) - -func (h *notifierEventHandler) Create(ctx context.Context, ev event.CreateEvent, q workqueue.RateLimitingInterface) { - h.handleEvent(ev.Object, q) -} - -func (h *notifierEventHandler) Update(ctx context.Context, ev event.UpdateEvent, q workqueue.RateLimitingInterface) { - h.handleEvent(ev.ObjectOld, q) - h.handleEvent(ev.ObjectNew, q) -} - -func (h *notifierEventHandler) Delete(ctx context.Context, ev event.DeleteEvent, q workqueue.RateLimitingInterface) { - h.handleEvent(ev.Object, q) -} - -func (h *notifierEventHandler) Generic(ctx context.Context, ev event.GenericEvent, q workqueue.RateLimitingInterface) { - h.handleEvent(ev.Object, q) -} - -func (h *notifierEventHandler) handleEvent(obj client.Object, q workqueue.RateLimitingInterface) { - h.watchersMut.RLock() - defer h.watchersMut.RUnlock() - - gvk, err := apiutil.GVKForObject(obj, h.client.Scheme()) - if err != nil { - level.Error(h.log).Log("msg", "failed to get gvk for object", "err", err) - return - } - - ctx, cancel := context.WithTimeout(context.Background(), 5*time.Second) - defer cancel() - - // Iterate through all of the watchers for the gvk and check to see if we - // should trigger a reconcile. - for _, watcher := range h.watchers[gvk] { - matches, err := watcher.Selector.Matches(ctx, h.client, obj) - if err != nil { - level.Error(h.log).Log("msg", "failed to handle notifier event", "err", err) - return - } - if matches { - q.Add(reconcile.Request{NamespacedName: watcher.Owner}) - } - } -} diff --git a/internal/static/operator/hierarchy/hierarchy_test.go b/internal/static/operator/hierarchy/hierarchy_test.go deleted file mode 100644 index a26c839c2d..0000000000 --- a/internal/static/operator/hierarchy/hierarchy_test.go +++ /dev/null @@ -1,146 +0,0 @@ -//go:build !nonetwork && !nodocker && !race - -package hierarchy - -import ( - "context" - "testing" - "time" - - "github.com/go-kit/log" - "github.com/grafana/agent/internal/util/k8s" - "github.com/stretchr/testify/require" - v1 "k8s.io/api/core/v1" - metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" - "k8s.io/apimachinery/pkg/labels" - "k8s.io/apimachinery/pkg/types" - "k8s.io/client-go/util/workqueue" - "sigs.k8s.io/controller-runtime/pkg/event" -) - -// TestNotifier tests that notifier properly handles events for changed -// objects. -func TestNotifier(t *testing.T) { - // TODO: this is broken with go 1.20.6 - // waiting on https://github.com/testcontainers/testcontainers-go/issues/1359 - t.Skip() - l := log.NewNopLogger() - - ctx, cancel := context.WithTimeout(context.Background(), 5*time.Minute) - defer cancel() - - cluster, err := k8s.NewCluster(ctx, k8s.Options{}) - require.NoError(t, err) - defer cluster.Stop() - - cli := cluster.Client() - - // Tests will rely on a namespace existing, so let's create a namespace with - // some labels. 
- testNs := v1.Namespace{ - TypeMeta: metav1.TypeMeta{ - APIVersion: "v1", - Kind: "Namespace", - }, - ObjectMeta: metav1.ObjectMeta{ - Name: "enqueue-test", - Labels: map[string]string{"foo": "bar"}, - }, - } - err = cli.Create(ctx, &testNs) - require.NoError(t, err) - - testPod := &v1.Pod{ObjectMeta: metav1.ObjectMeta{ - Name: "test-pod", - Namespace: "enqueue-test", - Labels: map[string]string{"fizz": "buzz"}, - }} - - tt := []struct { - name string - sel Selector - expectEnqueue bool - }{ - { - name: "no watchers", - sel: nil, - expectEnqueue: false, - }, - { - name: "matches watcher", - sel: &LabelsSelector{ - NamespaceName: "enqueue-test", - NamespaceLabels: parseSelector(t, "foo in (bar)"), - Labels: parseSelector(t, "fizz in (buzz)"), - }, - expectEnqueue: true, - }, - { - name: "matches watcher with explicit namespace", - sel: &LabelsSelector{ - NamespaceName: "enqueue-test", - Labels: parseSelector(t, "fizz in (buzz)"), - }, - expectEnqueue: true, - }, - { - name: "bad namespace name selector", - sel: &LabelsSelector{ - NamespaceName: "default", - Labels: labels.Everything(), - }, - expectEnqueue: false, - }, - { - name: "bad namespace label selector", - sel: &LabelsSelector{ - NamespaceName: "enqueue-test", - NamespaceLabels: parseSelector(t, "foo notin (bar)"), - Labels: labels.Everything(), - }, - expectEnqueue: false, - }, - { - name: "bad label selector", - sel: &LabelsSelector{ - NamespaceName: "default", - NamespaceLabels: labels.Everything(), - Labels: parseSelector(t, "fizz notin (buzz)"), - }, - expectEnqueue: false, - }, - } - - for _, tc := range tt { - t.Run(tc.name, func(t *testing.T) { - limiter := workqueue.DefaultControllerRateLimiter() - q := workqueue.NewRateLimitingQueue(limiter) - - notifier := NewNotifier(l, cli) - - if tc.sel != nil { - err := notifier.Notify(Watcher{ - Object: &v1.Pod{}, - Owner: types.NamespacedName{Name: "watcher", Namespace: "enqueue-test"}, - Selector: tc.sel, - }) - require.NoError(t, err) - } - - e := notifier.EventHandler() - e.Create(ctx, event.CreateEvent{Object: testPod}, q) - if tc.expectEnqueue { - require.Equal(t, 1, q.Len(), "expected change enqueue") - } else { - require.Equal(t, 0, q.Len(), "no changes should have been enqueued") - } - }) - } -} - -func parseSelector(t *testing.T, selector string) labels.Selector { - t.Helper() - s, err := labels.Parse(selector) - require.NoError(t, err) - return s -} diff --git a/internal/static/operator/hierarchy/list.go b/internal/static/operator/hierarchy/list.go deleted file mode 100644 index 016fdc33da..0000000000 --- a/internal/static/operator/hierarchy/list.go +++ /dev/null @@ -1,50 +0,0 @@ -package hierarchy - -import ( - "context" - "fmt" - - "k8s.io/apimachinery/pkg/api/meta" - "k8s.io/apimachinery/pkg/runtime" - "sigs.k8s.io/controller-runtime/pkg/client" -) - -// List will populate list with elements that match sel. -func List(ctx context.Context, cli client.Client, list client.ObjectList, sel Selector) error { - if err := cli.List(ctx, list, sel); err != nil { - return fmt.Errorf("list failed: %w", err) - } - if err := filterList(ctx, cli, list, sel); err != nil { - return fmt.Errorf("filter failed: %w", err) - } - return nil -} - -// filterList updates the provided list to only elements which match sel. 
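-// For example (hypothetical contents): a PodList holding three pods of which
-// one matches sel is rewritten in place to contain only that pod.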
-func filterList(ctx context.Context, cli client.Client, list client.ObjectList, sel Selector) error { - allElements, err := meta.ExtractList(list) - if err != nil { - return fmt.Errorf("failed to get list: %w", err) - } - - filtered := make([]runtime.Object, 0, len(allElements)) - for _, element := range allElements { - obj, ok := element.(client.Object) - if !ok { - return fmt.Errorf("unexpected object of type %T in list", element) - } - - matches, err := sel.Matches(ctx, cli, obj) - if err != nil { - return fmt.Errorf("failed to validate object: %w", err) - } - if matches { - filtered = append(filtered, obj) - } - } - - if err := meta.SetList(list, filtered); err != nil { - return fmt.Errorf("failed to update list: %w", err) - } - return nil -} diff --git a/internal/static/operator/hierarchy/selector.go b/internal/static/operator/hierarchy/selector.go deleted file mode 100644 index d2a1493a2c..0000000000 --- a/internal/static/operator/hierarchy/selector.go +++ /dev/null @@ -1,86 +0,0 @@ -package hierarchy - -import ( - "context" - "fmt" - - corev1 "k8s.io/api/core/v1" - "k8s.io/apimachinery/pkg/labels" - "sigs.k8s.io/controller-runtime/pkg/client" -) - -// Selector finding objects within the resource hierarchy. -type Selector interface { - // ListOption can be passed to List to perform initial filtering of returned - // objects. - client.ListOption - - // Matches returns true if the Selector matches the provided Object. The - // provided Client may be used to perform extra searches. - Matches(context.Context, client.Client, client.Object) (bool, error) -} - -// LabelsSelector is used for discovering a set of objects in a hierarchy based -// on labels. -type LabelsSelector struct { - // NamespaceName is the default namespace to search for objects in when - // NamespaceSelector is nil. - NamespaceName string - - // NamespaceLabels causes all namespaces whose labels match NamespaceLabels - // to be searched. When nil, only the namespace specified by NamespaceName - // will be searched. - NamespaceLabels labels.Selector - - // Labels discovers all objects whose labels match the selector. If nil, - // no objects will be discovered. - Labels labels.Selector -} - -var _ Selector = (*LabelsSelector)(nil) - -// ApplyToList implements Selector. -func (ls *LabelsSelector) ApplyToList(lo *client.ListOptions) { - if ls.NamespaceLabels == nil { - lo.Namespace = ls.NamespaceName - } - lo.LabelSelector = ls.Labels -} - -// Matches implements Selector. -func (ls *LabelsSelector) Matches(ctx context.Context, cli client.Client, o client.Object) (bool, error) { - if !ls.Labels.Matches(labels.Set(o.GetLabels())) { - return false, nil - } - - // Fast path: we don't need to retrieve the labels of the namespace. - if ls.NamespaceLabels == nil { - return o.GetNamespace() == ls.NamespaceName, nil - } - - // Slow path: we need to look up the namespace to see if its labels match. As - // long as cli implements caching, this won't be too bad. - var ns corev1.Namespace - if err := cli.Get(ctx, client.ObjectKey{Name: o.GetNamespace()}, &ns); err != nil { - return false, fmt.Errorf("error looking up namespace %q: %w", o.GetNamespace(), err) - } - return ls.NamespaceLabels.Matches(labels.Set(ns.GetLabels())), nil -} - -// KeySelector is used for discovering a single object based on namespace and -// name. -type KeySelector struct { - Namespace, Name string -} - -var _ Selector = (*KeySelector)(nil) - -// ApplyToList implements Selector. 
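-// Only the namespace is pushed down into the list call; the name is checked
-// per-object in Matches below.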
-func (ks *KeySelector) ApplyToList(lo *client.ListOptions) { - lo.Namespace = ks.Namespace -} - -// Matches implements Selector. -func (ks *KeySelector) Matches(ctx context.Context, cli client.Client, o client.Object) (bool, error) { - return ks.Name == o.GetName() && ks.Namespace == o.GetNamespace(), nil -} diff --git a/internal/static/operator/kubelet.go b/internal/static/operator/kubelet.go deleted file mode 100644 index 1e76875519..0000000000 --- a/internal/static/operator/kubelet.go +++ /dev/null @@ -1,158 +0,0 @@ -package operator - -import ( - "context" - "fmt" - "sort" - - "github.com/go-kit/log" - "github.com/go-kit/log/level" - "github.com/grafana/agent/internal/static/operator/clientutil" - "github.com/grafana/agent/internal/static/operator/logutil" - core_v1 "k8s.io/api/core/v1" - meta_v1 "k8s.io/apimachinery/pkg/apis/meta/v1" - controller "sigs.k8s.io/controller-runtime" - "sigs.k8s.io/controller-runtime/pkg/client" -) - -type kubeletReconciler struct { - client.Client - kubeletNamespace, kubeletName string -} - -func (r *kubeletReconciler) Reconcile(ctx context.Context, req controller.Request) (res controller.Result, err error) { - l := logutil.FromContext(ctx) - level.Info(l).Log("msg", "reconciling node") - - var nodes core_v1.NodeList - if err := r.List(ctx, &nodes); err != nil { - level.Error(l).Log("msg", "failed to list nodes for kubelet service", "err", err) - return res, fmt.Errorf("unable to list nodes: %w", err) - } - nodeAddrs, err := getNodeAddrs(l, &nodes) - if err != nil { - level.Error(l).Log("msg", "could not get addresses from all nodes", "err", err) - return res, fmt.Errorf("unable to get addresses from nodes: %w", err) - } - - labels := mergeMaps(managedByOperatorLabels, map[string]string{ - // Labels taken from prometheus-operator: - // https://github.com/prometheus-operator/prometheus-operator/blob/2c81b0cf6a5673e08057499a08ddce396b19dda4/pkg/prometheus/operator.go#L586-L587 - "k8s-app": "kubelet", - "app.kubernetes.io/name": "kubelet", - }) - - svc := &core_v1.Service{ - ObjectMeta: meta_v1.ObjectMeta{ - Name: r.kubeletName, - Namespace: r.kubeletNamespace, - Labels: labels, - }, - Spec: core_v1.ServiceSpec{ - Type: core_v1.ServiceTypeClusterIP, - ClusterIP: "None", - Ports: []core_v1.ServicePort{ - {Name: "https-metrics", Port: 10250}, - {Name: "http-metrics", Port: 10255}, - {Name: "cadvisor", Port: 4194}, - }, - }, - } - - eps := &core_v1.Endpoints{ - ObjectMeta: meta_v1.ObjectMeta{ - Name: r.kubeletName, - Namespace: r.kubeletNamespace, - Labels: labels, - }, - Subsets: []core_v1.EndpointSubset{{ - Addresses: nodeAddrs, - Ports: []core_v1.EndpointPort{ - // Taken from https://github.com/prometheus-operator/prometheus-operator/blob/2c81b0cf6a5673e08057499a08ddce396b19dda4/pkg/prometheus/operator.go#L593 - {Name: "https-metrics", Port: 10250}, - {Name: "http-metrics", Port: 10255}, - {Name: "cadvisor", Port: 4194}, - }, - }}, - } - - level.Debug(l).Log("msg", "reconciling kubelet service", "svc", client.ObjectKeyFromObject(svc)) - err = clientutil.CreateOrUpdateService(ctx, r.Client, svc) - if err != nil { - return res, fmt.Errorf("failed to reconcile kubelet service %s: %w", client.ObjectKeyFromObject(svc), err) - } - - level.Debug(l).Log("msg", "reconciling kubelet endpoints", "eps", client.ObjectKeyFromObject(eps)) - err = clientutil.CreateOrUpdateEndpoints(ctx, r.Client, eps) - if err != nil { - return res, fmt.Errorf("failed to reconcile kubelet endpoints %s: %w", client.ObjectKeyFromObject(eps), err) - } - - return -} - -// mergeMaps merges the 
-// contents of b with a. Keys from b take precedence.
-func mergeMaps(a, b map[string]string) map[string]string {
-    res := make(map[string]string)
-    for k, v := range a {
-        res[k] = v
-    }
-    for k, v := range b {
-        res[k] = v
-    }
-    return res
-}
-
-func getNodeAddrs(l log.Logger, nodes *core_v1.NodeList) (addrs []core_v1.EndpointAddress, err error) {
-    var failed bool
-
-    for _, n := range nodes.Items {
-        addr, err := nodeAddress(n)
-        if err != nil {
-            level.Error(l).Log("msg", "failed to get address from node", "node", n.Name, "err", err)
-            failed = true
-        }
-
-        addrs = append(addrs, core_v1.EndpointAddress{
-            IP: addr,
-            TargetRef: &core_v1.ObjectReference{
-                Kind:       n.Kind,
-                APIVersion: n.APIVersion,
-                Name:       n.Name,
-                UID:        n.UID,
-            },
-        })
-    }
-
-    if failed {
-        return nil, fmt.Errorf("failed to get the address from one or more nodes")
-    }
-
-    // Sort endpoints to reduce performance cost on endpoint watchers
-    sort.SliceStable(addrs, func(i, j int) bool {
-        return addrs[i].IP < addrs[j].IP
-    })
-
-    return
-}
-
-// nodeAddress returns the provided node's address, based on the priority:
-//
-// 1. NodeInternalIP
-// 2. NodeExternalIP
-//
-// Copied from github.com/prometheus/prometheus/discovery/kubernetes/node.go
-func nodeAddress(node core_v1.Node) (string, error) {
-    m := map[core_v1.NodeAddressType][]string{}
-    for _, a := range node.Status.Addresses {
-        m[a.Type] = append(m[a.Type], a.Address)
-    }
-
-    if addresses, ok := m[core_v1.NodeInternalIP]; ok {
-        return addresses[0], nil
-    }
-    if addresses, ok := m[core_v1.NodeExternalIP]; ok {
-        return addresses[0], nil
-    }
-    return "", fmt.Errorf("host address unknown")
-}
diff --git a/internal/static/operator/kubelet_test.go b/internal/static/operator/kubelet_test.go
deleted file mode 100644
index 99e5ff0364..0000000000
--- a/internal/static/operator/kubelet_test.go
+++ /dev/null
@@ -1,117 +0,0 @@
-//go:build !nonetwork && !nodocker && !race
-
-package operator
-
-import (
-    "context"
-    "testing"
-    "time"
-
-    "github.com/grafana/agent/internal/static/operator/logutil"
-    "github.com/grafana/agent/internal/util"
-    "github.com/grafana/agent/internal/util/k8s"
-    "github.com/stretchr/testify/require"
-    "sigs.k8s.io/controller-runtime/pkg/reconcile"
-
-    core_v1 "k8s.io/api/core/v1"
-    meta_v1 "k8s.io/apimachinery/pkg/apis/meta/v1"
-    "k8s.io/apimachinery/pkg/types"
-
-    clog "sigs.k8s.io/controller-runtime/pkg/log"
-)
-
-// TestKubelet tests the Kubelet reconciler.
-func TestKubelet(t *testing.T) { - // TODO: this is broken with go 1.20.6 - // waiting on https://github.com/testcontainers/testcontainers-go/issues/1359 - t.Skip() - l := util.TestLogger(t) - - ctx, cancel := context.WithTimeout(context.Background(), 5*time.Minute) - defer cancel() - ctx = clog.IntoContext(ctx, logutil.Wrap(l)) - - cluster, err := k8s.NewCluster(ctx, k8s.Options{}) - require.NoError(t, err) - defer cluster.Stop() - - cli := cluster.Client() - - nodes := []core_v1.Node{ - { - ObjectMeta: meta_v1.ObjectMeta{Name: "node-a"}, - Status: core_v1.NodeStatus{ - Addresses: []core_v1.NodeAddress{ - {Type: core_v1.NodeInternalIP, Address: "10.0.0.10"}, - }, - }, - }, - { - ObjectMeta: meta_v1.ObjectMeta{Name: "node-b"}, - Status: core_v1.NodeStatus{ - Addresses: []core_v1.NodeAddress{ - {Type: core_v1.NodeExternalIP, Address: "10.24.0.11"}, - }, - }, - }, - { - ObjectMeta: meta_v1.ObjectMeta{Name: "node-c"}, - Status: core_v1.NodeStatus{ - Addresses: []core_v1.NodeAddress{ - {Type: core_v1.NodeExternalIP, Address: "10.24.0.12"}, - {Type: core_v1.NodeInternalIP, Address: "10.0.0.12"}, - }, - }, - }, - } - - for _, n := range nodes { - err := cli.Create(ctx, &n) - require.NoError(t, err) - } - - ns := &core_v1.Namespace{ - ObjectMeta: meta_v1.ObjectMeta{Name: "kube-system"}, - } - _ = cli.Create(ctx, ns) - - r := &kubeletReconciler{ - Client: cli, - kubeletNamespace: "kube-system", - kubeletName: "kubelet", - } - _, err = r.Reconcile(ctx, reconcile.Request{}) - require.NoError(t, err) - - var ( - eps core_v1.Endpoints - svc core_v1.Service - - key = types.NamespacedName{Namespace: r.kubeletNamespace, Name: r.kubeletName} - ) - require.NoError(t, cli.Get(ctx, key, &eps)) - require.NoError(t, cli.Get(ctx, key, &svc)) - - require.Len(t, eps.Subsets, 1) - - expect := map[string]string{ - "node-a": "10.0.0.10", - "node-b": "10.24.0.11", - - // When a node has internal and external IPs, use internal first. - "node-c": "10.0.0.12", - } - for nodeName, expectIP := range expect { - var epa *core_v1.EndpointAddress - - for _, addr := range eps.Subsets[0].Addresses { - if addr.TargetRef.Name == nodeName { - epa = &addr - break - } - } - - require.NotNilf(t, epa, "did not find endpoint address for node %s", nodeName) - require.Equalf(t, expectIP, epa.IP, "node %s had incorrect ip address", nodeName) - } -} diff --git a/internal/static/operator/logutil/log.go b/internal/static/operator/logutil/log.go deleted file mode 100644 index b31580cc73..0000000000 --- a/internal/static/operator/logutil/log.go +++ /dev/null @@ -1,79 +0,0 @@ -// Package logutil implements an adaptor for the go-kit logger, which is used in the -// Grafana Agent project, and go-logr, which is used in controller-runtime. -package logutil - -import ( - "context" - "fmt" - - "github.com/go-kit/log" - "github.com/go-kit/log/level" - "github.com/go-logr/logr" - "k8s.io/klog/v2" - clog "sigs.k8s.io/controller-runtime/pkg/log" -) - -// Wrap wraps a log.Logger into a logr.Logger. -func Wrap(l log.Logger) logr.Logger { - return logr.New(&goKitLogger{l: l}) -} - -// FromContext returns a log.Logger from a context. Panics if the context doesn't -// have a Logger set. -func FromContext(ctx context.Context, kvps ...interface{}) log.Logger { - gkl := clog.FromContext(ctx, kvps...).GetSink().(*goKitLogger) - return gkl.namedLogger() -} - -type goKitLogger struct { - // name is a name field used by logr which can be appended to dynamically. 
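-    // For example, WithName("controller").WithName("agent") yields the
-    // component value "controller.agent".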
- name string - kvps []interface{} - l log.Logger -} - -var _ logr.LogSink = (*goKitLogger)(nil) - -func (l *goKitLogger) Init(info logr.RuntimeInfo) { - // no-op -} - -func (l *goKitLogger) Enabled(level int) bool { return true } - -func (l *goKitLogger) Info(logLevel int, msg string, keysAndValues ...interface{}) { - args := append([]interface{}{"msg", msg}, keysAndValues...) - level.Info(l.namedLogger()).Log(args...) -} - -func (l *goKitLogger) Error(err error, msg string, keysAndValues ...interface{}) { - args := append([]interface{}{"msg", msg, "err", err}, keysAndValues...) - level.Error(l.namedLogger()).Log(args...) -} - -func (l *goKitLogger) WithValues(keysAndValues ...interface{}) logr.LogSink { - // fix for logs showing "unsupported value type for object references" - if len(keysAndValues) == 2 { - if v, ok := keysAndValues[1].(klog.ObjectRef); ok { - keysAndValues[1] = fmt.Sprintf("%s/%s", v.Namespace, v.Name) - } - } - return &goKitLogger{name: l.name, l: l.l, kvps: append(l.kvps, keysAndValues...)} -} - -// namedLogger gets log.Logger with component applied. -func (l *goKitLogger) namedLogger() log.Logger { - logger := l.l - if l.name != "" { - logger = log.With(logger, "component", l.name) - } - logger = log.With(logger, l.kvps...) - return logger -} - -func (l *goKitLogger) WithName(name string) logr.LogSink { - newName := name - if l.name != "" { - newName = l.name + "." + name - } - return &goKitLogger{name: newName, l: l.l} -} diff --git a/internal/static/operator/operator.go b/internal/static/operator/operator.go deleted file mode 100644 index 12a5791ea6..0000000000 --- a/internal/static/operator/operator.go +++ /dev/null @@ -1,276 +0,0 @@ -package operator - -import ( - "context" - "flag" - "fmt" - "strings" - "sync" - - "github.com/go-kit/log" - "github.com/go-kit/log/level" - dskit "github.com/grafana/dskit/log" - "k8s.io/apimachinery/pkg/labels" - "k8s.io/apimachinery/pkg/runtime" - controller "sigs.k8s.io/controller-runtime" - "sigs.k8s.io/controller-runtime/pkg/builder" - "sigs.k8s.io/controller-runtime/pkg/cache" - "sigs.k8s.io/controller-runtime/pkg/healthz" - "sigs.k8s.io/controller-runtime/pkg/manager" - "sigs.k8s.io/controller-runtime/pkg/predicate" - "sigs.k8s.io/controller-runtime/pkg/reconcile" - "sigs.k8s.io/controller-runtime/pkg/webhook" - - gragent "github.com/grafana/agent/internal/static/operator/apis/monitoring/v1alpha1" - "github.com/grafana/agent/internal/static/operator/hierarchy" - promop_v1 "github.com/prometheus-operator/prometheus-operator/pkg/apis/monitoring/v1" - promop "github.com/prometheus-operator/prometheus-operator/pkg/operator" - apps_v1 "k8s.io/api/apps/v1" - core_v1 "k8s.io/api/core/v1" - meta_v1 "k8s.io/apimachinery/pkg/apis/meta/v1" - - // Needed for clients. - _ "k8s.io/client-go/plugin/pkg/client/auth" - "k8s.io/client-go/rest" -) - -// Config controls the configuration of the Operator. -type Config struct { - LogLevel dskit.Level - LogFormat string - Labels promop.Labels - Controller controller.Options - AgentSelector string - KubelsetServiceName string - - agentLabelSelector labels.Selector - - // RestConfig used to connect to cluster. One will be generated based on the - // environment if not set. - RestConfig *rest.Config - - // TODO(rfratto): extra settings from Prometheus Operator: - // - // 1. Reloader container image/requests/limits - // 2. Namespaces allow/denylist. - // 3. Namespaces for Prometheus resources. -} - -// NewConfig creates a new Config and initializes default values. 
-// Flags will be registered against f if it is non-nil.
-func NewConfig(f *flag.FlagSet) (*Config, error) {
-	if f == nil {
-		f = flag.NewFlagSet("temp", flag.PanicOnError)
-	}
-
-	var c Config
-	err := c.registerFlags(f)
-	if err != nil {
-		return nil, err
-	}
-	return &c, nil
-}
-
-func (c *Config) registerFlags(f *flag.FlagSet) error {
-	c.LogLevel.RegisterFlags(f)
-	f.Var(&c.Labels, "labels", "Labels to add to all created operator resources")
-	f.StringVar(&c.AgentSelector, "agent-selector", "", "Label selector to discover GrafanaAgent CRs. Defaults to all GrafanaAgent CRs.")
-	var namespace string
-	var webhookServerOptions webhook.Options
-	f.StringVar(&namespace, "namespace", "", "Namespace to restrict the Operator to.")                                  // nolint:staticcheck
-	f.StringVar(&webhookServerOptions.Host, "listen-host", "", "Host to listen on. Empty string means all interfaces.") // nolint:staticcheck
-	f.IntVar(&webhookServerOptions.Port, "listen-port", 9443, "Port to listen on.")                                     // nolint:staticcheck
-	f.StringVar(&c.Controller.Metrics.BindAddress, "metrics-listen-address", ":8080", "Address to expose Operator metrics on")
-	f.StringVar(&c.Controller.HealthProbeBindAddress, "health-listen-address", "", "Address to expose Operator health probes on")
-
-	f.StringVar(&c.KubeletServiceName, "kubelet-service", "", "Service and Endpoints objects to write kubelets into. Allows for monitoring Kubelet and cAdvisor metrics using a ServiceMonitor. Must be in format \"namespace/name\". If empty, nothing will be created.")
-
-	c.Controller.WebhookServer = webhook.NewServer(webhookServerOptions)
-
-	if namespace != "" {
-		c.Controller.Cache.DefaultNamespaces = map[string]cache.Config{}
-		c.Controller.Cache.DefaultNamespaces[namespace] = cache.Config{}
-	}
-
-	// Custom initial values for the endpoint names.
-	c.Controller.ReadinessEndpointName = "/-/ready"
-	c.Controller.LivenessEndpointName = "/-/healthy"
-
-	c.Controller.Scheme = runtime.NewScheme()
-	for _, add := range []func(*runtime.Scheme) error{
-		core_v1.AddToScheme,
-		apps_v1.AddToScheme,
-		gragent.AddToScheme,
-		promop_v1.AddToScheme,
-	} {
-		if err := add(c.Controller.Scheme); err != nil {
-			return fmt.Errorf("unable to register scheme: %w", err)
-		}
-	}
-
-	return nil
-}
-
-// Operator is the Grafana Agent Operator.
-type Operator struct {
-	log     log.Logger
-	manager manager.Manager
-
-	// New creates reconcilers to reconcile the kubelet service (if configured)
-	// and Grafana Agent deployments. We store them as lazyReconcilers so tests
-	// can update what the underlying reconciler implementation is.
-
-	kubeletReconciler *lazyReconciler // Unused if kubelet service unconfigured
-	agentReconciler   *lazyReconciler
-}
-
-// New creates a new Operator.
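-//
-// A minimal usage sketch (assuming a go-kit logger and a context are already
-// in scope; error handling elided):
-//
-//	cfg, err := NewConfig(nil)
-//	op, err := New(logger, cfg)
-//	err = op.Start(ctx) // runs until ctx is canceled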
-func New(l log.Logger, c *Config) (*Operator, error) {
-	var (
-		lazyKubeletReconciler, lazyAgentReconciler lazyReconciler
-	)
-
-	restConfig := c.RestConfig
-	if restConfig == nil {
-		restConfig = controller.GetConfigOrDie()
-	}
-	manager, err := controller.NewManager(restConfig, c.Controller)
-	if err != nil {
-		return nil, fmt.Errorf("failed to create manager: %w", err)
-	}
-
-	if err := manager.AddReadyzCheck("running", healthz.Ping); err != nil {
-		level.Warn(l).Log("msg", "failed to set up 'running' readyz check", "err", err)
-	}
-	if err := manager.AddHealthzCheck("running", healthz.Ping); err != nil {
-		level.Warn(l).Log("msg", "failed to set up 'running' healthz check", "err", err)
-	}
-
-	var (
-		agentPredicates []predicate.Predicate
-
-		notifier        = hierarchy.NewNotifier(log.With(l, "component", "hierarchy_notifier"), manager.GetClient())
-		notifierHandler = notifier.EventHandler()
-	)
-
-	// Initialize agentPredicates if a GrafanaAgent selector is configured.
-	if c.AgentSelector != "" {
-		sel, err := meta_v1.ParseToLabelSelector(c.AgentSelector)
-		if err != nil {
-			return nil, fmt.Errorf("unable to create predicate for selecting GrafanaAgent CRs: %w", err)
-		}
-		c.agentLabelSelector, err = meta_v1.LabelSelectorAsSelector(sel)
-		if err != nil {
-			return nil, fmt.Errorf("unable to create predicate for selecting GrafanaAgent CRs: %w", err)
-		}
-		selPredicate, err := predicate.LabelSelectorPredicate(*sel)
-		if err != nil {
-			return nil, fmt.Errorf("unable to create predicate for selecting GrafanaAgent CRs: %w", err)
-		}
-		agentPredicates = append(agentPredicates, selPredicate)
-	}
-
-	if c.KubeletServiceName != "" {
-		parts := strings.Split(c.KubeletServiceName, "/")
-		if len(parts) != 2 {
-			return nil, fmt.Errorf("invalid format for kubelet-service %q, must be formatted as \"namespace/name\"", c.KubeletServiceName)
-		}
-		kubeletNamespace := parts[0]
-		kubeletName := parts[1]
-
-		err := controller.NewControllerManagedBy(manager).
-			For(&core_v1.Node{}).
-			Owns(&core_v1.Service{}).
-			Owns(&core_v1.Endpoints{}).
-			Complete(&lazyKubeletReconciler)
-		if err != nil {
-			return nil, fmt.Errorf("failed to create kubelet controller: %w", err)
-		}
-
-		lazyKubeletReconciler.Set(&kubeletReconciler{
-			Client: manager.GetClient(),
-
-			kubeletNamespace: kubeletNamespace,
-			kubeletName:      kubeletName,
-		})
-	}
-
-	err = controller.NewControllerManagedBy(manager).
-		For(&gragent.GrafanaAgent{}, builder.WithPredicates(agentPredicates...)).
-		Owns(&apps_v1.StatefulSet{}).
-		Owns(&apps_v1.DaemonSet{}).
-		Owns(&apps_v1.Deployment{}).
-		Owns(&core_v1.Secret{}).
-		Owns(&core_v1.Service{}).
-		Watches(&core_v1.Secret{}, notifierHandler).
-		Watches(&gragent.LogsInstance{}, notifierHandler).
-		Watches(&gragent.PodLogs{}, notifierHandler).
-		Watches(&gragent.MetricsInstance{}, notifierHandler).
-		Watches(&gragent.Integration{}, notifierHandler).
-		Watches(&promop_v1.PodMonitor{}, notifierHandler).
-		Watches(&promop_v1.Probe{}, notifierHandler).
-		Watches(&promop_v1.ServiceMonitor{}, notifierHandler).
-		Watches(&core_v1.ConfigMap{}, notifierHandler).
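-		// Each Watches call above re-queues the GrafanaAgent resources that
-		// reference the changed object (via the hierarchy notifier), so edits
-		// to Secrets, ConfigMaps, monitors, and instances trigger a Reconcile.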
- Complete(&lazyAgentReconciler) - if err != nil { - return nil, fmt.Errorf("failed to create GrafanaAgent controller: %w", err) - } - - lazyAgentReconciler.Set(&reconciler{ - Client: manager.GetClient(), - scheme: manager.GetScheme(), - notifier: notifier, - config: c, - }) - - return &Operator{ - log: l, - manager: manager, - - kubeletReconciler: &lazyKubeletReconciler, - agentReconciler: &lazyAgentReconciler, - }, nil -} - -// Start starts the operator. It will run until ctx is canceled. -func (o *Operator) Start(ctx context.Context) error { - return o.manager.Start(ctx) -} - -type lazyReconciler struct { - mut sync.RWMutex - inner reconcile.Reconciler -} - -// Get returns the current reconciler. -func (lr *lazyReconciler) Get() reconcile.Reconciler { - lr.mut.RLock() - defer lr.mut.RUnlock() - return lr.inner -} - -// Set updates the current reconciler. -func (lr *lazyReconciler) Set(inner reconcile.Reconciler) { - lr.mut.Lock() - defer lr.mut.Unlock() - lr.inner = inner -} - -// Wrap wraps the current reconciler with a middleware. -func (lr *lazyReconciler) Wrap(mw func(next reconcile.Reconciler) reconcile.Reconciler) { - lr.mut.Lock() - defer lr.mut.Unlock() - lr.inner = mw(lr.inner) -} - -// Reconcile calls Reconcile against the current reconciler. -func (lr *lazyReconciler) Reconcile(ctx context.Context, req reconcile.Request) (reconcile.Result, error) { - lr.mut.RLock() - defer lr.mut.RUnlock() - if lr.inner == nil { - return reconcile.Result{}, fmt.Errorf("no reconciler") - } - return lr.inner.Reconcile(ctx, req) -} diff --git a/internal/static/operator/operator_test.go b/internal/static/operator/operator_test.go deleted file mode 100644 index 6ab889e613..0000000000 --- a/internal/static/operator/operator_test.go +++ /dev/null @@ -1,186 +0,0 @@ -//go:build !nonetwork && !nodocker && !race - -package operator - -import ( - "context" - "fmt" - "os" - "path/filepath" - "sync" - "testing" - "time" - - "github.com/go-kit/log" - "github.com/grafana/agent/internal/static/operator/logutil" - "github.com/grafana/agent/internal/util" - "github.com/grafana/agent/internal/util/k8s" - "github.com/grafana/agent/internal/util/subset" - "github.com/stretchr/testify/require" - "k8s.io/apimachinery/pkg/apis/meta/v1/unstructured" - "sigs.k8s.io/controller-runtime/pkg/client" - "sigs.k8s.io/controller-runtime/pkg/webhook" - "sigs.k8s.io/yaml" -) - -// TestMetricsInstance deploys a basic MetricsInstance and validates expected -// resources were applied. -func TestMetricsInstance(t *testing.T) { - ctx, cancel := context.WithTimeout(context.Background(), 3*time.Minute) - defer cancel() - - inFile := "./testdata/test-metrics-instance.in.yaml" - outFile := "./testdata/test-metrics-instance.out.yaml" - ReconcileTest(ctx, t, inFile, outFile) -} - -func TestCustomMounts(t *testing.T) { - ctx, cancel := context.WithTimeout(context.Background(), 3*time.Minute) - defer cancel() - - inFile := "./testdata/test-custom-mounts.in.yaml" - outFile := "./testdata/test-custom-mounts.out.yaml" - ReconcileTest(ctx, t, inFile, outFile) -} - -func TestIntegrations(t *testing.T) { - ctx, cancel := context.WithTimeout(context.Background(), 3*time.Minute) - defer cancel() - - inFile := "./testdata/test-integrations.in.yaml" - outFile := "./testdata/test-integrations.out.yaml" - ReconcileTest(ctx, t, inFile, outFile) -} - -// ReconcileTest deploys a cluster and runs the operator against it locally. It -// then does the following: -// -// 1. 
Deploys all resources in inFile, assuming a Reconcile will retrigger from -// them -// -// 2. Loads the resources specified by outFile and checks if the equivalent -// existing resources in the cluster are subsets of the loaded outFile -// resources. -// -// The second step will run in a loop until the test passes or ctx is canceled. -// -// ReconcileTest cannot be used to check that the data of a Secret or a -// ConfigMap is a subset of expected data. -func ReconcileTest(ctx context.Context, t *testing.T, inFile, outFile string) { - t.Helper() - - var wg sync.WaitGroup - defer wg.Wait() - - ctx, cancel := context.WithCancel(ctx) - defer cancel() - - l := util.TestLogger(t) - cluster := NewTestCluster(ctx, t, l) - - cfg := NewTestConfig(t, cluster) - op, err := New(l, cfg) - require.NoError(t, err) - - // Deploy input resources - resources := k8s.NewResourceSet(l, cluster) - defer resources.Stop() - require.NoError(t, resources.AddFile(ctx, inFile)) - - // Start the operator. - wg.Add(1) - go func() { - defer wg.Done() - err := op.Start(ctx) - require.NoError(t, err) - }() - - // Load our expected resources, and then get the real resource for each and - // ensure that it overlaps with our expected object. - expectedFile, err := os.Open(outFile) - require.NoError(t, err) - defer expectedFile.Close() - - expectedSet, err := k8s.ReadUnstructuredObjects(expectedFile) - require.NoError(t, err) - - for _, expected := range expectedSet { - err := k8s.Wait(ctx, l, func() error { - var actual unstructured.Unstructured - actual.SetGroupVersionKind(expected.GroupVersionKind()) - - objKey := client.ObjectKeyFromObject(expected) - - err := cluster.Client().Get(ctx, objKey, &actual) - if err != nil { - return fmt.Errorf("failed to get resource: %w", err) - } - - expectedBytes, err := yaml.Marshal(expected) - if err != nil { - return fmt.Errorf("failed to marshal expected: %w", err) - } - - actualBytes, err := yaml.Marshal(&actual) - if err != nil { - return fmt.Errorf("failed to marshal actual: %w", err) - } - - err = subset.YAMLAssert(expectedBytes, actualBytes) - if err != nil { - return fmt.Errorf("assert failed for %s: %w", objKey, err) - } - return nil - }) - - require.NoError(t, err) - } -} - -// NewTestCluster creates a new testing cluster. The cluster will be removed -// when the test completes. -func NewTestCluster(ctx context.Context, t *testing.T, l log.Logger) *k8s.Cluster { - // TODO: this is broken with go 1.20.6 - // waiting on https://github.com/testcontainers/testcontainers-go/issues/1359 - t.Skip() - t.Helper() - - cluster, err := k8s.NewCluster(ctx, k8s.Options{}) - require.NoError(t, err) - t.Cleanup(cluster.Stop) - - // Apply CRDs to cluster - crds := k8s.NewResourceSet(l, cluster) - t.Cleanup(crds.Stop) - - crdPaths, err := filepath.Glob("../../production/operator/crds/*.yaml") - require.NoError(t, err) - - for _, crd := range crdPaths { - err := crds.AddFile(ctx, crd) - require.NoError(t, err) - } - - require.NoError(t, crds.Wait(ctx), "CRDs did not get created successfully") - return cluster -} - -// NewTestConfig generates a new base operator Config used for tests. 
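-//
-// Typical use, mirroring ReconcileTest above:
-//
-//	cluster := NewTestCluster(ctx, t, l)
-//	cfg := NewTestConfig(t, cluster)
-//	op, err := New(l, cfg)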
-func NewTestConfig(t *testing.T, cluster *k8s.Cluster) *Config { - t.Helper() - - cfg, err := NewConfig(nil) - require.NoError(t, err) - - cfg.RestConfig = cluster.GetConfig() - cfg.Controller.Logger = logutil.Wrap(util.TestLogger(t)) - - // Listen on any port for testing purposes - cfg.Controller.WebhookServer = webhook.NewServer(webhook.Options{ - Port: 0, - }) - cfg.Controller.Metrics.BindAddress = "127.0.0.1:0" - cfg.Controller.HealthProbeBindAddress = "127.0.0.1:0" - - return cfg -} diff --git a/internal/static/operator/reconciler.go b/internal/static/operator/reconciler.go deleted file mode 100644 index 05708aaa1e..0000000000 --- a/internal/static/operator/reconciler.go +++ /dev/null @@ -1,136 +0,0 @@ -package operator - -import ( - "context" - "fmt" - - "github.com/go-kit/log" - "github.com/go-kit/log/level" - gragent "github.com/grafana/agent/internal/static/operator/apis/monitoring/v1alpha1" - "github.com/grafana/agent/internal/static/operator/clientutil" - "github.com/grafana/agent/internal/static/operator/config" - "github.com/grafana/agent/internal/static/operator/hierarchy" - "github.com/grafana/agent/internal/static/operator/logutil" - "github.com/prometheus/prometheus/model/labels" - core_v1 "k8s.io/api/core/v1" - k8s_errors "k8s.io/apimachinery/pkg/api/errors" - v1 "k8s.io/apimachinery/pkg/apis/meta/v1" - "k8s.io/apimachinery/pkg/runtime" - controller "sigs.k8s.io/controller-runtime" - "sigs.k8s.io/controller-runtime/pkg/client" -) - -type reconciler struct { - client.Client - scheme *runtime.Scheme - config *Config - - notifier *hierarchy.Notifier -} - -func (r *reconciler) Reconcile(ctx context.Context, req controller.Request) (controller.Result, error) { - l := logutil.FromContext(ctx) - level.Info(l).Log("msg", "reconciling grafana-agent") - defer level.Debug(l).Log("msg", "done reconciling grafana-agent") - - // Reset our notifications while we re-handle the reconcile. - r.notifier.StopNotify(req.NamespacedName) - - var agent gragent.GrafanaAgent - if err := r.Get(ctx, req.NamespacedName, &agent); k8s_errors.IsNotFound(err) { - level.Debug(l).Log("msg", "detected deleted agent") - return controller.Result{}, nil - } else if err != nil { - level.Error(l).Log("msg", "unable to get grafana-agent", "err", err) - return controller.Result{}, nil - } - - if agent.Spec.Paused { - return controller.Result{}, nil - } - - if r.config.agentLabelSelector != nil && !r.config.agentLabelSelector.Matches(labels.FromMap(agent.ObjectMeta.Labels)) { - level.Debug(l).Log("msg", "grafana-agent does not match agent selector. 
Skipping reconcile") - return controller.Result{}, nil - } - - deployment, watchers, err := buildHierarchy(ctx, l, r.Client, &agent) - if err != nil { - level.Error(l).Log("msg", "unable to build hierarchy", "err", err) - return controller.Result{}, nil - } - if err := r.notifier.Notify(watchers...); err != nil { - level.Error(l).Log("msg", "unable to update notifier", "err", err) - return controller.Result{}, nil - } - - type reconcileFunc func(context.Context, log.Logger, gragent.Deployment) error - actors := []reconcileFunc{ - // Operator-wide resources - r.createSecrets, - - // Metrics resources (may be a no-op if no metrics configured) - r.createMetricsConfigurationSecret, - r.createMetricsGoverningService, - r.createMetricsStatefulSets, - - // Logs resources (may be a no-op if no logs configured) - r.createLogsConfigurationSecret, - r.createLogsDaemonSet, - - // Integration resources (may be a no-op if no integrations configured) - r.newIntegrationsDeploymentSecret, - r.newIntegrationsDaemonSetSecret, - r.newIntegrationsDeployment, - r.newIntegrationsDaemonSet, - } - for _, actor := range actors { - err := actor(ctx, l, deployment) - if err != nil { - level.Error(l).Log("msg", "error during reconciling", "err", err) - return controller.Result{Requeue: true}, nil - } - } - - return controller.Result{}, nil -} - -// createSecrets creates secrets from the secret store. -func (r *reconciler) createSecrets( - ctx context.Context, - l log.Logger, - d gragent.Deployment, -) error { - - blockOwnerDeletion := true - - data := make(map[string][]byte) - for k, value := range d.Secrets { - data[config.SanitizeLabelName(string(k))] = []byte(value) - } - - secret := core_v1.Secret{ - ObjectMeta: v1.ObjectMeta{ - Namespace: d.Agent.Namespace, - Name: fmt.Sprintf("%s-secrets", d.Agent.Name), - OwnerReferences: []v1.OwnerReference{{ - APIVersion: d.Agent.APIVersion, - BlockOwnerDeletion: &blockOwnerDeletion, - Kind: d.Agent.Kind, - Name: d.Agent.Name, - UID: d.Agent.UID, - }}, - Labels: map[string]string{ - managedByOperatorLabel: managedByOperatorLabelValue, - }, - }, - Data: data, - } - - level.Info(l).Log("msg", "reconciling secret", "secret", secret.Name) - err := clientutil.CreateOrUpdateSecret(ctx, r.Client, &secret) - if err != nil { - return fmt.Errorf("failed to reconcile secret: %w", err) - } - return nil -} diff --git a/internal/static/operator/reconciler_integrations.go b/internal/static/operator/reconciler_integrations.go deleted file mode 100644 index 46eb873907..0000000000 --- a/internal/static/operator/reconciler_integrations.go +++ /dev/null @@ -1,122 +0,0 @@ -package operator - -import ( - "context" - "fmt" - - "github.com/go-kit/log" - "github.com/go-kit/log/level" - gragent "github.com/grafana/agent/internal/static/operator/apis/monitoring/v1alpha1" - "github.com/grafana/agent/internal/static/operator/clientutil" - "github.com/grafana/agent/internal/static/operator/config" - apps_v1 "k8s.io/api/apps/v1" - "k8s.io/apimachinery/pkg/types" -) - -func (r *reconciler) newIntegrationsDeploymentSecret( - ctx context.Context, - l log.Logger, - d gragent.Deployment, -) error { - - // The Deployment for integrations only has integrations where AllNodes is - // false. 
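-	// For example, a redis Integration (AllNodes=false) is rendered into this
-	// Deployment config, while a node_exporter Integration (AllNodes=true)
-	// lands in the DaemonSet config instead.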
- d = deploymentIntegrationSubset(d, false) - - name := fmt.Sprintf("%s-integrations-deploy-config", d.Agent.Name) - return r.createTelemetryConfigurationSecret(ctx, l, name, d, config.IntegrationsType) -} - -func (r *reconciler) newIntegrationsDaemonSetSecret( - ctx context.Context, - l log.Logger, - d gragent.Deployment, -) error { - - // The DaemonSet for integrations only has integrations where AllNodes is - // true. - d = deploymentIntegrationSubset(d, true) - - name := fmt.Sprintf("%s-integrations-ds-config", d.Agent.Name) - return r.createTelemetryConfigurationSecret(ctx, l, name, d, config.IntegrationsType) -} - -func deploymentIntegrationSubset(d gragent.Deployment, allNodes bool) gragent.Deployment { - res := *d.DeepCopy() - - filteredIntegrations := make([]gragent.IntegrationsDeployment, 0, len(d.Integrations)) - for _, i := range d.Integrations { - if i.Instance.Spec.Type.AllNodes == allNodes { - filteredIntegrations = append(filteredIntegrations, i) - } - } - - res.Integrations = filteredIntegrations - return res -} - -func (r *reconciler) newIntegrationsDeployment( - ctx context.Context, - l log.Logger, - d gragent.Deployment, -) error { - - // The Deployment for integrations only has integrations where AllNodes is - // false. - d = deploymentIntegrationSubset(d, false) - - name := fmt.Sprintf("%s-integrations-deploy", d.Agent.Name) - deploy, err := newIntegrationsDeployment(r.config, name, d) - if err != nil { - return fmt.Errorf("failed to generate integrations Deployment: %w", err) - } - key := types.NamespacedName{Namespace: deploy.Namespace, Name: deploy.Name} - - if len(d.Integrations) == 0 { - // There's nothing to deploy; delete anything that might've been deployed - // from a previous reconcile. - level.Info(l).Log("msg", "deleting integrations Deployment", "deploy", key) - var deploy apps_v1.Deployment - return deleteManagedResource(ctx, r.Client, key, &deploy) - } - - level.Info(l).Log("msg", "reconciling integrations Deployment", "deploy", key) - err = clientutil.CreateOrUpdateDeployment(ctx, r.Client, deploy, l) - if err != nil { - return fmt.Errorf("failed to reconcile integrations Deployment: %w", err) - } - return nil -} - -func (r *reconciler) newIntegrationsDaemonSet( - ctx context.Context, - l log.Logger, - d gragent.Deployment, -) error { - - // The DaemonSet for integrations only has integrations where AllNodes is - // true. - d = deploymentIntegrationSubset(d, true) - - name := fmt.Sprintf("%s-integrations-ds", d.Agent.Name) - ds, err := newIntegrationsDaemonSet(r.config, name, d) - if err != nil { - return fmt.Errorf("failed to generate integrations DaemonSet: %w", err) - } - key := types.NamespacedName{Namespace: ds.Namespace, Name: ds.Name} - - if len(d.Integrations) == 0 { - // There's nothing to deploy; delete anything that might've been deployed - // from a previous reconcile. 
- level.Info(l).Log("msg", "deleting integrations DaemonSet", "ds", key) - var ds apps_v1.DaemonSet - return deleteManagedResource(ctx, r.Client, key, &ds) - } - - level.Info(l).Log("msg", "reconciling integrations DaemonSet", "ds", key) - err = clientutil.CreateOrUpdateDaemonSet(ctx, r.Client, ds, l) - if err != nil { - return fmt.Errorf("failed to reconcile integrations DaemonSet: %w", err) - } - return nil -} diff --git a/internal/static/operator/reconciler_integrations_test.go b/internal/static/operator/reconciler_integrations_test.go deleted file mode 100644 index 6f0d0c9686..0000000000 --- a/internal/static/operator/reconciler_integrations_test.go +++ /dev/null @@ -1,82 +0,0 @@ -package operator - -import ( - "testing" - - gragent "github.com/grafana/agent/internal/static/operator/apis/monitoring/v1alpha1" - "github.com/stretchr/testify/require" - meta_v1 "k8s.io/apimachinery/pkg/apis/meta/v1" -) - -func Test_deploymentIntegrationSubset(t *testing.T) { - var ( - nodeExporter = &gragent.Integration{ - ObjectMeta: meta_v1.ObjectMeta{ - Name: "node_exporter", - Namespace: "default", - }, - Spec: gragent.IntegrationSpec{ - Name: "node_exporter", - Type: gragent.IntegrationType{AllNodes: true}, - }, - } - process = &gragent.Integration{ - ObjectMeta: meta_v1.ObjectMeta{ - Name: "process", - Namespace: "default", - }, - Spec: gragent.IntegrationSpec{ - Name: "process", - Type: gragent.IntegrationType{AllNodes: true}, - }, - } - redis = &gragent.Integration{ - ObjectMeta: meta_v1.ObjectMeta{ - Name: "redis", - Namespace: "default", - }, - Spec: gragent.IntegrationSpec{ - Name: "redis", - Type: gragent.IntegrationType{AllNodes: false}, - }, - } - - deploy = gragent.Deployment{ - Integrations: []gragent.IntegrationsDeployment{ - {Instance: nodeExporter}, - {Instance: process}, - {Instance: redis}, - }, - } - ) - - tt := []struct { - name string - allNodes bool - expect []*gragent.Integration - }{ - { - name: "allNodes=false", - allNodes: false, - expect: []*gragent.Integration{redis}, - }, - { - name: "allNodes=true", - allNodes: true, - expect: []*gragent.Integration{nodeExporter, process}, - }, - } - - for _, tc := range tt { - t.Run(tc.name, func(t *testing.T) { - res := deploymentIntegrationSubset(deploy, tc.allNodes) - - integrations := make([]*gragent.Integration, 0, len(res.Integrations)) - for _, i := range res.Integrations { - integrations = append(integrations, i.Instance) - } - - require.Equal(t, tc.expect, integrations) - }) - } -} diff --git a/internal/static/operator/reconciler_logs.go b/internal/static/operator/reconciler_logs.go deleted file mode 100644 index cf12348851..0000000000 --- a/internal/static/operator/reconciler_logs.go +++ /dev/null @@ -1,55 +0,0 @@ -package operator - -import ( - "context" - "fmt" - - "github.com/go-kit/log" - "github.com/go-kit/log/level" - gragent "github.com/grafana/agent/internal/static/operator/apis/monitoring/v1alpha1" - "github.com/grafana/agent/internal/static/operator/clientutil" - "github.com/grafana/agent/internal/static/operator/config" - apps_v1 "k8s.io/api/apps/v1" - "k8s.io/apimachinery/pkg/types" -) - -// createLogsConfigurationSecret creates the Grafana Agent logs configuration -// and stores it into a secret. -func (r *reconciler) createLogsConfigurationSecret( - ctx context.Context, - l log.Logger, - d gragent.Deployment, -) error { - - name := fmt.Sprintf("%s-logs-config", d.Agent.Name) - return r.createTelemetryConfigurationSecret(ctx, l, name, d, config.LogsType) -} - -// createLogsDaemonSet creates a DaemonSet for logs. 
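-// If no logs instances are configured, a previously created DaemonSet is
-// deleted instead.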
-func (r *reconciler) createLogsDaemonSet(
-	ctx context.Context,
-	l log.Logger,
-	d gragent.Deployment,
-) error {
-
-	name := fmt.Sprintf("%s-logs", d.Agent.Name)
-	ds, err := generateLogsDaemonSet(r.config, name, d)
-	if err != nil {
-		return fmt.Errorf("failed to generate DaemonSet: %w", err)
-	}
-	key := types.NamespacedName{Namespace: ds.Namespace, Name: ds.Name}
-
-	if len(d.Logs) == 0 {
-		// There's nothing to deploy; delete anything that might've been deployed
-		// from a previous reconcile.
-		var ds apps_v1.DaemonSet
-		return deleteManagedResource(ctx, r.Client, key, &ds)
-	}
-
-	level.Info(l).Log("msg", "reconciling logs daemonset", "ds", key)
-	err = clientutil.CreateOrUpdateDaemonSet(ctx, r.Client, ds, l)
-	if err != nil {
-		return fmt.Errorf("failed to reconcile logs DaemonSet: %w", err)
-	}
-	return nil
-}
diff --git a/internal/static/operator/reconciler_metrics.go b/internal/static/operator/reconciler_metrics.go
deleted file mode 100644
index 86b43c11da..0000000000
--- a/internal/static/operator/reconciler_metrics.go
+++ /dev/null
@@ -1,207 +0,0 @@
-package operator
-
-import (
-	"bytes"
-	"compress/gzip"
-	"context"
-	"errors"
-	"fmt"
-	"os"
-
-	"github.com/go-kit/log"
-	"github.com/go-kit/log/level"
-	"github.com/google/go-jsonnet"
-	apps_v1 "k8s.io/api/apps/v1"
-	core_v1 "k8s.io/api/core/v1"
-	v1 "k8s.io/apimachinery/pkg/apis/meta/v1"
-	"k8s.io/apimachinery/pkg/labels"
-	"k8s.io/apimachinery/pkg/types"
-	"k8s.io/utils/ptr"
-	"sigs.k8s.io/controller-runtime/pkg/client"
-
-	gragent "github.com/grafana/agent/internal/static/operator/apis/monitoring/v1alpha1"
-	"github.com/grafana/agent/internal/static/operator/clientutil"
-	"github.com/grafana/agent/internal/static/operator/config"
-)
-
-// createMetricsConfigurationSecret creates the Grafana Agent metrics configuration and stores
-// it into a secret.
-func (r *reconciler) createMetricsConfigurationSecret(
-	ctx context.Context,
-	l log.Logger,
-	d gragent.Deployment,
-) error {
-
-	name := fmt.Sprintf("%s-config", d.Agent.Name)
-	return r.createTelemetryConfigurationSecret(ctx, l, name, d, config.MetricsType)
-}
-
-func (r *reconciler) createTelemetryConfigurationSecret(
-	ctx context.Context,
-	l log.Logger,
-	name string,
-	d gragent.Deployment,
-	ty config.Type,
-) error {
-
-	key := types.NamespacedName{
-		Namespace: d.Agent.Namespace,
-		Name:      name,
-	}
-
-	var shouldCreate bool
-	switch ty {
-	case config.MetricsType:
-		shouldCreate = len(d.Metrics) > 0
-	case config.LogsType:
-		shouldCreate = len(d.Logs) > 0
-	case config.IntegrationsType:
-		shouldCreate = len(d.Integrations) > 0
-	default:
-		return fmt.Errorf("unknown telemetry type %s", ty)
-	}
-
-	// Delete the old Secret if one exists and we have nothing to create.
-	if !shouldCreate {
-		var secret core_v1.Secret
-		return deleteManagedResource(ctx, r.Client, key, &secret)
-	}
-
-	rawConfig, err := config.BuildConfig(&d, ty)
-
-	var jsonnetError jsonnet.RuntimeError
-	if errors.As(err, &jsonnetError) {
-		// Dump Jsonnet errors to the console to retain newlines and make them
-		// easier to digest.
- fmt.Fprintf(os.Stderr, "%s", jsonnetError.Error()) - } - if err != nil { - return fmt.Errorf("unable to build config: %w", err) - } - - const maxUncompressed = 100 * 1024 // only compress secrets over 100kB - rawBytes := []byte(rawConfig) - if len(rawBytes) > maxUncompressed { - buf := &bytes.Buffer{} - w := gzip.NewWriter(buf) - if _, err = w.Write(rawBytes); err != nil { - return fmt.Errorf("unable to compress config: %w", err) - } - if err = w.Close(); err != nil { - return fmt.Errorf("closing gzip writer: %w", err) - } - rawBytes = buf.Bytes() - } - - secret := core_v1.Secret{ - ObjectMeta: v1.ObjectMeta{ - Namespace: key.Namespace, - Name: key.Name, - Labels: r.config.Labels.Merge(managedByOperatorLabels), - OwnerReferences: []v1.OwnerReference{{ - APIVersion: d.Agent.APIVersion, - BlockOwnerDeletion: ptr.To(true), - Kind: d.Agent.Kind, - Name: d.Agent.Name, - UID: d.Agent.UID, - }}, - }, - Data: map[string][]byte{"agent.yml": rawBytes}, - } - - level.Info(l).Log("msg", "reconciling secret", "secret", secret.Name) - err = clientutil.CreateOrUpdateSecret(ctx, r.Client, &secret) - if err != nil { - return fmt.Errorf("failed to reconcile secret: %w", err) - } - return nil -} - -// createMetricsGoverningService creates the service that governs the (eventual) -// StatefulSet. It must be created before the StatefulSet. -func (r *reconciler) createMetricsGoverningService( - ctx context.Context, - l log.Logger, - d gragent.Deployment, -) error { - - svc := generateMetricsStatefulSetService(r.config, d) - - // Delete the old Service if one exists and we have no prometheus instances. - if len(d.Metrics) == 0 { - var service core_v1.Service - key := types.NamespacedName{Namespace: svc.Namespace, Name: svc.Name} - return deleteManagedResource(ctx, r.Client, key, &service) - } - - level.Info(l).Log("msg", "reconciling statefulset service", "service", svc.Name) - err := clientutil.CreateOrUpdateService(ctx, r.Client, svc) - if err != nil { - return fmt.Errorf("failed to reconcile statefulset governing service: %w", err) - } - return nil -} - -// createMetricsStatefulSets creates a set of Grafana Agent StatefulSets, one per shard. -func (r *reconciler) createMetricsStatefulSets( - ctx context.Context, - l log.Logger, - d gragent.Deployment, -) error { - - shards := minShards - if reqShards := d.Agent.Spec.Metrics.Shards; reqShards != nil && *reqShards > 1 { - shards = *reqShards - } - - // Keep track of generated stateful sets so we can delete ones that should - // no longer exist. - generated := make(map[string]struct{}) - - for shard := int32(0); shard < shards; shard++ { - // Don't generate anything if there weren't any instances. - if len(d.Metrics) == 0 { - continue - } - - name := d.Agent.Name - if shard > 0 { - name = fmt.Sprintf("%s-shard-%d", name, shard) - } - - ss, err := generateMetricsStatefulSet(r.config, name, d, shard) - if err != nil { - return fmt.Errorf("failed to generate statefulset for shard: %w", err) - } - - level.Info(l).Log("msg", "reconciling statefulset", "statefulset", ss.Name) - err = clientutil.CreateOrUpdateStatefulSet(ctx, r.Client, ss, l) - if err != nil { - return fmt.Errorf("failed to reconcile statefulset for shard: %w", err) - } - generated[ss.Name] = struct{}{} - } - - // Clean up statefulsets that should no longer exist. 
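-	// For example, with shards=3 the kept names are {name, name-shard-1,
-	// name-shard-2}; any other managed StatefulSet labeled with this agent's
-	// name is stale and removed below.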
- var statefulSets apps_v1.StatefulSetList - err := r.List(ctx, &statefulSets, &client.ListOptions{ - LabelSelector: labels.SelectorFromSet(labels.Set{ - managedByOperatorLabel: managedByOperatorLabelValue, - agentNameLabelName: d.Agent.Name, - }), - }) - if err != nil { - return fmt.Errorf("failed to list statefulsets: %w", err) - } - for _, ss := range statefulSets.Items { - if _, keep := generated[ss.Name]; keep || !isManagedResource(&ss) { - continue - } - level.Info(l).Log("msg", "deleting stale statefulset", "name", ss.Name) - if err := r.Delete(ctx, &ss); err != nil { - return fmt.Errorf("failed to delete stale statefulset %s: %w", ss.Name, err) - } - } - - return nil -} diff --git a/internal/static/operator/resources_integrations.go b/internal/static/operator/resources_integrations.go deleted file mode 100644 index f1520ff3b1..0000000000 --- a/internal/static/operator/resources_integrations.go +++ /dev/null @@ -1,237 +0,0 @@ -package operator - -import ( - "fmt" - - gragent "github.com/grafana/agent/internal/static/operator/apis/monitoring/v1alpha1" - "github.com/grafana/agent/internal/static/operator/assets" - "github.com/grafana/agent/internal/static/operator/config" - apps_v1 "k8s.io/api/apps/v1" - core_v1 "k8s.io/api/core/v1" -) - -func newIntegrationsDaemonSet(cfg *Config, name string, d gragent.Deployment) (*apps_v1.DaemonSet, error) { - opts := integrationsPodTemplateOptions(name, d, true) - tmpl, selector, err := generatePodTemplate(cfg, name, d, opts) - if err != nil { - return nil, err - } - - spec := apps_v1.DaemonSetSpec{ - UpdateStrategy: apps_v1.DaemonSetUpdateStrategy{ - Type: apps_v1.RollingUpdateDaemonSetStrategyType, - }, - Selector: selector, - Template: tmpl, - } - - return &apps_v1.DaemonSet{ - ObjectMeta: metadataFromPodTemplate(name, d, tmpl), - Spec: spec, - }, nil -} - -func newIntegrationsDeployment(cfg *Config, name string, d gragent.Deployment) (*apps_v1.Deployment, error) { - opts := integrationsPodTemplateOptions(name, d, false) - tmpl, selector, err := generatePodTemplate(cfg, name, d, opts) - if err != nil { - return nil, err - } - - spec := apps_v1.DeploymentSpec{ - Strategy: apps_v1.DeploymentStrategy{ - Type: apps_v1.RollingUpdateDeploymentStrategyType, - }, - Selector: selector, - Template: tmpl, - } - - return &apps_v1.Deployment{ - ObjectMeta: metadataFromPodTemplate(name, d, tmpl), - Spec: spec, - }, nil -} - -func integrationsPodTemplateOptions(name string, d gragent.Deployment, daemonset bool) podTemplateOptions { - // Integrations expect that the metrics and logs instances exist. This means - // that we have to merge the podTemplateOptions used for metrics and logs - // with the options used for integrations. - - // Since integrations may be running as a DaemonSet, it's not possible for us - // to rely on a PVC template that metrics might be using. We'll force the WAL - // to use an empty volume. - d.Agent.Spec.Storage = nil - - integrationOpts := podTemplateOptions{ - ExtraSelectorLabels: map[string]string{ - agentTypeLabel: "integrations", - }, - Privileged: daemonset, - } - - // We need to iterate over all of our integrations to append extra Volumes, - // VolumesMounts, and references to Secrets or ConfigMaps from the resource - // hierarchy. 
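-	// Volume names are prefixed with "<namespace>-<name>-" below, so a volume
-	// "certs" from Integration default/redis becomes "default-redis-certs" and
-	// cannot collide with a same-named volume from another Integration CR.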
- var ( - secretsPaths []core_v1.KeyToPath - mountedKeys = map[assets.Key]struct{}{} - ) - - for _, i := range d.Integrations { - inst := i.Instance - volumePrefix := fmt.Sprintf("%s-%s-", inst.Namespace, inst.Name) - - for _, v := range inst.Spec.Volumes { - // Prefix the key of the Integration CR so it doesn't potentially collide - // with other loaded Integration CRs. - v = *v.DeepCopy() - v.Name = volumePrefix + v.Name - - integrationOpts.ExtraVolumes = append(integrationOpts.ExtraVolumes, v) - } - for _, vm := range inst.Spec.VolumeMounts { - // Prefix the key of the Integration CR so it doesn't potentially collide - // with other loaded Integration CRs. - vm = *vm.DeepCopy() - vm.Name = volumePrefix + vm.Name - - integrationOpts.ExtraVolumeMounts = append(integrationOpts.ExtraVolumeMounts, vm) - } - - for _, s := range inst.Spec.Secrets { - // We need to determine what the value for this Secret was in the shared - // Secret resource. - key := assets.KeyForSecret(inst.Namespace, &s) - if _, mounted := mountedKeys[key]; mounted { - continue - } - mountedKeys[key] = struct{}{} - - secretsPaths = append(secretsPaths, core_v1.KeyToPath{ - Key: config.SanitizeLabelName(string(key)), - Path: fmt.Sprintf("secrets/%s/%s/%s", inst.Namespace, s.Name, s.Key), - }) - } - - for _, cm := range inst.Spec.ConfigMaps { - // We need to determine what the value for this ConfigMap was in the shared - // Secret resource. - key := assets.KeyForConfigMap(inst.Namespace, &cm) - if _, mounted := mountedKeys[key]; mounted { - continue - } - mountedKeys[key] = struct{}{} - - secretsPaths = append(secretsPaths, core_v1.KeyToPath{ - Key: config.SanitizeLabelName(string(key)), - Path: fmt.Sprintf("configMaps/%s/%s/%s", inst.Namespace, cm.Name, cm.Key), - }) - } - } - - if len(secretsPaths) > 0 { - // Load in references to Secrets and ConfigMaps. - integrationSecretsName := fmt.Sprintf("%s-integrations-secrets", d.Agent.Name) - - integrationOpts.ExtraVolumes = append(integrationOpts.ExtraVolumes, core_v1.Volume{ - Name: integrationSecretsName, - VolumeSource: core_v1.VolumeSource{ - Secret: &core_v1.SecretVolumeSource{ - // The reconcile-wide Secret holds all secrets and config maps - // integrations may have used. - SecretName: fmt.Sprintf("%s-secrets", d.Agent.Name), - Items: secretsPaths, - }, - }, - }) - - integrationOpts.ExtraVolumeMounts = append(integrationOpts.ExtraVolumeMounts, core_v1.VolumeMount{ - Name: integrationSecretsName, - MountPath: "/etc/grafana-agent/integrations", - ReadOnly: true, - }) - } - - // Extra options to merge in. - // - // NOTE(rfratto): Merge order is important, as subsequent podTemplateOptions - // have placeholders necessary to generate configs. - var ( - metricsOpts = metricsPodTemplateOptions(name, d, 0) - logsOpts = logsPodTemplateOptions() - ) - return mergePodTemplateOptions(&integrationOpts, &metricsOpts, &logsOpts) -} - -// mergePodTemplateOptions merges the provided inputs into a single -// podTemplateOptions. Precedence for existing values is taken in input order; -// if an environment variable is defined in both inputs[0] and inputs[1], the -// value from inputs[0] is used. -func mergePodTemplateOptions(inputs ...*podTemplateOptions) podTemplateOptions { - res := podTemplateOptions{ - ExtraSelectorLabels: make(map[string]string), - } - - // Volumes are unique by both mount path or name. If a mount path already - // exists, we want to ignore that volume and the respective volume mount - // that uses it. 
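-	// For example, if inputs[0] and inputs[1] both mount something at
-	// /var/lib/grafana-agent/data, only the mount and backing volume from
-	// inputs[0] survive, matching the input-order precedence described above.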
- - var ( - mountNames = map[string]struct{}{} // Consumed mount names - mountPaths = map[string]struct{}{} // Consumed mount paths - volumeNames = map[string]struct{}{} // Consumed volume names - varNames = map[string]struct{}{} // Consumed variable names - ) - - for _, input := range inputs { - for k, v := range input.ExtraSelectorLabels { - if _, exist := res.ExtraSelectorLabels[k]; exist { - continue - } - res.ExtraSelectorLabels[k] = v - } - - // Merge in VolumeMounts before Volumes, allowing us to detect what volume - // names specific to this input should be ignored. - ignoreVolumes := map[string]struct{}{} - - for _, vm := range input.ExtraVolumeMounts { - // Ignore a volume if the mount path or volume name already exists. - var ( - _, exists = mountNames[vm.Name] - _, mounted = mountPaths[vm.MountPath] - ) - if exists || mounted { - ignoreVolumes[vm.Name] = struct{}{} - continue - } - - res.ExtraVolumeMounts = append(res.ExtraVolumeMounts, vm) - mountNames[vm.Name] = struct{}{} - mountPaths[vm.MountPath] = struct{}{} - } - - // Merge in volumes that haven't been ignored or have a unique name. - for _, v := range input.ExtraVolumes { - if _, ignored := ignoreVolumes[v.Name]; ignored { - continue - } else if _, exists := volumeNames[v.Name]; exists { - continue - } - - res.ExtraVolumes = append(res.ExtraVolumes, v) - volumeNames[v.Name] = struct{}{} - } - - for _, ev := range input.ExtraEnvVars { - if _, exists := varNames[ev.Name]; exists { - continue - } - - res.ExtraEnvVars = append(res.ExtraEnvVars, ev) - varNames[ev.Name] = struct{}{} - } - } - - return res -} diff --git a/internal/static/operator/resources_logs.go b/internal/static/operator/resources_logs.go deleted file mode 100644 index 5edf4c7860..0000000000 --- a/internal/static/operator/resources_logs.go +++ /dev/null @@ -1,95 +0,0 @@ -package operator - -import ( - gragent "github.com/grafana/agent/internal/static/operator/apis/monitoring/v1alpha1" - apps_v1 "k8s.io/api/apps/v1" - v1 "k8s.io/api/core/v1" -) - -func generateLogsDaemonSet( - cfg *Config, - name string, - d gragent.Deployment, -) (*apps_v1.DaemonSet, error) { - - d = *(&d).DeepCopy() - - opts := logsPodTemplateOptions() - tmpl, selector, err := generatePodTemplate(cfg, name, d, opts) - if err != nil { - return nil, err - } - - spec := apps_v1.DaemonSetSpec{ - UpdateStrategy: apps_v1.DaemonSetUpdateStrategy{ - Type: apps_v1.RollingUpdateDaemonSetStrategyType, - }, - Selector: selector, - Template: tmpl, - } - - return &apps_v1.DaemonSet{ - ObjectMeta: metadataFromPodTemplate(name, d, tmpl), - Spec: spec, - }, nil -} - -func logsPodTemplateOptions() podTemplateOptions { - return podTemplateOptions{ - Privileged: true, - ExtraSelectorLabels: map[string]string{ - agentTypeLabel: "logs", - }, - ExtraVolumes: []v1.Volume{ - { - Name: "varlog", - VolumeSource: v1.VolumeSource{ - HostPath: &v1.HostPathVolumeSource{Path: "/var/log"}, - }, - }, - { - // Needed for docker. Kubernetes will symlink to this directory. For CRI - // platforms, this doesn't change anything. - Name: "dockerlogs", - VolumeSource: v1.VolumeSource{ - HostPath: &v1.HostPathVolumeSource{Path: "/var/lib/docker/containers"}, - }, - }, - { - // Needed for storing positions for recovery. 
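-				// A restarted pod re-reads these positions and resumes tailing
-				// from where it left off instead of re-shipping entire files.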
- Name: "data", - VolumeSource: v1.VolumeSource{ - HostPath: &v1.HostPathVolumeSource{Path: "/var/lib/grafana-agent/data"}, - }, - }, - }, - ExtraVolumeMounts: []v1.VolumeMount{ - { - Name: "varlog", - ReadOnly: true, - MountPath: "/var/log", - }, { - Name: "dockerlogs", - ReadOnly: true, - MountPath: "/var/lib/docker/containers", - }, { - Name: "data", - MountPath: "/var/lib/grafana-agent/data", - }, - }, - ExtraEnvVars: []v1.EnvVar{ - { - Name: "HOSTNAME", - ValueFrom: &v1.EnvVarSource{ - FieldRef: &v1.ObjectFieldSelector{FieldPath: "spec.nodeName"}, - }, - }, - { - // Not used anywhere for logs but passed to the config-reloader since it - // expects everything is coming from a StatefulSet. - Name: "SHARD", - Value: "0", - }, - }, - } -} diff --git a/internal/static/operator/resources_metrics.go b/internal/static/operator/resources_metrics.go deleted file mode 100644 index f3facae949..0000000000 --- a/internal/static/operator/resources_metrics.go +++ /dev/null @@ -1,254 +0,0 @@ -package operator - -import ( - "context" - "fmt" - "strings" - - prom_operator "github.com/prometheus-operator/prometheus-operator/pkg/operator" - apps_v1 "k8s.io/api/apps/v1" - core_v1 "k8s.io/api/core/v1" - k8s_errors "k8s.io/apimachinery/pkg/api/errors" - meta_v1 "k8s.io/apimachinery/pkg/apis/meta/v1" - "k8s.io/apimachinery/pkg/util/intstr" - "k8s.io/utils/ptr" - "sigs.k8s.io/controller-runtime/pkg/client" - - gragent "github.com/grafana/agent/internal/static/operator/apis/monitoring/v1alpha1" -) - -const ( - defaultPortName = "http-metrics" -) - -var ( - minShards int32 = 1 - minReplicas int32 = 1 - managedByOperatorLabel = "app.kubernetes.io/managed-by" - managedByOperatorLabelValue = "grafana-agent-operator" - versionLabelName = "app.kubernetes.io/version" - managedByOperatorLabels = map[string]string{ - managedByOperatorLabel: managedByOperatorLabelValue, - } - shardLabelName = "operator.agent.grafana.com/shard" - agentNameLabelName = "operator.agent.grafana.com/name" - agentTypeLabel = "operator.agent.grafana.com/type" - probeTimeoutSeconds int32 = 3 -) - -// deleteManagedResource deletes a managed resource. Ignores resources that are -// not managed. -func deleteManagedResource(ctx context.Context, cli client.Client, key client.ObjectKey, o client.Object) error { - err := cli.Get(ctx, key, o) - if k8s_errors.IsNotFound(err) || !isManagedResource(o) { - return nil - } else if err != nil { - return fmt.Errorf("failed to find stale resource %s: %w", key, err) - } - err = cli.Delete(ctx, o) - if err != nil { - return fmt.Errorf("failed to delete stale resource %s: %w", key, err) - } - return nil -} - -// isManagedResource returns true if the given object has a managed-by -// grafana-agent-operator label. 
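-// deleteManagedResource above relies on this check, so objects the operator
-// did not create are never deleted.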
-func isManagedResource(obj client.Object) bool { - labelValue := obj.GetLabels()[managedByOperatorLabel] - return labelValue == managedByOperatorLabelValue -} - -func governingServiceName(agentName string) string { - return fmt.Sprintf("%s-operated", agentName) -} - -func generateMetricsStatefulSetService(cfg *Config, d gragent.Deployment) *core_v1.Service { - d = *d.DeepCopy() - - if d.Agent.Spec.PortName == "" { - d.Agent.Spec.PortName = defaultPortName - } - - return &core_v1.Service{ - ObjectMeta: meta_v1.ObjectMeta{ - Name: governingServiceName(d.Agent.Name), - Namespace: d.Agent.Namespace, - OwnerReferences: []meta_v1.OwnerReference{{ - APIVersion: d.Agent.APIVersion, - Kind: d.Agent.Kind, - Name: d.Agent.Name, - BlockOwnerDeletion: ptr.To(true), - Controller: ptr.To(true), - UID: d.Agent.UID, - }}, - Labels: cfg.Labels.Merge(map[string]string{ - managedByOperatorLabel: managedByOperatorLabelValue, - agentNameLabelName: d.Agent.Name, - "operated-agent": "true", - }), - }, - Spec: core_v1.ServiceSpec{ - ClusterIP: "None", - Ports: []core_v1.ServicePort{{ - Name: d.Agent.Spec.PortName, - Port: 8080, - TargetPort: intstr.FromString(d.Agent.Spec.PortName), - }}, - Selector: map[string]string{ - "app.kubernetes.io/name": "grafana-agent", - agentNameLabelName: d.Agent.Name, - }, - }, - } -} - -func generateMetricsStatefulSet( - cfg *Config, - name string, - d gragent.Deployment, - shard int32, -) (*apps_v1.StatefulSet, error) { - - d = *d.DeepCopy() - - opts := metricsPodTemplateOptions(name, d, shard) - templateSpec, selector, err := generatePodTemplate(cfg, d.Agent.Name, d, opts) - if err != nil { - return nil, err - } - - spec := &apps_v1.StatefulSetSpec{ - ServiceName: governingServiceName(d.Agent.Name), - Replicas: d.Agent.Spec.Metrics.Replicas, - PodManagementPolicy: apps_v1.ParallelPodManagement, - UpdateStrategy: apps_v1.StatefulSetUpdateStrategy{ - Type: apps_v1.RollingUpdateStatefulSetStrategyType, - }, - Selector: selector, - Template: templateSpec, - } - - ss := &apps_v1.StatefulSet{ - ObjectMeta: metadataFromPodTemplate(name, d, templateSpec), - Spec: *spec, - } - - if deploymentUseVolumeClaimTemplate(&d) { - storageSpec := d.Agent.Spec.Storage - pvcTemplate := prom_operator.MakeVolumeClaimTemplate(storageSpec.VolumeClaimTemplate) - if pvcTemplate.Name == "" { - pvcTemplate.Name = fmt.Sprintf("%s-wal", name) - } - if storageSpec.VolumeClaimTemplate.Spec.AccessModes == nil { - pvcTemplate.Spec.AccessModes = []core_v1.PersistentVolumeAccessMode{core_v1.ReadWriteOnce} - } else { - pvcTemplate.Spec.AccessModes = storageSpec.VolumeClaimTemplate.Spec.AccessModes - } - pvcTemplate.Spec.Resources = storageSpec.VolumeClaimTemplate.Spec.Resources - pvcTemplate.Spec.Selector = storageSpec.VolumeClaimTemplate.Spec.Selector - ss.Spec.VolumeClaimTemplates = append(ss.Spec.VolumeClaimTemplates, *pvcTemplate) - } - - return ss, nil -} - -func deploymentUseVolumeClaimTemplate(d *gragent.Deployment) bool { - return d.Agent.Spec.Storage != nil && d.Agent.Spec.Storage.EmptyDir == nil -} - -func metricsPodTemplateOptions(name string, d gragent.Deployment, shard int32) podTemplateOptions { - shards := minShards - if reqShards := d.Agent.Spec.Metrics.Shards; reqShards != nil && *reqShards > 1 { - shards = *reqShards - } - - walVolumeName := fmt.Sprintf("%s-wal", name) - if d.Agent.Spec.Storage != nil { - if d.Agent.Spec.Storage.VolumeClaimTemplate.Name != "" { - walVolumeName = d.Agent.Spec.Storage.VolumeClaimTemplate.Name - } - } - - opts := podTemplateOptions{ - ExtraSelectorLabels: 
map[string]string{ - shardLabelName: fmt.Sprintf("%d", shard), - agentTypeLabel: "metrics", - }, - ExtraVolumeMounts: []core_v1.VolumeMount{{ - Name: walVolumeName, - ReadOnly: false, - MountPath: "/var/lib/grafana-agent/data", - }}, - ExtraEnvVars: []core_v1.EnvVar{ - { - Name: "SHARD", - Value: fmt.Sprintf("%d", shard), - }, - { - Name: "SHARDS", - Value: fmt.Sprintf("%d", shards), - }, - }, - } - - // Add volumes if there's no PVC template - storageSpec := d.Agent.Spec.Storage - if storageSpec == nil { - opts.ExtraVolumes = append(opts.ExtraVolumes, core_v1.Volume{ - Name: walVolumeName, - VolumeSource: core_v1.VolumeSource{ - EmptyDir: &core_v1.EmptyDirVolumeSource{}, - }, - }) - } else if storageSpec.EmptyDir != nil { - emptyDir := storageSpec.EmptyDir - opts.ExtraVolumes = append(opts.ExtraVolumes, core_v1.Volume{ - Name: walVolumeName, - VolumeSource: core_v1.VolumeSource{ - EmptyDir: emptyDir, - }, - }) - } - - return opts -} - -func metadataFromPodTemplate(name string, d gragent.Deployment, tmpl core_v1.PodTemplateSpec) meta_v1.ObjectMeta { - labels := make(map[string]string, len(tmpl.Labels)) - for k, v := range tmpl.Labels { - // do not put version label on the statefulset, as that will prevent us from updating it - // in the future. Statefulset labels are immutable. - if k != versionLabelName { - labels[k] = v - } - } - return meta_v1.ObjectMeta{ - Name: name, - Namespace: d.Agent.Namespace, - Labels: labels, - Annotations: prepareAnnotations(d.Agent.Annotations), - OwnerReferences: []meta_v1.OwnerReference{{ - APIVersion: d.Agent.APIVersion, - Kind: d.Agent.Kind, - BlockOwnerDeletion: ptr.To(true), - Controller: ptr.To(true), - Name: d.Agent.Name, - UID: d.Agent.UID, - }}, - } -} - -// prepareAnnotations returns annotations that are safe to be added to a -// generated resource. -func prepareAnnotations(source map[string]string) map[string]string { - res := make(map[string]string, len(source)) - for k, v := range source { - // Ignore kubectl annotations so kubectl doesn't prune the resource we - // generated. 
- if !strings.HasPrefix(k, "kubectl.kubernetes.io/") { - res[k] = v - } - } - return res -} diff --git a/internal/static/operator/resources_metrics_test.go b/internal/static/operator/resources_metrics_test.go deleted file mode 100644 index 5041d70463..0000000000 --- a/internal/static/operator/resources_metrics_test.go +++ /dev/null @@ -1,25 +0,0 @@ -package operator - -import ( - "testing" - - gragent "github.com/grafana/agent/internal/static/operator/apis/monitoring/v1alpha1" - "github.com/stretchr/testify/require" - core_v1 "k8s.io/api/core/v1" - v1 "k8s.io/apimachinery/pkg/apis/meta/v1" -) - -func TestMetadataFromPodTemplate(t *testing.T) { - t.Run("Should not include version label in statefulset metadata", func(t *testing.T) { - meta := metadataFromPodTemplate("foo", - gragent.Deployment{ - Agent: &gragent.GrafanaAgent{}, - }, - core_v1.PodTemplateSpec{ - ObjectMeta: v1.ObjectMeta{ - Labels: map[string]string{versionLabelName: "v1.2.3"}, - }, - }) - require.NotContains(t, meta.Labels, versionLabelName) - }) -} diff --git a/internal/static/operator/resources_pod_template.go b/internal/static/operator/resources_pod_template.go deleted file mode 100644 index 322d6c6c42..0000000000 --- a/internal/static/operator/resources_pod_template.go +++ /dev/null @@ -1,319 +0,0 @@ -package operator - -import ( - "fmt" - "path" - - core_v1 "k8s.io/api/core/v1" - meta_v1 "k8s.io/apimachinery/pkg/apis/meta/v1" - "k8s.io/apimachinery/pkg/util/intstr" - "k8s.io/utils/ptr" - - "github.com/grafana/agent/internal/build" - gragent "github.com/grafana/agent/internal/static/operator/apis/monitoring/v1alpha1" - "github.com/grafana/agent/internal/static/operator/clientutil" -) - -type podTemplateOptions struct { - ExtraSelectorLabels map[string]string - ExtraVolumes []core_v1.Volume - ExtraVolumeMounts []core_v1.VolumeMount - ExtraEnvVars []core_v1.EnvVar - Privileged bool -} - -func generatePodTemplate( - cfg *Config, - name string, - d gragent.Deployment, - opts podTemplateOptions, -) (core_v1.PodTemplateSpec, *meta_v1.LabelSelector, error) { - - // generatePodTemplate assumes that the deployment has default values applied - // to it. - applyDeploymentDefaults(&d) - - useVersion := d.Agent.Spec.Version - if useVersion == "" { - useVersion = DefaultAgentVersion - } - imagePath := fmt.Sprintf("%s:%s", DefaultAgentBaseImage, useVersion) - if d.Agent.Spec.Image != nil && *d.Agent.Spec.Image != "" { - imagePath = *d.Agent.Spec.Image - } - - agentArgs := []string{ - "-config.file=/var/lib/grafana-agent/config/agent.yml", - "-config.expand-env=true", - "-server.http.address=0.0.0.0:8080", - "-enable-features=integrations-next", - } - - enableConfigReadAPI := d.Agent.Spec.EnableConfigReadAPI - if enableConfigReadAPI { - agentArgs = append(agentArgs, "-config.enable-read-api") - } - - disableReporting := d.Agent.Spec.DisableReporting - if disableReporting { - agentArgs = append(agentArgs, "-disable-reporting") - } - - if d.Agent.Spec.DisableSupportBundle { - agentArgs = append(agentArgs, "-disable-support-bundle") - } - - // NOTE(rfratto): the Prometheus Operator supports a ListenLocal to prevent a - // service from being created. Given the intent is that Agents can connect to - // each other, ListenLocal isn't currently supported and we always create a - // port. 
- ports := []core_v1.ContainerPort{{ - Name: d.Agent.Spec.PortName, - ContainerPort: 8080, - Protocol: core_v1.ProtocolTCP, - }} - - volumes := []core_v1.Volume{ - { - Name: "config", - VolumeSource: core_v1.VolumeSource{ - Secret: &core_v1.SecretVolumeSource{ - SecretName: fmt.Sprintf("%s-config", name), - }, - }, - }, - { - // We need a separate volume for storing the rendered config with - // environment variables replaced. While the Agent supports environment - // variable substitution, the value for __replica__ can only be - // determined at runtime. We use a dedicated container for both config - // reloading and rendering. - Name: "config-out", - VolumeSource: core_v1.VolumeSource{ - EmptyDir: &core_v1.EmptyDirVolumeSource{}, - }, - }, - { - Name: "secrets", - VolumeSource: core_v1.VolumeSource{ - Secret: &core_v1.SecretVolumeSource{ - SecretName: fmt.Sprintf("%s-secrets", d.Agent.Name), - }, - }, - }, - } - volumes = append(volumes, opts.ExtraVolumes...) - volumes = append(volumes, d.Agent.Spec.Volumes...) - - volumeMounts := []core_v1.VolumeMount{ - { - Name: "config", - ReadOnly: true, - MountPath: "/var/lib/grafana-agent/config-in", - }, - { - Name: "config-out", - MountPath: "/var/lib/grafana-agent/config", - }, - { - Name: "secrets", - ReadOnly: true, - MountPath: "/var/lib/grafana-agent/secrets", - }, - } - volumeMounts = append(volumeMounts, opts.ExtraVolumeMounts...) - volumeMounts = append(volumeMounts, d.Agent.Spec.VolumeMounts...) - - for _, s := range d.Agent.Spec.Secrets { - volumes = append(volumes, core_v1.Volume{ - Name: clientutil.SanitizeVolumeName("secret-" + s), - VolumeSource: core_v1.VolumeSource{ - Secret: &core_v1.SecretVolumeSource{SecretName: s}, - }, - }) - volumeMounts = append(volumeMounts, core_v1.VolumeMount{ - Name: clientutil.SanitizeVolumeName("secret-" + s), - ReadOnly: true, - MountPath: path.Join("/var/lib/grafana-agent/extra-secrets", s), - }) - } - - for _, c := range d.Agent.Spec.ConfigMaps { - volumes = append(volumes, core_v1.Volume{ - Name: clientutil.SanitizeVolumeName("configmap-" + c), - VolumeSource: core_v1.VolumeSource{ - ConfigMap: &core_v1.ConfigMapVolumeSource{ - LocalObjectReference: core_v1.LocalObjectReference{Name: c}, - }, - }, - }) - volumeMounts = append(volumeMounts, core_v1.VolumeMount{ - Name: clientutil.SanitizeVolumeName("configmap-" + c), - ReadOnly: true, - MountPath: path.Join("/var/lib/grafana-agent/extra-configmaps", c), - }) - } - - var ( - podAnnotations = map[string]string{} - podLabels = map[string]string{ - // version can be a pod label, but should not go in selectors - versionLabelName: clientutil.SanitizeVolumeName(build.Version), - } - podSelectorLabels = map[string]string{ - "app.kubernetes.io/name": "grafana-agent", - "app.kubernetes.io/instance": d.Agent.Name, - "grafana-agent": d.Agent.Name, - managedByOperatorLabel: managedByOperatorLabelValue, - agentNameLabelName: d.Agent.Name, - } - ) - for k, v := range opts.ExtraSelectorLabels { - podSelectorLabels[k] = v - } - - if d.Agent.Spec.PodMetadata != nil { - for k, v := range d.Agent.Spec.PodMetadata.Labels { - podLabels[k] = v - } - for k, v := range d.Agent.Spec.PodMetadata.Annotations { - podAnnotations[k] = v - } - } - for k, v := range podSelectorLabels { - podLabels[k] = v - } - - podAnnotations["kubectl.kubernetes.io/default-container"] = "grafana-agent" - - var ( - finalSelectorLabels = cfg.Labels.Merge(podSelectorLabels) - finalLabels = cfg.Labels.Merge(podLabels) - ) - - envVars := []core_v1.EnvVar{ - { - Name: "POD_NAME", - ValueFrom: 
&core_v1.EnvVarSource{ - FieldRef: &core_v1.ObjectFieldSelector{FieldPath: "metadata.name"}, - }, - }, - // Allows the agent to identify this is an operator-created pod. - { - Name: "AGENT_DEPLOY_MODE", - Value: "operator", - }, - } - envVars = append(envVars, opts.ExtraEnvVars...) - - useConfigReloaderVersion := d.Agent.Spec.ConfigReloaderVersion - if useConfigReloaderVersion == "" { - useConfigReloaderVersion = DefaultConfigReloaderVersion - } - imagePathConfigReloader := fmt.Sprintf("%s:%s", DefaultConfigReloaderBaseImage, useConfigReloaderVersion) - if d.Agent.Spec.ConfigReloaderImage != nil && *d.Agent.Spec.ConfigReloaderImage != "" { - imagePathConfigReloader = *d.Agent.Spec.ConfigReloaderImage - } - - boolFalse := false - boolTrue := true - operatorContainers := []core_v1.Container{ - { - Name: "config-reloader", - Image: imagePathConfigReloader, - VolumeMounts: volumeMounts, - Env: envVars, - SecurityContext: &core_v1.SecurityContext{ - AllowPrivilegeEscalation: &boolFalse, - ReadOnlyRootFilesystem: &boolTrue, - Capabilities: &core_v1.Capabilities{ - Drop: []core_v1.Capability{"ALL"}, - }, - }, - Args: []string{ - "--config-file=/var/lib/grafana-agent/config-in/agent.yml", - "--config-envsubst-file=/var/lib/grafana-agent/config/agent.yml", - - "--watch-interval=1m", - "--statefulset-ordinal-from-envvar=POD_NAME", - "--reload-url=http://127.0.0.1:8080/-/reload", - }, - }, - { - Name: "grafana-agent", - Image: imagePath, - Ports: ports, - Args: agentArgs, - VolumeMounts: volumeMounts, - Env: envVars, - ReadinessProbe: &core_v1.Probe{ - ProbeHandler: core_v1.ProbeHandler{ - HTTPGet: &core_v1.HTTPGetAction{ - Path: "/-/ready", - Port: intstr.FromString(d.Agent.Spec.PortName), - }, - }, - TimeoutSeconds: probeTimeoutSeconds, - PeriodSeconds: 5, - FailureThreshold: 120, // Allow up to 10m on startup for data recovery - }, - Resources: d.Agent.Spec.Resources, - SecurityContext: &core_v1.SecurityContext{ - Privileged: ptr.To(opts.Privileged), - }, - TerminationMessagePolicy: core_v1.TerminationMessageFallbackToLogsOnError, - }, - } - - containers, err := clientutil.MergePatchContainers(operatorContainers, d.Agent.Spec.Containers) - if err != nil { - return core_v1.PodTemplateSpec{}, nil, fmt.Errorf("failed to merge containers spec: %w", err) - } - - var pullSecrets []core_v1.LocalObjectReference - if len(d.Agent.Spec.ImagePullSecrets) > 0 { - pullSecrets = d.Agent.Spec.ImagePullSecrets - } - - template := core_v1.PodTemplateSpec{ - ObjectMeta: meta_v1.ObjectMeta{ - Labels: finalLabels, - Annotations: podAnnotations, - }, - Spec: core_v1.PodSpec{ - Containers: containers, - ImagePullSecrets: pullSecrets, - InitContainers: d.Agent.Spec.InitContainers, - SecurityContext: d.Agent.Spec.SecurityContext, - ServiceAccountName: d.Agent.Spec.ServiceAccountName, - NodeSelector: d.Agent.Spec.NodeSelector, - PriorityClassName: d.Agent.Spec.PriorityClassName, - RuntimeClassName: d.Agent.Spec.RuntimeClassName, - TerminationGracePeriodSeconds: ptr.To(int64(4800)), - Volumes: volumes, - Tolerations: d.Agent.Spec.Tolerations, - Affinity: d.Agent.Spec.Affinity, - TopologySpreadConstraints: d.Agent.Spec.TopologySpreadConstraints, - }, - } - return template, &meta_v1.LabelSelector{MatchLabels: finalSelectorLabels}, nil -} - -func applyDeploymentDefaults(d *gragent.Deployment) { - if d.Agent.Spec.Metrics.Replicas != nil && *d.Agent.Spec.Metrics.Replicas < 0 { - intZero := int32(0) - d.Agent.Spec.Metrics.Replicas = &intZero - } - - if d.Agent.Spec.Resources.Requests == nil { - d.Agent.Spec.Resources.Requests = 
core_v1.ResourceList{} - } - - if d.Agent.Spec.Metrics.Replicas == nil { - d.Agent.Spec.Metrics.Replicas = &minReplicas - } - - if d.Agent.Spec.PortName == "" { - d.Agent.Spec.PortName = defaultPortName - } -} diff --git a/internal/static/operator/resources_pod_template_test.go b/internal/static/operator/resources_pod_template_test.go deleted file mode 100644 index 89b84a36b2..0000000000 --- a/internal/static/operator/resources_pod_template_test.go +++ /dev/null @@ -1,171 +0,0 @@ -package operator - -import ( - "testing" - - gragent "github.com/grafana/agent/internal/static/operator/apis/monitoring/v1alpha1" - "github.com/stretchr/testify/assert" - "github.com/stretchr/testify/require" - v1 "k8s.io/apimachinery/pkg/apis/meta/v1" -) - -func Test_generatePodTemplate(t *testing.T) { - var ( - cfg = &Config{} - name = "example" - ) - - t.Run("image should have version", func(t *testing.T) { - deploy := gragent.Deployment{ - Agent: &gragent.GrafanaAgent{ - ObjectMeta: v1.ObjectMeta{Name: name, Namespace: name}, - }, - } - - tmpl, _, err := generatePodTemplate(cfg, "agent", deploy, podTemplateOptions{}) - require.NoError(t, err) - require.Equal(t, DefaultAgentImage, tmpl.Spec.Containers[1].Image) - }) - - t.Run("reloader image should have version", func(t *testing.T) { - deploy := gragent.Deployment{ - Agent: &gragent.GrafanaAgent{ - ObjectMeta: v1.ObjectMeta{Name: name, Namespace: name}, - }, - } - - tmpl, _, err := generatePodTemplate(cfg, "agent", deploy, podTemplateOptions{}) - require.NoError(t, err) - require.Equal(t, DefaultConfigReloaderImage, tmpl.Spec.Containers[0].Image) - }) - - t.Run("allow custom version", func(t *testing.T) { - deploy := gragent.Deployment{ - Agent: &gragent.GrafanaAgent{ - ObjectMeta: v1.ObjectMeta{Name: name, Namespace: name}, - Spec: gragent.GrafanaAgentSpec{ - Version: "vX.Y.Z", - }, - }, - } - - tmpl, _, err := generatePodTemplate(cfg, "agent", deploy, podTemplateOptions{}) - require.NoError(t, err) - require.Equal(t, DefaultAgentBaseImage+":vX.Y.Z", tmpl.Spec.Containers[1].Image) - }) - - t.Run("allow custom version for reloader", func(t *testing.T) { - deploy := gragent.Deployment{ - Agent: &gragent.GrafanaAgent{ - ObjectMeta: v1.ObjectMeta{Name: name, Namespace: name}, - Spec: gragent.GrafanaAgentSpec{ - ConfigReloaderVersion: "vX.Y.Z", - }, - }, - } - - tmpl, _, err := generatePodTemplate(cfg, "agent", deploy, podTemplateOptions{}) - require.NoError(t, err) - require.Equal(t, DefaultConfigReloaderBaseImage+":vX.Y.Z", tmpl.Spec.Containers[0].Image) - }) - - t.Run("does not set version label in spec selector", func(t *testing.T) { - deploy := gragent.Deployment{ - Agent: &gragent.GrafanaAgent{ - ObjectMeta: v1.ObjectMeta{Name: name, Namespace: name}, - }, - } - - tmpl, selectors, err := generatePodTemplate(cfg, "agent", deploy, podTemplateOptions{}) - require.NoError(t, err) - - // version label should not be set in selectors, since that is immutable - require.NotContains(t, selectors.MatchLabels, versionLabelName) - require.Contains(t, tmpl.ObjectMeta.Labels, versionLabelName) - }) - - t.Run("security ctx does not contain privileged", func(t *testing.T) { - deploy := gragent.Deployment{ - Agent: &gragent.GrafanaAgent{ - ObjectMeta: v1.ObjectMeta{Name: name, Namespace: name}, - }, - } - - tmpl, _, err := generatePodTemplate(cfg, "agent", deploy, podTemplateOptions{}) - require.NoError(t, err) - require.Equal(t, "config-reloader", tmpl.Spec.Containers[0].Name) - for i := range tmpl.Spec.Containers { - assert.False(t, 
tmpl.Spec.Containers[i].SecurityContext.Privileged != nil &&
-				*tmpl.Spec.Containers[i].SecurityContext.Privileged,
-				"privileged is not required. Fargate cannot schedule privileged containers.")
-			assert.False(t, tmpl.Spec.Containers[i].SecurityContext.RunAsUser != nil &&
-				*tmpl.Spec.Containers[i].SecurityContext.RunAsUser == int64(0),
-				"forcing the container to run as root is not required.")
-			assert.False(t, tmpl.Spec.Containers[i].SecurityContext.AllowPrivilegeEscalation != nil &&
-				*tmpl.Spec.Containers[i].SecurityContext.AllowPrivilegeEscalation,
-				"allowing privilege escalation is not required.")
-		}
-	})
-
-	t.Run("security ctx does contain privilege for logs daemonset", func(t *testing.T) {
-		deploy := gragent.Deployment{
-			Agent: &gragent.GrafanaAgent{
-				ObjectMeta: v1.ObjectMeta{Name: name, Namespace: name},
-			},
-		}
-
-		tmpl, _, err := generatePodTemplate(cfg, "agent", deploy, podTemplateOptions{Privileged: true})
-		require.NoError(t, err)
-		for i := range tmpl.Spec.Containers {
-			// only the grafana-agent container is supposed to be privileged
-			if tmpl.Spec.Containers[i].Name == "grafana-agent" {
-				assert.True(t, tmpl.Spec.Containers[i].SecurityContext.Privileged != nil &&
-					*tmpl.Spec.Containers[i].SecurityContext.Privileged,
-					"privileged is needed for grafana-agent if pod options say so.")
-			} else {
-				assert.False(t, tmpl.Spec.Containers[i].SecurityContext.Privileged != nil &&
-					*tmpl.Spec.Containers[i].SecurityContext.Privileged,
-					"privileged is not required for other containers.")
-				assert.False(t, tmpl.Spec.Containers[i].SecurityContext.RunAsUser != nil &&
-					*tmpl.Spec.Containers[i].SecurityContext.RunAsUser == int64(0),
-					"forcing the container to run as root is not required for other containers.")
-				assert.False(t, tmpl.Spec.Containers[i].SecurityContext.AllowPrivilegeEscalation != nil &&
-					*tmpl.Spec.Containers[i].SecurityContext.AllowPrivilegeEscalation,
-					"allowing privilege escalation is not required for other containers.")
-			}
-		}
-	})
-
-	t.Run("runtimeclassname set if passed in", func(t *testing.T) {
-		name := "test123"
-		deploy := gragent.Deployment{
-			Agent: &gragent.GrafanaAgent{
-				ObjectMeta: v1.ObjectMeta{Name: name, Namespace: name},
-				Spec: gragent.GrafanaAgentSpec{
-					RuntimeClassName: &name,
-				},
-			},
-		}
-		tmpl, _, err := generatePodTemplate(cfg, "agent", deploy, podTemplateOptions{})
-		require.NoError(t, err)
-		assert.Equal(t, name, *tmpl.Spec.RuntimeClassName)
-
-		deploy.Agent.Spec.RuntimeClassName = nil
-		tmpl, _, err = generatePodTemplate(cfg, "agent", deploy, podTemplateOptions{})
-		require.NoError(t, err)
-		assert.Nil(t, tmpl.Spec.RuntimeClassName)
-	})
-
-	t.Run("AGENT_DEPLOY_MODE env set", func(t *testing.T) {
-		deploy := gragent.Deployment{
-			Agent: &gragent.GrafanaAgent{
-				ObjectMeta: v1.ObjectMeta{Name: name, Namespace: name},
-			},
-		}
-
-		tmpl, _, err := generatePodTemplate(cfg, "agent", deploy, podTemplateOptions{})
-		require.NoError(t, err)
-		require.Equal(t, "operator", tmpl.Spec.Containers[1].Env[1].Value)
-		require.Equal(t, "AGENT_DEPLOY_MODE", tmpl.Spec.Containers[1].Env[1].Name)
-	})
-}
diff --git a/internal/static/operator/testdata/test-custom-mounts.in.yaml b/internal/static/operator/testdata/test-custom-mounts.in.yaml
deleted file mode 100644
index 3bf0aa28d9..0000000000
--- a/internal/static/operator/testdata/test-custom-mounts.in.yaml
+++ /dev/null
@@ -1,70 +0,0 @@
-apiVersion: monitoring.grafana.com/v1alpha1
-kind: GrafanaAgent
-metadata:
-  name: grafana-agent-example
-  namespace: default
-  labels:
-    app: grafana-agent-example
-spec:
- 
image: grafana/agent:latest - secrets: [extra-secret-a] - configMaps: [extra-configmap-a] - metrics: - instanceSelector: - matchLabels: - agent: grafana-agent-example - logs: - instanceSelector: - matchLabels: - agent: grafana-agent-example - ---- - -apiVersion: monitoring.grafana.com/v1alpha1 -kind: MetricsInstance -metadata: - name: primary - namespace: default - labels: - agent: grafana-agent-example -spec: - remoteWrite: - - url: http://prometheus.default.svc.cluster.local:9090/prometheus/api/v1/write - ---- - -apiVersion: monitoring.grafana.com/v1alpha1 -kind: LogsInstance -metadata: - name: primary - namespace: default - labels: - agent: grafana-agent-example -spec: - clients: - - url: http://loki:8080/loki/api/v1/push - -# -# Extra resources -# - ---- - -apiVersion: v1 -kind: Secret -metadata: - name: extra-secret-a - namespace: default -data: - # "user" - fakeUsername: "dXNlcg==" - ---- - -apiVersion: v1 -kind: ConfigMap -metadata: - name: extra-configmap-a - namespace: default -data: - greeting: "hello, world" diff --git a/internal/static/operator/testdata/test-custom-mounts.out.yaml b/internal/static/operator/testdata/test-custom-mounts.out.yaml deleted file mode 100644 index e0ea607594..0000000000 --- a/internal/static/operator/testdata/test-custom-mounts.out.yaml +++ /dev/null @@ -1,148 +0,0 @@ -# Resources to assert to exist when reconciling test-custom-mounts.in.yaml. - -apiVersion: apps/v1 -kind: StatefulSet -metadata: - name: grafana-agent-example - namespace: default -spec: - template: - spec: - containers: - - name: config-reloader - volumeMounts: - - name: config - readOnly: true - mountPath: /var/lib/grafana-agent/config-in - - name: config-out - mountPath: /var/lib/grafana-agent/config - - name: secrets - readOnly: true - mountPath: /var/lib/grafana-agent/secrets - - name: grafana-agent-example-wal - mountPath: /var/lib/grafana-agent/data - - name: secret-extra-secret-a - readOnly: true - mountPath: /var/lib/grafana-agent/extra-secrets/extra-secret-a - - name: configmap-extra-configmap-a - readOnly: true - mountPath: /var/lib/grafana-agent/extra-configmaps/extra-configmap-a - - name: grafana-agent - volumeMounts: - - name: config - readOnly: true - mountPath: /var/lib/grafana-agent/config-in - - name: config-out - mountPath: /var/lib/grafana-agent/config - - name: secrets - readOnly: true - mountPath: /var/lib/grafana-agent/secrets - - name: grafana-agent-example-wal - mountPath: /var/lib/grafana-agent/data - - name: secret-extra-secret-a - readOnly: true - mountPath: /var/lib/grafana-agent/extra-secrets/extra-secret-a - - name: configmap-extra-configmap-a - readOnly: true - mountPath: /var/lib/grafana-agent/extra-configmaps/extra-configmap-a - volumes: - - name: config - secret: - secretName: grafana-agent-example-config - - name: config-out - emptyDir: {} - - name: secrets - secret: - secretName: grafana-agent-example-secrets - - name: grafana-agent-example-wal - emptyDir: {} - - name: secret-extra-secret-a - secret: - secretName: extra-secret-a - - name: configmap-extra-configmap-a - configMap: - name: extra-configmap-a - ---- - -apiVersion: apps/v1 -kind: DaemonSet -metadata: - name: grafana-agent-example-logs - namespace: default -spec: - template: - spec: - containers: - - name: config-reloader - volumeMounts: - - name: config - readOnly: true - mountPath: /var/lib/grafana-agent/config-in - - name: config-out - mountPath: /var/lib/grafana-agent/config - - name: secrets - readOnly: true - mountPath: /var/lib/grafana-agent/secrets - - name: varlog - readOnly: true 
- mountPath: /var/log - - name: dockerlogs - readOnly: true - mountPath: /var/lib/docker/containers - - name: data - mountPath: /var/lib/grafana-agent/data - - name: secret-extra-secret-a - readOnly: true - mountPath: /var/lib/grafana-agent/extra-secrets/extra-secret-a - - name: configmap-extra-configmap-a - readOnly: true - mountPath: /var/lib/grafana-agent/extra-configmaps/extra-configmap-a - - name: grafana-agent - volumeMounts: - - name: config - readOnly: true - mountPath: /var/lib/grafana-agent/config-in - - name: config-out - mountPath: /var/lib/grafana-agent/config - - name: secrets - readOnly: true - mountPath: /var/lib/grafana-agent/secrets - - name: varlog - readOnly: true - mountPath: /var/log - - name: dockerlogs - readOnly: true - mountPath: /var/lib/docker/containers - - name: data - mountPath: /var/lib/grafana-agent/data - - name: secret-extra-secret-a - readOnly: true - mountPath: /var/lib/grafana-agent/extra-secrets/extra-secret-a - - name: configmap-extra-configmap-a - readOnly: true - mountPath: /var/lib/grafana-agent/extra-configmaps/extra-configmap-a - volumes: - - name: config - secret: - secretName: grafana-agent-example-logs-config - - name: config-out - emptyDir: {} - - name: secrets - secret: - secretName: grafana-agent-example-secrets - - name: varlog - hostPath: - path: /var/log - - name: dockerlogs - hostPath: - path: /var/lib/docker/containers - - name: data - hostPath: - path: /var/lib/grafana-agent/data - - name: secret-extra-secret-a - secret: - secretName: extra-secret-a - - name: configmap-extra-configmap-a - configMap: - name: extra-configmap-a diff --git a/internal/static/operator/testdata/test-integrations.in.yaml b/internal/static/operator/testdata/test-integrations.in.yaml deleted file mode 100644 index 3211763478..0000000000 --- a/internal/static/operator/testdata/test-integrations.in.yaml +++ /dev/null @@ -1,237 +0,0 @@ -apiVersion: monitoring.grafana.com/v1alpha1 -kind: GrafanaAgent -metadata: - name: grafana-agent-example - namespace: default - labels: - app: grafana-agent-example -spec: - image: grafana/agent:main - logLevel: info - serviceAccountName: grafana-agent - storage: - volumeClaimTemplate: - spec: - resources: - requests: - storage: 1Gi - logs: - instanceSelector: - matchLabels: - agent: grafana-agent-example - metrics: - instanceSelector: - matchLabels: - agent: grafana-agent-example - integrations: - selector: - matchLabels: - agent: grafana-agent-example - ---- - -apiVersion: monitoring.grafana.com/v1alpha1 -kind: MetricsInstance -metadata: - name: primary - namespace: default - labels: - agent: grafana-agent-example -spec: - remoteWrite: - - url: http://prometheus:9090/api/v1/write - podMonitorNamespaceSelector: {} - podMonitorSelector: - matchLabels: - instance: primary - ---- - -apiVersion: monitoring.grafana.com/v1alpha1 -kind: LogsInstance -metadata: - name: primary - namespace: default - labels: - agent: grafana-agent-example -spec: - clients: - - url: http://loki:8080/loki/api/v1/push - - # Supply an empty namespace selector to look in all namespaces. - podLogsNamespaceSelector: {} - podLogsSelector: - matchLabels: - instance: primary - ---- - -# Have the Agent monitor itself. -apiVersion: monitoring.coreos.com/v1 -kind: PodMonitor -metadata: - name: grafana-agents - namespace: default - labels: - instance: primary -spec: - selector: - matchLabels: - app.kubernetes.io/name: grafana-agent - podMetricsEndpoints: - - port: http-metrics - ---- - -# Have the Agent get logs from itself. 
-apiVersion: monitoring.grafana.com/v1alpha1 -kind: PodLogs -metadata: - name: grafana-agents - namespace: default - labels: - instance: primary -spec: - selector: - matchLabels: - app.kubernetes.io/name: grafana-agent - pipelineStages: - - cri: {} - ---- - -# Collect node_exporter metrics. -apiVersion: monitoring.grafana.com/v1alpha1 -kind: Integration -metadata: - name: node-exporter - namespace: default - labels: - agent: grafana-agent-example -spec: - name: node_exporter - type: - allNodes: true - unique: true - config: - autoscrape: - enable: true - metrics_instance: default/primary - rootfs_path: /default/node_exporter/rootfs - sysfs_path: /default/node_exporter/sys - procfs_path: /default/node_exporter/proc - volumeMounts: - - mountPath: /default/node_exporter/proc - name: proc - - mountPath: /default/node_exporter/sys - name: sys - - mountPath: /default/node_exporter/rootfs - name: root - volumes: - - name: proc - hostPath: - path: /proc - - name: sys - hostPath: - path: /sys - - name: root - hostPath: - path: /root - # These aren't really used here, but just being mounted for local testing. - secrets: - - name: fake-secret - key: key - configMaps: - - name: fake-configmap - key: foo - ---- - -# Collect kubernetes API events. -apiVersion: monitoring.grafana.com/v1alpha1 -kind: Integration -metadata: - name: eventhandler - namespace: default - labels: - agent: grafana-agent-example -spec: - name: eventhandler - type: - unique: true - config: - logs_instance: default/primary - cache_path: "/var/lib/grafana-agent/data/eventhandler.cache" - ---- - -# -# Pretend Secrets/ConfigMaps -# - ---- -apiVersion: v1 -kind: Secret -metadata: - name: fake-secret - namespace: default -stringData: - key: "value" - ---- -apiVersion: v1 -kind: ConfigMap -metadata: - name: fake-configmap - namespace: default -data: - foo: "bar" - -# -# Extra resources -# - ---- -apiVersion: v1 -kind: ServiceAccount -metadata: - name: grafana-agent - namespace: default ---- -apiVersion: rbac.authorization.k8s.io/v1 -kind: ClusterRole -metadata: - name: grafana-agent -rules: -- apiGroups: - - "" - resources: - - nodes - - nodes/proxy - - nodes/metrics - - services - - endpoints - - pods - - events # needed for eventhandler integration - verbs: - - get - - list - - watch -- nonResourceURLs: - - /metrics - - /metrics/cadvisor - verbs: - - get ---- -apiVersion: rbac.authorization.k8s.io/v1 -kind: ClusterRoleBinding -metadata: - name: grafana-agent -roleRef: - apiGroup: rbac.authorization.k8s.io - kind: ClusterRole - name: grafana-agent -subjects: -- kind: ServiceAccount - name: grafana-agent - namespace: default diff --git a/internal/static/operator/testdata/test-integrations.out.yaml b/internal/static/operator/testdata/test-integrations.out.yaml deleted file mode 100644 index 80ddb00463..0000000000 --- a/internal/static/operator/testdata/test-integrations.out.yaml +++ /dev/null @@ -1,151 +0,0 @@ -# Resources to assert to exist when reconciling test-integrations.in.yaml. 
- -apiVersion: apps/v1 -kind: Deployment -metadata: - name: grafana-agent-example-integrations-deploy - namespace: default -spec: - template: - spec: - containers: - - name: config-reloader - - name: grafana-agent - env: - - name: POD_NAME - valueFrom: - fieldRef: - apiVersion: v1 - fieldPath: metadata.name - - name: SHARD - value: "0" - - name: SHARDS - value: "1" - - name: HOSTNAME - valueFrom: - fieldRef: - apiVersion: v1 - fieldPath: spec.nodeName - volumeMounts: - - name: config - readOnly: true - mountPath: /var/lib/grafana-agent/config-in - - name: config-out - mountPath: /var/lib/grafana-agent/config - - name: secrets - readOnly: true - mountPath: /var/lib/grafana-agent/secrets - - name: grafana-agent-example-integrations-deploy-wal - mountPath: /var/lib/grafana-agent/data - - name: varlog - readOnly: true - mountPath: /var/log - - name: dockerlogs - readOnly: true - mountPath: /var/lib/docker/containers - volumes: - - name: config - secret: - secretName: grafana-agent-example-integrations-deploy-config - - name: config-out - emptyDir: {} - - name: secrets - secret: - secretName: grafana-agent-example-secrets - - name: grafana-agent-example-integrations-deploy-wal - emptyDir: {} - - name: varlog - hostPath: - path: /var/log - - name: dockerlogs - hostPath: - path: /var/lib/docker/containers - ---- - -apiVersion: apps/v1 -kind: DaemonSet -metadata: - name: grafana-agent-example-integrations-ds - namespace: default -spec: - template: - spec: - containers: - - name: config-reloader - - name: grafana-agent - env: - - name: POD_NAME - valueFrom: - fieldRef: - apiVersion: v1 - fieldPath: metadata.name - - name: SHARD - value: "0" - - name: SHARDS - value: "1" - - name: HOSTNAME - valueFrom: - fieldRef: - apiVersion: v1 - fieldPath: spec.nodeName - volumeMounts: - - name: config - readOnly: true - mountPath: /var/lib/grafana-agent/config-in - - name: config-out - mountPath: /var/lib/grafana-agent/config - - name: secrets - readOnly: true - mountPath: /var/lib/grafana-agent/secrets - - name: default-node-exporter-proc - mountPath: /default/node_exporter/proc - - name: default-node-exporter-sys - mountPath: /default/node_exporter/sys - - name: default-node-exporter-root - mountPath: /default/node_exporter/rootfs - - name: grafana-agent-example-integrations-secrets - readOnly: true - mountPath: /etc/grafana-agent/integrations - - name: grafana-agent-example-integrations-ds-wal - mountPath: /var/lib/grafana-agent/data - - name: varlog - readOnly: true - mountPath: /var/log - - name: dockerlogs - readOnly: true - mountPath: /var/lib/docker/containers - volumes: - - name: config - secret: - secretName: grafana-agent-example-integrations-ds-config - - name: config-out - emptyDir: {} - - name: secrets - secret: - secretName: grafana-agent-example-secrets - - name: default-node-exporter-proc - hostPath: - path: /proc - - name: default-node-exporter-sys - hostPath: - path: /sys - - name: default-node-exporter-root - hostPath: - path: /root - - name: grafana-agent-example-integrations-secrets - secret: - secretName: grafana-agent-example-secrets - items: - - key: _secrets_default_fake_secret_key - path: secrets/default/fake-secret/key - - key: _configMaps_default_fake_configmap_foo - path: configMaps/default/fake-configmap/foo - - name: grafana-agent-example-integrations-ds-wal - emptyDir: {} - - name: varlog - hostPath: - path: /var/log - - name: dockerlogs - hostPath: - path: /var/lib/docker/containers diff --git a/internal/static/operator/testdata/test-metrics-instance.in.yaml 
b/internal/static/operator/testdata/test-metrics-instance.in.yaml deleted file mode 100644 index 911ed996ef..0000000000 --- a/internal/static/operator/testdata/test-metrics-instance.in.yaml +++ /dev/null @@ -1,84 +0,0 @@ -apiVersion: monitoring.grafana.com/v1alpha1 -kind: GrafanaAgent -metadata: - name: grafana-agent-example - namespace: default - labels: - app: grafana-agent-example -spec: - image: grafana/agent:latest - logLevel: debug - serviceAccountName: grafana-agent - storage: - volumeClaimTemplate: - spec: - resources: - requests: - storage: 1Gi - metrics: - instanceSelector: - matchLabels: - agent: grafana-agent-example - ---- - -apiVersion: monitoring.grafana.com/v1alpha1 -kind: MetricsInstance -metadata: - name: primary - namespace: default - labels: - agent: grafana-agent-example -spec: - remoteWrite: - - url: http://prometheus.default.svc.cluster.local:9090/prometheus/api/v1/write - ---- - -# -# Extra resources -# - ---- -apiVersion: v1 -kind: ServiceAccount -metadata: - name: grafana-agent - namespace: default ---- -apiVersion: rbac.authorization.k8s.io/v1 -kind: ClusterRole -metadata: - name: grafana-agent -rules: -- apiGroups: - - "" - resources: - - nodes - - nodes/proxy - - nodes/metrics - - services - - endpoints - - pods - verbs: - - get - - list - - watch -- nonResourceURLs: - - /metrics - - /metrics/cadvisor - verbs: - - get ---- -apiVersion: rbac.authorization.k8s.io/v1 -kind: ClusterRoleBinding -metadata: - name: grafana-agent -roleRef: - apiGroup: rbac.authorization.k8s.io - kind: ClusterRole - name: grafana-agent -subjects: -- kind: ServiceAccount - name: grafana-agent - namespace: default diff --git a/internal/static/operator/testdata/test-metrics-instance.out.yaml b/internal/static/operator/testdata/test-metrics-instance.out.yaml deleted file mode 100644 index 47ae2ee867..0000000000 --- a/internal/static/operator/testdata/test-metrics-instance.out.yaml +++ /dev/null @@ -1,153 +0,0 @@ -# Resources to assert to exist when reconciling test-metrics-instance.in.yaml. 
- -apiVersion: apps/v1 -kind: StatefulSet -metadata: - name: grafana-agent-example - namespace: default - labels: - operator.agent.grafana.com/name: grafana-agent-example - operator.agent.grafana.com/shard: "0" - operator.agent.grafana.com/type: metrics - app.kubernetes.io/instance: grafana-agent-example - app.kubernetes.io/managed-by: grafana-agent-operator - app.kubernetes.io/name: grafana-agent - grafana-agent: grafana-agent-example - ownerReferences: - - apiVersion: monitoring.grafana.com/v1alpha1 - blockOwnerDeletion: true - controller: true - kind: GrafanaAgent - name: grafana-agent-example -spec: - replicas: 1 - serviceName: grafana-agent-example-operated - template: - spec: - containers: - - name: config-reloader - args: - - --config-file=/var/lib/grafana-agent/config-in/agent.yml - - --config-envsubst-file=/var/lib/grafana-agent/config/agent.yml - - --watch-interval=1m - - --statefulset-ordinal-from-envvar=POD_NAME - - --reload-url=http://127.0.0.1:8080/-/reload - env: - - name: POD_NAME - valueFrom: - fieldRef: - apiVersion: v1 - fieldPath: metadata.name - - name: SHARD - value: "0" - - name: SHARDS - value: "1" - volumeMounts: - - mountPath: /var/lib/grafana-agent/config-in - name: config - readOnly: true - - mountPath: /var/lib/grafana-agent/config - name: config-out - - mountPath: /var/lib/grafana-agent/secrets - name: secrets - readOnly: true - - mountPath: /var/lib/grafana-agent/data - name: grafana-agent-example-wal - - name: grafana-agent - args: - - -config.file=/var/lib/grafana-agent/config/agent.yml - - -config.expand-env=true - - -server.http.address=0.0.0.0:8080 - - -enable-features=integrations-next - env: - - name: POD_NAME - valueFrom: - fieldRef: - apiVersion: v1 - fieldPath: metadata.name - - name: SHARD - value: "0" - - name: SHARDS - value: "1" - ports: - - containerPort: 8080 - name: http-metrics - protocol: TCP - readinessProbe: - httpGet: - path: /-/ready - port: http-metrics - scheme: HTTP - volumeMounts: - - mountPath: /var/lib/grafana-agent/config-in - name: config - readOnly: true - - mountPath: /var/lib/grafana-agent/config - name: config-out - - mountPath: /var/lib/grafana-agent/secrets - name: secrets - readOnly: true - - mountPath: /var/lib/grafana-agent/data - name: grafana-agent-example-wal - serviceAccount: grafana-agent - serviceAccountName: grafana-agent - terminationGracePeriodSeconds: 4800 - volumes: - - name: config - secret: - secretName: grafana-agent-example-config - - name: config-out - emptyDir: {} - - name: secrets - secret: - secretName: grafana-agent-example-secrets - volumeClaimTemplates: - - metadata: - name: grafana-agent-example-wal - spec: - accessModes: [ReadWriteOnce] - resources: - requests: - storage: 1Gi - ---- - -apiVersion: v1 -kind: Secret -metadata: - name: grafana-agent-example-config - namespace: default - labels: - app.kubernetes.io/managed-by: grafana-agent-operator - ownerReferences: - - apiVersion: monitoring.grafana.com/v1alpha1 - kind: GrafanaAgent - name: grafana-agent-example -# We don't test the contents of the secret here; we use unit-level tests for -# asserting that the generated config is correct. 
- ---- - -apiVersion: v1 -kind: Service -metadata: - name: grafana-agent-example-operated - namespace: default - labels: - app.kubernetes.io/managed-by: grafana-agent-operator - operated-agent: "true" - operator.agent.grafana.com/name: grafana-agent-example - ownerReferences: - - apiVersion: monitoring.grafana.com/v1alpha1 - kind: GrafanaAgent - name: grafana-agent-example -spec: - type: ClusterIP - ports: - - name: http-metrics - port: 8080 - protocol: TCP - targetPort: http-metrics - selector: - app.kubernetes.io/name: grafana-agent - operator.agent.grafana.com/name: grafana-agent-example diff --git a/internal/static/operator/testdata/test-resource-hierarchy.yaml b/internal/static/operator/testdata/test-resource-hierarchy.yaml deleted file mode 100644 index bcedb4d88e..0000000000 --- a/internal/static/operator/testdata/test-resource-hierarchy.yaml +++ /dev/null @@ -1,205 +0,0 @@ -apiVersion: monitoring.grafana.com/v1alpha1 -kind: GrafanaAgent -metadata: - name: grafana-agent-example - namespace: default - labels: - app: grafana-agent-example -spec: - image: grafana/agent:latest - serviceAccountName: grafana-agent - logs: - instanceSelector: - matchLabels: - agent: grafana-agent-example - metrics: - instanceSelector: - matchLabels: - agent: grafana-agent-example - integrations: - selector: - matchLabels: - agent: grafana-agent-example - ---- - -apiVersion: monitoring.grafana.com/v1alpha1 -kind: MetricsInstance -metadata: - name: primary - namespace: default - labels: - agent: grafana-agent-example -spec: - remoteWrite: - - url: http://prometheus:9090/api/v1/write - basicAuth: - username: - name: prometheus-fake-credentials - key: fakeUsername - password: - name: prometheus-fake-credentials - key: fakePassword - # Supply an empty namespace selector to look in all namespaces. - podMonitorNamespaceSelector: {} - podMonitorSelector: - matchLabels: - instance: primary - # Supply an empty namespace selector to look in all namespaces. - serviceMonitorNamespaceSelector: {} - serviceMonitorSelector: - matchLabels: - instance: primary - ---- - -apiVersion: monitoring.grafana.com/v1alpha1 -kind: LogsInstance -metadata: - name: primary - namespace: default - labels: - agent: grafana-agent-example -spec: - clients: - - url: http://loki:8080/loki/api/v1/push - - # Supply an empty namespace selector to look in all namespaces. - podLogsNamespaceSelector: {} - podLogsSelector: - matchLabels: - instance: primary - ---- - -apiVersion: monitoring.grafana.com/v1alpha1 -kind: Integration -metadata: - name: node-exporter - namespace: default - labels: - agent: grafana-agent-example -spec: - name: node_exporter - type: - allNodes: true - unique: true - config: - rootfs_path: /rootfs - sysfs_path: /host/sys - procfs_path: /host/proc - volumeMounts: - - mountPath: /host/proc - name: proc - - mountPath: /host/sys - name: sys - - mountPath: /rootfs - name: root - volumes: - - hostPath: - path: /proc - name: proc - - hostPath: - path: /sys - name: sys - - hostPath: - path: / - name: root - ---- - -# Have the Agent monitor itself. -apiVersion: monitoring.coreos.com/v1 -kind: PodMonitor -metadata: - name: grafana-agents - namespace: default - labels: - instance: primary -spec: - selector: - matchLabels: - app.kubernetes.io/name: grafana-agent - podMetricsEndpoints: - - port: http-metrics - ---- - -# Have the Agent get logs from itself. 
-apiVersion: monitoring.grafana.com/v1alpha1 -kind: PodLogs -metadata: - name: grafana-agents - namespace: default - labels: - instance: primary -spec: - selector: - matchLabels: - app.kubernetes.io/name: grafana-agent - pipelineStages: - - cri: {} - -# -# Pretend credentials -# - ---- -apiVersion: v1 -kind: Secret -metadata: - name: prometheus-fake-credentials - namespace: default -data: - # "user" - fakeUsername: "dXNlcg==" - # "password" - fakePassword: "cGFzc3dvcmQ=" - -# -# Extra resources -# - ---- -apiVersion: v1 -kind: ServiceAccount -metadata: - name: grafana-agent - namespace: default ---- -apiVersion: rbac.authorization.k8s.io/v1 -kind: ClusterRole -metadata: - name: grafana-agent -rules: -- apiGroups: - - "" - resources: - - nodes - - nodes/proxy - - nodes/metrics - - services - - endpoints - - pods - verbs: - - get - - list - - watch -- nonResourceURLs: - - /metrics - - /metrics/cadvisor - verbs: - - get ---- -apiVersion: rbac.authorization.k8s.io/v1 -kind: ClusterRoleBinding -metadata: - name: grafana-agent -roleRef: - apiGroup: rbac.authorization.k8s.io - kind: ClusterRole - name: grafana-agent -subjects: -- kind: ServiceAccount - name: grafana-agent - namespace: default diff --git a/internal/static/traces/automaticloggingprocessor/automaticloggingprocessor.go b/internal/static/traces/automaticloggingprocessor/automaticloggingprocessor.go index 63a6fbb6c2..c347e5cbb5 100644 --- a/internal/static/traces/automaticloggingprocessor/automaticloggingprocessor.go +++ b/internal/static/traces/automaticloggingprocessor/automaticloggingprocessor.go @@ -11,9 +11,9 @@ import ( "github.com/go-kit/log/level" "github.com/go-logfmt/logfmt" "github.com/grafana/agent/internal/static/logs" - "github.com/grafana/agent/internal/static/operator/config" "github.com/grafana/agent/internal/static/traces/contextkeys" - util "github.com/grafana/agent/internal/util/log" + "github.com/grafana/agent/internal/util" + util_log "github.com/grafana/agent/internal/util/log" "github.com/grafana/loki/clients/pkg/promtail/api" "github.com/grafana/loki/pkg/logproto" "github.com/prometheus/common/model" @@ -55,7 +55,7 @@ type automaticLoggingProcessor struct { } func newTraceProcessor(nextConsumer consumer.Traces, cfg *AutomaticLoggingConfig) (processor.Traces, error) { - logger := log.With(util.Logger, "component", "traces automatic logging") + logger := log.With(util_log.Logger, "component", "traces automatic logging") if nextConsumer == nil { return nil, component.ErrNilNextConsumer @@ -170,7 +170,7 @@ func (p *automaticLoggingProcessor) spanLabels(keyValues []interface{}) model.La if _, ok := p.labels[k]; ok { // Loki does not accept "." as a valid character for labels // Dots . 
are replaced by underscores _ - k = config.SanitizeLabelName(k) + k = util.SanitizeLabelName(k) ls[model.LabelName(k)] = model.LabelValue(v) } diff --git a/internal/tools/packaging_test/agent_linux_packages_test.go b/internal/tools/packaging_test/agent_linux_packages_test.go index 280d963426..448daa698b 100644 --- a/internal/tools/packaging_test/agent_linux_packages_test.go +++ b/internal/tools/packaging_test/agent_linux_packages_test.go @@ -81,9 +81,7 @@ func (env *AgentEnvironment) TestInstall(t *testing.T) { res = env.ExecScript(`[ -f /usr/bin/grafana-agent ]`) require.Equal(t, 0, res.ExitCode, "expected grafana-agent to be installed") - res = env.ExecScript(`[ -f /usr/bin/grafana-agentctl ]`) - require.Equal(t, 0, res.ExitCode, "expected grafana-agentctl to be installed") - res = env.ExecScript(`[ -f /etc/grafana-agent.yaml ]`) + res = env.ExecScript(`[ -f /etc/grafana-agent.river ]`) require.Equal(t, 0, res.ExitCode, "expected grafana agent configuration file to exist") res = env.Uninstall() @@ -91,20 +89,18 @@ func (env *AgentEnvironment) TestInstall(t *testing.T) { res = env.ExecScript(`[ -f /usr/bin/grafana-agent ]`) require.Equal(t, 1, res.ExitCode, "expected grafana-agent to be uninstalled") - res = env.ExecScript(`[ -f /usr/bin/grafana-agentctl ]`) - require.Equal(t, 1, res.ExitCode, "expected grafana-agentctl to be uninstalled") // NOTE(rfratto): we don't check for what happens to the config file here, // since the behavior is inconsistent: rpm uninstalls it, but deb doesn't. } func (env *AgentEnvironment) TestConfigPersistence(t *testing.T) { - res := env.ExecScript(`echo -n "keepalive" > /etc/grafana-agent.yaml`) + res := env.ExecScript(`echo -n "keepalive" > /etc/grafana-agent.river`) require.Equal(t, 0, res.ExitCode, "failed to write config file") res = env.Install() require.Equal(t, 0, res.ExitCode, "installation failed") - res = env.ExecScript(`cat /etc/grafana-agent.yaml`) + res = env.ExecScript(`cat /etc/grafana-agent.river`) require.Equal(t, "keepalive", res.Stdout, "Expected existing file to not be overridden") } diff --git a/internal/tools/packaging_test/flow_linux_packages_test.go b/internal/tools/packaging_test/flow_linux_packages_test.go deleted file mode 100644 index 2f449ac3b4..0000000000 --- a/internal/tools/packaging_test/flow_linux_packages_test.go +++ /dev/null @@ -1,119 +0,0 @@ -//go:build !nonetwork && !nodocker && !race && packaging -// +build !nonetwork,!nodocker,!race,packaging - -package packaging_test - -import ( - "fmt" - "os" - "os/exec" - "path/filepath" - "runtime" - "testing" - - "github.com/ory/dockertest/v3" - "github.com/stretchr/testify/require" -) - -// TestFlowLinuxPackages runs the entire test suite for the Linux packages. -func TestFlowLinuxPackages(t *testing.T) { - packageName := "grafana-agent-flow" - - fmt.Println("Building packages (this may take a while...)") - buildFlowPackages(t) - - dockerPool, err := dockertest.NewPool("") - require.NoError(t, err) - - tt := []struct { - name string - f func(*FlowEnvironment, *testing.T) - }{ - {"install package", (*FlowEnvironment).TestInstall}, - {"ensure existing config doesn't get overridden", (*FlowEnvironment).TestConfigPersistence}, - {"test data folder permissions", (*FlowEnvironment).TestDataFolderPermissions}, - - // TODO: a test to verify that the systemd service works would be nice, but not - // required. 
-		//
-		// An implementation of the test would have to consider what host platforms it
-		// works on; bind mounting /sys/fs/cgroup and using the host systemd wouldn't
-		// work on macOS or Windows.
-	}
-
-	for _, tc := range tt {
-		t.Run(tc.name+"/rpm", func(t *testing.T) {
-			env := &FlowEnvironment{RPMEnvironment(t, packageName, dockerPool)}
-			tc.f(env, t)
-		})
-		t.Run(tc.name+"/deb", func(t *testing.T) {
-			env := &FlowEnvironment{DEBEnvironment(t, packageName, dockerPool)}
-			tc.f(env, t)
-		})
-	}
-}
-
-func buildFlowPackages(t *testing.T) {
-	t.Helper()
-
-	wd, err := os.Getwd()
-	require.NoError(t, err)
-	root, err := filepath.Abs(filepath.Join(wd, "../../.."))
-	require.NoError(t, err)
-
-	cmd := exec.Command("make", fmt.Sprintf("dist-agent-flow-packages-%s", runtime.GOARCH))
-	cmd.Env = append(
-		os.Environ(),
-		"VERSION=v0.0.0",
-		"DOCKER_OPTS=",
-	)
-	cmd.Dir = root
-	cmd.Stdout = os.Stdout
-	cmd.Stderr = os.Stderr
-	require.NoError(t, cmd.Run())
-}
-
-type FlowEnvironment struct{ Environment }
-
-func (env *FlowEnvironment) TestInstall(t *testing.T) {
-	res := env.Install()
-	require.Equal(t, 0, res.ExitCode, "installing failed")
-
-	res = env.ExecScript(`[ -f /usr/bin/grafana-agent-flow ]`)
-	require.Equal(t, 0, res.ExitCode, "expected grafana-agent-flow to be installed")
-	res = env.ExecScript(`[ -f /etc/grafana-agent-flow.river ]`)
-	require.Equal(t, 0, res.ExitCode, "expected grafana agent configuration file to exist")
-
-	res = env.Uninstall()
-	require.Equal(t, 0, res.ExitCode, "uninstalling failed")
-
-	res = env.ExecScript(`[ -f /usr/bin/grafana-agent-flow ]`)
-	require.Equal(t, 1, res.ExitCode, "expected grafana-agent-flow to be uninstalled")
-	// NOTE(rfratto): we don't check for what happens to the config file here,
-	// since the behavior is inconsistent: rpm uninstalls it, but deb doesn't.
-}
-
-func (env *FlowEnvironment) TestConfigPersistence(t *testing.T) {
-	res := env.ExecScript(`echo -n "keepalive" > /etc/grafana-agent-flow.river`)
-	require.Equal(t, 0, res.ExitCode, "failed to write config file")
-
-	res = env.Install()
-	require.Equal(t, 0, res.ExitCode, "installation failed")
-
-	res = env.ExecScript(`cat /etc/grafana-agent-flow.river`)
-	require.Equal(t, "keepalive", res.Stdout, "Expected existing file to not be overridden")
-}
-
-func (env *FlowEnvironment) TestDataFolderPermissions(t *testing.T) {
-	// Installing should create /var/lib/grafana-agent-flow, assign it to the
-	// grafana-agent-flow user and the grafana-agent group, and set its
-	// permissions to 0770.
-	res := env.Install()
-	require.Equal(t, 0, res.ExitCode, "installation failed")
-
-	res = env.ExecScript(`[ -d /var/lib/grafana-agent-flow ]`)
-	require.Equal(t, 0, res.ExitCode, "Expected /var/lib/grafana-agent-flow to have been created during install")
-
-	res = env.ExecScript(`stat -c '%a:%U:%G' /var/lib/grafana-agent-flow`)
-	require.Equal(t, "770:grafana-agent-flow:grafana-agent\n", res.Stdout, "wrong permissions for data folder")
-	require.Equal(t, 0, res.ExitCode, "stat'ing data folder failed")
-}
diff --git a/internal/util/k8s/k8s.go b/internal/util/k8s/k8s.go
deleted file mode 100644
index c1c883cb82..0000000000
--- a/internal/util/k8s/k8s.go
+++ /dev/null
@@ -1,132 +0,0 @@
-// Package k8s spins up a Kubernetes cluster for testing.
-package k8s
-
-import (
-	"context"
-	"fmt"
-	"log"
-
-	gragent "github.com/grafana/agent/internal/static/operator/apis/monitoring/v1alpha1"
-	promop_v1 "github.com/prometheus-operator/prometheus-operator/pkg/apis/monitoring/v1"
-	"github.com/testcontainers/testcontainers-go/modules/k3s"
-	apiextensions_v1 "k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1"
-	"k8s.io/apimachinery/pkg/runtime"
-	"k8s.io/client-go/kubernetes/scheme"
-	"k8s.io/client-go/rest"
-	"k8s.io/client-go/tools/clientcmd"
-	"sigs.k8s.io/controller-runtime/pkg/client"
-)
-
-// Cluster is a Kubernetes cluster that runs inside of a k3s Docker container.
-// Call GetConfig to retrieve a Kubernetes *rest.Config to use to connect to
-// the cluster.
-//
-// Note that k3s uses containerd as its runtime, which means local Docker
-// images are not immediately available for use. To push local images to a
-// container, call PushImages. It's recommended that tests use image names that
-// are not available on Docker Hub to avoid accidentally testing against the
-// wrong image.
-//
-// Cluster should be stopped by calling Stop, otherwise running Docker
-// containers will leak.
-type Cluster struct {
-	k3sContainer *k3s.K3sContainer
-	restConfig   *rest.Config
-	kubeClient   client.Client
-}
-
-// Options control creation of a cluster.
-type Options struct {
-	// Scheme is the Kubernetes scheme used for the generated Kubernetes client.
-	// If nil, a scheme containing all known Kubernetes API types is generated.
-	Scheme *runtime.Scheme
-}
-
-func (o *Options) applyDefaults() error {
-	if o.Scheme == nil {
-		o.Scheme = runtime.NewScheme()
-
-		for _, add := range []func(*runtime.Scheme) error{
-			scheme.AddToScheme,
-			apiextensions_v1.AddToScheme,
-			gragent.AddToScheme,
-			promop_v1.AddToScheme,
-		} {
-			if err := add(o.Scheme); err != nil {
-				return fmt.Errorf("unable to register scheme: %w", err)
-			}
-		}
-	}
-	return nil
-}
-
-// NewCluster creates a new Cluster. NewCluster won't return with success until
-// the cluster is running, but things like the ingress controller might not be
-// running right away. You should never assume that any resource in the cluster
-// is running; use exponential backoff to allow time for things to spin up.
-func NewCluster(ctx context.Context, o Options) (cluster *Cluster, err error) {
-	if err := o.applyDefaults(); err != nil {
-		return nil, fmt.Errorf("failed to apply defaults to options: %w", err)
-	}
-
-	container, err := k3s.RunContainer(ctx)
-	defer func() {
-		// We don't want to leak the cluster here, and we can't really be sure how
-		// many resources exist, even if RunContainer fails. If we never set our
-		// cluster return argument, we'll delete the k3s cluster. This also
-		// gracefully handles panics.
- if cluster == nil && container != nil { - _ = container.Terminate(ctx) - } - }() - if err != nil { - return nil, fmt.Errorf("failed to run cluster: %w", err) - } - - rawConfig, err := container.GetKubeConfig(ctx) - if err != nil { - return nil, fmt.Errorf("failed to get kubeconfig: %w", err) - } - restCfg, err := clientcmd.RESTConfigFromKubeConfig(rawConfig) - if err != nil { - return nil, fmt.Errorf("failed to parse kubeconfig: %w", err) - } - - kubeClient, err := client.New(restCfg, client.Options{ - Scheme: o.Scheme, - }) - if err != nil { - return nil, fmt.Errorf("failed to generate client: %w", err) - } - - return &Cluster{ - k3sContainer: container, - restConfig: restCfg, - kubeClient: kubeClient, - }, nil -} - -// Client returns the Kubernetes client for this Cluster. Client is handling -// objects registered to the Scheme passed to Options when creating the -// cluster. -func (c *Cluster) Client() client.Client { - return c.kubeClient -} - -// GetConfig returns a *rest.Config that can be used to connect to the -// Kubernetes cluster. The returned Config is a copy and is safe for -// modification. -func (c *Cluster) GetConfig() *rest.Config { - return rest.CopyConfig(c.restConfig) -} - -// Stop shuts down and deletes the cluster. Stop must be called to clean up -// created Docker resources. -func (c *Cluster) Stop() { - err := c.k3sContainer.Terminate(context.Background()) - if err != nil { - log.Printf("failed to shut down cluster, docker containers may have leaked: %s", err) - } -} diff --git a/internal/util/k8s/k8s_test.go b/internal/util/k8s/k8s_test.go deleted file mode 100644 index 50b91009cd..0000000000 --- a/internal/util/k8s/k8s_test.go +++ /dev/null @@ -1,35 +0,0 @@ -//go:build !nonetwork && !nodocker && !race - -package k8s - -import ( - "context" - "testing" - - "github.com/stretchr/testify/require" - core "k8s.io/api/core/v1" - "sigs.k8s.io/controller-runtime/pkg/client" -) - -func TestCluster(t *testing.T) { - // TODO: this is broken with go 1.20.6 - // waiting on https://github.com/testcontainers/testcontainers-go/issues/1359 - t.Skip() - ctx := context.Background() - - cluster, err := NewCluster(ctx, Options{}) - require.NoError(t, err) - defer cluster.Stop() - - cli, err := client.New(cluster.GetConfig(), client.Options{}) - require.NoError(t, err) - - var nss core.NamespaceList - require.NoError(t, cli.List(ctx, &nss)) - - names := make([]string, len(nss.Items)) - for i, ns := range nss.Items { - names[i] = ns.Name - } - require.Contains(t, names, "kube-system") -} diff --git a/internal/util/k8s/objects.go b/internal/util/k8s/objects.go deleted file mode 100644 index 0250d9bc05..0000000000 --- a/internal/util/k8s/objects.go +++ /dev/null @@ -1,191 +0,0 @@ -package k8s - -import ( - "context" - "encoding/json" - "errors" - "fmt" - "io" - "time" - - "github.com/go-kit/log" - "github.com/go-kit/log/level" - "github.com/grafana/dskit/backoff" - "k8s.io/apimachinery/pkg/runtime/serializer" - "k8s.io/apimachinery/pkg/util/yaml" - "sigs.k8s.io/controller-runtime/pkg/client" - - apps_v1 "k8s.io/api/apps/v1" - core_v1 "k8s.io/api/core/v1" - "k8s.io/apimachinery/pkg/apis/meta/v1/unstructured" -) - -// CreateObjects will create the provided set of objects. If any object -// couldn't be created, an error will be returned and created objects will be -// deleted. -func CreateObjects(ctx context.Context, cli client.Client, objs ...client.Object) (err error) { - // Index offset into objs for objects we managed to create. 
- createdOffset := -1 - - defer func() { - if err == nil { - return - } - // Delete the subset of objs we managed to create - for i := 0; i <= createdOffset; i++ { - _ = cli.Delete(context.Background(), objs[i]) - } - }() - - for i, obj := range objs { - if err := cli.Create(ctx, obj); err != nil { - return fmt.Errorf("failed to create %s: %w", client.ObjectKeyFromObject(obj), err) - } - createdOffset = i - } - return nil -} - -// ReadObjects will read the set of objects from r and convert them into -// client.Object based on the scheme of the provided Kubernetes client. -// -// The data of r may be YAML or JSON. -func ReadObjects(r io.Reader, cli client.Client) ([]client.Object, error) { - var ( - objects []client.Object - - scheme = cli.Scheme() - rawDecoder = yaml.NewYAMLOrJSONDecoder(r, 4096) - decoder = serializer.NewCodecFactory(scheme).UniversalDecoder(scheme.PrioritizedVersionsAllGroups()...) - ) - -NextObject: - for { - var raw json.RawMessage - - err := rawDecoder.Decode(&raw) - switch { - case errors.Is(err, io.EOF): - break NextObject - case err != nil: - return nil, fmt.Errorf("error parsing object: %w", err) - case len(raw) == 0: - // Skip over empty objects. This can happen when --- is used at the top - // of YAML files. - continue NextObject - } - - obj, _, err := decoder.Decode(raw, nil, nil) - if err != nil { - return nil, fmt.Errorf("failed to decode object: %w", err) - } - clientObj, ok := obj.(client.Object) - if !ok { - return nil, fmt.Errorf("decoded object %T is not a controller-runtime object", obj) - } - objects = append(objects, clientObj) - } - - return objects, nil -} - -// ReadUnstructuredObjects will read the set of objects from r as unstructured -// objects. -func ReadUnstructuredObjects(r io.Reader) ([]*unstructured.Unstructured, error) { - var ( - objects []*unstructured.Unstructured - rawDecoder = yaml.NewYAMLOrJSONDecoder(r, 4096) - ) - -NextObject: - for { - var raw json.RawMessage - - err := rawDecoder.Decode(&raw) - switch { - case errors.Is(err, io.EOF): - break NextObject - case err != nil: - return nil, fmt.Errorf("error parsing object: %w", err) - case len(raw) == 0: - // Skip over empty objects. This can happen when --- is used at the top - // of YAML files. - continue NextObject - } - - var us unstructured.Unstructured - if err := json.Unmarshal(raw, &us); err != nil { - return nil, fmt.Errorf("failed to decode object: %w", err) - } - objects = append(objects, &us) - } - - return objects, nil -} - -// DefaultBackoff is a default backoff config that retries forever until ctx is -// canceled. -var DefaultBackoff = backoff.Config{ - MinBackoff: 100 * time.Millisecond, - MaxBackoff: 1 * time.Second, -} - -// WaitReady will return with no error if obj becomes ready before ctx cancels -// or the backoff fails. -// -// obj may be one of: DaemonSet, StatefulSet, Deployment, Pod. obj must have -// namespace and name set so it can be found. obj will be updated with the -// state of the object in the cluster as WaitReady runs. -// -// The final state of the object will be returned when it is ready. 
-func WaitReady(ctx context.Context, cli client.Client, obj client.Object, bc backoff.Config) error {
-	bo := backoff.New(ctx, bc)
-
-	key := client.ObjectKeyFromObject(obj)
-
-	var readyCheck func() bool
-	switch obj := obj.(type) {
-	case *apps_v1.DaemonSet:
-		readyCheck = func() bool {
-			return obj.Status.NumberReady >= obj.Status.UpdatedNumberScheduled
-		}
-	case *apps_v1.StatefulSet:
-		readyCheck = func() bool {
-			return obj.Status.ReadyReplicas >= obj.Status.UpdatedReplicas
-		}
-	case *apps_v1.Deployment:
-		readyCheck = func() bool {
-			return obj.Status.ReadyReplicas >= obj.Status.UpdatedReplicas
-		}
-	case *core_v1.Pod:
-		readyCheck = func() bool {
-			phase := obj.Status.Phase
-			return phase == core_v1.PodRunning || phase == core_v1.PodSucceeded
-		}
-	}
-
-	for bo.Ongoing() {
-		err := cli.Get(ctx, key, obj)
-		if err == nil && readyCheck() {
-			break
-		}
-		bo.Wait()
-	}
-
-	return bo.Err()
-}
-
-// Wait calls check until ctx is canceled or check returns nil. Returns an
-// error if ctx is canceled.
-func Wait(ctx context.Context, l log.Logger, check func() error) error {
-	bo := backoff.New(ctx, DefaultBackoff)
-	for bo.Ongoing() {
-		err := check()
-		if err == nil {
-			return nil
-		}
-		level.Error(l).Log("msg", "check failed", "err", err)
-		bo.Wait()
-	}
-	return bo.Err()
-}
diff --git a/internal/util/k8s/resources.go b/internal/util/k8s/resources.go
deleted file mode 100644
index 0b137e984f..0000000000
--- a/internal/util/k8s/resources.go
+++ /dev/null
@@ -1,101 +0,0 @@
-package k8s
-
-import (
-	"context"
-	"fmt"
-	"io"
-	"os"
-	"time"
-
-	"github.com/go-kit/log"
-	"github.com/go-kit/log/level"
-	"github.com/grafana/dskit/backoff"
-	"sigs.k8s.io/controller-runtime/pkg/client"
-)
-
-// ResourceSet deploys a set of temporary objects to a k8s test cluster and
-// deletes them when Stop is called.
-type ResourceSet struct {
-	log        log.Logger
-	kubeClient client.Client
-	objects    []client.Object
-}
-
-// NewResourceSet returns a new resource set.
-func NewResourceSet(l log.Logger, cluster *Cluster) *ResourceSet {
-	return &ResourceSet{
-		log:        l,
-		kubeClient: cluster.Client(),
-	}
-}
-
-// Add will read from r and deploy the resources into the cluster.
-func (rs *ResourceSet) Add(ctx context.Context, r io.Reader) error {
-	readObjects, err := ReadObjects(r, rs.kubeClient)
-	if err != nil {
-		return fmt.Errorf("error reading fixture: %w", err)
-	}
-	err = CreateObjects(ctx, rs.kubeClient, readObjects...)
-	if err != nil {
-		return err
-	}
-
-	rs.objects = append(rs.objects, readObjects...)
-	return nil
-}
-
-// AddFile will open filename and deploy it into the cluster.
-func (rs *ResourceSet) AddFile(ctx context.Context, filename string) error {
-	f, err := os.Open(filename)
-	if err != nil {
-		return fmt.Errorf("failed to open %q: %w", filename, err)
-	}
-	defer f.Close()
-	return rs.Add(ctx, f)
-}
-
-// Wait waits until all of the ResourceSet's objects can be found.
-func (rs *ResourceSet) Wait(ctx context.Context) error { - bo := backoff.New(ctx, backoff.Config{ - MinBackoff: 10 * time.Millisecond, - MaxBackoff: 100 * time.Second, - }) - - check := func() error { - for _, obj := range rs.objects { - key := client.ObjectKeyFromObject(obj) - - clone := obj.DeepCopyObject().(client.Object) - if err := rs.kubeClient.Get(ctx, key, clone); err != nil { - return fmt.Errorf("failed to get %s: %w", key, err) - } - } - - return nil - } - - for bo.Ongoing() { - err := check() - if err == nil { - return nil - } - - level.Debug(rs.log).Log("msg", "not all resources are available; waiting", "err", err) - bo.Wait() - } - - return bo.Err() -} - -// Stop removes deployed resources from the cluster. -func (rs *ResourceSet) Stop() { - ctx, cancel := context.WithTimeout(context.Background(), time.Minute) - defer cancel() - - for _, obj := range rs.objects { - err := rs.kubeClient.Delete(ctx, obj) - if err != nil { - level.Error(rs.log).Log("msg", "failed to delete object", "obj", client.ObjectKeyFromObject(obj), "err", err) - } - } -} diff --git a/internal/util/sanitize.go b/internal/util/sanitize.go new file mode 100644 index 0000000000..f47595b3aa --- /dev/null +++ b/internal/util/sanitize.go @@ -0,0 +1,10 @@ +package util + +import "regexp" + +var invalidLabelCharRE = regexp.MustCompile(`[^a-zA-Z0-9_]`) + +// SanitizeLabelName sanitizes a label name for Prometheus. +func SanitizeLabelName(name string) string { + return invalidLabelCharRE.ReplaceAllString(name, "_") +} diff --git a/operations/agent-flow-mixin/alerts.libsonnet b/operations/agent-mixin/alerts.libsonnet similarity index 100% rename from operations/agent-flow-mixin/alerts.libsonnet rename to operations/agent-mixin/alerts.libsonnet diff --git a/operations/agent-flow-mixin/alerts/clustering.libsonnet b/operations/agent-mixin/alerts/clustering.libsonnet similarity index 100% rename from operations/agent-flow-mixin/alerts/clustering.libsonnet rename to operations/agent-mixin/alerts/clustering.libsonnet diff --git a/operations/agent-flow-mixin/alerts/controller.libsonnet b/operations/agent-mixin/alerts/controller.libsonnet similarity index 100% rename from operations/agent-flow-mixin/alerts/controller.libsonnet rename to operations/agent-mixin/alerts/controller.libsonnet diff --git a/operations/agent-flow-mixin/alerts/opentelemetry.libsonnet b/operations/agent-mixin/alerts/opentelemetry.libsonnet similarity index 100% rename from operations/agent-flow-mixin/alerts/opentelemetry.libsonnet rename to operations/agent-mixin/alerts/opentelemetry.libsonnet diff --git a/operations/agent-flow-mixin/alerts/utils/alert.jsonnet b/operations/agent-mixin/alerts/utils/alert.jsonnet similarity index 100% rename from operations/agent-flow-mixin/alerts/utils/alert.jsonnet rename to operations/agent-mixin/alerts/utils/alert.jsonnet diff --git a/operations/agent-flow-mixin/dashboards.libsonnet b/operations/agent-mixin/dashboards.libsonnet similarity index 100% rename from operations/agent-flow-mixin/dashboards.libsonnet rename to operations/agent-mixin/dashboards.libsonnet diff --git a/operations/agent-flow-mixin/dashboards/cluster-node.libsonnet b/operations/agent-mixin/dashboards/cluster-node.libsonnet similarity index 100% rename from operations/agent-flow-mixin/dashboards/cluster-node.libsonnet rename to operations/agent-mixin/dashboards/cluster-node.libsonnet diff --git a/operations/agent-flow-mixin/dashboards/cluster-overview.libsonnet b/operations/agent-mixin/dashboards/cluster-overview.libsonnet similarity index 100% 
rename from operations/agent-flow-mixin/dashboards/cluster-overview.libsonnet rename to operations/agent-mixin/dashboards/cluster-overview.libsonnet diff --git a/operations/agent-flow-mixin/dashboards/controller.libsonnet b/operations/agent-mixin/dashboards/controller.libsonnet similarity index 100% rename from operations/agent-flow-mixin/dashboards/controller.libsonnet rename to operations/agent-mixin/dashboards/controller.libsonnet diff --git a/operations/agent-flow-mixin/dashboards/opentelemetry.libsonnet b/operations/agent-mixin/dashboards/opentelemetry.libsonnet similarity index 100% rename from operations/agent-flow-mixin/dashboards/opentelemetry.libsonnet rename to operations/agent-mixin/dashboards/opentelemetry.libsonnet diff --git a/operations/agent-flow-mixin/dashboards/prometheus.libsonnet b/operations/agent-mixin/dashboards/prometheus.libsonnet similarity index 100% rename from operations/agent-flow-mixin/dashboards/prometheus.libsonnet rename to operations/agent-mixin/dashboards/prometheus.libsonnet diff --git a/operations/agent-flow-mixin/dashboards/resources.libsonnet b/operations/agent-mixin/dashboards/resources.libsonnet similarity index 100% rename from operations/agent-flow-mixin/dashboards/resources.libsonnet rename to operations/agent-mixin/dashboards/resources.libsonnet diff --git a/operations/agent-flow-mixin/dashboards/utils/dashboard.jsonnet b/operations/agent-mixin/dashboards/utils/dashboard.jsonnet similarity index 94% rename from operations/agent-flow-mixin/dashboards/utils/dashboard.jsonnet rename to operations/agent-mixin/dashboards/utils/dashboard.jsonnet index 63427a46c9..e17b0ff678 100644 --- a/operations/agent-flow-mixin/dashboards/utils/dashboard.jsonnet +++ b/operations/agent-mixin/dashboards/utils/dashboard.jsonnet @@ -7,8 +7,8 @@ timezone: 'utc', refresh: '10s', schemaVersion: 36, - graphTooltip: 1, // shared crosshair for all graphs - tags: ['grafana-agent-flow-mixin'], + graphTooltip: 1, // shared crosshair for all graphs + tags: ['grafana-agent-mixin'], templating: { list: [{ name: 'datasource', @@ -122,7 +122,7 @@ icon: 'external link', includeVars: true, keepTime: true, - tags: ['grafana-agent-flow-mixin'], + tags: ['grafana-agent-mixin'], targetBlank: false, }], }, diff --git a/operations/agent-flow-mixin/dashboards/utils/panel.jsonnet b/operations/agent-mixin/dashboards/utils/panel.jsonnet similarity index 100% rename from operations/agent-flow-mixin/dashboards/utils/panel.jsonnet rename to operations/agent-mixin/dashboards/utils/panel.jsonnet diff --git a/operations/agent-flow-mixin/grizzly.jsonnet b/operations/agent-mixin/grizzly.jsonnet similarity index 100% rename from operations/agent-flow-mixin/grizzly.jsonnet rename to operations/agent-mixin/grizzly.jsonnet diff --git a/operations/agent-flow-mixin/grizzly/alerts.jsonnet b/operations/agent-mixin/grizzly/alerts.jsonnet similarity index 100% rename from operations/agent-flow-mixin/grizzly/alerts.jsonnet rename to operations/agent-mixin/grizzly/alerts.jsonnet diff --git a/operations/agent-flow-mixin/grizzly/dashboards.jsonnet b/operations/agent-mixin/grizzly/dashboards.jsonnet similarity index 100% rename from operations/agent-flow-mixin/grizzly/dashboards.jsonnet rename to operations/agent-mixin/grizzly/dashboards.jsonnet diff --git a/operations/agent-flow-mixin/jsonnetfile.json b/operations/agent-mixin/jsonnetfile.json similarity index 100% rename from operations/agent-flow-mixin/jsonnetfile.json rename to operations/agent-mixin/jsonnetfile.json diff --git 
a/operations/agent-flow-mixin/mixin.libsonnet b/operations/agent-mixin/mixin.libsonnet similarity index 100% rename from operations/agent-flow-mixin/mixin.libsonnet rename to operations/agent-mixin/mixin.libsonnet diff --git a/operations/agent-static-mixin/alerts.libsonnet b/operations/agent-static-mixin/alerts.libsonnet deleted file mode 100644 index 653be89ce4..0000000000 --- a/operations/agent-static-mixin/alerts.libsonnet +++ /dev/null @@ -1,288 +0,0 @@ -local config = import 'config.libsonnet'; -local _config = config._config; - -{ - prometheusAlerts+:: { - groups+: [ - { - name: 'grafana-agent-tracing', - rules: [ - { - alert: 'AgentTracingReceiverErrors', - // TODO(@mapno): add recording rule for total spans - expr: ||| - 100 * sum(rate(traces_receiver_refused_spans{receiver!="otlp/lb"}[1m])) by (%(group_by_cluster)s, receiver) - / - (sum(rate(traces_receiver_refused_spans{receiver!="otlp/lb"}[1m])) by (%(group_by_cluster)s, receiver) + sum(rate(traces_receiver_accepted_spans{receiver!="otlp/lb"}[1m])) by (%(group_by_cluster)s, receiver)) - > 10 - ||| % _config, - 'for': '15m', - labels: { - severity: 'warning', - }, - annotations: { - message: ||| - Receiver {{ $labels.receiver }} is experiencing {{ printf "%.2f" $value }}% errors. - |||, - }, - }, - { - alert: 'AgentTracingExporterErrors', - // TODO(@mapno): add recording rule for total spans - expr: ||| - 100 * sum(rate(traces_exporter_send_failed_spans{exporter!="otlp"}[1m])) by (%(group_by_cluster)s, exporter) - / - (sum(rate(traces_exporter_send_failed_spans{exporter!="otlp"}[1m])) by (%(group_by_cluster)s, exporter) + sum(rate(traces_exporter_sent_spans{exporter!="otlp"}[1m])) by (%(group_by_cluster)s, exporter)) - > 10 - ||| % _config, - 'for': '15m', - labels: { - severity: 'warning', - }, - annotations: { - message: ||| - Exporter {{ $labels.exporter }} is experiencing {{ printf "%.2f" $value }}% errors. - |||, - }, - }, - { - alert: 'AgentTracingLoadBalancingErrors', - expr: ||| - 100 * sum(rate(traces_loadbalancer_backend_outcome{success="false"}[1m])) by (%(group_by_cluster)s) - / - sum(rate(traces_loadbalancer_backend_outcome{success="true"}[1m])) by (%(group_by_cluster)s) - > 10 - ||| % _config, - 'for': '15m', - labels: { - severity: 'warning', - }, - annotations: { - message: ||| - Load balancing is experiencing {{ printf "%.2f" $value }}% errors. 
- |||, - }, - }, - ], - }, - { - name: 'VultureChecks', - rules: [ - { - alert: 'VultureDown', - expr: ||| - up{job=~"agent-smoke-test/vulture"} == 0 - |||, - 'for': '5m', - annotations: { - summary: 'Vulture {{ $labels.job }} is down.', - }, - }, - { - alert: 'VultureFlapping', - expr: ||| - avg_over_time(up{job=~"agent-smoke-test/vulture"}[5m]) < 1 - |||, - 'for': '15m', - annotations: { - summary: 'Vulture {{ $labels.job }} is flapping.', - }, - }, - { - alert: 'VultureNotScraped', - expr: ||| - rate(tempo_vulture_trace_total[1m]) == 0 - |||, - 'for': '5m', - annotations: { - summary: 'Vulture {{ $labels.job }} is not being scraped.', - }, - }, - { - alert: 'VultureFailures', - expr: ||| - (rate(tempo_vulture_error_total[5m]) / rate(tempo_vulture_trace_total[5m])) > 0.3 - |||, - 'for': '5m', - annotations: { - summary: 'Vulture {{ $labels.job }} has had failures for at least 5m', - }, - }, - ], - }, - { - name: 'GrafanaAgentConfig', - rules: [ - { - alert: 'AgentRemoteConfigBadAPIRequests', - expr: ||| - 100 * sum(rate(agent_remote_config_fetches_total{status_code=~"(4|5).."}[10m])) by (%(group_by_cluster)s) - / - sum(rate(agent_remote_config_fetches_total[10m])) by (%(group_by_cluster)s) - > 5 - ||| % _config, - 'for': '10m', - labels: { - severity: 'warning', - }, - annotations: { - message: ||| - Receiving HTTP {{ $labels.status_code }} errors from API in {{ printf "%.2f" $value }}% of cases. - |||, - }, - }, - { - alert: 'AgentRemoteConfigBadAPIRequests', - expr: ||| - 100 * sum(rate(agent_remote_config_fetches_total{status_code=~"(4|5).."}[10m])) by (%(group_by_cluster)s) - / - sum(rate(agent_remote_config_fetches_total[10m])) by (%(group_by_cluster)s) - > 10 - ||| % _config, - 'for': '10m', - labels: { - severity: 'critical', - }, - annotations: { - message: ||| - Receiving HTTP {{ $labels.status_code }} errors from API in {{ printf "%.2f" $value }}% of cases. - |||, - }, - }, - { - alert: 'AgentRemoteConfigFetchErrors', - expr: ||| - 100 * sum(rate(agent_remote_config_fetch_errors_total[10m])) by (%(group_by_cluster)s) - / - sum(rate(agent_remote_config_fetches_total[10m])) by (%(group_by_cluster)s) - > 5 - ||| % _config, - 'for': '10m', - labels: { - severity: 'warning', - }, - annotations: { - message: ||| - Failing to reach Agent Management API. - |||, - }, - }, - { - alert: 'AgentRemoteConfigFetchErrors', - expr: ||| - 100 * sum(rate(agent_remote_config_fetch_errors_total[10m])) by (%(group_by_cluster)s) - / - sum(rate(agent_remote_config_fetches_total[10m])) by (%(group_by_cluster)s) - > 10 - ||| % _config, - 'for': '10m', - labels: { - severity: 'critical', - }, - annotations: { - message: ||| - Failing to reach Agent Management API. - |||, - }, - }, - { - alert: 'AgentRemoteConfigInvalidAPIResponse', - expr: ||| - 100 * sum(rate(agent_remote_config_invalid_total{reason=~".+"}[10m])) by (%(group_by_cluster)s) - / - sum(rate(agent_remote_config_fetches_total[10m])) by (%(group_by_cluster)s) - > 5 - ||| % _config, - 'for': '10m', - labels: { - severity: 'warning', - }, - annotations: { - message: ||| - API is responding with {{ $labels.reason }} in {{ printf "%.2f" $value }}% of cases. 
- |||, - }, - }, - { - alert: 'AgentRemoteConfigInvalidAPIResponse', - expr: ||| - 100 * sum(rate(agent_remote_config_invalid_total{reason=~".+"}[10m])) by (%(group_by_cluster)s) - / - sum(rate(agent_remote_config_fetches_total[10m])) by (%(group_by_cluster)s) - > 10 - ||| % _config, - 'for': '10m', - labels: { - severity: 'critical', - }, - annotations: { - message: ||| - API is responding with {{ $labels.reason }} in {{ printf "%.2f" $value }}% of cases. - |||, - }, - }, - { - alert: 'AgentFailureToReloadConfig', - expr: ||| - avg_over_time(agent_config_last_load_successful[10m]) < 0.9 - ||| % _config, - 'for': '10m', - labels: { - severity: 'warning', - }, - annotations: { - message: ||| - Instance {{ $labels.instance }} failed to successfully reload the config. - |||, - }, - }, - { - alert: 'AgentFailureToReloadConfig', - expr: ||| - avg_over_time(agent_config_last_load_successful[10m]) < 0.9 - ||| % _config, - 'for': '30m', - labels: { - severity: 'critical', - }, - annotations: { - message: ||| - Instance {{ $labels.instance }} failed to successfully reload the config. - |||, - }, - }, - { - alert: 'AgentManagementFallbackToEmptyConfig', - expr: ||| - sum(rate(agent_management_config_fallbacks_total{fallback_to="empty_config"}[10m])) by (%(group_by_cluster)s) > 0 - ||| % _config, - 'for': '10m', - labels: { - severity: 'warning', - }, - annotations: { - message: ||| - Instance {{ $labels.instance }} fell back to empty configuration. - |||, - }, - }, - { - alert: 'AgentManagementFallbackToEmptyConfig', - expr: ||| - sum(rate(agent_management_config_fallbacks_total{fallback_to="empty_config"}[10m])) by (%(group_by_cluster)s) > 0 - ||| % _config, - 'for': '30m', - labels: { - severity: 'critical', - }, - annotations: { - message: ||| - Instance {{ $labels.instance }} fell back to empty configuration. 
- |||, - }, - }, - ], - }, - ], - }, -} diff --git a/operations/agent-static-mixin/config.libsonnet b/operations/agent-static-mixin/config.libsonnet deleted file mode 100644 index 8a7df26c0d..0000000000 --- a/operations/agent-static-mixin/config.libsonnet +++ /dev/null @@ -1,13 +0,0 @@ -{ - local makeGroupBy(groups) = std.join(', ', groups), - - _config+:: { - namespace: '.*', - - // Groups labels to uniquely identify and group by clusters - cluster_selectors: ['cluster', 'namespace'], - - // Each group-by label list is `, `-separated and unique identifies - group_by_cluster: makeGroupBy($._config.cluster_selectors), - }, -} diff --git a/operations/agent-static-mixin/dashboards.libsonnet b/operations/agent-static-mixin/dashboards.libsonnet deleted file mode 100644 index 834ec51813..0000000000 --- a/operations/agent-static-mixin/dashboards.libsonnet +++ /dev/null @@ -1,789 +0,0 @@ -local utils = import './utils.libsonnet'; -local g = import 'grafana-builder/grafana.libsonnet'; -local grafana = import 'grafonnet/grafana.libsonnet'; - -local dashboard = grafana.dashboard; -local row = grafana.row; -local singlestat = grafana.singlestat; -local prometheus = grafana.prometheus; -local graphPanel = grafana.graphPanel; -local tablePanel = grafana.tablePanel; -local template = grafana.template; - -{ - grafanaDashboards+:: { - 'agent.json': - utils.injectUtils(g.dashboard('Agent')) - .addMultiTemplate('cluster', 'agent_build_info', 'cluster') - .addMultiTemplate('namespace', 'agent_build_info', 'namespace') - .addMultiTemplate('container', 'agent_build_info', 'container') - .addMultiTemplateWithAll('pod', 'agent_build_info{container=~"$container"}', 'pod', all='grafana-agent-.*') - .addRow( - g.row('Agent Stats') - .addPanel( - g.panel('Agent Stats') + - g.tablePanel([ - 'count by (pod, container, version) (agent_build_info{cluster=~"$cluster", namespace=~"$namespace", container=~"$container"})', - 'max by (pod, container) (time() - process_start_time_seconds{cluster=~"$cluster", namespace=~"$namespace", container=~"$container"})', - ], { - pod: { alias: 'Pod' }, - container: { alias: 'Container' }, - version: { alias: 'Version' }, - 'Value #A': { alias: 'Count', type: 'hidden' }, - 'Value #B': { alias: 'Uptime' }, - }) - ) - ) - .addRow( - g.row('Prometheus Discovery') - .addPanel( - g.panel('Target Sync') + - g.queryPanel('sum(rate(prometheus_target_sync_length_seconds_sum{cluster=~"$cluster", namespace=~"$namespace", container=~"$container"}[5m])) by (pod, scrape_job) * 1e3', '{{pod}}/{{scrape_job}}') + - { yaxes: g.yaxes('ms') } - ) - .addPanel( - g.panel('Targets') + - g.queryPanel('sum by (pod) (prometheus_sd_discovered_targets{cluster=~"$cluster", namespace=~"$namespace", container=~"$container"})', '{{pod}}') + - g.stack - ) - ) - .addRow( - g.row('Prometheus Retrieval') - .addPanel( - g.panel('Average Scrape Interval Duration') + - g.queryPanel(||| - rate(prometheus_target_interval_length_seconds_sum{cluster=~"$cluster", namespace=~"$namespace", container=~"$container"}[5m]) - / - rate(prometheus_target_interval_length_seconds_count{cluster=~"$cluster", namespace=~"$namespace", container=~"$container"}[5m]) - * 1e3 - |||, '{{pod}} {{interval}} configured') + - { yaxes: g.yaxes('ms') } - ) - .addPanel( - g.panel('Scrape failures') + - g.queryPanel([ - 'sum by (job) (rate(prometheus_target_scrapes_exceeded_sample_limit_total{cluster=~"$cluster", namespace=~"$namespace", container=~"$container"}[1m]))', - 'sum by (job) 
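The deleted config.libsonnet above builds group_by_cluster by joining cluster_selectors with ', ', and each alert expr splices it in through the %(group_by_cluster)s placeholder. A rough Go sketch of the same string expansion, using the AgentTracingLoadBalancingErrors rule from earlier in this file as the template:

package main

import (
	"fmt"
	"strings"
)

func main() {
	// config.libsonnet: makeGroupBy(['cluster', 'namespace']) -> "cluster, namespace".
	groupByCluster := strings.Join([]string{"cluster", "namespace"}, ", ")

	// %(group_by_cluster)s in the jsonnet expr becomes the joined label list.
	expr := fmt.Sprintf(
		"100 * sum(rate(traces_loadbalancer_backend_outcome{success=\"false\"}[1m])) by (%s)\n"+
			"/\n"+
			"sum(rate(traces_loadbalancer_backend_outcome{success=\"true\"}[1m])) by (%s)\n"+
			"> 10",
		groupByCluster, groupByCluster)
	fmt.Println(expr)
}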
(rate(prometheus_target_scrapes_sample_duplicate_timestamp_total{cluster=~"$cluster", namespace=~"$namespace", container=~"$container"}[1m]))', - 'sum by (job) (rate(prometheus_target_scrapes_sample_out_of_bounds_total{cluster=~"$cluster", namespace=~"$namespace", container=~"$container"}[1m]))', - 'sum by (job) (rate(prometheus_target_scrapes_sample_out_of_order_total{cluster=~"$cluster", namespace=~"$namespace", container=~"$container"}[1m]))', - ], [ - 'exceeded sample limit: {{job}}', - 'duplicate timestamp: {{job}}', - 'out of bounds: {{job}}', - 'out of order: {{job}}', - ]) + - g.stack - ) - .addPanel( - g.panel('Appended Samples') + - g.queryPanel('sum by (job, instance_group_name) (rate(agent_wal_samples_appended_total{cluster=~"$cluster", namespace=~"$namespace", container=~"$container"}[5m]))', '{{job}} {{instance_group_name}}') + - g.stack - ) - ), - - // Remote write specific dashboard. - 'agent-remote-write.json': - local timestampComparison = - graphPanel.new( - 'Highest Timestamp In vs. Highest Timestamp Sent', - datasource='$datasource', - span=6, - ) - .addTarget(prometheus.target( - ||| - ( - prometheus_remote_storage_highest_timestamp_in_seconds{cluster=~"$cluster", namespace=~"$namespace", container=~"$container"} - - - ignoring(url, remote_name) group_right(pod) - prometheus_remote_storage_queue_highest_sent_timestamp_seconds{cluster=~"$cluster", namespace=~"$namespace", container=~"$container"} - ) - |||, - legendFormat='{{cluster}}:{{pod}}-{{instance_group_name}}-{{url}}', - )); - - local remoteSendLatency = - graphPanel.new( - 'Latency [1m]', - datasource='$datasource', - span=6, - ) - .addTarget(prometheus.target( - 'rate(prometheus_remote_storage_sent_batch_duration_seconds_sum{cluster=~"$cluster", namespace=~"$namespace", container=~"$container"}[1m]) / rate(prometheus_remote_storage_sent_batch_duration_seconds_count{cluster=~"$cluster", namespace=~"$namespace", container=~"$container"}[1m])', - legendFormat='mean {{cluster}}:{{pod}}-{{instance_group_name}}-{{url}}', - )) - .addTarget(prometheus.target( - 'histogram_quantile(0.99, rate(prometheus_remote_storage_sent_batch_duration_seconds_bucket{cluster=~"$cluster", namespace=~"$namespace", container=~"$container"}[1m]))', - legendFormat='p99 {{cluster}}:{{pod}}-{{instance_group_name}}-{{url}}', - )); - - local samplesInRate = - graphPanel.new( - 'Rate in [5m]', - datasource='$datasource', - span=6, - ) - .addTarget(prometheus.target( - 'rate(agent_wal_samples_appended_total{cluster=~"$cluster", namespace=~"$namespace", container=~"$container"}[5m])', - legendFormat='{{cluster}}:{{pod}}-{{instance_group_name}}-{{url}}', - )); - - local samplesOutRate = - graphPanel.new( - 'Rate succeeded [5m]', - datasource='$datasource', - span=6, - ) - .addTarget(prometheus.target( - 'rate(prometheus_remote_storage_succeeded_samples_total{cluster=~"$cluster", namespace=~"$namespace", container=~"$container"}[5m]) or rate(prometheus_remote_storage_samples_total{cluster=~"$cluster", namespace=~"$namespace", container=~"$container"}[5m])', - legendFormat='{{cluster}}:{{pod}}-{{instance_group_name}}-{{url}}', - )); - - local currentShards = - graphPanel.new( - 'Current Shards', - datasource='$datasource', - span=12, - min_span=6, - ) - .addTarget(prometheus.target( - 'prometheus_remote_storage_shards{cluster=~"$cluster", namespace=~"$namespace", container=~"$container"}', - legendFormat='{{cluster}}:{{pod}}-{{instance_group_name}}-{{url}}', - )); - - local maxShards = - graphPanel.new( - 'Max Shards', - datasource='$datasource', - 
span=4, - ) - .addTarget(prometheus.target( - 'prometheus_remote_storage_shards_max{cluster=~"$cluster", namespace=~"$namespace", container=~"$container"}', - legendFormat='{{cluster}}:{{pod}}-{{instance_group_name}}-{{url}}', - )); - - local minShards = - graphPanel.new( - 'Min Shards', - datasource='$datasource', - span=4, - ) - .addTarget(prometheus.target( - 'prometheus_remote_storage_shards_min{cluster=~"$cluster", namespace=~"$namespace", container=~"$container"}', - legendFormat='{{cluster}}:{{pod}}-{{instance_group_name}}-{{url}}', - )); - - local desiredShards = - graphPanel.new( - 'Desired Shards', - datasource='$datasource', - span=4, - ) - .addTarget(prometheus.target( - 'prometheus_remote_storage_shards_desired{cluster=~"$cluster", namespace=~"$namespace", container=~"$container"}', - legendFormat='{{cluster}}:{{pod}}-{{instance_group_name}}-{{url}}', - )); - - local shardsCapacity = - graphPanel.new( - 'Shard Capacity', - datasource='$datasource', - span=6, - ) - .addTarget(prometheus.target( - 'prometheus_remote_storage_shard_capacity{cluster=~"$cluster", namespace=~"$namespace", container=~"$container"}', - legendFormat='{{cluster}}:{{pod}}-{{instance_group_name}}-{{url}}', - )); - - local pendingSamples = - graphPanel.new( - 'Pending Samples', - datasource='$datasource', - span=6, - ) - .addTarget(prometheus.target( - 'prometheus_remote_storage_samples_pending{cluster=~"$cluster", namespace=~"$namespace", container=~"$container"}', - legendFormat='{{cluster}}:{{pod}}-{{instance_group_name}}-{{url}}', - )); - - local queueSegment = - graphPanel.new( - 'Remote Write Current Segment', - datasource='$datasource', - span=6, - formatY1='none', - ) - .addTarget(prometheus.target( - 'prometheus_wal_watcher_current_segment{cluster=~"$cluster", namespace=~"$namespace", container=~"$container"}', - legendFormat='{{cluster}}:{{pod}}-{{instance_group_name}}-{{url}}', - )); - - local droppedSamples = - graphPanel.new( - 'Dropped Samples', - datasource='$datasource', - span=6, - ) - .addTarget(prometheus.target( - 'rate(prometheus_remote_storage_samples_dropped_total{cluster=~"$cluster", namespace=~"$namespace", container=~"$container"}[5m])', - legendFormat='{{cluster}}:{{pod}}-{{instance_group_name}}-{{url}}', - )); - - local failedSamples = - graphPanel.new( - 'Failed Samples', - datasource='$datasource', - span=6, - ) - .addTarget(prometheus.target( - 'rate(prometheus_remote_storage_samples_failed_total{cluster=~"$cluster", namespace=~"$namespace", container=~"$container"}[5m])', - legendFormat='{{cluster}}:{{pod}}-{{instance_group_name}}-{{url}}', - )); - - local retriedSamples = - graphPanel.new( - 'Retried Samples', - datasource='$datasource', - span=6, - ) - .addTarget(prometheus.target( - 'rate(prometheus_remote_storage_samples_retried_total{cluster=~"$cluster", namespace=~"$namespace", container=~"$container"}[5m])', - legendFormat='{{cluster}}:{{pod}}-{{instance_group_name}}-{{url}}', - )); - - local enqueueRetries = - graphPanel.new( - 'Enqueue Retries', - datasource='$datasource', - span=6, - ) - .addTarget(prometheus.target( - 'rate(prometheus_remote_storage_enqueue_retries_total{cluster=~"$cluster", namespace=~"$namespace", container=~"$container"}[5m])', - legendFormat='{{cluster}}:{{pod}}-{{instance_group_name}}-{{url}}', - )); - - dashboard.new('Agent Prometheus Remote Write', tags=['grafana-agent-mixin'], editable=true, refresh='30s', time_from='now-1h') - .addTemplate( - { - hide: 0, - label: null, - name: 'datasource', - options: [], - query: 'prometheus', - refresh: 
1, - regex: '', - type: 'datasource', - }, - ) - .addTemplate( - template.new( - 'cluster', - '$datasource', - 'label_values(agent_build_info, cluster)', - refresh='time', - current={ - selected: true, - text: 'All', - value: '$__all', - }, - includeAll=true, - ), - ) - .addTemplate( - template.new( - 'namespace', - '$datasource', - 'label_values(agent_build_info, namespace)', - refresh='time', - current={ - selected: true, - text: 'All', - value: '$__all', - }, - includeAll=true, - ), - ) - .addTemplate( - template.new( - 'container', - '$datasource', - 'label_values(agent_build_info, container)', - refresh='time', - current={ - selected: true, - text: 'All', - value: '$__all', - }, - includeAll=true, - ), - ) - .addTemplate( - template.new( - 'pod', - '$datasource', - 'label_values(agent_build_info{container=~"$container"}, pod)', - refresh='time', - current={ - selected: true, - text: 'All', - value: '$__all', - }, - includeAll=true, - ), - ) - .addTemplate( - template.new( - 'url', - '$datasource', - 'label_values(prometheus_remote_storage_shards{cluster=~"$cluster", pod=~"$pod"}, url)', - refresh='time', - includeAll=true, - ) - ) - .addRow( - row.new('Timestamps') - .addPanel(timestampComparison) - .addPanel(remoteSendLatency) - ) - .addRow( - row.new('Samples') - .addPanel(samplesInRate) - .addPanel(samplesOutRate) - .addPanel(pendingSamples) - .addPanel(droppedSamples) - .addPanel(failedSamples) - .addPanel(retriedSamples) - ) - .addRow( - row.new('Shards') - .addPanel(currentShards) - .addPanel(maxShards) - .addPanel(minShards) - .addPanel(desiredShards) - ) - .addRow( - row.new('Shard Details') - .addPanel(shardsCapacity) - ) - .addRow( - row.new('Segments') - .addPanel(queueSegment) - ) - .addRow( - row.new('Misc. Rates') - .addPanel(enqueueRetries) - ), - - 'agent-tracing-pipeline.json': - local acceptedSpans = - graphPanel.new( - 'Accepted spans', - datasource='$datasource', - interval='1m', - span=3, - legend_show=false, - fill=0, - ) - .addTarget(prometheus.target( - ||| - rate(traces_receiver_accepted_spans{cluster=~"$cluster",namespace=~"$namespace",container=~"$container",pod=~"$pod",receiver!="otlp/lb"}[$__rate_interval]) - |||, - legendFormat='{{ pod }} - {{ receiver }}/{{ transport }}', - )); - - local refusedSpans = - graphPanel.new( - 'Refused spans', - datasource='$datasource', - interval='1m', - span=3, - legend_show=false, - fill=0, - ) - .addTarget(prometheus.target( - ||| - rate(traces_receiver_refused_spans{cluster=~"$cluster",namespace=~"$namespace",container=~"$container",pod=~"$pod",receiver!="otlp/lb"}[$__rate_interval]) - |||, - legendFormat='{{ pod }} - {{ receiver }}/{{ transport }}', - )); - - local sentSpans = - graphPanel.new( - 'Exported spans', - datasource='$datasource', - interval='1m', - span=3, - legend_show=false, - fill=0, - ) - .addTarget(prometheus.target( - ||| - rate(traces_exporter_sent_spans{cluster=~"$cluster",namespace=~"$namespace",container=~"$container",pod=~"$pod",exporter!="otlp"}[$__rate_interval]) - |||, - legendFormat='{{ pod }} - {{ exporter }}', - )); - - local exportedFailedSpans = - graphPanel.new( - 'Exported failed spans', - datasource='$datasource', - interval='1m', - span=3, - legend_show=false, - fill=0, - ) - .addTarget(prometheus.target( - ||| - rate(traces_exporter_send_failed_spans{cluster=~"$cluster",namespace=~"$namespace",container=~"$container",pod=~"$pod",exporter!="otlp"}[$__rate_interval]) - |||, - legendFormat='{{ pod }} - {{ exporter }}', - )); - - local receivedSpans(receiverFilter, width) = - 
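Every panel in the remote-write dashboard above is a thin wrapper around one PromQL query. A sketch of issuing one of those queries directly with the Prometheus Go client, assuming a server reachable at the (hypothetical) address below:

package main

import (
	"context"
	"fmt"
	"time"

	"github.com/prometheus/client_golang/api"
	v1 "github.com/prometheus/client_golang/api/prometheus/v1"
)

func main() {
	// The address is an assumption for this sketch; point it at a real Prometheus.
	client, err := api.NewClient(api.Config{Address: "http://localhost:9090"})
	if err != nil {
		panic(err)
	}

	ctx, cancel := context.WithTimeout(context.Background(), 10*time.Second)
	defer cancel()

	// Same query as the "Pending Samples" panel, minus the dashboard variables.
	result, warnings, err := v1.NewAPI(client).Query(ctx, "prometheus_remote_storage_samples_pending", time.Now())
	if err != nil {
		panic(err)
	}
	if len(warnings) > 0 {
		fmt.Println("warnings:", warnings)
	}
	fmt.Println(result)
}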
graphPanel.new( - 'Received spans', - datasource='$datasource', - interval='1m', - span=width, - fill=1, - ) - .addTarget(prometheus.target( - ||| - sum(rate(traces_receiver_accepted_spans{cluster=~"$cluster",namespace=~"$namespace",container=~"$container",pod=~"$pod",%s}[$__rate_interval])) - ||| % receiverFilter, - legendFormat='Accepted', - )) - .addTarget(prometheus.target( - ||| - sum(rate(traces_receiver_refused_spans{cluster=~"$cluster",namespace=~"$namespace",container=~"$container",pod=~"$pod",%s}[$__rate_interval])) - ||| % receiverFilter, - legendFormat='Refused', - )); - - local exportedSpans(exporterFilter, width) = - graphPanel.new( - 'Exported spans', - datasource='$datasource', - interval='1m', - span=width, - fill=1, - ) - .addTarget(prometheus.target( - ||| - sum(rate(traces_exporter_sent_spans{cluster=~"$cluster",namespace=~"$namespace",container=~"$container",pod=~"$pod",%s}[$__rate_interval])) - ||| % exporterFilter, - legendFormat='Sent', - )) - .addTarget(prometheus.target( - ||| - sum(rate(traces_exporter_send_failed_spans{cluster=~"$cluster",namespace=~"$namespace",container=~"$container",pod=~"$pod",%s}[$__rate_interval])) - ||| % exporterFilter, - legendFormat='Send failed', - )); - - local loadBalancedSpans = - graphPanel.new( - 'Load-balanced spans', - datasource='$datasource', - interval='1m', - span=3, - fill=1, - stack=true, - ) - .addTarget(prometheus.target( - ||| - rate(traces_loadbalancer_backend_outcome{cluster=~"$cluster",namespace=~"$namespace",success="true",container=~"$container",pod=~"$pod"}[$__rate_interval]) - |||, - legendFormat='{{ pod }}', - )); - - local peersNum = - graphPanel.new( - 'Number of peers', - datasource='$datasource', - interval='1m', - span=3, - legend_show=false, - fill=0, - ) - .addTarget(prometheus.target( - ||| - traces_loadbalancer_num_backends{cluster=~"$cluster",namespace=~"$namespace",container=~"$container",pod=~"$pod"} - |||, - legendFormat='{{ pod }}', - )); - - dashboard.new('Agent Tracing Pipeline', tags=['grafana-agent-mixin'], editable=true, refresh='30s', time_from='now-1h') - .addTemplate( - { - hide: 0, - label: null, - name: 'datasource', - options: [], - query: 'prometheus', - refresh: 1, - regex: '', - type: 'datasource', - }, - ) - .addTemplate( - template.new( - 'cluster', - '$datasource', - 'label_values(agent_build_info, cluster)', - refresh='time', - current={ - selected: true, - text: 'All', - value: '$__all', - }, - includeAll=true, - ), - ) - .addTemplate( - template.new( - 'namespace', - '$datasource', - 'label_values(agent_build_info, namespace)', - refresh='time', - current={ - selected: true, - text: 'All', - value: '$__all', - }, - includeAll=true, - ), - ) - .addTemplate( - template.new( - 'container', - '$datasource', - 'label_values(agent_build_info, container)', - refresh='time', - current={ - selected: true, - text: 'All', - value: '$__all', - }, - includeAll=true, - ), - ) - .addTemplate( - template.new( - 'pod', - '$datasource', - 'label_values(agent_build_info{container=~"$container"}, pod)', - refresh='time', - current={ - selected: true, - text: 'All', - value: '$__all', - }, - includeAll=true, - ), - ) - .addRow( - row.new('Write / Read') - .addPanel(acceptedSpans) - .addPanel(refusedSpans) - .addPanel(sentSpans) - .addPanel(exportedFailedSpans) - .addPanel(receivedSpans('receiver!="otlp/lb"', 6)) - .addPanel(exportedSpans('exporter!="otlp"', 6)) - ) - .addRow( - row.new('Load balancing') - .addPanel(loadBalancedSpans) - .addPanel(peersNum) - 
.addPanel(receivedSpans('receiver="otlp/lb"', 3)) - .addPanel(exportedSpans('exporter="otlp"', 3)) - ), - - 'agent-logs-pipeline.json': - local sumByPodRateCounter(title, metric, format='short') = - graphPanel.new( - title, - datasource='$datasource', - interval='1m', - span=6, - fill=1, - stack=true, - format=format - ) - .addTarget(prometheus.target( - ||| - sum by($groupBy) (rate(%s{cluster=~"$cluster",namespace=~"$namespace",container=~"$container",pod=~"$pod"}[$__rate_interval])) - ||| % [metric], - legendFormat='{{$groupBy}}', - )); - - local sumByPodGague(title, metric) = - graphPanel.new( - title, - datasource='$datasource', - interval='1m', - span=6, - fill=1, - stack=true, - ) - .addTarget(prometheus.target( - ||| - sum by($groupBy) (%s{cluster=~"$cluster",namespace=~"$namespace",container=~"$container",pod=~"$pod"}) - ||| % [metric], - legendFormat='{{$groupBy}}', - )); - - local requestSuccessRate() = - graphPanel.new( - 'Write requests success rate [%]', - datasource='$datasource', - interval='1m', - fill=0, - span=6, - format='%', - ) - .addTarget(prometheus.target( - ||| - sum by($groupBy) (rate(promtail_request_duration_seconds_bucket{status_code=~"2..", cluster=~"$cluster",namespace=~"$namespace",container=~"$container",pod=~"$pod"}[$__rate_interval])) - / - sum by($groupBy) (rate(promtail_request_duration_seconds_bucket{cluster=~"$cluster",namespace=~"$namespace",container=~"$container",pod=~"$pod"}[$__rate_interval])) - * 100 - |||, - legendFormat='{{$groupBy}}', - )); - - local histogramQuantile(title, metric, q) = - graphPanel.new( - title, - datasource='$datasource', - interval='1m', - span=6, - fill=0, - format='s', - ) - .addTarget(prometheus.target( - ||| - histogram_quantile( - %f, - sum by (le, $groupBy) - (rate(%s{cluster=~"$cluster",namespace=~"$namespace",container=~"$container",pod=~"$pod"}[$__rate_interval])) - ) - ||| % [q, metric], - legendFormat='{{$groupBy}}', - )); - - local histogramAverage(title, metric) = - graphPanel.new( - title, - datasource='$datasource', - interval='1m', - span=6, - fill=0, - format='s', - ) - .addTarget(prometheus.target( - ||| - (sum by (le, $groupBy) (rate(%s_sum{cluster=~"$cluster",namespace=~"$namespace",container=~"$container",pod=~"$pod"}[$__rate_interval]))) - / - (sum by (le, $groupBy) (rate(%s_count{cluster=~"$cluster",namespace=~"$namespace",container=~"$container",pod=~"$pod"}[$__rate_interval]))) - ||| % [metric, metric], - legendFormat='{{$groupBy}}', - )); - - - dashboard.new('Agent Logs Pipeline', tags=['grafana-agent-mixin'], editable=true, refresh='30s', time_from='now-1h') - .addTemplate( - { - hide: 0, - label: null, - name: 'datasource', - options: [], - query: 'prometheus', - refresh: 1, - regex: '', - type: 'datasource', - }, - ) - .addTemplate( - template.new( - 'cluster', - '$datasource', - 'label_values(agent_build_info, cluster)', - refresh='time', - current={ - selected: true, - text: 'All', - value: '$__all', - }, - includeAll=true, - ), - ) - .addTemplate( - template.new( - 'namespace', - '$datasource', - 'label_values(agent_build_info, namespace)', - refresh='time', - current={ - selected: true, - text: 'All', - value: '$__all', - }, - includeAll=true, - ), - ) - .addTemplate( - template.new( - 'container', - '$datasource', - 'label_values(agent_build_info, container)', - refresh='time', - current={ - selected: true, - text: 'All', - value: '$__all', - }, - includeAll=true, - ), - ) - .addTemplate( - template.new( - 'pod', - '$datasource', - 'label_values(agent_build_info{container=~"$container"}, 
pod)', - refresh='time', - current={ - selected: true, - text: 'All', - value: '$__all', - }, - includeAll=true, - ), - ) - .addTemplate( - template.custom( - 'groupBy', - 'pod,cluster,namespace', - 'pod', - ), - ) - .addRow( - row.new('Errors', height=500) - .addPanel(sumByPodRateCounter('Dropped bytes rate [B/s]', 'promtail_dropped_bytes_total', format='Bps')) - .addPanel(requestSuccessRate()) - ) - .addRow( - row.new('Latencies', height=500) - .addPanel(histogramQuantile('Write latencies p99 [s]', 'promtail_request_duration_seconds_bucket', 0.99)) - .addPanel(histogramQuantile('Write latencies p90 [s]', 'promtail_request_duration_seconds_bucket', 0.90)) - .addPanel(histogramQuantile('Write latencies p50 [s]', 'promtail_request_duration_seconds_bucket', 0.50)) - .addPanel(histogramAverage('Write latencies average [s]', 'promtail_request_duration_seconds')) - ) - .addRow( - row.new('Logs volume', height=500) - .addPanel(sumByPodRateCounter('Bytes read rate [B/s]', 'promtail_read_bytes_total', format='Bps')) - .addPanel(sumByPodRateCounter('Lines read rate [lines/s]', 'promtail_read_lines_total')) - .addPanel(sumByPodGague('Active files count', 'promtail_files_active_total')) - .addPanel(sumByPodRateCounter('Entries sent rate [entries/s]', 'promtail_sent_entries_total')) - ), - }, -} diff --git a/operations/agent-static-mixin/debugging.libsonnet b/operations/agent-static-mixin/debugging.libsonnet deleted file mode 100644 index 711184d333..0000000000 --- a/operations/agent-static-mixin/debugging.libsonnet +++ /dev/null @@ -1,128 +0,0 @@ -local utils = import './utils.libsonnet'; -local g = import 'grafana-builder/grafana.libsonnet'; - -{ - grafanaDashboards+:: { - 'agent-operational.json': - utils.injectUtils(g.dashboard('Agent Operational')) - .addMultiTemplate('cluster', 'agent_build_info', 'cluster') - .addMultiTemplate('namespace', 'agent_build_info{cluster=~"$cluster"}', 'namespace') - .addMultiTemplate('container', 'agent_build_info{cluster=~"$cluster", namespace="$namespace"}', 'container') - .addMultiTemplate('pod', 'agent_build_info{cluster=~"$cluster", namespace="$namespace", container="$container"}', 'pod') - .addRow( - g.row('General') - .addPanel( - g.panel('GCs [count/s]') + - g.queryPanel( - 'rate(go_gc_duration_seconds_count{cluster=~"$cluster", namespace=~"$namespace", container=~"$container", pod=~"$pod"}[5m])', - '{{pod}}', - ) - ) - .addPanel( - g.panel('Go Heap In Use') + - { yaxes: g.yaxes('decbytes') } + - g.queryPanel( - 'go_memstats_heap_inuse_bytes{cluster=~"$cluster", namespace=~"$namespace", container=~"$container", pod=~"$pod"}', - '{{pod}}', - ) - ) - .addPanel( - g.panel('Goroutines') + - g.queryPanel( - 'go_goroutines{cluster=~"$cluster", namespace=~"$namespace", container=~"$container", pod=~"$pod"}', - '{{pod}}', - ) - ) - .addPanel( - g.panel('CPU Usage [time/s]') + - g.queryPanel( - 'rate(container_cpu_usage_seconds_total{cluster=~"$cluster", namespace=~"$namespace", container=~"$container", pod=~"$pod"}[5m])', - '{{pod}}', - ) - ) - .addPanel( - g.panel('Working Set Size') + - { yaxes: g.yaxes('decbytes') } + - g.queryPanel( - 'container_memory_working_set_bytes{cluster=~"$cluster", namespace=~"$namespace", container=~"$container", pod=~"$pod"}', - '{{pod}}', - ) - ) - .addPanel( - g.panel('Promtail Bad Words') + - g.queryPanel( - 'rate(promtail_custom_bad_words_total{cluster=~"$cluster", exported_namespace=~"$namespace", exported_job=~"$job"}[5m])', - '{{job}}', - ) - ) - ) - .addRow( - g.row('Network') - .addPanel( - g.panel('Received Bytes [B/s]') + - 
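The 'Write latencies average [s]' panel above divides rate(promtail_request_duration_seconds_sum[...]) by rate(promtail_request_duration_seconds_count[...]). The same arithmetic on raw counter samples, as a sketch with made-up readings:

package main

import "fmt"

// averageLatency mirrors rate(m_sum[d]) / rate(m_count[d]): the increase in
// total observed seconds divided by the increase in observation count over
// the same window.
func averageLatency(sumStart, sumEnd, countStart, countEnd float64) float64 {
	dCount := countEnd - countStart
	if dCount == 0 {
		return 0 // no new observations in the window
	}
	return (sumEnd - sumStart) / dCount
}

func main() {
	// Hypothetical counter readings taken 5 minutes apart.
	fmt.Printf("average write latency: %.3fs\n", averageLatency(120.0, 126.5, 4000, 4100))
	// (126.5 - 120.0) / (4100 - 4000) = 0.065s
}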
{ yaxes: g.yaxes('Bps') } + - g.queryPanel( - 'sum by (pod) (rate(container_network_receive_bytes_total{cluster=~"$cluster", namespace=~"$namespace", pod=~"$pod"}[5m]))', - '{{pod}}', - ) - ) - .addPanel( - g.panel('Transmitted Bytes [B/s]') + - { yaxes: g.yaxes('Bps') } + - g.queryPanel( - 'sum by (pod) (rate(container_network_transmit_bytes_total{cluster=~"$cluster", namespace=~"$namespace", pod=~"$pod"}[5m]))', - '{{pod}}', - ) - ) - ) - .addRow( - g.row('Prometheus Read') - .addPanel( - g.panel('Heap Used per Series per Pod') + - { yaxes: g.yaxes('decbytes') } + - g.queryPanel( - ||| - (sum by (pod) (avg_over_time(go_memstats_heap_inuse_bytes{cluster=~"$cluster", namespace=~"$namespace", container=~"$container", pod=~"$pod"}[1m]))) - / - (sum by (pod) (agent_wal_storage_active_series{cluster=~"$cluster", namespace=~"$namespace", container=~"$container", pod=~"$pod"})) - |||, - '{{pod}}', - ) - ) - .addPanel( - g.panel('Avg Heap Used per Series') + - { yaxes: g.yaxes('decbytes') } + - g.queryPanel( - ||| - (sum by (container) (avg_over_time(go_memstats_heap_inuse_bytes{cluster=~"$cluster", namespace=~"$namespace", container=~"$container", pod=~"$pod"}[1m]))) - / - (sum by (container) (agent_wal_storage_active_series{cluster=~"$cluster", namespace=~"$namespace", container=~"$container", pod=~"$pod"})) - |||, - '{{container}}', - ) - ) - .addPanel( - g.panel('Series Count per Pod') + - g.queryPanel( - 'sum by (pod) (agent_wal_storage_active_series{cluster=~"$cluster", namespace=~"$namespace", container=~"$container", pod=~"$pod"})', - '{{pod}}', - ) - ) - .addPanel( - g.panel('Series per Config') + - g.queryPanel( - 'sum by (instance_group_name) (agent_wal_storage_active_series{cluster=~"$cluster", namespace=~"$namespace", container=~"$container", pod=~"$pod"})', - '{{instance_group_name}}', - ) - ) - .addPanel( - g.panel('Total Series') + - g.queryPanel( - 'sum by (container) (agent_wal_storage_active_series{cluster=~"$cluster", namespace=~"$namespace", container=~"$container", pod=~"$pod"})', - '{{container}}', - ) - ) - ), - }, -} - diff --git a/operations/agent-static-mixin/jsonnetfile.json b/operations/agent-static-mixin/jsonnetfile.json deleted file mode 100644 index e5a27a96ad..0000000000 --- a/operations/agent-static-mixin/jsonnetfile.json +++ /dev/null @@ -1,25 +0,0 @@ -{ - "dependencies": [ - { - "name": "grafana-builder", - "source": { - "git": { - "remote": "https://github.com/grafana/jsonnet-libs", - "subdir": "grafana-builder" - } - }, - "version": "master" - }, - { - "name": "grafonnet", - "source": { - "git": { - "remote": "https://github.com/grafana/grafonnet-lib", - "subdir": "grafonnet" - } - }, - "version": "master" - } - ] -} - diff --git a/operations/agent-static-mixin/mixin.libsonnet b/operations/agent-static-mixin/mixin.libsonnet deleted file mode 100644 index 2a015d6816..0000000000 --- a/operations/agent-static-mixin/mixin.libsonnet +++ /dev/null @@ -1,4 +0,0 @@ -{ grafanaDashboardFolder: 'Grafana Agent' } -+ (import 'dashboards.libsonnet') -+ (import 'debugging.libsonnet') -+ (import 'alerts.libsonnet') diff --git a/operations/agent-static-mixin/utils.libsonnet b/operations/agent-static-mixin/utils.libsonnet deleted file mode 100644 index 5467553a7d..0000000000 --- a/operations/agent-static-mixin/utils.libsonnet +++ /dev/null @@ -1,34 +0,0 @@ -{ - injectUtils(dashboard):: dashboard { - tags: ['grafana-agent-mixin'], - refresh: '30s', - addMultiTemplateWithAll(name, metric_name, label_name, all='.*', hide=0):: self { - templating+: { - list+: [{ - allValue: all, - 
current: { - selected: true, - text: 'All', - value: '$__all', - }, - datasource: '$datasource', - hide: hide, - includeAll: true, - label: name, - multi: true, - name: name, - options: [], - query: 'label_values(%s, %s)' % [metric_name, label_name], - refresh: 1, - regex: '', - sort: 2, - tagValuesQuery: '', - tags: [], - tagsQuery: '', - type: 'query', - useTags: false, - }], - }, - }, - }, -} diff --git a/operations/agent-static-operator/crds/monitoring.coreos.com_podmonitors.yaml b/operations/agent-static-operator/crds/monitoring.coreos.com_podmonitors.yaml deleted file mode 100644 index 153677bb17..0000000000 --- a/operations/agent-static-operator/crds/monitoring.coreos.com_podmonitors.yaml +++ /dev/null @@ -1,424 +0,0 @@ ---- -apiVersion: apiextensions.k8s.io/v1 -kind: CustomResourceDefinition -metadata: - annotations: - controller-gen.kubebuilder.io/version: v0.9.2 - creationTimestamp: null - name: podmonitors.monitoring.coreos.com -spec: - group: monitoring.coreos.com - names: - categories: - - prometheus-operator - kind: PodMonitor - listKind: PodMonitorList - plural: podmonitors - shortNames: - - pmon - singular: podmonitor - scope: Namespaced - versions: - - name: v1 - schema: - openAPIV3Schema: - properties: - apiVersion: - type: string - kind: - type: string - metadata: - type: object - spec: - properties: - attachMetadata: - properties: - node: - type: boolean - type: object - jobLabel: - type: string - labelLimit: - format: int64 - type: integer - labelNameLengthLimit: - format: int64 - type: integer - labelValueLengthLimit: - format: int64 - type: integer - namespaceSelector: - properties: - any: - type: boolean - matchNames: - items: - type: string - type: array - type: object - podMetricsEndpoints: - items: - properties: - authorization: - properties: - credentials: - properties: - key: - type: string - name: - type: string - optional: - type: boolean - required: - - key - type: object - x-kubernetes-map-type: atomic - type: - type: string - type: object - basicAuth: - properties: - password: - properties: - key: - type: string - name: - type: string - optional: - type: boolean - required: - - key - type: object - x-kubernetes-map-type: atomic - username: - properties: - key: - type: string - name: - type: string - optional: - type: boolean - required: - - key - type: object - x-kubernetes-map-type: atomic - type: object - bearerTokenSecret: - properties: - key: - type: string - name: - type: string - optional: - type: boolean - required: - - key - type: object - x-kubernetes-map-type: atomic - enableHttp2: - type: boolean - filterRunning: - type: boolean - followRedirects: - type: boolean - honorLabels: - type: boolean - honorTimestamps: - type: boolean - interval: - pattern: ^(0|(([0-9]+)y)?(([0-9]+)w)?(([0-9]+)d)?(([0-9]+)h)?(([0-9]+)m)?(([0-9]+)s)?(([0-9]+)ms)?)$ - type: string - metricRelabelings: - items: - properties: - action: - default: replace - enum: - - replace - - Replace - - keep - - Keep - - drop - - Drop - - hashmod - - HashMod - - labelmap - - LabelMap - - labeldrop - - LabelDrop - - labelkeep - - LabelKeep - - lowercase - - Lowercase - - uppercase - - Uppercase - - keepequal - - KeepEqual - - dropequal - - DropEqual - type: string - modulus: - format: int64 - type: integer - regex: - type: string - replacement: - type: string - separator: - type: string - sourceLabels: - items: - pattern: ^[a-zA-Z_][a-zA-Z0-9_]*$ - type: string - type: array - targetLabel: - type: string - type: object - type: array - oauth2: - properties: - clientId: - properties: - 
configMap: - properties: - key: - type: string - name: - type: string - optional: - type: boolean - required: - - key - type: object - x-kubernetes-map-type: atomic - secret: - properties: - key: - type: string - name: - type: string - optional: - type: boolean - required: - - key - type: object - x-kubernetes-map-type: atomic - type: object - clientSecret: - properties: - key: - type: string - name: - type: string - optional: - type: boolean - required: - - key - type: object - x-kubernetes-map-type: atomic - endpointParams: - additionalProperties: - type: string - type: object - scopes: - items: - type: string - type: array - tokenUrl: - minLength: 1 - type: string - required: - - clientId - - clientSecret - - tokenUrl - type: object - params: - additionalProperties: - items: - type: string - type: array - type: object - path: - type: string - port: - type: string - proxyUrl: - type: string - relabelings: - items: - properties: - action: - default: replace - enum: - - replace - - Replace - - keep - - Keep - - drop - - Drop - - hashmod - - HashMod - - labelmap - - LabelMap - - labeldrop - - LabelDrop - - labelkeep - - LabelKeep - - lowercase - - Lowercase - - uppercase - - Uppercase - - keepequal - - KeepEqual - - dropequal - - DropEqual - type: string - modulus: - format: int64 - type: integer - regex: - type: string - replacement: - type: string - separator: - type: string - sourceLabels: - items: - pattern: ^[a-zA-Z_][a-zA-Z0-9_]*$ - type: string - type: array - targetLabel: - type: string - type: object - type: array - scheme: - enum: - - http - - https - type: string - scrapeTimeout: - pattern: ^(0|(([0-9]+)y)?(([0-9]+)w)?(([0-9]+)d)?(([0-9]+)h)?(([0-9]+)m)?(([0-9]+)s)?(([0-9]+)ms)?)$ - type: string - targetPort: - anyOf: - - type: integer - - type: string - x-kubernetes-int-or-string: true - tlsConfig: - properties: - ca: - properties: - configMap: - properties: - key: - type: string - name: - type: string - optional: - type: boolean - required: - - key - type: object - x-kubernetes-map-type: atomic - secret: - properties: - key: - type: string - name: - type: string - optional: - type: boolean - required: - - key - type: object - x-kubernetes-map-type: atomic - type: object - cert: - properties: - configMap: - properties: - key: - type: string - name: - type: string - optional: - type: boolean - required: - - key - type: object - x-kubernetes-map-type: atomic - secret: - properties: - key: - type: string - name: - type: string - optional: - type: boolean - required: - - key - type: object - x-kubernetes-map-type: atomic - type: object - insecureSkipVerify: - type: boolean - keySecret: - properties: - key: - type: string - name: - type: string - optional: - type: boolean - required: - - key - type: object - x-kubernetes-map-type: atomic - serverName: - type: string - type: object - type: object - type: array - podTargetLabels: - items: - type: string - type: array - sampleLimit: - format: int64 - type: integer - selector: - properties: - matchExpressions: - items: - properties: - key: - type: string - operator: - type: string - values: - items: - type: string - type: array - required: - - key - - operator - type: object - type: array - matchLabels: - additionalProperties: - type: string - type: object - type: object - x-kubernetes-map-type: atomic - targetLimit: - format: int64 - type: integer - required: - - podMetricsEndpoints - - selector - type: object - required: - - spec - type: object - served: true - storage: true diff --git 
a/operations/agent-static-operator/crds/monitoring.coreos.com_probes.yaml b/operations/agent-static-operator/crds/monitoring.coreos.com_probes.yaml deleted file mode 100644 index 13fc36f9aa..0000000000 --- a/operations/agent-static-operator/crds/monitoring.coreos.com_probes.yaml +++ /dev/null @@ -1,458 +0,0 @@ ---- -apiVersion: apiextensions.k8s.io/v1 -kind: CustomResourceDefinition -metadata: - annotations: - controller-gen.kubebuilder.io/version: v0.9.2 - creationTimestamp: null - name: probes.monitoring.coreos.com -spec: - group: monitoring.coreos.com - names: - categories: - - prometheus-operator - kind: Probe - listKind: ProbeList - plural: probes - shortNames: - - prb - singular: probe - scope: Namespaced - versions: - - name: v1 - schema: - openAPIV3Schema: - properties: - apiVersion: - type: string - kind: - type: string - metadata: - type: object - spec: - properties: - authorization: - properties: - credentials: - properties: - key: - type: string - name: - type: string - optional: - type: boolean - required: - - key - type: object - x-kubernetes-map-type: atomic - type: - type: string - type: object - basicAuth: - properties: - password: - properties: - key: - type: string - name: - type: string - optional: - type: boolean - required: - - key - type: object - x-kubernetes-map-type: atomic - username: - properties: - key: - type: string - name: - type: string - optional: - type: boolean - required: - - key - type: object - x-kubernetes-map-type: atomic - type: object - bearerTokenSecret: - properties: - key: - type: string - name: - type: string - optional: - type: boolean - required: - - key - type: object - x-kubernetes-map-type: atomic - interval: - pattern: ^(0|(([0-9]+)y)?(([0-9]+)w)?(([0-9]+)d)?(([0-9]+)h)?(([0-9]+)m)?(([0-9]+)s)?(([0-9]+)ms)?)$ - type: string - jobName: - type: string - labelLimit: - format: int64 - type: integer - labelNameLengthLimit: - format: int64 - type: integer - labelValueLengthLimit: - format: int64 - type: integer - metricRelabelings: - items: - properties: - action: - default: replace - enum: - - replace - - Replace - - keep - - Keep - - drop - - Drop - - hashmod - - HashMod - - labelmap - - LabelMap - - labeldrop - - LabelDrop - - labelkeep - - LabelKeep - - lowercase - - Lowercase - - uppercase - - Uppercase - - keepequal - - KeepEqual - - dropequal - - DropEqual - type: string - modulus: - format: int64 - type: integer - regex: - type: string - replacement: - type: string - separator: - type: string - sourceLabels: - items: - pattern: ^[a-zA-Z_][a-zA-Z0-9_]*$ - type: string - type: array - targetLabel: - type: string - type: object - type: array - module: - type: string - oauth2: - properties: - clientId: - properties: - configMap: - properties: - key: - type: string - name: - type: string - optional: - type: boolean - required: - - key - type: object - x-kubernetes-map-type: atomic - secret: - properties: - key: - type: string - name: - type: string - optional: - type: boolean - required: - - key - type: object - x-kubernetes-map-type: atomic - type: object - clientSecret: - properties: - key: - type: string - name: - type: string - optional: - type: boolean - required: - - key - type: object - x-kubernetes-map-type: atomic - endpointParams: - additionalProperties: - type: string - type: object - scopes: - items: - type: string - type: array - tokenUrl: - minLength: 1 - type: string - required: - - clientId - - clientSecret - - tokenUrl - type: object - prober: - properties: - path: - default: /probe - type: string - proxyUrl: - type: string 
- scheme: - enum: - - http - - https - type: string - url: - type: string - required: - - url - type: object - sampleLimit: - format: int64 - type: integer - scrapeTimeout: - pattern: ^(0|(([0-9]+)y)?(([0-9]+)w)?(([0-9]+)d)?(([0-9]+)h)?(([0-9]+)m)?(([0-9]+)s)?(([0-9]+)ms)?)$ - type: string - targetLimit: - format: int64 - type: integer - targets: - properties: - ingress: - properties: - namespaceSelector: - properties: - any: - type: boolean - matchNames: - items: - type: string - type: array - type: object - relabelingConfigs: - items: - properties: - action: - default: replace - enum: - - replace - - Replace - - keep - - Keep - - drop - - Drop - - hashmod - - HashMod - - labelmap - - LabelMap - - labeldrop - - LabelDrop - - labelkeep - - LabelKeep - - lowercase - - Lowercase - - uppercase - - Uppercase - - keepequal - - KeepEqual - - dropequal - - DropEqual - type: string - modulus: - format: int64 - type: integer - regex: - type: string - replacement: - type: string - separator: - type: string - sourceLabels: - items: - pattern: ^[a-zA-Z_][a-zA-Z0-9_]*$ - type: string - type: array - targetLabel: - type: string - type: object - type: array - selector: - properties: - matchExpressions: - items: - properties: - key: - type: string - operator: - type: string - values: - items: - type: string - type: array - required: - - key - - operator - type: object - type: array - matchLabels: - additionalProperties: - type: string - type: object - type: object - x-kubernetes-map-type: atomic - type: object - staticConfig: - properties: - labels: - additionalProperties: - type: string - type: object - relabelingConfigs: - items: - properties: - action: - default: replace - enum: - - replace - - Replace - - keep - - Keep - - drop - - Drop - - hashmod - - HashMod - - labelmap - - LabelMap - - labeldrop - - LabelDrop - - labelkeep - - LabelKeep - - lowercase - - Lowercase - - uppercase - - Uppercase - - keepequal - - KeepEqual - - dropequal - - DropEqual - type: string - modulus: - format: int64 - type: integer - regex: - type: string - replacement: - type: string - separator: - type: string - sourceLabels: - items: - pattern: ^[a-zA-Z_][a-zA-Z0-9_]*$ - type: string - type: array - targetLabel: - type: string - type: object - type: array - static: - items: - type: string - type: array - type: object - type: object - tlsConfig: - properties: - ca: - properties: - configMap: - properties: - key: - type: string - name: - type: string - optional: - type: boolean - required: - - key - type: object - x-kubernetes-map-type: atomic - secret: - properties: - key: - type: string - name: - type: string - optional: - type: boolean - required: - - key - type: object - x-kubernetes-map-type: atomic - type: object - cert: - properties: - configMap: - properties: - key: - type: string - name: - type: string - optional: - type: boolean - required: - - key - type: object - x-kubernetes-map-type: atomic - secret: - properties: - key: - type: string - name: - type: string - optional: - type: boolean - required: - - key - type: object - x-kubernetes-map-type: atomic - type: object - insecureSkipVerify: - type: boolean - keySecret: - properties: - key: - type: string - name: - type: string - optional: - type: boolean - required: - - key - type: object - x-kubernetes-map-type: atomic - serverName: - type: string - type: object - type: object - required: - - spec - type: object - served: true - storage: true diff --git a/operations/agent-static-operator/crds/monitoring.coreos.com_servicemonitors.yaml 
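The interval and scrapeTimeout fields in the CRDs deleted here are validated by a shared duration pattern. A sketch that compiles that exact pattern and probes a few inputs:

package main

import (
	"fmt"
	"regexp"
)

// Copied verbatim from the CRD `pattern:` lines above.
var durationRE = regexp.MustCompile(`^(0|(([0-9]+)y)?(([0-9]+)w)?(([0-9]+)d)?(([0-9]+)h)?(([0-9]+)m)?(([0-9]+)s)?(([0-9]+)ms)?)$`)

func main() {
	for _, s := range []string{"30s", "1m30s", "0", "1h0m", "15", ""} {
		fmt.Printf("%q -> %v\n", s, durationRE.MatchString(s))
	}
	// "30s", "1m30s", "0", and "1h0m" match; a bare "15" does not.
	// As written the pattern also matches the empty string, since every
	// unit group is optional.
}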
b/operations/agent-static-operator/crds/monitoring.coreos.com_servicemonitors.yaml deleted file mode 100644 index ff62f8f267..0000000000 --- a/operations/agent-static-operator/crds/monitoring.coreos.com_servicemonitors.yaml +++ /dev/null @@ -1,436 +0,0 @@ ---- -apiVersion: apiextensions.k8s.io/v1 -kind: CustomResourceDefinition -metadata: - annotations: - controller-gen.kubebuilder.io/version: v0.9.2 - creationTimestamp: null - name: servicemonitors.monitoring.coreos.com -spec: - group: monitoring.coreos.com - names: - categories: - - prometheus-operator - kind: ServiceMonitor - listKind: ServiceMonitorList - plural: servicemonitors - shortNames: - - smon - singular: servicemonitor - scope: Namespaced - versions: - - name: v1 - schema: - openAPIV3Schema: - properties: - apiVersion: - type: string - kind: - type: string - metadata: - type: object - spec: - properties: - attachMetadata: - properties: - node: - type: boolean - type: object - endpoints: - items: - properties: - authorization: - properties: - credentials: - properties: - key: - type: string - name: - type: string - optional: - type: boolean - required: - - key - type: object - x-kubernetes-map-type: atomic - type: - type: string - type: object - basicAuth: - properties: - password: - properties: - key: - type: string - name: - type: string - optional: - type: boolean - required: - - key - type: object - x-kubernetes-map-type: atomic - username: - properties: - key: - type: string - name: - type: string - optional: - type: boolean - required: - - key - type: object - x-kubernetes-map-type: atomic - type: object - bearerTokenFile: - type: string - bearerTokenSecret: - properties: - key: - type: string - name: - type: string - optional: - type: boolean - required: - - key - type: object - x-kubernetes-map-type: atomic - enableHttp2: - type: boolean - filterRunning: - type: boolean - followRedirects: - type: boolean - honorLabels: - type: boolean - honorTimestamps: - type: boolean - interval: - pattern: ^(0|(([0-9]+)y)?(([0-9]+)w)?(([0-9]+)d)?(([0-9]+)h)?(([0-9]+)m)?(([0-9]+)s)?(([0-9]+)ms)?)$ - type: string - metricRelabelings: - items: - properties: - action: - default: replace - enum: - - replace - - Replace - - keep - - Keep - - drop - - Drop - - hashmod - - HashMod - - labelmap - - LabelMap - - labeldrop - - LabelDrop - - labelkeep - - LabelKeep - - lowercase - - Lowercase - - uppercase - - Uppercase - - keepequal - - KeepEqual - - dropequal - - DropEqual - type: string - modulus: - format: int64 - type: integer - regex: - type: string - replacement: - type: string - separator: - type: string - sourceLabels: - items: - pattern: ^[a-zA-Z_][a-zA-Z0-9_]*$ - type: string - type: array - targetLabel: - type: string - type: object - type: array - oauth2: - properties: - clientId: - properties: - configMap: - properties: - key: - type: string - name: - type: string - optional: - type: boolean - required: - - key - type: object - x-kubernetes-map-type: atomic - secret: - properties: - key: - type: string - name: - type: string - optional: - type: boolean - required: - - key - type: object - x-kubernetes-map-type: atomic - type: object - clientSecret: - properties: - key: - type: string - name: - type: string - optional: - type: boolean - required: - - key - type: object - x-kubernetes-map-type: atomic - endpointParams: - additionalProperties: - type: string - type: object - scopes: - items: - type: string - type: array - tokenUrl: - minLength: 1 - type: string - required: - - clientId - - clientSecret - - tokenUrl - type: object - 
params: - additionalProperties: - items: - type: string - type: array - type: object - path: - type: string - port: - type: string - proxyUrl: - type: string - relabelings: - items: - properties: - action: - default: replace - enum: - - replace - - Replace - - keep - - Keep - - drop - - Drop - - hashmod - - HashMod - - labelmap - - LabelMap - - labeldrop - - LabelDrop - - labelkeep - - LabelKeep - - lowercase - - Lowercase - - uppercase - - Uppercase - - keepequal - - KeepEqual - - dropequal - - DropEqual - type: string - modulus: - format: int64 - type: integer - regex: - type: string - replacement: - type: string - separator: - type: string - sourceLabels: - items: - pattern: ^[a-zA-Z_][a-zA-Z0-9_]*$ - type: string - type: array - targetLabel: - type: string - type: object - type: array - scheme: - enum: - - http - - https - type: string - scrapeTimeout: - pattern: ^(0|(([0-9]+)y)?(([0-9]+)w)?(([0-9]+)d)?(([0-9]+)h)?(([0-9]+)m)?(([0-9]+)s)?(([0-9]+)ms)?)$ - type: string - targetPort: - anyOf: - - type: integer - - type: string - x-kubernetes-int-or-string: true - tlsConfig: - properties: - ca: - properties: - configMap: - properties: - key: - type: string - name: - type: string - optional: - type: boolean - required: - - key - type: object - x-kubernetes-map-type: atomic - secret: - properties: - key: - type: string - name: - type: string - optional: - type: boolean - required: - - key - type: object - x-kubernetes-map-type: atomic - type: object - caFile: - type: string - cert: - properties: - configMap: - properties: - key: - type: string - name: - type: string - optional: - type: boolean - required: - - key - type: object - x-kubernetes-map-type: atomic - secret: - properties: - key: - type: string - name: - type: string - optional: - type: boolean - required: - - key - type: object - x-kubernetes-map-type: atomic - type: object - certFile: - type: string - insecureSkipVerify: - type: boolean - keyFile: - type: string - keySecret: - properties: - key: - type: string - name: - type: string - optional: - type: boolean - required: - - key - type: object - x-kubernetes-map-type: atomic - serverName: - type: string - type: object - type: object - type: array - jobLabel: - type: string - labelLimit: - format: int64 - type: integer - labelNameLengthLimit: - format: int64 - type: integer - labelValueLengthLimit: - format: int64 - type: integer - namespaceSelector: - properties: - any: - type: boolean - matchNames: - items: - type: string - type: array - type: object - podTargetLabels: - items: - type: string - type: array - sampleLimit: - format: int64 - type: integer - selector: - properties: - matchExpressions: - items: - properties: - key: - type: string - operator: - type: string - values: - items: - type: string - type: array - required: - - key - - operator - type: object - type: array - matchLabels: - additionalProperties: - type: string - type: object - type: object - x-kubernetes-map-type: atomic - targetLabels: - items: - type: string - type: array - targetLimit: - format: int64 - type: integer - required: - - endpoints - - selector - type: object - required: - - spec - type: object - served: true - storage: true diff --git a/operations/agent-static-operator/crds/monitoring.grafana.com_grafanaagents.yaml b/operations/agent-static-operator/crds/monitoring.grafana.com_grafanaagents.yaml deleted file mode 100644 index 4ec31d67a4..0000000000 --- a/operations/agent-static-operator/crds/monitoring.grafana.com_grafanaagents.yaml +++ /dev/null @@ -1,3711 +0,0 @@ ---- -apiVersion: 
apiextensions.k8s.io/v1 -kind: CustomResourceDefinition -metadata: - annotations: - controller-gen.kubebuilder.io/version: v0.9.2 - creationTimestamp: null - name: grafanaagents.monitoring.grafana.com -spec: - group: monitoring.grafana.com - names: - categories: - - agent-operator - kind: GrafanaAgent - listKind: GrafanaAgentList - plural: grafanaagents - singular: grafanaagent - scope: Namespaced - versions: - - name: v1alpha1 - schema: - openAPIV3Schema: - properties: - apiVersion: - type: string - kind: - type: string - metadata: - type: object - spec: - properties: - affinity: - properties: - nodeAffinity: - properties: - preferredDuringSchedulingIgnoredDuringExecution: - items: - properties: - preference: - properties: - matchExpressions: - items: - properties: - key: - type: string - operator: - type: string - values: - items: - type: string - type: array - required: - - key - - operator - type: object - type: array - matchFields: - items: - properties: - key: - type: string - operator: - type: string - values: - items: - type: string - type: array - required: - - key - - operator - type: object - type: array - type: object - x-kubernetes-map-type: atomic - weight: - format: int32 - type: integer - required: - - preference - - weight - type: object - type: array - requiredDuringSchedulingIgnoredDuringExecution: - properties: - nodeSelectorTerms: - items: - properties: - matchExpressions: - items: - properties: - key: - type: string - operator: - type: string - values: - items: - type: string - type: array - required: - - key - - operator - type: object - type: array - matchFields: - items: - properties: - key: - type: string - operator: - type: string - values: - items: - type: string - type: array - required: - - key - - operator - type: object - type: array - type: object - x-kubernetes-map-type: atomic - type: array - required: - - nodeSelectorTerms - type: object - x-kubernetes-map-type: atomic - type: object - podAffinity: - properties: - preferredDuringSchedulingIgnoredDuringExecution: - items: - properties: - podAffinityTerm: - properties: - labelSelector: - properties: - matchExpressions: - items: - properties: - key: - type: string - operator: - type: string - values: - items: - type: string - type: array - required: - - key - - operator - type: object - type: array - matchLabels: - additionalProperties: - type: string - type: object - type: object - x-kubernetes-map-type: atomic - namespaceSelector: - properties: - matchExpressions: - items: - properties: - key: - type: string - operator: - type: string - values: - items: - type: string - type: array - required: - - key - - operator - type: object - type: array - matchLabels: - additionalProperties: - type: string - type: object - type: object - x-kubernetes-map-type: atomic - namespaces: - items: - type: string - type: array - topologyKey: - type: string - required: - - topologyKey - type: object - weight: - format: int32 - type: integer - required: - - podAffinityTerm - - weight - type: object - type: array - requiredDuringSchedulingIgnoredDuringExecution: - items: - properties: - labelSelector: - properties: - matchExpressions: - items: - properties: - key: - type: string - operator: - type: string - values: - items: - type: string - type: array - required: - - key - - operator - type: object - type: array - matchLabels: - additionalProperties: - type: string - type: object - type: object - x-kubernetes-map-type: atomic - namespaceSelector: - properties: - matchExpressions: - items: - properties: - key: - type: string - 
operator: - type: string - values: - items: - type: string - type: array - required: - - key - - operator - type: object - type: array - matchLabels: - additionalProperties: - type: string - type: object - type: object - x-kubernetes-map-type: atomic - namespaces: - items: - type: string - type: array - topologyKey: - type: string - required: - - topologyKey - type: object - type: array - type: object - podAntiAffinity: - properties: - preferredDuringSchedulingIgnoredDuringExecution: - items: - properties: - podAffinityTerm: - properties: - labelSelector: - properties: - matchExpressions: - items: - properties: - key: - type: string - operator: - type: string - values: - items: - type: string - type: array - required: - - key - - operator - type: object - type: array - matchLabels: - additionalProperties: - type: string - type: object - type: object - x-kubernetes-map-type: atomic - namespaceSelector: - properties: - matchExpressions: - items: - properties: - key: - type: string - operator: - type: string - values: - items: - type: string - type: array - required: - - key - - operator - type: object - type: array - matchLabels: - additionalProperties: - type: string - type: object - type: object - x-kubernetes-map-type: atomic - namespaces: - items: - type: string - type: array - topologyKey: - type: string - required: - - topologyKey - type: object - weight: - format: int32 - type: integer - required: - - podAffinityTerm - - weight - type: object - type: array - requiredDuringSchedulingIgnoredDuringExecution: - items: - properties: - labelSelector: - properties: - matchExpressions: - items: - properties: - key: - type: string - operator: - type: string - values: - items: - type: string - type: array - required: - - key - - operator - type: object - type: array - matchLabels: - additionalProperties: - type: string - type: object - type: object - x-kubernetes-map-type: atomic - namespaceSelector: - properties: - matchExpressions: - items: - properties: - key: - type: string - operator: - type: string - values: - items: - type: string - type: array - required: - - key - - operator - type: object - type: array - matchLabels: - additionalProperties: - type: string - type: object - type: object - x-kubernetes-map-type: atomic - namespaces: - items: - type: string - type: array - topologyKey: - type: string - required: - - topologyKey - type: object - type: array - type: object - type: object - apiServer: - properties: - authorization: - properties: - credentials: - properties: - key: - type: string - name: - type: string - optional: - type: boolean - required: - - key - type: object - x-kubernetes-map-type: atomic - credentialsFile: - type: string - type: - type: string - type: object - basicAuth: - properties: - password: - properties: - key: - type: string - name: - type: string - optional: - type: boolean - required: - - key - type: object - x-kubernetes-map-type: atomic - username: - properties: - key: - type: string - name: - type: string - optional: - type: boolean - required: - - key - type: object - x-kubernetes-map-type: atomic - type: object - bearerToken: - type: string - bearerTokenFile: - type: string - host: - type: string - tlsConfig: - properties: - ca: - properties: - configMap: - properties: - key: - type: string - name: - type: string - optional: - type: boolean - required: - - key - type: object - x-kubernetes-map-type: atomic - secret: - properties: - key: - type: string - name: - type: string - optional: - type: boolean - required: - - key - type: object - 
x-kubernetes-map-type: atomic - type: object - caFile: - type: string - cert: - properties: - configMap: - properties: - key: - type: string - name: - type: string - optional: - type: boolean - required: - - key - type: object - x-kubernetes-map-type: atomic - secret: - properties: - key: - type: string - name: - type: string - optional: - type: boolean - required: - - key - type: object - x-kubernetes-map-type: atomic - type: object - certFile: - type: string - insecureSkipVerify: - type: boolean - keyFile: - type: string - keySecret: - properties: - key: - type: string - name: - type: string - optional: - type: boolean - required: - - key - type: object - x-kubernetes-map-type: atomic - serverName: - type: string - type: object - required: - - host - type: object - configMaps: - items: - type: string - type: array - configReloaderImage: - type: string - configReloaderVersion: - type: string - containers: - items: - properties: - args: - items: - type: string - type: array - command: - items: - type: string - type: array - env: - items: - properties: - name: - type: string - value: - type: string - valueFrom: - properties: - configMapKeyRef: - properties: - key: - type: string - name: - type: string - optional: - type: boolean - required: - - key - type: object - x-kubernetes-map-type: atomic - fieldRef: - properties: - apiVersion: - type: string - fieldPath: - type: string - required: - - fieldPath - type: object - x-kubernetes-map-type: atomic - resourceFieldRef: - properties: - containerName: - type: string - divisor: - anyOf: - - type: integer - - type: string - pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ - x-kubernetes-int-or-string: true - resource: - type: string - required: - - resource - type: object - x-kubernetes-map-type: atomic - secretKeyRef: - properties: - key: - type: string - name: - type: string - optional: - type: boolean - required: - - key - type: object - x-kubernetes-map-type: atomic - type: object - required: - - name - type: object - type: array - envFrom: - items: - properties: - configMapRef: - properties: - name: - type: string - optional: - type: boolean - type: object - x-kubernetes-map-type: atomic - prefix: - type: string - secretRef: - properties: - name: - type: string - optional: - type: boolean - type: object - x-kubernetes-map-type: atomic - type: object - type: array - image: - type: string - imagePullPolicy: - type: string - lifecycle: - properties: - postStart: - properties: - exec: - properties: - command: - items: - type: string - type: array - type: object - httpGet: - properties: - host: - type: string - httpHeaders: - items: - properties: - name: - type: string - value: - type: string - required: - - name - - value - type: object - type: array - path: - type: string - port: - anyOf: - - type: integer - - type: string - x-kubernetes-int-or-string: true - scheme: - type: string - required: - - port - type: object - tcpSocket: - properties: - host: - type: string - port: - anyOf: - - type: integer - - type: string - x-kubernetes-int-or-string: true - required: - - port - type: object - type: object - preStop: - properties: - exec: - properties: - command: - items: - type: string - type: array - type: object - httpGet: - properties: - host: - type: string - httpHeaders: - items: - properties: - name: - type: string - value: - type: string - required: - - name - - value - type: object - type: array - path: - type: string - port: - anyOf: - - type: integer - - type: 
string - x-kubernetes-int-or-string: true - scheme: - type: string - required: - - port - type: object - tcpSocket: - properties: - host: - type: string - port: - anyOf: - - type: integer - - type: string - x-kubernetes-int-or-string: true - required: - - port - type: object - type: object - type: object - livenessProbe: - properties: - exec: - properties: - command: - items: - type: string - type: array - type: object - failureThreshold: - format: int32 - type: integer - grpc: - properties: - port: - format: int32 - type: integer - service: - type: string - required: - - port - type: object - httpGet: - properties: - host: - type: string - httpHeaders: - items: - properties: - name: - type: string - value: - type: string - required: - - name - - value - type: object - type: array - path: - type: string - port: - anyOf: - - type: integer - - type: string - x-kubernetes-int-or-string: true - scheme: - type: string - required: - - port - type: object - initialDelaySeconds: - format: int32 - type: integer - periodSeconds: - format: int32 - type: integer - successThreshold: - format: int32 - type: integer - tcpSocket: - properties: - host: - type: string - port: - anyOf: - - type: integer - - type: string - x-kubernetes-int-or-string: true - required: - - port - type: object - terminationGracePeriodSeconds: - format: int64 - type: integer - timeoutSeconds: - format: int32 - type: integer - type: object - name: - type: string - ports: - items: - properties: - containerPort: - format: int32 - type: integer - hostIP: - type: string - hostPort: - format: int32 - type: integer - name: - type: string - protocol: - default: TCP - type: string - required: - - containerPort - type: object - type: array - x-kubernetes-list-map-keys: - - containerPort - - protocol - x-kubernetes-list-type: map - readinessProbe: - properties: - exec: - properties: - command: - items: - type: string - type: array - type: object - failureThreshold: - format: int32 - type: integer - grpc: - properties: - port: - format: int32 - type: integer - service: - type: string - required: - - port - type: object - httpGet: - properties: - host: - type: string - httpHeaders: - items: - properties: - name: - type: string - value: - type: string - required: - - name - - value - type: object - type: array - path: - type: string - port: - anyOf: - - type: integer - - type: string - x-kubernetes-int-or-string: true - scheme: - type: string - required: - - port - type: object - initialDelaySeconds: - format: int32 - type: integer - periodSeconds: - format: int32 - type: integer - successThreshold: - format: int32 - type: integer - tcpSocket: - properties: - host: - type: string - port: - anyOf: - - type: integer - - type: string - x-kubernetes-int-or-string: true - required: - - port - type: object - terminationGracePeriodSeconds: - format: int64 - type: integer - timeoutSeconds: - format: int32 - type: integer - type: object - resizePolicy: - items: - properties: - resourceName: - type: string - restartPolicy: - type: string - required: - - resourceName - - restartPolicy - type: object - type: array - x-kubernetes-list-type: atomic - resources: - properties: - claims: - items: - properties: - name: - type: string - required: - - name - type: object - type: array - x-kubernetes-list-map-keys: - - name - x-kubernetes-list-type: map - limits: - additionalProperties: - anyOf: - - type: integer - - type: string - pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ - 
x-kubernetes-int-or-string: true - type: object - requests: - additionalProperties: - anyOf: - - type: integer - - type: string - pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ - x-kubernetes-int-or-string: true - type: object - type: object - restartPolicy: - type: string - securityContext: - properties: - allowPrivilegeEscalation: - type: boolean - capabilities: - properties: - add: - items: - type: string - type: array - drop: - items: - type: string - type: array - type: object - privileged: - type: boolean - procMount: - type: string - readOnlyRootFilesystem: - type: boolean - runAsGroup: - format: int64 - type: integer - runAsNonRoot: - type: boolean - runAsUser: - format: int64 - type: integer - seLinuxOptions: - properties: - level: - type: string - role: - type: string - type: - type: string - user: - type: string - type: object - seccompProfile: - properties: - localhostProfile: - type: string - type: - type: string - required: - - type - type: object - windowsOptions: - properties: - gmsaCredentialSpec: - type: string - gmsaCredentialSpecName: - type: string - hostProcess: - type: boolean - runAsUserName: - type: string - type: object - type: object - startupProbe: - properties: - exec: - properties: - command: - items: - type: string - type: array - type: object - failureThreshold: - format: int32 - type: integer - grpc: - properties: - port: - format: int32 - type: integer - service: - type: string - required: - - port - type: object - httpGet: - properties: - host: - type: string - httpHeaders: - items: - properties: - name: - type: string - value: - type: string - required: - - name - - value - type: object - type: array - path: - type: string - port: - anyOf: - - type: integer - - type: string - x-kubernetes-int-or-string: true - scheme: - type: string - required: - - port - type: object - initialDelaySeconds: - format: int32 - type: integer - periodSeconds: - format: int32 - type: integer - successThreshold: - format: int32 - type: integer - tcpSocket: - properties: - host: - type: string - port: - anyOf: - - type: integer - - type: string - x-kubernetes-int-or-string: true - required: - - port - type: object - terminationGracePeriodSeconds: - format: int64 - type: integer - timeoutSeconds: - format: int32 - type: integer - type: object - stdin: - type: boolean - stdinOnce: - type: boolean - terminationMessagePath: - type: string - terminationMessagePolicy: - type: string - tty: - type: boolean - volumeDevices: - items: - properties: - devicePath: - type: string - name: - type: string - required: - - devicePath - - name - type: object - type: array - volumeMounts: - items: - properties: - mountPath: - type: string - mountPropagation: - type: string - name: - type: string - readOnly: - type: boolean - subPath: - type: string - subPathExpr: - type: string - required: - - mountPath - - name - type: object - type: array - workingDir: - type: string - required: - - name - type: object - type: array - disableReporting: - default: false - type: boolean - disableSupportBundle: - default: false - type: boolean - enableConfigReadAPI: - default: false - type: boolean - image: - type: string - imagePullSecrets: - items: - properties: - name: - type: string - type: object - x-kubernetes-map-type: atomic - type: array - initContainers: - items: - properties: - args: - items: - type: string - type: array - command: - items: - type: string - type: array - env: - items: - properties: - name: - type: string - value: 
- type: string - valueFrom: - properties: - configMapKeyRef: - properties: - key: - type: string - name: - type: string - optional: - type: boolean - required: - - key - type: object - x-kubernetes-map-type: atomic - fieldRef: - properties: - apiVersion: - type: string - fieldPath: - type: string - required: - - fieldPath - type: object - x-kubernetes-map-type: atomic - resourceFieldRef: - properties: - containerName: - type: string - divisor: - anyOf: - - type: integer - - type: string - pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ - x-kubernetes-int-or-string: true - resource: - type: string - required: - - resource - type: object - x-kubernetes-map-type: atomic - secretKeyRef: - properties: - key: - type: string - name: - type: string - optional: - type: boolean - required: - - key - type: object - x-kubernetes-map-type: atomic - type: object - required: - - name - type: object - type: array - envFrom: - items: - properties: - configMapRef: - properties: - name: - type: string - optional: - type: boolean - type: object - x-kubernetes-map-type: atomic - prefix: - type: string - secretRef: - properties: - name: - type: string - optional: - type: boolean - type: object - x-kubernetes-map-type: atomic - type: object - type: array - image: - type: string - imagePullPolicy: - type: string - lifecycle: - properties: - postStart: - properties: - exec: - properties: - command: - items: - type: string - type: array - type: object - httpGet: - properties: - host: - type: string - httpHeaders: - items: - properties: - name: - type: string - value: - type: string - required: - - name - - value - type: object - type: array - path: - type: string - port: - anyOf: - - type: integer - - type: string - x-kubernetes-int-or-string: true - scheme: - type: string - required: - - port - type: object - tcpSocket: - properties: - host: - type: string - port: - anyOf: - - type: integer - - type: string - x-kubernetes-int-or-string: true - required: - - port - type: object - type: object - preStop: - properties: - exec: - properties: - command: - items: - type: string - type: array - type: object - httpGet: - properties: - host: - type: string - httpHeaders: - items: - properties: - name: - type: string - value: - type: string - required: - - name - - value - type: object - type: array - path: - type: string - port: - anyOf: - - type: integer - - type: string - x-kubernetes-int-or-string: true - scheme: - type: string - required: - - port - type: object - tcpSocket: - properties: - host: - type: string - port: - anyOf: - - type: integer - - type: string - x-kubernetes-int-or-string: true - required: - - port - type: object - type: object - type: object - livenessProbe: - properties: - exec: - properties: - command: - items: - type: string - type: array - type: object - failureThreshold: - format: int32 - type: integer - grpc: - properties: - port: - format: int32 - type: integer - service: - type: string - required: - - port - type: object - httpGet: - properties: - host: - type: string - httpHeaders: - items: - properties: - name: - type: string - value: - type: string - required: - - name - - value - type: object - type: array - path: - type: string - port: - anyOf: - - type: integer - - type: string - x-kubernetes-int-or-string: true - scheme: - type: string - required: - - port - type: object - initialDelaySeconds: - format: int32 - type: integer - periodSeconds: - format: int32 - type: integer - successThreshold: - format: int32 - 
type: integer - tcpSocket: - properties: - host: - type: string - port: - anyOf: - - type: integer - - type: string - x-kubernetes-int-or-string: true - required: - - port - type: object - terminationGracePeriodSeconds: - format: int64 - type: integer - timeoutSeconds: - format: int32 - type: integer - type: object - name: - type: string - ports: - items: - properties: - containerPort: - format: int32 - type: integer - hostIP: - type: string - hostPort: - format: int32 - type: integer - name: - type: string - protocol: - default: TCP - type: string - required: - - containerPort - type: object - type: array - x-kubernetes-list-map-keys: - - containerPort - - protocol - x-kubernetes-list-type: map - readinessProbe: - properties: - exec: - properties: - command: - items: - type: string - type: array - type: object - failureThreshold: - format: int32 - type: integer - grpc: - properties: - port: - format: int32 - type: integer - service: - type: string - required: - - port - type: object - httpGet: - properties: - host: - type: string - httpHeaders: - items: - properties: - name: - type: string - value: - type: string - required: - - name - - value - type: object - type: array - path: - type: string - port: - anyOf: - - type: integer - - type: string - x-kubernetes-int-or-string: true - scheme: - type: string - required: - - port - type: object - initialDelaySeconds: - format: int32 - type: integer - periodSeconds: - format: int32 - type: integer - successThreshold: - format: int32 - type: integer - tcpSocket: - properties: - host: - type: string - port: - anyOf: - - type: integer - - type: string - x-kubernetes-int-or-string: true - required: - - port - type: object - terminationGracePeriodSeconds: - format: int64 - type: integer - timeoutSeconds: - format: int32 - type: integer - type: object - resizePolicy: - items: - properties: - resourceName: - type: string - restartPolicy: - type: string - required: - - resourceName - - restartPolicy - type: object - type: array - x-kubernetes-list-type: atomic - resources: - properties: - claims: - items: - properties: - name: - type: string - required: - - name - type: object - type: array - x-kubernetes-list-map-keys: - - name - x-kubernetes-list-type: map - limits: - additionalProperties: - anyOf: - - type: integer - - type: string - pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ - x-kubernetes-int-or-string: true - type: object - requests: - additionalProperties: - anyOf: - - type: integer - - type: string - pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ - x-kubernetes-int-or-string: true - type: object - type: object - restartPolicy: - type: string - securityContext: - properties: - allowPrivilegeEscalation: - type: boolean - capabilities: - properties: - add: - items: - type: string - type: array - drop: - items: - type: string - type: array - type: object - privileged: - type: boolean - procMount: - type: string - readOnlyRootFilesystem: - type: boolean - runAsGroup: - format: int64 - type: integer - runAsNonRoot: - type: boolean - runAsUser: - format: int64 - type: integer - seLinuxOptions: - properties: - level: - type: string - role: - type: string - type: - type: string - user: - type: string - type: object - seccompProfile: - properties: - localhostProfile: - type: string - type: - type: string - required: - - type - type: object - windowsOptions: - properties: - gmsaCredentialSpec: - type: 
string - gmsaCredentialSpecName: - type: string - hostProcess: - type: boolean - runAsUserName: - type: string - type: object - type: object - startupProbe: - properties: - exec: - properties: - command: - items: - type: string - type: array - type: object - failureThreshold: - format: int32 - type: integer - grpc: - properties: - port: - format: int32 - type: integer - service: - type: string - required: - - port - type: object - httpGet: - properties: - host: - type: string - httpHeaders: - items: - properties: - name: - type: string - value: - type: string - required: - - name - - value - type: object - type: array - path: - type: string - port: - anyOf: - - type: integer - - type: string - x-kubernetes-int-or-string: true - scheme: - type: string - required: - - port - type: object - initialDelaySeconds: - format: int32 - type: integer - periodSeconds: - format: int32 - type: integer - successThreshold: - format: int32 - type: integer - tcpSocket: - properties: - host: - type: string - port: - anyOf: - - type: integer - - type: string - x-kubernetes-int-or-string: true - required: - - port - type: object - terminationGracePeriodSeconds: - format: int64 - type: integer - timeoutSeconds: - format: int32 - type: integer - type: object - stdin: - type: boolean - stdinOnce: - type: boolean - terminationMessagePath: - type: string - terminationMessagePolicy: - type: string - tty: - type: boolean - volumeDevices: - items: - properties: - devicePath: - type: string - name: - type: string - required: - - devicePath - - name - type: object - type: array - volumeMounts: - items: - properties: - mountPath: - type: string - mountPropagation: - type: string - name: - type: string - readOnly: - type: boolean - subPath: - type: string - subPathExpr: - type: string - required: - - mountPath - - name - type: object - type: array - workingDir: - type: string - required: - - name - type: object - type: array - integrations: - properties: - namespaceSelector: - properties: - matchExpressions: - items: - properties: - key: - type: string - operator: - type: string - values: - items: - type: string - type: array - required: - - key - - operator - type: object - type: array - matchLabels: - additionalProperties: - type: string - type: object - type: object - x-kubernetes-map-type: atomic - selector: - properties: - matchExpressions: - items: - properties: - key: - type: string - operator: - type: string - values: - items: - type: string - type: array - required: - - key - - operator - type: object - type: array - matchLabels: - additionalProperties: - type: string - type: object - type: object - x-kubernetes-map-type: atomic - type: object - logFormat: - type: string - logLevel: - type: string - logs: - properties: - clients: - items: - properties: - backoffConfig: - properties: - maxPeriod: - type: string - maxRetries: - type: integer - minPeriod: - type: string - type: object - basicAuth: - properties: - password: - properties: - key: - type: string - name: - type: string - optional: - type: boolean - required: - - key - type: object - x-kubernetes-map-type: atomic - username: - properties: - key: - type: string - name: - type: string - optional: - type: boolean - required: - - key - type: object - x-kubernetes-map-type: atomic - type: object - batchSize: - type: integer - batchWait: - type: string - bearerToken: - type: string - bearerTokenFile: - type: string - externalLabels: - additionalProperties: - type: string - type: object - oauth2: - properties: - clientId: - properties: - configMap: - properties: 
- key: - type: string - name: - type: string - optional: - type: boolean - required: - - key - type: object - x-kubernetes-map-type: atomic - secret: - properties: - key: - type: string - name: - type: string - optional: - type: boolean - required: - - key - type: object - x-kubernetes-map-type: atomic - type: object - clientSecret: - properties: - key: - type: string - name: - type: string - optional: - type: boolean - required: - - key - type: object - x-kubernetes-map-type: atomic - endpointParams: - additionalProperties: - type: string - type: object - scopes: - items: - type: string - type: array - tokenUrl: - minLength: 1 - type: string - required: - - clientId - - clientSecret - - tokenUrl - type: object - proxyUrl: - type: string - tenantId: - type: string - timeout: - type: string - tlsConfig: - properties: - ca: - properties: - configMap: - properties: - key: - type: string - name: - type: string - optional: - type: boolean - required: - - key - type: object - x-kubernetes-map-type: atomic - secret: - properties: - key: - type: string - name: - type: string - optional: - type: boolean - required: - - key - type: object - x-kubernetes-map-type: atomic - type: object - caFile: - type: string - cert: - properties: - configMap: - properties: - key: - type: string - name: - type: string - optional: - type: boolean - required: - - key - type: object - x-kubernetes-map-type: atomic - secret: - properties: - key: - type: string - name: - type: string - optional: - type: boolean - required: - - key - type: object - x-kubernetes-map-type: atomic - type: object - certFile: - type: string - insecureSkipVerify: - type: boolean - keyFile: - type: string - keySecret: - properties: - key: - type: string - name: - type: string - optional: - type: boolean - required: - - key - type: object - x-kubernetes-map-type: atomic - serverName: - type: string - type: object - url: - type: string - required: - - url - type: object - type: array - enforcedNamespaceLabel: - type: string - ignoreNamespaceSelectors: - type: boolean - instanceNamespaceSelector: - properties: - matchExpressions: - items: - properties: - key: - type: string - operator: - type: string - values: - items: - type: string - type: array - required: - - key - - operator - type: object - type: array - matchLabels: - additionalProperties: - type: string - type: object - type: object - x-kubernetes-map-type: atomic - instanceSelector: - properties: - matchExpressions: - items: - properties: - key: - type: string - operator: - type: string - values: - items: - type: string - type: array - required: - - key - - operator - type: object - type: array - matchLabels: - additionalProperties: - type: string - type: object - type: object - x-kubernetes-map-type: atomic - logsExternalLabelName: - type: string - type: object - metrics: - properties: - arbitraryFSAccessThroughSMs: - properties: - deny: - type: boolean - type: object - enforcedNamespaceLabel: - type: string - enforcedSampleLimit: - format: int64 - type: integer - enforcedTargetLimit: - format: int64 - type: integer - externalLabels: - additionalProperties: - type: string - type: object - ignoreNamespaceSelectors: - type: boolean - instanceNamespaceSelector: - properties: - matchExpressions: - items: - properties: - key: - type: string - operator: - type: string - values: - items: - type: string - type: array - required: - - key - - operator - type: object - type: array - matchLabels: - additionalProperties: - type: string - type: object - type: object - x-kubernetes-map-type: atomic - 
instanceSelector: - properties: - matchExpressions: - items: - properties: - key: - type: string - operator: - type: string - values: - items: - type: string - type: array - required: - - key - - operator - type: object - type: array - matchLabels: - additionalProperties: - type: string - type: object - type: object - x-kubernetes-map-type: atomic - metricsExternalLabelName: - type: string - overrideHonorLabels: - type: boolean - overrideHonorTimestamps: - type: boolean - remoteWrite: - items: - properties: - basicAuth: - properties: - password: - properties: - key: - type: string - name: - type: string - optional: - type: boolean - required: - - key - type: object - x-kubernetes-map-type: atomic - username: - properties: - key: - type: string - name: - type: string - optional: - type: boolean - required: - - key - type: object - x-kubernetes-map-type: atomic - type: object - bearerToken: - type: string - bearerTokenFile: - type: string - headers: - additionalProperties: - type: string - type: object - metadataConfig: - properties: - send: - type: boolean - sendInterval: - type: string - type: object - name: - type: string - oauth2: - properties: - clientId: - properties: - configMap: - properties: - key: - type: string - name: - type: string - optional: - type: boolean - required: - - key - type: object - x-kubernetes-map-type: atomic - secret: - properties: - key: - type: string - name: - type: string - optional: - type: boolean - required: - - key - type: object - x-kubernetes-map-type: atomic - type: object - clientSecret: - properties: - key: - type: string - name: - type: string - optional: - type: boolean - required: - - key - type: object - x-kubernetes-map-type: atomic - endpointParams: - additionalProperties: - type: string - type: object - scopes: - items: - type: string - type: array - tokenUrl: - minLength: 1 - type: string - required: - - clientId - - clientSecret - - tokenUrl - type: object - proxyUrl: - type: string - queueConfig: - properties: - batchSendDeadline: - type: string - capacity: - type: integer - maxBackoff: - type: string - maxRetries: - type: integer - maxSamplesPerSend: - type: integer - maxShards: - type: integer - minBackoff: - type: string - minShards: - type: integer - retryOnRateLimit: - type: boolean - type: object - remoteTimeout: - type: string - sigv4: - properties: - accessKey: - properties: - key: - type: string - name: - type: string - optional: - type: boolean - required: - - key - type: object - x-kubernetes-map-type: atomic - profile: - type: string - region: - type: string - roleARN: - type: string - secretKey: - properties: - key: - type: string - name: - type: string - optional: - type: boolean - required: - - key - type: object - x-kubernetes-map-type: atomic - type: object - tlsConfig: - properties: - ca: - properties: - configMap: - properties: - key: - type: string - name: - type: string - optional: - type: boolean - required: - - key - type: object - x-kubernetes-map-type: atomic - secret: - properties: - key: - type: string - name: - type: string - optional: - type: boolean - required: - - key - type: object - x-kubernetes-map-type: atomic - type: object - caFile: - type: string - cert: - properties: - configMap: - properties: - key: - type: string - name: - type: string - optional: - type: boolean - required: - - key - type: object - x-kubernetes-map-type: atomic - secret: - properties: - key: - type: string - name: - type: string - optional: - type: boolean - required: - - key - type: object - x-kubernetes-map-type: atomic - type: 
object - certFile: - type: string - insecureSkipVerify: - type: boolean - keyFile: - type: string - keySecret: - properties: - key: - type: string - name: - type: string - optional: - type: boolean - required: - - key - type: object - x-kubernetes-map-type: atomic - serverName: - type: string - type: object - url: - type: string - writeRelabelConfigs: - items: - properties: - action: - default: replace - enum: - - replace - - Replace - - keep - - Keep - - drop - - Drop - - hashmod - - HashMod - - labelmap - - LabelMap - - labeldrop - - LabelDrop - - labelkeep - - LabelKeep - - lowercase - - Lowercase - - uppercase - - Uppercase - - keepequal - - KeepEqual - - dropequal - - DropEqual - type: string - modulus: - format: int64 - type: integer - regex: - type: string - replacement: - type: string - separator: - type: string - sourceLabels: - items: - pattern: ^[a-zA-Z_][a-zA-Z0-9_]*$ - type: string - type: array - targetLabel: - type: string - type: object - type: array - required: - - url - type: object - type: array - replicaExternalLabelName: - type: string - replicas: - format: int32 - type: integer - scrapeInterval: - type: string - scrapeTimeout: - type: string - shards: - format: int32 - type: integer - type: object - nodeSelector: - additionalProperties: - type: string - type: object - paused: - type: boolean - podMetadata: - properties: - annotations: - additionalProperties: - type: string - type: object - labels: - additionalProperties: - type: string - type: object - name: - type: string - type: object - portName: - type: string - priorityClassName: - type: string - resources: - properties: - claims: - items: - properties: - name: - type: string - required: - - name - type: object - type: array - x-kubernetes-list-map-keys: - - name - x-kubernetes-list-type: map - limits: - additionalProperties: - anyOf: - - type: integer - - type: string - pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ - x-kubernetes-int-or-string: true - type: object - requests: - additionalProperties: - anyOf: - - type: integer - - type: string - pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ - x-kubernetes-int-or-string: true - type: object - type: object - runtimeClassName: - type: string - secrets: - items: - type: string - type: array - securityContext: - properties: - fsGroup: - format: int64 - type: integer - fsGroupChangePolicy: - type: string - runAsGroup: - format: int64 - type: integer - runAsNonRoot: - type: boolean - runAsUser: - format: int64 - type: integer - seLinuxOptions: - properties: - level: - type: string - role: - type: string - type: - type: string - user: - type: string - type: object - seccompProfile: - properties: - localhostProfile: - type: string - type: - type: string - required: - - type - type: object - supplementalGroups: - items: - format: int64 - type: integer - type: array - sysctls: - items: - properties: - name: - type: string - value: - type: string - required: - - name - - value - type: object - type: array - windowsOptions: - properties: - gmsaCredentialSpec: - type: string - gmsaCredentialSpecName: - type: string - hostProcess: - type: boolean - runAsUserName: - type: string - type: object - type: object - serviceAccountName: - type: string - storage: - properties: - disableMountSubPath: - type: boolean - emptyDir: - properties: - medium: - type: string - sizeLimit: - anyOf: - - type: integer - - type: string - pattern: 
^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ - x-kubernetes-int-or-string: true - type: object - ephemeral: - properties: - volumeClaimTemplate: - properties: - metadata: - type: object - spec: - properties: - accessModes: - items: - type: string - type: array - dataSource: - properties: - apiGroup: - type: string - kind: - type: string - name: - type: string - required: - - kind - - name - type: object - x-kubernetes-map-type: atomic - dataSourceRef: - properties: - apiGroup: - type: string - kind: - type: string - name: - type: string - namespace: - type: string - required: - - kind - - name - type: object - resources: - properties: - claims: - items: - properties: - name: - type: string - required: - - name - type: object - type: array - x-kubernetes-list-map-keys: - - name - x-kubernetes-list-type: map - limits: - additionalProperties: - anyOf: - - type: integer - - type: string - pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ - x-kubernetes-int-or-string: true - type: object - requests: - additionalProperties: - anyOf: - - type: integer - - type: string - pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ - x-kubernetes-int-or-string: true - type: object - type: object - selector: - properties: - matchExpressions: - items: - properties: - key: - type: string - operator: - type: string - values: - items: - type: string - type: array - required: - - key - - operator - type: object - type: array - matchLabels: - additionalProperties: - type: string - type: object - type: object - x-kubernetes-map-type: atomic - storageClassName: - type: string - volumeMode: - type: string - volumeName: - type: string - type: object - required: - - spec - type: object - type: object - volumeClaimTemplate: - properties: - apiVersion: - type: string - kind: - type: string - metadata: - properties: - annotations: - additionalProperties: - type: string - type: object - labels: - additionalProperties: - type: string - type: object - name: - type: string - type: object - spec: - properties: - accessModes: - items: - type: string - type: array - dataSource: - properties: - apiGroup: - type: string - kind: - type: string - name: - type: string - required: - - kind - - name - type: object - x-kubernetes-map-type: atomic - dataSourceRef: - properties: - apiGroup: - type: string - kind: - type: string - name: - type: string - namespace: - type: string - required: - - kind - - name - type: object - resources: - properties: - claims: - items: - properties: - name: - type: string - required: - - name - type: object - type: array - x-kubernetes-list-map-keys: - - name - x-kubernetes-list-type: map - limits: - additionalProperties: - anyOf: - - type: integer - - type: string - pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ - x-kubernetes-int-or-string: true - type: object - requests: - additionalProperties: - anyOf: - - type: integer - - type: string - pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ - x-kubernetes-int-or-string: true - type: object - type: object - selector: - properties: - matchExpressions: - items: - properties: - key: - type: string - operator: - type: string - values: - items: - type: string - type: array - required: - - key - - operator - 
type: object - type: array - matchLabels: - additionalProperties: - type: string - type: object - type: object - x-kubernetes-map-type: atomic - storageClassName: - type: string - volumeMode: - type: string - volumeName: - type: string - type: object - status: - properties: - accessModes: - items: - type: string - type: array - allocatedResourceStatuses: - additionalProperties: - type: string - type: object - x-kubernetes-map-type: granular - allocatedResources: - additionalProperties: - anyOf: - - type: integer - - type: string - pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ - x-kubernetes-int-or-string: true - type: object - capacity: - additionalProperties: - anyOf: - - type: integer - - type: string - pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ - x-kubernetes-int-or-string: true - type: object - conditions: - items: - properties: - lastProbeTime: - format: date-time - type: string - lastTransitionTime: - format: date-time - type: string - message: - type: string - reason: - type: string - status: - type: string - type: - type: string - required: - - status - - type - type: object - type: array - phase: - type: string - type: object - type: object - type: object - tolerations: - items: - properties: - effect: - type: string - key: - type: string - operator: - type: string - tolerationSeconds: - format: int64 - type: integer - value: - type: string - type: object - type: array - topologySpreadConstraints: - items: - properties: - labelSelector: - properties: - matchExpressions: - items: - properties: - key: - type: string - operator: - type: string - values: - items: - type: string - type: array - required: - - key - - operator - type: object - type: array - matchLabels: - additionalProperties: - type: string - type: object - type: object - x-kubernetes-map-type: atomic - matchLabelKeys: - items: - type: string - type: array - x-kubernetes-list-type: atomic - maxSkew: - format: int32 - type: integer - minDomains: - format: int32 - type: integer - nodeAffinityPolicy: - type: string - nodeTaintsPolicy: - type: string - topologyKey: - type: string - whenUnsatisfiable: - type: string - required: - - maxSkew - - topologyKey - - whenUnsatisfiable - type: object - type: array - version: - type: string - volumeMounts: - items: - properties: - mountPath: - type: string - mountPropagation: - type: string - name: - type: string - readOnly: - type: boolean - subPath: - type: string - subPathExpr: - type: string - required: - - mountPath - - name - type: object - type: array - volumes: - items: - properties: - awsElasticBlockStore: - properties: - fsType: - type: string - partition: - format: int32 - type: integer - readOnly: - type: boolean - volumeID: - type: string - required: - - volumeID - type: object - azureDisk: - properties: - cachingMode: - type: string - diskName: - type: string - diskURI: - type: string - fsType: - type: string - kind: - type: string - readOnly: - type: boolean - required: - - diskName - - diskURI - type: object - azureFile: - properties: - readOnly: - type: boolean - secretName: - type: string - shareName: - type: string - required: - - secretName - - shareName - type: object - cephfs: - properties: - monitors: - items: - type: string - type: array - path: - type: string - readOnly: - type: boolean - secretFile: - type: string - secretRef: - properties: - name: - type: string - type: object - x-kubernetes-map-type: 
atomic - user: - type: string - required: - - monitors - type: object - cinder: - properties: - fsType: - type: string - readOnly: - type: boolean - secretRef: - properties: - name: - type: string - type: object - x-kubernetes-map-type: atomic - volumeID: - type: string - required: - - volumeID - type: object - configMap: - properties: - defaultMode: - format: int32 - type: integer - items: - items: - properties: - key: - type: string - mode: - format: int32 - type: integer - path: - type: string - required: - - key - - path - type: object - type: array - name: - type: string - optional: - type: boolean - type: object - x-kubernetes-map-type: atomic - csi: - properties: - driver: - type: string - fsType: - type: string - nodePublishSecretRef: - properties: - name: - type: string - type: object - x-kubernetes-map-type: atomic - readOnly: - type: boolean - volumeAttributes: - additionalProperties: - type: string - type: object - required: - - driver - type: object - downwardAPI: - properties: - defaultMode: - format: int32 - type: integer - items: - items: - properties: - fieldRef: - properties: - apiVersion: - type: string - fieldPath: - type: string - required: - - fieldPath - type: object - x-kubernetes-map-type: atomic - mode: - format: int32 - type: integer - path: - type: string - resourceFieldRef: - properties: - containerName: - type: string - divisor: - anyOf: - - type: integer - - type: string - pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ - x-kubernetes-int-or-string: true - resource: - type: string - required: - - resource - type: object - x-kubernetes-map-type: atomic - required: - - path - type: object - type: array - type: object - emptyDir: - properties: - medium: - type: string - sizeLimit: - anyOf: - - type: integer - - type: string - pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ - x-kubernetes-int-or-string: true - type: object - ephemeral: - properties: - volumeClaimTemplate: - properties: - metadata: - type: object - spec: - properties: - accessModes: - items: - type: string - type: array - dataSource: - properties: - apiGroup: - type: string - kind: - type: string - name: - type: string - required: - - kind - - name - type: object - x-kubernetes-map-type: atomic - dataSourceRef: - properties: - apiGroup: - type: string - kind: - type: string - name: - type: string - namespace: - type: string - required: - - kind - - name - type: object - resources: - properties: - claims: - items: - properties: - name: - type: string - required: - - name - type: object - type: array - x-kubernetes-list-map-keys: - - name - x-kubernetes-list-type: map - limits: - additionalProperties: - anyOf: - - type: integer - - type: string - pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ - x-kubernetes-int-or-string: true - type: object - requests: - additionalProperties: - anyOf: - - type: integer - - type: string - pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ - x-kubernetes-int-or-string: true - type: object - type: object - selector: - properties: - matchExpressions: - items: - properties: - key: - type: string - operator: - type: string - values: - items: - type: string - type: array - required: - - key - - operator - type: object - type: array - matchLabels: - additionalProperties: - type: 
string - type: object - type: object - x-kubernetes-map-type: atomic - storageClassName: - type: string - volumeMode: - type: string - volumeName: - type: string - type: object - required: - - spec - type: object - type: object - fc: - properties: - fsType: - type: string - lun: - format: int32 - type: integer - readOnly: - type: boolean - targetWWNs: - items: - type: string - type: array - wwids: - items: - type: string - type: array - type: object - flexVolume: - properties: - driver: - type: string - fsType: - type: string - options: - additionalProperties: - type: string - type: object - readOnly: - type: boolean - secretRef: - properties: - name: - type: string - type: object - x-kubernetes-map-type: atomic - required: - - driver - type: object - flocker: - properties: - datasetName: - type: string - datasetUUID: - type: string - type: object - gcePersistentDisk: - properties: - fsType: - type: string - partition: - format: int32 - type: integer - pdName: - type: string - readOnly: - type: boolean - required: - - pdName - type: object - gitRepo: - properties: - directory: - type: string - repository: - type: string - revision: - type: string - required: - - repository - type: object - glusterfs: - properties: - endpoints: - type: string - path: - type: string - readOnly: - type: boolean - required: - - endpoints - - path - type: object - hostPath: - properties: - path: - type: string - type: - type: string - required: - - path - type: object - iscsi: - properties: - chapAuthDiscovery: - type: boolean - chapAuthSession: - type: boolean - fsType: - type: string - initiatorName: - type: string - iqn: - type: string - iscsiInterface: - type: string - lun: - format: int32 - type: integer - portals: - items: - type: string - type: array - readOnly: - type: boolean - secretRef: - properties: - name: - type: string - type: object - x-kubernetes-map-type: atomic - targetPortal: - type: string - required: - - iqn - - lun - - targetPortal - type: object - name: - type: string - nfs: - properties: - path: - type: string - readOnly: - type: boolean - server: - type: string - required: - - path - - server - type: object - persistentVolumeClaim: - properties: - claimName: - type: string - readOnly: - type: boolean - required: - - claimName - type: object - photonPersistentDisk: - properties: - fsType: - type: string - pdID: - type: string - required: - - pdID - type: object - portworxVolume: - properties: - fsType: - type: string - readOnly: - type: boolean - volumeID: - type: string - required: - - volumeID - type: object - projected: - properties: - defaultMode: - format: int32 - type: integer - sources: - items: - properties: - configMap: - properties: - items: - items: - properties: - key: - type: string - mode: - format: int32 - type: integer - path: - type: string - required: - - key - - path - type: object - type: array - name: - type: string - optional: - type: boolean - type: object - x-kubernetes-map-type: atomic - downwardAPI: - properties: - items: - items: - properties: - fieldRef: - properties: - apiVersion: - type: string - fieldPath: - type: string - required: - - fieldPath - type: object - x-kubernetes-map-type: atomic - mode: - format: int32 - type: integer - path: - type: string - resourceFieldRef: - properties: - containerName: - type: string - divisor: - anyOf: - - type: integer - - type: string - pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ - x-kubernetes-int-or-string: true - resource: - type: string 
- required: - - resource - type: object - x-kubernetes-map-type: atomic - required: - - path - type: object - type: array - type: object - secret: - properties: - items: - items: - properties: - key: - type: string - mode: - format: int32 - type: integer - path: - type: string - required: - - key - - path - type: object - type: array - name: - type: string - optional: - type: boolean - type: object - x-kubernetes-map-type: atomic - serviceAccountToken: - properties: - audience: - type: string - expirationSeconds: - format: int64 - type: integer - path: - type: string - required: - - path - type: object - type: object - type: array - type: object - quobyte: - properties: - group: - type: string - readOnly: - type: boolean - registry: - type: string - tenant: - type: string - user: - type: string - volume: - type: string - required: - - registry - - volume - type: object - rbd: - properties: - fsType: - type: string - image: - type: string - keyring: - type: string - monitors: - items: - type: string - type: array - pool: - type: string - readOnly: - type: boolean - secretRef: - properties: - name: - type: string - type: object - x-kubernetes-map-type: atomic - user: - type: string - required: - - image - - monitors - type: object - scaleIO: - properties: - fsType: - type: string - gateway: - type: string - protectionDomain: - type: string - readOnly: - type: boolean - secretRef: - properties: - name: - type: string - type: object - x-kubernetes-map-type: atomic - sslEnabled: - type: boolean - storageMode: - type: string - storagePool: - type: string - system: - type: string - volumeName: - type: string - required: - - gateway - - secretRef - - system - type: object - secret: - properties: - defaultMode: - format: int32 - type: integer - items: - items: - properties: - key: - type: string - mode: - format: int32 - type: integer - path: - type: string - required: - - key - - path - type: object - type: array - optional: - type: boolean - secretName: - type: string - type: object - storageos: - properties: - fsType: - type: string - readOnly: - type: boolean - secretRef: - properties: - name: - type: string - type: object - x-kubernetes-map-type: atomic - volumeName: - type: string - volumeNamespace: - type: string - type: object - vsphereVolume: - properties: - fsType: - type: string - storagePolicyID: - type: string - storagePolicyName: - type: string - volumePath: - type: string - required: - - volumePath - type: object - required: - - name - type: object - type: array - type: object - type: object - served: true - storage: true diff --git a/operations/agent-static-operator/crds/monitoring.grafana.com_integrations.yaml b/operations/agent-static-operator/crds/monitoring.grafana.com_integrations.yaml deleted file mode 100644 index 960b2f73ac..0000000000 --- a/operations/agent-static-operator/crds/monitoring.grafana.com_integrations.yaml +++ /dev/null @@ -1,810 +0,0 @@ ---- -apiVersion: apiextensions.k8s.io/v1 -kind: CustomResourceDefinition -metadata: - annotations: - controller-gen.kubebuilder.io/version: v0.9.2 - creationTimestamp: null - name: integrations.monitoring.grafana.com -spec: - group: monitoring.grafana.com - names: - categories: - - agent-operator - kind: Integration - listKind: IntegrationList - plural: integrations - singular: integration - scope: Namespaced - versions: - - name: v1alpha1 - schema: - openAPIV3Schema: - properties: - apiVersion: - type: string - kind: - type: string - metadata: - type: object - spec: - properties: - config: - type: object - 
x-kubernetes-preserve-unknown-fields: true - configMaps: - items: - properties: - key: - type: string - name: - type: string - optional: - type: boolean - required: - - key - type: object - x-kubernetes-map-type: atomic - type: array - name: - type: string - secrets: - items: - properties: - key: - type: string - name: - type: string - optional: - type: boolean - required: - - key - type: object - x-kubernetes-map-type: atomic - type: array - type: - properties: - allNodes: - type: boolean - unique: - type: boolean - type: object - volumeMounts: - items: - properties: - mountPath: - type: string - mountPropagation: - type: string - name: - type: string - readOnly: - type: boolean - subPath: - type: string - subPathExpr: - type: string - required: - - mountPath - - name - type: object - type: array - volumes: - items: - properties: - awsElasticBlockStore: - properties: - fsType: - type: string - partition: - format: int32 - type: integer - readOnly: - type: boolean - volumeID: - type: string - required: - - volumeID - type: object - azureDisk: - properties: - cachingMode: - type: string - diskName: - type: string - diskURI: - type: string - fsType: - type: string - kind: - type: string - readOnly: - type: boolean - required: - - diskName - - diskURI - type: object - azureFile: - properties: - readOnly: - type: boolean - secretName: - type: string - shareName: - type: string - required: - - secretName - - shareName - type: object - cephfs: - properties: - monitors: - items: - type: string - type: array - path: - type: string - readOnly: - type: boolean - secretFile: - type: string - secretRef: - properties: - name: - type: string - type: object - x-kubernetes-map-type: atomic - user: - type: string - required: - - monitors - type: object - cinder: - properties: - fsType: - type: string - readOnly: - type: boolean - secretRef: - properties: - name: - type: string - type: object - x-kubernetes-map-type: atomic - volumeID: - type: string - required: - - volumeID - type: object - configMap: - properties: - defaultMode: - format: int32 - type: integer - items: - items: - properties: - key: - type: string - mode: - format: int32 - type: integer - path: - type: string - required: - - key - - path - type: object - type: array - name: - type: string - optional: - type: boolean - type: object - x-kubernetes-map-type: atomic - csi: - properties: - driver: - type: string - fsType: - type: string - nodePublishSecretRef: - properties: - name: - type: string - type: object - x-kubernetes-map-type: atomic - readOnly: - type: boolean - volumeAttributes: - additionalProperties: - type: string - type: object - required: - - driver - type: object - downwardAPI: - properties: - defaultMode: - format: int32 - type: integer - items: - items: - properties: - fieldRef: - properties: - apiVersion: - type: string - fieldPath: - type: string - required: - - fieldPath - type: object - x-kubernetes-map-type: atomic - mode: - format: int32 - type: integer - path: - type: string - resourceFieldRef: - properties: - containerName: - type: string - divisor: - anyOf: - - type: integer - - type: string - pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ - x-kubernetes-int-or-string: true - resource: - type: string - required: - - resource - type: object - x-kubernetes-map-type: atomic - required: - - path - type: object - type: array - type: object - emptyDir: - properties: - medium: - type: string - sizeLimit: - anyOf: - - type: integer - - type: string - 
pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ - x-kubernetes-int-or-string: true - type: object - ephemeral: - properties: - volumeClaimTemplate: - properties: - metadata: - type: object - spec: - properties: - accessModes: - items: - type: string - type: array - dataSource: - properties: - apiGroup: - type: string - kind: - type: string - name: - type: string - required: - - kind - - name - type: object - x-kubernetes-map-type: atomic - dataSourceRef: - properties: - apiGroup: - type: string - kind: - type: string - name: - type: string - namespace: - type: string - required: - - kind - - name - type: object - resources: - properties: - claims: - items: - properties: - name: - type: string - required: - - name - type: object - type: array - x-kubernetes-list-map-keys: - - name - x-kubernetes-list-type: map - limits: - additionalProperties: - anyOf: - - type: integer - - type: string - pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ - x-kubernetes-int-or-string: true - type: object - requests: - additionalProperties: - anyOf: - - type: integer - - type: string - pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ - x-kubernetes-int-or-string: true - type: object - type: object - selector: - properties: - matchExpressions: - items: - properties: - key: - type: string - operator: - type: string - values: - items: - type: string - type: array - required: - - key - - operator - type: object - type: array - matchLabels: - additionalProperties: - type: string - type: object - type: object - x-kubernetes-map-type: atomic - storageClassName: - type: string - volumeMode: - type: string - volumeName: - type: string - type: object - required: - - spec - type: object - type: object - fc: - properties: - fsType: - type: string - lun: - format: int32 - type: integer - readOnly: - type: boolean - targetWWNs: - items: - type: string - type: array - wwids: - items: - type: string - type: array - type: object - flexVolume: - properties: - driver: - type: string - fsType: - type: string - options: - additionalProperties: - type: string - type: object - readOnly: - type: boolean - secretRef: - properties: - name: - type: string - type: object - x-kubernetes-map-type: atomic - required: - - driver - type: object - flocker: - properties: - datasetName: - type: string - datasetUUID: - type: string - type: object - gcePersistentDisk: - properties: - fsType: - type: string - partition: - format: int32 - type: integer - pdName: - type: string - readOnly: - type: boolean - required: - - pdName - type: object - gitRepo: - properties: - directory: - type: string - repository: - type: string - revision: - type: string - required: - - repository - type: object - glusterfs: - properties: - endpoints: - type: string - path: - type: string - readOnly: - type: boolean - required: - - endpoints - - path - type: object - hostPath: - properties: - path: - type: string - type: - type: string - required: - - path - type: object - iscsi: - properties: - chapAuthDiscovery: - type: boolean - chapAuthSession: - type: boolean - fsType: - type: string - initiatorName: - type: string - iqn: - type: string - iscsiInterface: - type: string - lun: - format: int32 - type: integer - portals: - items: - type: string - type: array - readOnly: - type: boolean - secretRef: - properties: - name: - type: string - type: object - 
x-kubernetes-map-type: atomic - targetPortal: - type: string - required: - - iqn - - lun - - targetPortal - type: object - name: - type: string - nfs: - properties: - path: - type: string - readOnly: - type: boolean - server: - type: string - required: - - path - - server - type: object - persistentVolumeClaim: - properties: - claimName: - type: string - readOnly: - type: boolean - required: - - claimName - type: object - photonPersistentDisk: - properties: - fsType: - type: string - pdID: - type: string - required: - - pdID - type: object - portworxVolume: - properties: - fsType: - type: string - readOnly: - type: boolean - volumeID: - type: string - required: - - volumeID - type: object - projected: - properties: - defaultMode: - format: int32 - type: integer - sources: - items: - properties: - configMap: - properties: - items: - items: - properties: - key: - type: string - mode: - format: int32 - type: integer - path: - type: string - required: - - key - - path - type: object - type: array - name: - type: string - optional: - type: boolean - type: object - x-kubernetes-map-type: atomic - downwardAPI: - properties: - items: - items: - properties: - fieldRef: - properties: - apiVersion: - type: string - fieldPath: - type: string - required: - - fieldPath - type: object - x-kubernetes-map-type: atomic - mode: - format: int32 - type: integer - path: - type: string - resourceFieldRef: - properties: - containerName: - type: string - divisor: - anyOf: - - type: integer - - type: string - pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ - x-kubernetes-int-or-string: true - resource: - type: string - required: - - resource - type: object - x-kubernetes-map-type: atomic - required: - - path - type: object - type: array - type: object - secret: - properties: - items: - items: - properties: - key: - type: string - mode: - format: int32 - type: integer - path: - type: string - required: - - key - - path - type: object - type: array - name: - type: string - optional: - type: boolean - type: object - x-kubernetes-map-type: atomic - serviceAccountToken: - properties: - audience: - type: string - expirationSeconds: - format: int64 - type: integer - path: - type: string - required: - - path - type: object - type: object - type: array - type: object - quobyte: - properties: - group: - type: string - readOnly: - type: boolean - registry: - type: string - tenant: - type: string - user: - type: string - volume: - type: string - required: - - registry - - volume - type: object - rbd: - properties: - fsType: - type: string - image: - type: string - keyring: - type: string - monitors: - items: - type: string - type: array - pool: - type: string - readOnly: - type: boolean - secretRef: - properties: - name: - type: string - type: object - x-kubernetes-map-type: atomic - user: - type: string - required: - - image - - monitors - type: object - scaleIO: - properties: - fsType: - type: string - gateway: - type: string - protectionDomain: - type: string - readOnly: - type: boolean - secretRef: - properties: - name: - type: string - type: object - x-kubernetes-map-type: atomic - sslEnabled: - type: boolean - storageMode: - type: string - storagePool: - type: string - system: - type: string - volumeName: - type: string - required: - - gateway - - secretRef - - system - type: object - secret: - properties: - defaultMode: - format: int32 - type: integer - items: - items: - properties: - key: - type: string - mode: - format: int32 - type: integer - 
path: - type: string - required: - - key - - path - type: object - type: array - optional: - type: boolean - secretName: - type: string - type: object - storageos: - properties: - fsType: - type: string - readOnly: - type: boolean - secretRef: - properties: - name: - type: string - type: object - x-kubernetes-map-type: atomic - volumeName: - type: string - volumeNamespace: - type: string - type: object - vsphereVolume: - properties: - fsType: - type: string - storagePolicyID: - type: string - storagePolicyName: - type: string - volumePath: - type: string - required: - - volumePath - type: object - required: - - name - type: object - type: array - required: - - config - - name - - type - type: object - type: object - served: true - storage: true diff --git a/operations/agent-static-operator/crds/monitoring.grafana.com_logsinstances.yaml b/operations/agent-static-operator/crds/monitoring.grafana.com_logsinstances.yaml deleted file mode 100644 index 517bb30c2e..0000000000 --- a/operations/agent-static-operator/crds/monitoring.grafana.com_logsinstances.yaml +++ /dev/null @@ -1,299 +0,0 @@ ---- -apiVersion: apiextensions.k8s.io/v1 -kind: CustomResourceDefinition -metadata: - annotations: - controller-gen.kubebuilder.io/version: v0.9.2 - creationTimestamp: null - name: logsinstances.monitoring.grafana.com -spec: - group: monitoring.grafana.com - names: - categories: - - agent-operator - kind: LogsInstance - listKind: LogsInstanceList - plural: logsinstances - singular: logsinstance - scope: Namespaced - versions: - - name: v1alpha1 - schema: - openAPIV3Schema: - properties: - apiVersion: - type: string - kind: - type: string - metadata: - type: object - spec: - properties: - additionalScrapeConfigs: - properties: - key: - type: string - name: - type: string - optional: - type: boolean - required: - - key - type: object - x-kubernetes-map-type: atomic - clients: - items: - properties: - backoffConfig: - properties: - maxPeriod: - type: string - maxRetries: - type: integer - minPeriod: - type: string - type: object - basicAuth: - properties: - password: - properties: - key: - type: string - name: - type: string - optional: - type: boolean - required: - - key - type: object - x-kubernetes-map-type: atomic - username: - properties: - key: - type: string - name: - type: string - optional: - type: boolean - required: - - key - type: object - x-kubernetes-map-type: atomic - type: object - batchSize: - type: integer - batchWait: - type: string - bearerToken: - type: string - bearerTokenFile: - type: string - externalLabels: - additionalProperties: - type: string - type: object - oauth2: - properties: - clientId: - properties: - configMap: - properties: - key: - type: string - name: - type: string - optional: - type: boolean - required: - - key - type: object - x-kubernetes-map-type: atomic - secret: - properties: - key: - type: string - name: - type: string - optional: - type: boolean - required: - - key - type: object - x-kubernetes-map-type: atomic - type: object - clientSecret: - properties: - key: - type: string - name: - type: string - optional: - type: boolean - required: - - key - type: object - x-kubernetes-map-type: atomic - endpointParams: - additionalProperties: - type: string - type: object - scopes: - items: - type: string - type: array - tokenUrl: - minLength: 1 - type: string - required: - - clientId - - clientSecret - - tokenUrl - type: object - proxyUrl: - type: string - tenantId: - type: string - timeout: - type: string - tlsConfig: - properties: - ca: - properties: - configMap: - 
properties: - key: - type: string - name: - type: string - optional: - type: boolean - required: - - key - type: object - x-kubernetes-map-type: atomic - secret: - properties: - key: - type: string - name: - type: string - optional: - type: boolean - required: - - key - type: object - x-kubernetes-map-type: atomic - type: object - caFile: - type: string - cert: - properties: - configMap: - properties: - key: - type: string - name: - type: string - optional: - type: boolean - required: - - key - type: object - x-kubernetes-map-type: atomic - secret: - properties: - key: - type: string - name: - type: string - optional: - type: boolean - required: - - key - type: object - x-kubernetes-map-type: atomic - type: object - certFile: - type: string - insecureSkipVerify: - type: boolean - keyFile: - type: string - keySecret: - properties: - key: - type: string - name: - type: string - optional: - type: boolean - required: - - key - type: object - x-kubernetes-map-type: atomic - serverName: - type: string - type: object - url: - type: string - required: - - url - type: object - type: array - podLogsNamespaceSelector: - properties: - matchExpressions: - items: - properties: - key: - type: string - operator: - type: string - values: - items: - type: string - type: array - required: - - key - - operator - type: object - type: array - matchLabels: - additionalProperties: - type: string - type: object - type: object - x-kubernetes-map-type: atomic - podLogsSelector: - properties: - matchExpressions: - items: - properties: - key: - type: string - operator: - type: string - values: - items: - type: string - type: array - required: - - key - - operator - type: object - type: array - matchLabels: - additionalProperties: - type: string - type: object - type: object - x-kubernetes-map-type: atomic - targetConfig: - properties: - syncPeriod: - type: string - type: object - type: object - type: object - served: true - storage: true diff --git a/operations/agent-static-operator/crds/monitoring.grafana.com_metricsinstances.yaml b/operations/agent-static-operator/crds/monitoring.grafana.com_metricsinstances.yaml deleted file mode 100644 index 610193f440..0000000000 --- a/operations/agent-static-operator/crds/monitoring.grafana.com_metricsinstances.yaml +++ /dev/null @@ -1,495 +0,0 @@ ---- -apiVersion: apiextensions.k8s.io/v1 -kind: CustomResourceDefinition -metadata: - annotations: - controller-gen.kubebuilder.io/version: v0.9.2 - creationTimestamp: null - name: metricsinstances.monitoring.grafana.com -spec: - group: monitoring.grafana.com - names: - categories: - - agent-operator - kind: MetricsInstance - listKind: MetricsInstanceList - plural: metricsinstances - singular: metricsinstance - scope: Namespaced - versions: - - name: v1alpha1 - schema: - openAPIV3Schema: - properties: - apiVersion: - type: string - kind: - type: string - metadata: - type: object - spec: - properties: - additionalScrapeConfigs: - properties: - key: - type: string - name: - type: string - optional: - type: boolean - required: - - key - type: object - x-kubernetes-map-type: atomic - maxWALTime: - type: string - minWALTime: - type: string - podMonitorNamespaceSelector: - properties: - matchExpressions: - items: - properties: - key: - type: string - operator: - type: string - values: - items: - type: string - type: array - required: - - key - - operator - type: object - type: array - matchLabels: - additionalProperties: - type: string - type: object - type: object - x-kubernetes-map-type: atomic - podMonitorSelector: - properties: - 
matchExpressions: - items: - properties: - key: - type: string - operator: - type: string - values: - items: - type: string - type: array - required: - - key - - operator - type: object - type: array - matchLabels: - additionalProperties: - type: string - type: object - type: object - x-kubernetes-map-type: atomic - probeNamespaceSelector: - properties: - matchExpressions: - items: - properties: - key: - type: string - operator: - type: string - values: - items: - type: string - type: array - required: - - key - - operator - type: object - type: array - matchLabels: - additionalProperties: - type: string - type: object - type: object - x-kubernetes-map-type: atomic - probeSelector: - properties: - matchExpressions: - items: - properties: - key: - type: string - operator: - type: string - values: - items: - type: string - type: array - required: - - key - - operator - type: object - type: array - matchLabels: - additionalProperties: - type: string - type: object - type: object - x-kubernetes-map-type: atomic - remoteFlushDeadline: - type: string - remoteWrite: - items: - properties: - basicAuth: - properties: - password: - properties: - key: - type: string - name: - type: string - optional: - type: boolean - required: - - key - type: object - x-kubernetes-map-type: atomic - username: - properties: - key: - type: string - name: - type: string - optional: - type: boolean - required: - - key - type: object - x-kubernetes-map-type: atomic - type: object - bearerToken: - type: string - bearerTokenFile: - type: string - headers: - additionalProperties: - type: string - type: object - metadataConfig: - properties: - send: - type: boolean - sendInterval: - type: string - type: object - name: - type: string - oauth2: - properties: - clientId: - properties: - configMap: - properties: - key: - type: string - name: - type: string - optional: - type: boolean - required: - - key - type: object - x-kubernetes-map-type: atomic - secret: - properties: - key: - type: string - name: - type: string - optional: - type: boolean - required: - - key - type: object - x-kubernetes-map-type: atomic - type: object - clientSecret: - properties: - key: - type: string - name: - type: string - optional: - type: boolean - required: - - key - type: object - x-kubernetes-map-type: atomic - endpointParams: - additionalProperties: - type: string - type: object - scopes: - items: - type: string - type: array - tokenUrl: - minLength: 1 - type: string - required: - - clientId - - clientSecret - - tokenUrl - type: object - proxyUrl: - type: string - queueConfig: - properties: - batchSendDeadline: - type: string - capacity: - type: integer - maxBackoff: - type: string - maxRetries: - type: integer - maxSamplesPerSend: - type: integer - maxShards: - type: integer - minBackoff: - type: string - minShards: - type: integer - retryOnRateLimit: - type: boolean - type: object - remoteTimeout: - type: string - sigv4: - properties: - accessKey: - properties: - key: - type: string - name: - type: string - optional: - type: boolean - required: - - key - type: object - x-kubernetes-map-type: atomic - profile: - type: string - region: - type: string - roleARN: - type: string - secretKey: - properties: - key: - type: string - name: - type: string - optional: - type: boolean - required: - - key - type: object - x-kubernetes-map-type: atomic - type: object - tlsConfig: - properties: - ca: - properties: - configMap: - properties: - key: - type: string - name: - type: string - optional: - type: boolean - required: - - key - type: object - 
x-kubernetes-map-type: atomic - secret: - properties: - key: - type: string - name: - type: string - optional: - type: boolean - required: - - key - type: object - x-kubernetes-map-type: atomic - type: object - caFile: - type: string - cert: - properties: - configMap: - properties: - key: - type: string - name: - type: string - optional: - type: boolean - required: - - key - type: object - x-kubernetes-map-type: atomic - secret: - properties: - key: - type: string - name: - type: string - optional: - type: boolean - required: - - key - type: object - x-kubernetes-map-type: atomic - type: object - certFile: - type: string - insecureSkipVerify: - type: boolean - keyFile: - type: string - keySecret: - properties: - key: - type: string - name: - type: string - optional: - type: boolean - required: - - key - type: object - x-kubernetes-map-type: atomic - serverName: - type: string - type: object - url: - type: string - writeRelabelConfigs: - items: - properties: - action: - default: replace - enum: - - replace - - Replace - - keep - - Keep - - drop - - Drop - - hashmod - - HashMod - - labelmap - - LabelMap - - labeldrop - - LabelDrop - - labelkeep - - LabelKeep - - lowercase - - Lowercase - - uppercase - - Uppercase - - keepequal - - KeepEqual - - dropequal - - DropEqual - type: string - modulus: - format: int64 - type: integer - regex: - type: string - replacement: - type: string - separator: - type: string - sourceLabels: - items: - pattern: ^[a-zA-Z_][a-zA-Z0-9_]*$ - type: string - type: array - targetLabel: - type: string - type: object - type: array - required: - - url - type: object - type: array - serviceMonitorNamespaceSelector: - properties: - matchExpressions: - items: - properties: - key: - type: string - operator: - type: string - values: - items: - type: string - type: array - required: - - key - - operator - type: object - type: array - matchLabels: - additionalProperties: - type: string - type: object - type: object - x-kubernetes-map-type: atomic - serviceMonitorSelector: - properties: - matchExpressions: - items: - properties: - key: - type: string - operator: - type: string - values: - items: - type: string - type: array - required: - - key - - operator - type: object - type: array - matchLabels: - additionalProperties: - type: string - type: object - type: object - x-kubernetes-map-type: atomic - walTruncateFrequency: - type: string - writeStaleOnShutdown: - type: boolean - type: object - type: object - served: true - storage: true diff --git a/operations/agent-static-operator/crds/monitoring.grafana.com_podlogs.yaml b/operations/agent-static-operator/crds/monitoring.grafana.com_podlogs.yaml deleted file mode 100644 index f22d051b51..0000000000 --- a/operations/agent-static-operator/crds/monitoring.grafana.com_podlogs.yaml +++ /dev/null @@ -1,308 +0,0 @@ ---- -apiVersion: apiextensions.k8s.io/v1 -kind: CustomResourceDefinition -metadata: - annotations: - controller-gen.kubebuilder.io/version: v0.9.2 - creationTimestamp: null - name: podlogs.monitoring.grafana.com -spec: - group: monitoring.grafana.com - names: - categories: - - agent-operator - kind: PodLogs - listKind: PodLogsList - plural: podlogs - singular: podlogs - scope: Namespaced - versions: - - name: v1alpha1 - schema: - openAPIV3Schema: - properties: - apiVersion: - type: string - kind: - type: string - metadata: - type: object - spec: - properties: - jobLabel: - type: string - namespaceSelector: - properties: - any: - type: boolean - matchNames: - items: - type: string - type: array - type: object - 
pipelineStages: - items: - properties: - cri: - type: object - docker: - type: object - drop: - properties: - dropCounterReason: - type: string - expression: - type: string - longerThan: - type: string - olderThan: - type: string - source: - type: string - value: - type: string - type: object - json: - properties: - expressions: - additionalProperties: - type: string - type: object - source: - type: string - type: object - labelAllow: - items: - type: string - type: array - labelDrop: - items: - type: string - type: array - labels: - additionalProperties: - type: string - type: object - limit: - properties: - burst: - type: integer - drop: - type: boolean - rate: - type: integer - type: object - match: - properties: - action: - type: string - dropCounterReason: - type: string - pipelineName: - type: string - selector: - type: string - stages: - type: string - required: - - selector - type: object - metrics: - additionalProperties: - properties: - action: - type: string - buckets: - items: - type: string - type: array - countEntryBytes: - type: boolean - description: - type: string - matchAll: - type: boolean - maxIdleDuration: - type: string - prefix: - type: string - source: - type: string - type: - type: string - value: - type: string - required: - - action - - type - type: object - type: object - multiline: - properties: - firstLine: - type: string - maxLines: - type: integer - maxWaitTime: - type: string - required: - - firstLine - type: object - output: - properties: - source: - type: string - required: - - source - type: object - pack: - properties: - ingestTimestamp: - type: boolean - labels: - items: - type: string - type: array - required: - - labels - type: object - regex: - properties: - expression: - type: string - source: - type: string - required: - - expression - type: object - replace: - properties: - expression: - type: string - replace: - type: string - source: - type: string - required: - - expression - type: object - template: - properties: - source: - type: string - template: - type: string - required: - - source - - template - type: object - tenant: - properties: - label: - type: string - source: - type: string - value: - type: string - type: object - timestamp: - properties: - actionOnFailure: - type: string - fallbackFormats: - items: - type: string - type: array - format: - type: string - location: - type: string - source: - type: string - required: - - format - - source - type: object - type: object - type: array - podTargetLabels: - items: - type: string - type: array - relabelings: - items: - properties: - action: - default: replace - enum: - - replace - - Replace - - keep - - Keep - - drop - - Drop - - hashmod - - HashMod - - labelmap - - LabelMap - - labeldrop - - LabelDrop - - labelkeep - - LabelKeep - - lowercase - - Lowercase - - uppercase - - Uppercase - - keepequal - - KeepEqual - - dropequal - - DropEqual - type: string - modulus: - format: int64 - type: integer - regex: - type: string - replacement: - type: string - separator: - type: string - sourceLabels: - items: - pattern: ^[a-zA-Z_][a-zA-Z0-9_]*$ - type: string - type: array - targetLabel: - type: string - type: object - type: array - selector: - properties: - matchExpressions: - items: - properties: - key: - type: string - operator: - type: string - values: - items: - type: string - type: array - required: - - key - - operator - type: object - type: array - matchLabels: - additionalProperties: - type: string - type: object - type: object - x-kubernetes-map-type: atomic - required: - - selector - 
type: object - type: object - served: true - storage: true diff --git a/operations/agent-static-operator/templates/agent-operator.yaml b/operations/agent-static-operator/templates/agent-operator.yaml deleted file mode 100644 index ba8c08e75e..0000000000 --- a/operations/agent-static-operator/templates/agent-operator.yaml +++ /dev/null @@ -1,645 +0,0 @@ -apiVersion: v1 -kind: ServiceAccount -metadata: - name: grafana-agent - namespace: ${NAMESPACE} ---- -apiVersion: v1 -kind: ServiceAccount -metadata: - name: grafana-agent-operator - namespace: ${NAMESPACE} ---- -apiVersion: v1 -automountServiceAccountToken: false -kind: ServiceAccount -metadata: - labels: - app.kubernetes.io/component: exporter - app.kubernetes.io/name: kube-state-metrics - app.kubernetes.io/version: 2.5.0 - name: kube-state-metrics - namespace: ${NAMESPACE} ---- -apiVersion: v1 -data: {} -kind: Secret -metadata: - name: logs-secret - namespace: ${NAMESPACE} -stringData: - password: ${LOGS_KEY} - username: ${LOGS_USER} -type: Opaque ---- -apiVersion: v1 -data: {} -kind: Secret -metadata: - name: metrics-secret - namespace: ${NAMESPACE} -stringData: - password: ${METRICS_KEY} - username: ${METRICS_USER} -type: Opaque ---- -apiVersion: v1 -kind: PersistentVolumeClaim -metadata: - name: agent-eventhandler - namespace: ${NAMESPACE} -spec: - accessModes: - - ReadWriteOnce - resources: - requests: - storage: 1Gi ---- -apiVersion: rbac.authorization.k8s.io/v1 -kind: ClusterRole -metadata: - name: grafana-agent -rules: -- apiGroups: - - "" - resources: - - nodes - - nodes/proxy - - nodes/metrics - - services - - endpoints - - pods - - events - verbs: - - get - - list - - watch -- apiGroups: - - networking.k8s.io - resources: - - ingresses - verbs: - - get - - list - - watch -- nonResourceURLs: - - /metrics - - /metrics/cadvisor - verbs: - - get ---- -apiVersion: rbac.authorization.k8s.io/v1 -kind: ClusterRole -metadata: - name: grafana-agent-operator -rules: -- apiGroups: - - monitoring.grafana.com - resources: - - grafanaagents - - metricsinstances - - logsinstances - - podlogs - - integrations - verbs: - - get - - list - - watch -- apiGroups: - - monitoring.grafana.com - resources: - - grafanaagents/finalizers - - metricsinstances/finalizers - - logsinstances/finalizers - - podlogs/finalizers - - integrations/finalizers - verbs: - - get - - list - - watch - - update -- apiGroups: - - monitoring.coreos.com - resources: - - podmonitors - - probes - - servicemonitors - verbs: - - get - - list - - watch -- apiGroups: - - monitoring.coreos.com - resources: - - podmonitors/finalizers - - probes/finalizers - - servicemonitors/finalizers - verbs: - - get - - list - - watch - - update -- apiGroups: - - "" - resources: - - namespaces - - nodes - verbs: - - get - - list - - watch -- apiGroups: - - "" - resources: - - secrets - - services - - configmaps - - endpoints - verbs: - - get - - list - - watch - - create - - update - - patch - - delete -- apiGroups: - - apps - resources: - - statefulsets - - daemonsets - - deployments - verbs: - - get - - list - - watch - - create - - update - - patch - - delete ---- -apiVersion: rbac.authorization.k8s.io/v1 -kind: ClusterRole -metadata: - labels: - app.kubernetes.io/component: exporter - app.kubernetes.io/name: kube-state-metrics - app.kubernetes.io/version: 2.5.0 - name: kube-state-metrics -rules: -- apiGroups: - - "" - resources: - - configmaps - - secrets - - nodes - - pods - - services - - resourcequotas - - replicationcontrollers - - limitranges - - persistentvolumeclaims - - 
persistentvolumes - - namespaces - - endpoints - verbs: - - list - - watch -- apiGroups: - - apps - resources: - - statefulsets - - daemonsets - - deployments - - replicasets - verbs: - - list - - watch -- apiGroups: - - batch - resources: - - cronjobs - - jobs - verbs: - - list - - watch -- apiGroups: - - autoscaling - resources: - - horizontalpodautoscalers - verbs: - - list - - watch -- apiGroups: - - authentication.k8s.io - resources: - - tokenreviews - verbs: - - create -- apiGroups: - - authorization.k8s.io - resources: - - subjectaccessreviews - verbs: - - create -- apiGroups: - - policy - resources: - - poddisruptionbudgets - verbs: - - list - - watch -- apiGroups: - - certificates.k8s.io - resources: - - certificatesigningrequests - verbs: - - list - - watch -- apiGroups: - - storage.k8s.io - resources: - - storageclasses - - volumeattachments - verbs: - - list - - watch -- apiGroups: - - admissionregistration.k8s.io - resources: - - mutatingwebhookconfigurations - - validatingwebhookconfigurations - verbs: - - list - - watch -- apiGroups: - - networking.k8s.io - resources: - - networkpolicies - - ingresses - verbs: - - list - - watch -- apiGroups: - - coordination.k8s.io - resources: - - leases - verbs: - - list - - watch ---- -apiVersion: rbac.authorization.k8s.io/v1 -kind: ClusterRoleBinding -metadata: - name: grafana-agent -roleRef: - apiGroup: rbac.authorization.k8s.io - kind: ClusterRole - name: grafana-agent -subjects: -- kind: ServiceAccount - name: grafana-agent - namespace: ${NAMESPACE} ---- -apiVersion: rbac.authorization.k8s.io/v1 -kind: ClusterRoleBinding -metadata: - name: grafana-agent-operator -roleRef: - apiGroup: rbac.authorization.k8s.io - kind: ClusterRole - name: grafana-agent-operator -subjects: -- kind: ServiceAccount - name: grafana-agent-operator - namespace: ${NAMESPACE} ---- -apiVersion: rbac.authorization.k8s.io/v1 -kind: ClusterRoleBinding -metadata: - labels: - app.kubernetes.io/component: exporter - app.kubernetes.io/name: kube-state-metrics - app.kubernetes.io/version: 2.5.0 - name: kube-state-metrics -roleRef: - apiGroup: rbac.authorization.k8s.io - kind: ClusterRole - name: kube-state-metrics -subjects: -- kind: ServiceAccount - name: kube-state-metrics - namespace: ${NAMESPACE} ---- -apiVersion: v1 -kind: Service -metadata: - labels: - app.kubernetes.io/component: exporter - app.kubernetes.io/name: kube-state-metrics - app.kubernetes.io/version: 2.5.0 - name: kube-state-metrics - namespace: ${NAMESPACE} -spec: - clusterIP: None - ports: - - name: http-metrics - port: 8080 - targetPort: http-metrics - - name: telemetry - port: 8081 - targetPort: telemetry - selector: - app.kubernetes.io/name: kube-state-metrics ---- -apiVersion: apps/v1 -kind: Deployment -metadata: - name: grafana-agent-operator - namespace: ${NAMESPACE} -spec: - minReadySeconds: 10 - replicas: 1 - revisionHistoryLimit: 10 - selector: - matchLabels: - name: grafana-agent-operator - template: - metadata: - labels: - name: grafana-agent-operator - spec: - containers: - - args: - - --kubelet-service=default/kubelet - image: grafana/agent-operator:v0.37.4 - imagePullPolicy: IfNotPresent - name: grafana-agent-operator - serviceAccount: grafana-agent-operator ---- -apiVersion: apps/v1 -kind: Deployment -metadata: - labels: - app.kubernetes.io/component: exporter - app.kubernetes.io/name: kube-state-metrics - app.kubernetes.io/version: 2.5.0 - name: kube-state-metrics - namespace: ${NAMESPACE} -spec: - replicas: 1 - selector: - matchLabels: - app.kubernetes.io/name: kube-state-metrics - 
template: - metadata: - labels: - app.kubernetes.io/component: exporter - app.kubernetes.io/name: kube-state-metrics - app.kubernetes.io/version: 2.5.0 - spec: - automountServiceAccountToken: true - containers: - - image: registry.k8s.io/kube-state-metrics/kube-state-metrics:v2.5.0 - livenessProbe: - httpGet: - path: /healthz - port: 8080 - initialDelaySeconds: 5 - timeoutSeconds: 5 - name: kube-state-metrics - ports: - - containerPort: 8080 - name: http-metrics - - containerPort: 8081 - name: telemetry - readinessProbe: - httpGet: - path: / - port: 8081 - initialDelaySeconds: 5 - timeoutSeconds: 5 - securityContext: - allowPrivilegeEscalation: false - capabilities: - drop: - - ALL - readOnlyRootFilesystem: true - runAsUser: 65534 - nodeSelector: - kubernetes.io/os: linux - serviceAccountName: kube-state-metrics ---- -apiVersion: monitoring.grafana.com/v1alpha1 -kind: GrafanaAgent -metadata: - name: grafana-agent - namespace: ${NAMESPACE} -spec: - image: grafana/agent:v0.37.4 - integrations: - selector: - matchLabels: - agent: grafana-agent - logs: - instanceSelector: - matchLabels: - agent: grafana-agent - metrics: - externalLabels: - cluster: ${CLUSTER} - instanceSelector: - matchLabels: - agent: grafana-agent - serviceAccountName: grafana-agent ---- -apiVersion: monitoring.grafana.com/v1alpha1 -kind: Integration -metadata: - labels: - agent: grafana-agent - name: agent-eventhandler - namespace: ${NAMESPACE} -spec: - config: - cache_path: /etc/eventhandler/eventhandler.cache - logs_instance: ${NAMESPACE}/grafana-agent-logs - name: eventhandler - type: - unique: true - volumeMounts: - - mountPath: /etc/eventhandler - name: agent-eventhandler - volumes: - - name: agent-eventhandler - persistentVolumeClaim: - claimName: agent-eventhandler ---- -apiVersion: monitoring.grafana.com/v1alpha1 -kind: LogsInstance -metadata: - labels: - agent: grafana-agent - name: grafana-agent-logs - namespace: ${NAMESPACE} -spec: - clients: - - basicAuth: - password: - key: password - name: logs-secret - username: - key: username - name: logs-secret - externalLabels: - cluster: ${CLUSTER} - url: ${LOGS_URL} - podLogsNamespaceSelector: {} - podLogsSelector: - matchLabels: - instance: primary ---- -apiVersion: monitoring.grafana.com/v1alpha1 -kind: MetricsInstance -metadata: - labels: - agent: grafana-agent - name: grafana-agent-metrics - namespace: ${NAMESPACE} -spec: - remoteWrite: - - basicAuth: - password: - key: password - name: metrics-secret - username: - key: username - name: metrics-secret - url: ${METRICS_URL} - serviceMonitorNamespaceSelector: {} - serviceMonitorSelector: - matchLabels: - instance: primary ---- -apiVersion: monitoring.grafana.com/v1alpha1 -kind: PodLogs -metadata: - labels: - instance: primary - name: kubernetes-logs - namespace: ${NAMESPACE} -spec: - namespaceSelector: - any: true - pipelineStages: - - cri: {} - relabelings: - - sourceLabels: - - __meta_kubernetes_pod_node_name - targetLabel: __host__ - - action: replace - sourceLabels: - - __meta_kubernetes_namespace - targetLabel: namespace - - action: replace - sourceLabels: - - __meta_kubernetes_pod_name - targetLabel: pod - - action: replace - sourceLabels: - - __meta_kubernetes_pod_container_name - targetLabel: container - - replacement: /var/log/pods/*$1/*.log - separator: / - sourceLabels: - - __meta_kubernetes_pod_uid - - __meta_kubernetes_pod_container_name - targetLabel: __path__ - selector: - matchLabels: {} ---- -apiVersion: monitoring.coreos.com/v1 -kind: ServiceMonitor -metadata: - labels: - instance: primary - name: 
cadvisor-monitor - namespace: ${NAMESPACE} -spec: - endpoints: - - bearerTokenFile: /var/run/secrets/kubernetes.io/serviceaccount/token - honorLabels: true - interval: 60s - path: /metrics/cadvisor - port: https-metrics - relabelings: - - sourceLabels: - - __metrics_path__ - targetLabel: metrics_path - - action: replace - replacement: cadvisor - targetLabel: job - scheme: https - tlsConfig: - insecureSkipVerify: true - namespaceSelector: - matchNames: - - default - selector: - matchLabels: - app.kubernetes.io/name: kubelet ---- -apiVersion: monitoring.coreos.com/v1 -kind: ServiceMonitor -metadata: - labels: - instance: primary - name: ksm-monitor - namespace: ${NAMESPACE} -spec: - endpoints: - - honorLabels: true - interval: 60s - path: /metrics - port: http-metrics - relabelings: - - action: replace - replacement: kube-state-metrics - targetLabel: job - namespaceSelector: - matchNames: - - ${NAMESPACE} - selector: - matchLabels: - app.kubernetes.io/name: kube-state-metrics ---- -apiVersion: monitoring.coreos.com/v1 -kind: ServiceMonitor -metadata: - labels: - instance: primary - name: kubelet-monitor - namespace: ${NAMESPACE} -spec: - endpoints: - - bearerTokenFile: /var/run/secrets/kubernetes.io/serviceaccount/token - honorLabels: true - interval: 60s - path: /metrics - port: https-metrics - relabelings: - - sourceLabels: - - __metrics_path__ - targetLabel: metrics_path - - action: replace - replacement: kubelet - targetLabel: job - scheme: https - tlsConfig: - insecureSkipVerify: true - namespaceSelector: - matchNames: - - default - selector: - matchLabels: - app.kubernetes.io/name: kubelet diff --git a/packaging/grafana-agent-flow/deb/control/postinst b/packaging/grafana-agent-flow/deb/control/postinst deleted file mode 100644 index 54b4fee03d..0000000000 --- a/packaging/grafana-agent-flow/deb/control/postinst +++ /dev/null @@ -1,45 +0,0 @@ -#!/bin/sh - -set -e - -# shellcheck disable=SC1091 -[ -f /etc/default/grafana-agent-flow ] && . /etc/default/grafana-agent-flow - -# NOTE: the grafana-agent group is shared with the grafana-agent package to -# make it easier to migrate between the two. A unique user is still used to -# give them different home directories. - -# Initial installation: $1 == configure -# Upgrade: $1 == configure, $2 == old version -case "$1" in - configure) - [ -z "$GRAFANA_AGENT_USER" ] && GRAFANA_AGENT_USER="grafana-agent-flow" - [ -z "$GRAFANA_AGENT_GROUP" ] && GRAFANA_AGENT_GROUP="grafana-agent" - if ! getent group "$GRAFANA_AGENT_GROUP" > /dev/null 2>&1 ; then - groupadd -r "$GRAFANA_AGENT_GROUP" - fi - if ! getent passwd "$GRAFANA_AGENT_USER" > /dev/null 2>&1 ; then - useradd -m -r -g "$GRAFANA_AGENT_GROUP" -d /var/lib/grafana-agent-flow -s /sbin/nologin -c "grafana-agent-flow user" "$GRAFANA_AGENT_USER" - fi - - # Add grafana agent user to groups used for reading logs. 
-    if getent group adm > /dev/null 2>&1 ; then
-      usermod -a -G adm "$GRAFANA_AGENT_USER"
-    fi
-    if getent group systemd-journal > /dev/null 2>&1 ; then
-      usermod -a -G systemd-journal "$GRAFANA_AGENT_USER"
-    fi
-
-    chown $GRAFANA_AGENT_USER:$GRAFANA_AGENT_GROUP /var/lib/grafana-agent-flow
-    chmod 770 /var/lib/grafana-agent-flow
-
-    chmod 640 /etc/grafana-agent-flow.river
-    chown root:$GRAFANA_AGENT_GROUP /etc/grafana-agent-flow.river
-
-    if [ -z ${2+x} ] && [ "$RESTART_ON_UPGRADE" = "true" ]; then
-      if command -v systemctl 2>/dev/null; then
-        systemctl daemon-reload
-        systemctl restart grafana-agent-flow
-      fi
-    fi
-esac
diff --git a/packaging/grafana-agent-flow/deb/control/prerm b/packaging/grafana-agent-flow/deb/control/prerm
deleted file mode 100644
index 782a3d6829..0000000000
--- a/packaging/grafana-agent-flow/deb/control/prerm
+++ /dev/null
@@ -1,12 +0,0 @@
-#!/bin/sh
-
-set -e
-
-# shellcheck disable=SC1091
-[ -f /etc/default/grafana-agent-flow ] && . /etc/default/grafana-agent-flow
-
-if [ "$1" = "remove" ]; then
-  if command -v systemctl 2>/dev/null; then
-    systemctl stop grafana-agent-flow.service > /dev/null 2>&1 || :
-  fi
-fi
diff --git a/packaging/grafana-agent-flow/deb/grafana-agent-flow.service b/packaging/grafana-agent-flow/deb/grafana-agent-flow.service
deleted file mode 100644
index afca019731..0000000000
--- a/packaging/grafana-agent-flow/deb/grafana-agent-flow.service
+++ /dev/null
@@ -1,20 +0,0 @@
-[Unit]
-Description=Vendor-neutral programmable observability pipelines.
-Documentation=https://grafana.com/docs/agent/latest/flow/
-Wants=network-online.target
-After=network-online.target
-
-[Service]
-Restart=always
-User=grafana-agent-flow
-Environment=HOSTNAME=%H
-Environment=AGENT_DEPLOY_MODE=deb
-EnvironmentFile=/etc/default/grafana-agent-flow
-WorkingDirectory=/var/lib/grafana-agent-flow
-ExecStart=/usr/bin/grafana-agent-flow run $CUSTOM_ARGS --storage.path=/var/lib/grafana-agent-flow $CONFIG_FILE
-ExecReload=/usr/bin/env kill -HUP $MAINPID
-TimeoutStopSec=20s
-SendSIGKILL=no
-
-[Install]
-WantedBy=multi-user.target
diff --git a/packaging/grafana-agent-flow/environment-file b/packaging/grafana-agent-flow/environment-file
deleted file mode 100644
index 9f6e202092..0000000000
--- a/packaging/grafana-agent-flow/environment-file
+++ /dev/null
@@ -1,16 +0,0 @@
-## Path:
-## Description: Grafana Agent Flow settings
-## Type: string
-## Default: ""
-## ServiceRestart: grafana-agent-flow
-#
-# Command line options for grafana-agent
-#
-# The configuration file holding the agent config.
-CONFIG_FILE="/etc/grafana-agent-flow.river"
-
-# User-defined arguments to pass to the run command.
-CUSTOM_ARGS=""
-
-# Restart on system upgrade. Defaults to true.
-RESTART_ON_UPGRADE=true
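The deb control scripts above create the shared grafana-agent group, a dedicated grafana-agent-flow user, and lock down the data directory and config file. A minimal sketch for sanity-checking the result on an installed host, assuming a POSIX shell and GNU stat; the expected mode strings are read off the chmod/chown calls in the postinst:

    #!/bin/sh
    # Verify the account layout and permissions the postinst is supposed to create.
    getent passwd grafana-agent-flow  # dedicated user, home /var/lib/grafana-agent-flow
    getent group grafana-agent        # group shared with the grafana-agent package
    id -nG grafana-agent-flow         # should list adm / systemd-journal where those groups exist
    stat -c '%a %U:%G %n' /var/lib/grafana-agent-flow    # expect 770 grafana-agent-flow:grafana-agent
    stat -c '%a %U:%G %n' /etc/grafana-agent-flow.river  # expect 640 root:grafana-agent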
diff --git a/packaging/grafana-agent-flow/rpm/control/postinst b/packaging/grafana-agent-flow/rpm/control/postinst
deleted file mode 100644
index c29c7cb48b..0000000000
--- a/packaging/grafana-agent-flow/rpm/control/postinst
+++ /dev/null
@@ -1,49 +0,0 @@
-#!/bin/sh
-
-set -e
-
-# NOTE: the grafana-agent group is shared with the grafana-agent package to
-# make it easier to migrate between the two. A unique user is still used to
-# give them different home directories.
-
-# shellcheck disable=SC1091
-[ -f /etc/sysconfig/grafana-agent-flow ] && . /etc/sysconfig/grafana-agent-flow
-[ -z "$AGENT_USER" ] && AGENT_USER="grafana-agent-flow"
-[ -z "$AGENT_GROUP" ] && AGENT_GROUP="grafana-agent"
-
-add_to_logging_groups() {
-  # Add grafana agent user to groups used for reading logs.
-  if getent group adm > /dev/null 2>&1 ; then
-    usermod -a -G adm "$AGENT_USER"
-  fi
-  if getent group systemd-journal > /dev/null 2>&1 ; then
-    usermod -a -G systemd-journal "$AGENT_USER"
-  fi
-}
-
-# Initial installation: $1 == 1
-# Upgrade: $1 == 2, and configured to restart on upgrade
-if [ "$1" -eq 1 ] ; then
-  if ! getent group "$AGENT_GROUP" > /dev/null 2>&1 ; then
-    groupadd -r "$AGENT_GROUP"
-  fi
-  if ! getent passwd "$AGENT_USER" > /dev/null 2>&1 ; then
-    useradd -r -m -g "$AGENT_GROUP" -d /var/lib/grafana-agent-flow -s /sbin/nologin -c "grafana-agent-flow user" "$AGENT_USER"
-  fi
-
-  add_to_logging_groups
-
-  chown $AGENT_USER:$AGENT_GROUP /var/lib/grafana-agent-flow
-  chmod 770 /var/lib/grafana-agent-flow
-
-  chmod 640 /etc/grafana-agent-flow.river
-  chown root:$AGENT_GROUP /etc/grafana-agent-flow.river
-
-elif [ "$1" -ge 2 ] ; then
-  add_to_logging_groups
-
-  if [ "$RESTART_ON_UPGRADE" = "true" ]; then
-    systemctl daemon-reload
-    systemctl restart grafana-agent-flow
-  fi
-fi
diff --git a/packaging/grafana-agent-flow/rpm/control/prerm b/packaging/grafana-agent-flow/rpm/control/prerm
deleted file mode 100644
index f971b5fcf6..0000000000
--- a/packaging/grafana-agent-flow/rpm/control/prerm
+++ /dev/null
@@ -1,20 +0,0 @@
-#!/bin/sh
-
-set -e
-
-# shellcheck disable=SC1091
-[ -f /etc/sysconfig/grafana-agent-flow ] && . /etc/sysconfig/grafana-agent-flow
-
-# final uninstallation $1=0
-# If other copies of this RPM are installed, then $1>0
-
-if [ "$1" -eq 0 ] ; then
-  if [ -x /bin/systemctl ] ; then
-    /bin/systemctl stop grafana-agent-flow.service > /dev/null 2>&1 || :
-  elif [ -x /etc/init.d/grafana-agent-flow ] ; then
-    /etc/init.d/grafana-agent-flow stop
-  elif [ -x /etc/rc.d/init.d/grafana-agent-flow ] ; then
-    /etc/rc.d/init.d/grafana-agent-flow stop
-  fi
-fi
-exit 0
diff --git a/packaging/grafana-agent-flow/rpm/gpg-sign.sh b/packaging/grafana-agent-flow/rpm/gpg-sign.sh
deleted file mode 100755
index d869d2d856..0000000000
--- a/packaging/grafana-agent-flow/rpm/gpg-sign.sh
+++ /dev/null
@@ -1,36 +0,0 @@
-#!/usr/bin/env bash
-
-# We are not using fpm's signing functionality because it does not work anymore
-# https://github.com/jordansissel/fpm/issues/1626
-
-set -euxo pipefail
-shopt -s extglob
-
-# Write GPG key to GPG keyring
-printf "%s" "${GPG_PUBLIC_KEY}" > /tmp/gpg-public-key
-gpg --import /tmp/gpg-public-key
-printf "%s" "${GPG_PRIVATE_KEY}" | gpg --import --no-tty --batch --yes --passphrase "${GPG_PASSPHRASE}"
-
-rpm --import /tmp/gpg-public-key
-
-echo "%_gpg_name Grafana Labs
-%_signature gpg
-%_gpg_path /root/.gnupg
-%_gpgbin /usr/bin/gpg
-%_gpg_digest_algo sha256
-%_binary_filedigest_algorithm sha256
-%_source_filedigest_algorithm sha256
-%__gpg /usr/bin/gpg
-%__gpg_sign_cmd %{__gpg} \
-    gpg --no-tty --batch --yes --no-verbose --no-armor \
-    --passphrase ${GPG_PASSPHRASE} \
-    --pinentry-mode loopback \
-    %{?_gpg_digest_algo:--digest-algo %{_gpg_digest_algo}} \
-    --no-secmem-warning \
-    -u \"%{_gpg_name}\" -sbo %{__signature_filename} %{__plaintext_filename}
-" > ~/.rpmmacros
-
-for f in dist/*.rpm; do
-  rpm --addsign "${f}"
-  rpm --checksig "${f}"
-done
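gpg-sign.sh signs every RPM under dist/ and immediately re-checks its own signatures. On a host that only consumes the packages, verification reduces to importing the same public key and running the check; a sketch under the assumption that the key path and dist/ layout match the script above:

    #!/bin/sh
    # Verify signed RPMs with the public half of the signing key.
    rpm --import /tmp/gpg-public-key
    for f in dist/*.rpm; do
      rpm --checksig "$f"  # prints "digests signatures OK" when the signature matches
    done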
diff --git a/packaging/grafana-agent-flow/rpm/grafana-agent-flow.service b/packaging/grafana-agent-flow/rpm/grafana-agent-flow.service
deleted file mode 100644
index 3309607905..0000000000
--- a/packaging/grafana-agent-flow/rpm/grafana-agent-flow.service
+++ /dev/null
@@ -1,20 +0,0 @@
-[Unit]
-Description=Vendor-neutral programmable observability pipelines.
-Documentation=https://grafana.com/docs/agent/latest/flow/
-Wants=network-online.target
-After=network-online.target
-
-[Service]
-Restart=always
-User=grafana-agent-flow
-Environment=HOSTNAME=%H
-Environment=AGENT_DEPLOY_MODE=rpm
-EnvironmentFile=/etc/sysconfig/grafana-agent-flow
-WorkingDirectory=/var/lib/grafana-agent-flow
-ExecStart=/usr/bin/grafana-agent-flow run $CUSTOM_ARGS --storage.path=/var/lib/grafana-agent-flow $CONFIG_FILE
-ExecReload=/usr/bin/env kill -HUP $MAINPID
-TimeoutStopSec=20s
-SendSIGKILL=no
-
-[Install]
-WantedBy=multi-user.target
diff --git a/packaging/grafana-agent-flow/windows/install_script.nsis b/packaging/grafana-agent-flow/windows/install_script.nsis
deleted file mode 100644
index 5e253890c2..0000000000
--- a/packaging/grafana-agent-flow/windows/install_script.nsis
+++ /dev/null
@@ -1,200 +0,0 @@
-# This script does the following:
-#
-# 1. Installs grafana-agent-flow-windows-amd64.exe, grafana-agent-service-amd64.exe, and logo.ico.
-# 2. Creates a Start Menu shortcut.
-# 3. Builds an uninstaller.
-# 4. Adds uninstall information to the registry for Add/Remove Programs.
-# 5. Initializes the registry with appropriate settings.
-
-Unicode true
-
-!include nsDialogs.nsh
-!include FileFunc.nsh
-!include .\macros.nsis
-
-!define APPNAME "Grafana Agent Flow"
-!define HELPURL "https://grafana.com/docs/agent/latest/flow/"
-!define UPDATEURL "https://github.com/grafana/agent/releases"
-!define ABOUTURL "https://github.com/grafana/agent"
-
-# Because we modify the registry and install a service that runs as
-# LocalSystem, we require admin permissions.
-RequestExecutionLevel admin
-
-Name "${APPNAME} ${VERSION}" # Shown in title bar for installer/uninstaller
-Icon "logo.ico"
-InstallDir "$PROGRAMFILES64\${APPNAME}"
-LicenseData ..\..\..\LICENSE
-OutFile "${OUT}"
-
-# Everything must be global Vars
-Var PassedInParameters
-Var Config
-Var ConfigFlag
-Var Environment
-Var DisableReporting
-Var DisableReportingFlag
-Var DisableProfiling
-Var DisableProfilingFlag
-
-# Pages during the installer.
-Page license
-Page directory
-Page instfiles
-
-# Automatically called when installing.
-Function .onInit
-  SetShellVarContext all
-  !insertmacro VerifyUserIsAdmin
-FunctionEnd
-
-Section "install"
-  ${GetParameters} $PassedInParameters
-  ${GetOptions} $PassedInParameters "/DISABLEPROFILING=" $DisableProfiling
-  ${GetOptions} $PassedInParameters "/DISABLEREPORTING=" $DisableReporting
-  ${GetOptions} $PassedInParameters "/ENVIRONMENT=" $Environment
-  ${GetOptions} $PassedInParameters "/CONFIG=" $Config
-
-  # Calls to functions like nsExec::ExecToLog below push the exit code to the
-  # stack, and must be popped after calling.
-
-  # Preemptively stop the existing service if it's running.
-  nsExec::ExecToLog 'sc stop "Grafana Agent Flow"'
-  Pop $0
-
-  # Configure the out path and copy files to it.
-  SetOutPath "$INSTDIR"
-  File "..\..\..\dist.temp\grafana-agent-flow-windows-amd64.exe"
-  File "..\..\..\dist.temp\grafana-agent-service-windows-amd64.exe"
-  File "logo.ico"
-
-  # Create an uninstaller at the same path.
-  WriteUninstaller "$INSTDIR\uninstall.exe"
-
-  # Registry information for Add/Remote Programs. It's OK for this to
-  # overwriting existing registry entries since we want it to be relevant to
-  # the current installed version.
-  !define UNINSTALLKEY "Software\Microsoft\Windows\CurrentVersion\Uninstall\${APPNAME}"
-  WriteRegStr HKLM "${UNINSTALLKEY}" "DisplayName" '${APPNAME} ${VERSION}'
-  WriteRegStr HKLM "${UNINSTALLKEY}" "UninstallString" '"$INSTDIR\uninstall.exe"'
-  WriteRegStr HKLM "${UNINSTALLKEY}" "QuietUninstallString" '"$INSTDIR\uninstall.exe" /S'
-  WriteRegStr HKLM "${UNINSTALLKEY}" "InstallLocation" '"$INSTDIR"'
-  WriteRegStr HKLM "${UNINSTALLKEY}" "DisplayIcon" '"$INSTDIR\logo.ico"'
-  WriteRegStr HKLM "${UNINSTALLKEY}" "Publisher" '"${ABOUTURL}"'
-  WriteRegStr HKLM "${UNINSTALLKEY}" "HelpLink" '"${HELPURL}"'
-  WriteRegStr HKLM "${UNINSTALLKEY}" "URLUpdateInfo" '"${UPDATEURL}"'
-  WriteRegStr HKLM "${UNINSTALLKEY}" "URLInfoAbout" '"${ABOUTURL}"'
-  WriteRegDWORD HKLM "${UNINSTALLKEY}" "NoModify" 1
-  WriteRegDWORD HKLM "${UNINSTALLKEY}" "NoRepair" 1
-
-  Call CreateConfig
-  Call InitializeRegistry
-
-  # Create the service.
-  nsExec::ExecToLog 'sc create "Grafana Agent Flow" start= delayed-auto binpath= "$INSTDIR\grafana-agent-service-windows-amd64.exe"'
-  Pop $0
-
-  # Start the service.
-  nsExec::ExecToLog 'sc start "Grafana Agent Flow"'
-  Pop $0
-
-  # Auto-restart agent on failure. Reset failure counter after 60 seconds without failure
-  nsExec::ExecToLog `sc failure "Grafana Agent Flow" reset= 60 actions= restart/5000 reboot= "Grafana Agent Flow has failed. Restarting in 5 seconds"`
-  Pop $0
-SectionEnd
-
-Function CreateConfig
-  IfFileExists "$INSTDIR\config.river" Noop CreateNewConfig
-  Noop:
-    Return
-  CreateNewConfig:
-    File "config.river"
-    Return
-FunctionEnd
-
-# InitializeRegistry initializes the keys in the registry that the service
-# runner uses. If the registry values already exist, they are not overwritten.
-Function InitializeRegistry
-  !define REGKEY "HKLM\Software\Grafana\Grafana Agent Flow"
-
-  # Define the default key, which points to the service.
-  nsExec::ExecToLog 'Reg.exe query "${REGKEY}" /reg:64 /ve'
-  Pop $0
-  ${If} $0 == 1
-    nsExec::ExecToLog 'Reg.exe add "${REGKEY}" /reg:64 /ve /d "$INSTDIR\grafana-agent-flow-windows-amd64.exe"'
-    Pop $0 # Ignore return result
-  ${EndIf}
-
-  ${If} $Config != ""
-    StrCpy $ConfigFlag "$Config"
-  ${Else}
-    StrCpy $ConfigFlag "$INSTDIR\config.river"
-  ${EndIf}
-
-  ${If} $DisableReporting == "yes"
-    StrCpy $DisableReportingFlag "--disable-reporting\0"
-  ${Else}
-    StrCpy $DisableReportingFlag ""
-  ${EndIf}
-
-  ${If} $DisableProfiling == "yes"
-    StrCpy $DisableProfilingFlag "--server.http.enable-pprof=false\0"
-  ${Else}
-    StrCpy $DisableProfilingFlag ""
-  ${EndIf}
-
-  # Define the arguments key, which holds arguments to pass to the
-  # service.
-  nsExec::ExecToLog 'Reg.exe query "${REGKEY}" /reg:64 /v Arguments'
-  Pop $0
-  ${If} $0 == 1
-    nsExec::ExecToLog 'Reg.exe add "${REGKEY}" /reg:64 /v Arguments /t REG_MULTI_SZ /d "run"\0"$ConfigFlag"\0"--storage.path=$APPDATA\${APPNAME}\data"\0"$DisableReportingFlag$DisableProfilingFlag"'
-    Pop $0 # Ignore return result
-  ${EndIf}
-
-  nsExec::ExecToLog 'Reg.exe query "${REGKEY}" /reg:64 /v Environment'
-  Pop $0
-  ${If} $0 == 1
-    # Define the environment key, which holds environment variables to pass to the
-    # service.
-    nsExec::ExecToLog 'Reg.exe add "${REGKEY}" /reg:64 /v Environment /t REG_MULTI_SZ /d "$Environment"'
-    Pop $0 # Ignore return result
-  ${EndIf}
-
-  Return
-FunctionEnd
-
-# Automatically called when uninstalling.
-Function un.onInit
-  SetShellVarContext all
-
-  IfSilent Noop WarnUser
-  Noop:
-    Return
-  WarnUser:
-    MessageBox MB_OKCANCEL "Permanently remove ${APPNAME}? This will remove all data for the application." IDOK Continue
-    Abort
-  Continue:
-    !insertmacro VerifyUserIsAdmin
-    Return
-FunctionEnd
-
-Section "uninstall"
-  DetailPrint "Starting uninstaller."
-
-  # Stop and remove service.
-  nsExec::ExecToLog 'sc stop "Grafana Agent Flow"'
-  Pop $0
-  nsExec::ExecToLog 'sc delete "Grafana Agent Flow"'
-  Pop $0
-
-  RMDir /r "$SMPROGRAMS\${APPNAME}" # Start Menu folder.
-  RMDir /r "$INSTDIR" # Install directory.
-  RMDir /r "$APPDATA\${APPNAME}" # Application data.
-
-  # Remove service and uninstaller information from the registry.
-  nsExec::ExecToLog 'Reg.exe delete "HKLM\SOFTWARE\Grafana\Grafana Agent Flow" /reg:64 /f'
-  Pop $0
-  nsExec::ExecToLog 'Reg.exe delete "HKLM\SOFTWARE\Microsoft\Windows\CurrentVersion\Uninstall\${APPNAME}" /reg:64 /f'
-  Pop $0
-SectionEnd
diff --git a/packaging/grafana-agent-flow/windows/logo.ico b/packaging/grafana-agent-flow/windows/logo.ico
deleted file mode 100644
index 1f36fdd3311b125c258a381ad0ad88603c3a9e4f..0000000000000000000000000000000000000000
GIT binary patch
literal 0
HcmV?d00001

literal 15086
[15086 bytes of base85-encoded icon data omitted]

diff --git a/packaging/grafana-agent/deb/control/postinst b/packaging/grafana-agent/deb/control/postinst
index 698711ef44..a06af7a86f 100644
--- a/packaging/grafana-agent/deb/control/postinst
+++ b/packaging/grafana-agent/deb/control/postinst
@@ -29,8 +29,8 @@ case "$1" in
     chown $GRAFANA_AGENT_USER:$GRAFANA_AGENT_GROUP /var/lib/grafana-agent
     chmod 770 /var/lib/grafana-agent
 
-    chmod 640 /etc/grafana-agent.yaml
-    chown root:$GRAFANA_AGENT_GROUP /etc/grafana-agent.yaml
+    chmod 640 /etc/grafana-agent.river
+    chown root:$GRAFANA_AGENT_GROUP /etc/grafana-agent.river
 
     if [ -z ${2+x} ] && [ "$RESTART_ON_UPGRADE" = "true" ]; then
       if command -v systemctl 2>/dev/null; then
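The postinst hunk above moves the managed config from /etc/grafana-agent.yaml to /etc/grafana-agent.river while keeping the same ownership rules. A one-line check on an upgraded host, assuming GNU stat:

    stat -c '%a %U:%G %n' /etc/grafana-agent.river  # expect 640 root:grafana-agent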
diff --git a/packaging/grafana-agent/deb/grafana-agent.service b/packaging/grafana-agent/deb/grafana-agent.service
index eea8677f3f..7177972b45 100644
--- a/packaging/grafana-agent/deb/grafana-agent.service
+++ b/packaging/grafana-agent/deb/grafana-agent.service
@@ -1,6 +1,6 @@
 [Unit]
-Description=Monitoring system and forwarder
-Documentation=https://grafana.com/docs/agent/latest/
+Description=Vendor-neutral programmable observability pipelines.
+Documentation=https://grafana.com/docs/agent
 Wants=network-online.target
 After=network-online.target
 
@@ -11,10 +11,8 @@ Environment=HOSTNAME=%H
 Environment=AGENT_DEPLOY_MODE=deb
 EnvironmentFile=/etc/default/grafana-agent
 WorkingDirectory=/var/lib/grafana-agent
-ExecStart=/usr/bin/grafana-agent --config.file $CONFIG_FILE $CUSTOM_ARGS
+ExecStart=/usr/bin/grafana-agent run $CUSTOM_ARGS --storage.path=/var/lib/grafana-agent $CONFIG_FILE
 ExecReload=/usr/bin/env kill -HUP $MAINPID
-# If running the Agent in scraping service mode, you will want to override this value with
-# something larger to allow the Agent to gracefully leave the cluster. 4800s is recommend.
 TimeoutStopSec=20s
 SendSIGKILL=no
 
diff --git a/packaging/grafana-agent/environment-file b/packaging/grafana-agent/environment-file
index ea1bf1bf7b..e8b15ea74a 100644
--- a/packaging/grafana-agent/environment-file
+++ b/packaging/grafana-agent/environment-file
@@ -1,16 +1,16 @@
 ## Path:
-## Description: Grafana Agent monitoring agent settings
+## Description: Grafana Agent settings
 ## Type: string
 ## Default: ""
 ## ServiceRestart: grafana-agent
 #
 # Command line options for grafana-agent
 #
-# The configuration file holding the agent config
-CONFIG_FILE="/etc/grafana-agent.yaml"
+# The configuration file holding the agent config.
+CONFIG_FILE="/etc/grafana-agent.river"
 
-# Any user defined arguments
-CUSTOM_ARGS="-server.http.address=127.0.0.1:9090 -server.grpc.address=127.0.0.1:9091"
+# User-defined arguments to pass to the run command.
+CUSTOM_ARGS=""
 
-# Restart on system upgrade. Default to true
+# Restart on system upgrade. Defaults to true.
 RESTART_ON_UPGRADE=true
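(Editor's aside, not part of the patch: combining the new ExecStart line in the unit file with the defaults shipped in this environment file, systemd ends up invoking roughly the following after variable expansion. This is a sketch that assumes CUSTOM_ARGS is left empty, as shipped:

    /usr/bin/grafana-agent run --storage.path=/var/lib/grafana-agent /etc/grafana-agent.river
)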
diff --git a/packaging/grafana-agent-flow/grafana-agent-flow.river b/packaging/grafana-agent/grafana-agent.river
similarity index 89%
rename from packaging/grafana-agent-flow/grafana-agent-flow.river
rename to packaging/grafana-agent/grafana-agent.river
index 4d74fc7aa9..56a8ea5b4a 100644
--- a/packaging/grafana-agent-flow/grafana-agent-flow.river
+++ b/packaging/grafana-agent/grafana-agent.river
@@ -1,6 +1,6 @@
-// Sample config for Grafana Agent Flow.
+// Sample config for Grafana Agent.
 //
-// For a full configuration reference, see https://grafana.com/docs/agent/latest/flow/
+// For a full configuration reference, see https://grafana.com/docs/agent
 logging {
 	level = "warn"
 }
diff --git a/packaging/grafana-agent/grafana-agent.yaml b/packaging/grafana-agent/grafana-agent.yaml
deleted file mode 100644
index f25f2a2bb8..0000000000
--- a/packaging/grafana-agent/grafana-agent.yaml
+++ /dev/null
@@ -1,27 +0,0 @@
-# Sample config for Grafana Agent
-# For a full configuration reference, see: https://grafana.com/docs/agent/latest/configuration/.
-server:
-  log_level: warn
-
-metrics:
-  global:
-    scrape_interval: 1m
-  wal_directory: '/var/lib/grafana-agent'
-  configs:
-    # Example Prometheus scrape configuration to scrape the agent itself for metrics.
-    # This is not needed if the agent integration is enabled.
-    # - name: agent
-    #   host_filter: false
-    #   scrape_configs:
-    #     - job_name: agent
-    #       static_configs:
-    #         - targets: ['127.0.0.1:9090']
-
-integrations:
-  agent:
-    enabled: true
-  node_exporter:
-    enabled: true
-    include_exporter_metrics: true
-    disable_collectors:
-      - "mdadm"
diff --git a/packaging/grafana-agent/rpm/control/postinst b/packaging/grafana-agent/rpm/control/postinst
index 946ada97c1..e908923dc2 100644
--- a/packaging/grafana-agent/rpm/control/postinst
+++ b/packaging/grafana-agent/rpm/control/postinst
@@ -2,6 +2,10 @@
 
 set -e
 
+# NOTE: the grafana-agent group is shared with the legacy grafana-agent
+# package to make it easier to migrate between the two. A unique user is still
+# used to give them different home directories.
+
 # shellcheck disable=SC1091
 [ -f /etc/sysconfig/grafana-agent ] && . /etc/sysconfig/grafana-agent
 
 [ -z "$AGENT_USER" ] && AGENT_USER="grafana-agent"
@@ -32,8 +36,8 @@ if [ "$1" -eq 1 ] ; then
     chown $AGENT_USER:$AGENT_GROUP /var/lib/grafana-agent
     chmod 770 /var/lib/grafana-agent
 
-    chmod 640 /etc/grafana-agent.yaml
-    chown root:$AGENT_GROUP /etc/grafana-agent.yaml
+    chmod 640 /etc/grafana-agent.river
+    chown root:$AGENT_GROUP /etc/grafana-agent.river
 
 elif [ "$1" -ge 2 ] ; then
     add_to_logging_groups
diff --git a/packaging/grafana-agent/rpm/grafana-agent.service b/packaging/grafana-agent/rpm/grafana-agent.service
index 0089827dcd..ea3165626d 100644
--- a/packaging/grafana-agent/rpm/grafana-agent.service
+++ b/packaging/grafana-agent/rpm/grafana-agent.service
@@ -1,6 +1,6 @@
 [Unit]
-Description=Monitoring system and forwarder
-Documentation=https://grafana.com/docs/agent/latest/
+Description=Vendor-neutral programmable observability pipelines.
+Documentation=https://grafana.com/docs/agent
 Wants=network-online.target
 After=network-online.target
 
@@ -11,10 +11,8 @@ Environment=HOSTNAME=%H
 Environment=AGENT_DEPLOY_MODE=rpm
 EnvironmentFile=/etc/sysconfig/grafana-agent
 WorkingDirectory=/var/lib/grafana-agent
-ExecStart=/usr/bin/grafana-agent --config.file $CONFIG_FILE $CUSTOM_ARGS
+ExecStart=/usr/bin/grafana-agent run $CUSTOM_ARGS --storage.path=/var/lib/grafana-agent $CONFIG_FILE
 ExecReload=/usr/bin/env kill -HUP $MAINPID
-# If running the Agent in scraping service mode, you will want to override this value with
-# something larger to allow the Agent to gracefully leave the cluster. 4800s is recommend.
 TimeoutStopSec=20s
 SendSIGKILL=no
 
diff --git a/packaging/grafana-agent/windows/.gitignore b/packaging/grafana-agent/windows/.gitignore
deleted file mode 100644
index 16917d7c31..0000000000
--- a/packaging/grafana-agent/windows/.gitignore
+++ /dev/null
@@ -1,2 +0,0 @@
-*.exe
-LICENSE
diff --git a/packaging/grafana-agent/windows/Dockerfile b/packaging/grafana-agent/windows/Dockerfile
deleted file mode 100644
index 07b8881d07..0000000000
--- a/packaging/grafana-agent/windows/Dockerfile
+++ /dev/null
@@ -1,4 +0,0 @@
-FROM debian
-RUN apt-get update && \
-    apt-get install -y nsis
-ENTRYPOINT makensis -V4 -DVERSION=$VERSION -DOUT="/home/dist/grafana-agent-installer.exe" /home/packaging/windows/install_script.nsis
\ No newline at end of file
diff --git a/packaging/grafana-agent-flow/windows/config.river b/packaging/grafana-agent/windows/config.river
similarity index 100%
rename from packaging/grafana-agent-flow/windows/config.river
rename to packaging/grafana-agent/windows/config.river
diff --git a/packaging/grafana-agent/windows/install_script.nsis b/packaging/grafana-agent/windows/install_script.nsis
index a39ec0d4dc..688b8ca9a7 100644
--- a/packaging/grafana-agent/windows/install_script.nsis
+++ b/packaging/grafana-agent/windows/install_script.nsis
@@ -1,235 +1,200 @@
-Unicode true
-# This installs two files, grafana-agent-windows-amd64.exe and logo.ico, creates a start menu shortcut, builds an uninstaller, and
-# adds uninstall information to the registry for Add/Remove Programs
-
-!define APPNAME "Grafana Agent"
-# These will be displayed by the "Click here for support information" link in "Add/Remove Programs"
-!define HELPURL "https://github.com/grafana/agent/discussions" # "Support Information" link
-!define UPDATEURL "https://github.com/grafana/agent/releases" # "Product Updates" link
-!define ABOUTURL "https://github.com/grafana/agent" # "Publisher" link
-
-RequestExecutionLevel admin #Require admin rights on NT6+ (When UAC is turned on)
-
-InstallDir "$PROGRAMFILES64\${APPNAME}"
"$PROGRAMFILES64\${APPNAME}" - -# This will be in the installer/uninstaller's title bar -Name "${APPNAME} ${VERSION}" -Icon "logo.ico" -outFile "${OUT}" +# This script does the following: +# +# 1. Installs grafana-agent--windows-amd64.exe, grafana-agent-service-amd64.exe, and logo.ico. +# 2. Creates a Start Menu shortcut. +# 3. Builds an uninstaller. +# 4. Adds uninstall information to the registry for Add/Remove Programs. +# 5. Initializes the registry with appropriate settings. +Unicode true !include nsDialogs.nsh !include FileFunc.nsh +!include .\macros.nsis + +!define APPNAME "Grafana Agent" +!define HELPURL "https://grafana.com/docs/agent" +!define UPDATEURL "https://github.com/grafana/agent/releases" +!define ABOUTURL "https://github.com/grafana/agent" + +# Because we modify the registry and install a service that runs as +# LocalSystem, we require admin permissions. +RequestExecutionLevel admin -LicenseData LICENSE +Name "${APPNAME} ${VERSION}" # Shown in title bar for installer/uninstaller +Icon "logo.ico" +InstallDir "$PROGRAMFILES64\${APPNAME}" +LicenseData ..\..\..\LICENSE +OutFile "${OUT}" # Everything must be global Vars -Var EnableOptionsDialog Var PassedInParameters -Var EnableExporterCheck -Var EnableExporterValue -Var Url -Var Username -Var Password -Var ExpandEnvCheck -Var ExpandEnvValue - +Var Config +Var ConfigFlag +Var Environment +Var DisableReporting +Var DisableReportingFlag +Var DisableProfiling +Var DisableProfilingFlag + +# Pages during the installer. Page license Page directory -Page custom enableOptions enableOptionsLeave Page instfiles -# Annoyingly macros need to be defined before use -!macro VerifyUserIsAdmin -UserInfo::GetAccountType -pop $0 -${If} $0 != "admin" #Require admin rights on NT4+ - messageBox mb_iconstop "Administrator rights required!" - setErrorLevel 740 #ERROR_ELEVATION_REQUIRED - quit -${EndIf} -!macroend - +# Automatically called when installing. Function .onInit - setShellVarContext all - !insertmacro VerifyUserIsAdmin + SetShellVarContext all + !insertmacro VerifyUserIsAdmin FunctionEnd Section "install" - IfSilent ThisIsSilent RunInstaller - ThisIsSilent: - ${GetParameters} $PassedInParameters - ${GetOptions} $PassedInParameters "/EnableExporter" $EnableExporterValue - ${GetOptions} $PassedInParameters "/Url" $Url - ${GetOptions} $PassedInParameters "/Username" $Username - ${GetOptions} $PassedInParameters "/Password" $Password - ${GetOptions} $PassedInParameters "/ExpandEnv" $ExpandEnvValue - Call Install - Return - RunInstaller: - Call Install + ${GetParameters} $PassedInParameters + ${GetOptions} $PassedInParameters "/DISABLEPROFILING=" $DisableProfiling + ${GetOptions} $PassedInParameters "/DISABLEREPORTING=" $DisableReporting + ${GetOptions} $PassedInParameters "/ENVIRONMENT=" $Environment + ${GetOptions} $PassedInParameters "/CONFIG=" $Config + + # Calls to functions like nsExec::ExecToLog below push the exit code to the + # stack, and must be popped after calling. + + # Preemptively stop the existing service if it's running. + nsExec::ExecToLog 'sc stop "Grafana Agent"' + Pop $0 + + # Configure the out path and copy files to it. + SetOutPath "$INSTDIR" + File "..\..\..\dist.temp\grafana-agent-windows-amd64.exe" + File "..\..\..\dist.temp\grafana-agent-service-windows-amd64.exe" + File "logo.ico" + + # Create an uninstaller at the same pathFunctionEnd + WriteUninstaller "$INSTDIR\uninstall.exe" + + # Registry information for Add/Remote Programs. 
+  # overwrite existing registry entries since we want it to be relevant to
+  # the currently installed version.
+  !define UNINSTALLKEY "Software\Microsoft\Windows\CurrentVersion\Uninstall\${APPNAME}"
+  WriteRegStr HKLM "${UNINSTALLKEY}" "DisplayName" '${APPNAME} ${VERSION}'
+  WriteRegStr HKLM "${UNINSTALLKEY}" "UninstallString" '"$INSTDIR\uninstall.exe"'
+  WriteRegStr HKLM "${UNINSTALLKEY}" "QuietUninstallString" '"$INSTDIR\uninstall.exe" /S'
+  WriteRegStr HKLM "${UNINSTALLKEY}" "InstallLocation" '"$INSTDIR"'
+  WriteRegStr HKLM "${UNINSTALLKEY}" "DisplayIcon" '"$INSTDIR\logo.ico"'
+  WriteRegStr HKLM "${UNINSTALLKEY}" "Publisher" '"${ABOUTURL}"'
+  WriteRegStr HKLM "${UNINSTALLKEY}" "HelpLink" '"${HELPURL}"'
+  WriteRegStr HKLM "${UNINSTALLKEY}" "URLUpdateInfo" '"${UPDATEURL}"'
+  WriteRegStr HKLM "${UNINSTALLKEY}" "URLInfoAbout" '"${ABOUTURL}"'
+  WriteRegDWORD HKLM "${UNINSTALLKEY}" "NoModify" 1
+  WriteRegDWORD HKLM "${UNINSTALLKEY}" "NoRepair" 1
+
+  Call CreateConfig
+  Call InitializeRegistry
+
+  # Create the service.
+  nsExec::ExecToLog 'sc create "Grafana Agent" start= delayed-auto binpath= "$INSTDIR\grafana-agent-service-windows-amd64.exe"'
+  Pop $0
+
+  # Start the service.
+  nsExec::ExecToLog 'sc start "Grafana Agent"'
+  Pop $0
+
+  # Auto-restart agent on failure. Reset failure counter after 60 seconds without failure
+  nsExec::ExecToLog `sc failure "Grafana Agent" reset= 60 actions= restart/5000 reboot= "Grafana Agent has failed. Restarting in 5 seconds"`
+  Pop $0
 SectionEnd
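(Editor's aside, not part of the patch: because the rewritten install section reads its options from the command line, a scripted deployment can drive the installer silently with NSIS's standard /S switch plus the flags parsed above. The installer file name and option values here are illustrative; the real name comes from ${OUT} at build time, and the flags end up in the registry via the InitializeRegistry function further down:

    grafana-agent-installer.exe /S /CONFIG="C:\custom\config.river" /DISABLEREPORTING=yes /DISABLEPROFILING=yes
    # Afterwards, inspect what the service runner will actually read:
    reg query "HKLM\Software\Grafana\Grafana Agent" /reg:64 /v Arguments
)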
"Software\Microsoft\Windows\CurrentVersion\Uninstall\${APPNAME}" "InstallLocation" "$\"$INSTDIR$\"" - WriteRegStr HKLM "Software\Microsoft\Windows\CurrentVersion\Uninstall\${APPNAME}" "DisplayIcon" "$\"$INSTDIR\logo.ico$\"" - WriteRegStr HKLM "Software\Microsoft\Windows\CurrentVersion\Uninstall\${APPNAME}" "Publisher" "$\"https://github.com/grafana/agent$\"" - WriteRegStr HKLM "Software\Microsoft\Windows\CurrentVersion\Uninstall\${APPNAME}" "HelpLink" "$\"${HELPURL}$\"" - WriteRegStr HKLM "Software\Microsoft\Windows\CurrentVersion\Uninstall\${APPNAME}" "URLUpdateInfo" "$\"${UPDATEURL}$\"" - WriteRegStr HKLM "Software\Microsoft\Windows\CurrentVersion\Uninstall\${APPNAME}" "URLInfoAbout" "$\"${ABOUTURL}$\"" - # There is no option for modifying or repairing the install - WriteRegDWORD HKLM "Software\Microsoft\Windows\CurrentVersion\Uninstall\${APPNAME}" "NoModify" 1 - WriteRegDWORD HKLM "Software\Microsoft\Windows\CurrentVersion\Uninstall\${APPNAME}" "NoRepair" 1 - Call WriteConfig - - - # Create our batch file, since services cant run with parameters, nsexec is used to suppress console output, instead goes - # to NSIS log window - nsExec::ExecToLog 'sc create "Grafana Agent" binpath= "\"$INSTDIR\grafana-agent-windows-amd64.exe\""' - Pop $0 - # These separate create and config commands are needed, on the config the binpath is required - ${If} $ExpandEnvValue == "true" - nsExec::ExecToLog 'sc config "Grafana Agent" start= auto binpath= "\"$INSTDIR\grafana-agent-windows-amd64.exe\" -config.expand-env -config.file=\"$INSTDIR\agent-config.yaml\""' - ${Else} - nsExec::ExecToLog 'sc config "Grafana Agent" start= auto binpath= "\"$INSTDIR\grafana-agent-windows-amd64.exe\" -config.file=\"$INSTDIR\agent-config.yaml\""' - ${EndIf} - Pop $0 - nsExec::ExecToLog `sc start "Grafana Agent"` - Pop $0 - # Auto-restart agent on failure. Reset failure counter after 60 seconds without failure - nsExec::ExecToLog `sc failure "Grafana Agent" reset= 60 actions= restart/5000 reboot= "Grafana Agent has failed. Restarting in 5 seconds"` - Pop $0 +Function CreateConfig + IfFileExists "$INSTDIR\config.river" Noop CreateNewConfig + Noop: + Return + CreateNewConfig: + File "config.river" + Return FunctionEnd -Function WriteConfig - # If the file exists we dont want to overwrite it since they may have custom code there - IfFileExists "$INSTDIR\agent-config.yaml" ReturnEarly WriteFile - ReturnEarly: - Return - WriteFile: - # Write the config file, its easier to do this way than replacing an values in a templated file - FileOpen $9 "$INSTDIR\agent-config.yaml" w #Opens a Empty File and fills it - FileWrite $9 `server:$\n` - FileWrite $9 ` log_level: warn$\n` - FileWrite $9 `metrics:$\n` - FileWrite $9 ` wal_directory: $APPDATA\grafana-agent-wal$\n` - FileWrite $9 ` global:$\n` - FileWrite $9 ` scrape_interval: 1m$\n` - ${If} $Url != "" - FileWrite $9 ` remote_write: $\n` - FileWrite $9 ` - url: $Url $\n` - FileWrite $9 ` basic_auth: $\n` - FileWrite $9 ` username: $Username $\n` - FileWrite $9 ` password: $Password $\n` - ${EndIf} - FileWrite $9 ` configs:$\n` - FileWrite $9 ` - name: integrations$\n` - ${If} $EnableExporterValue == "true" - FileWrite $9 `integrations:$\n` - FileWrite $9 ` windows_exporter:$\n` - FileWrite $9 ` enabled: true` - ${EndIf} - FileClose $9 # and close the file - Return +# InitializeRegistry initializes the keys in the registry that the service +# runner uses. If the registry values already exist, they are not overwritten. 
+Function InitializeRegistry + !define REGKEY "HKLM\Software\Grafana\Grafana Agent" + + # Define the default key, which points to the service. + nsExec::ExecToLog 'Reg.exe query "${REGKEY}" /reg:64 /ve' + Pop $0 + ${If} $0 == 1 + nsExec::ExecToLog 'Reg.exe add "${REGKEY}" /reg:64 /ve /d "$INSTDIR\grafana-agent-windows-amd64.exe"' + Pop $0 # Ignore return result + ${EndIf} + + ${If} $Config != "" + StrCpy $ConfigFlag "$Config" + ${Else} + StrCpy $ConfigFlag "$INSTDIR\config.river" + ${EndIf} + + ${If} $DisableReporting == "yes" + StrCpy $DisableReportingFlag "--disable-reporting\0" + ${Else} + StrCpy $DisableReportingFlag "" + ${EndIf} + + ${If} $DisableProfiling == "yes" + StrCpy $DisableProfilingFlag "--server.http.enable-pprof=false\0" + ${Else} + StrCpy $DisableProfilingFlag "" + ${EndIf} + + # Define the arguments key, which holds arguments to pass to the + # service. + nsExec::ExecToLog 'Reg.exe query "${REGKEY}" /reg:64 /v Arguments' + Pop $0 + ${If} $0 == 1 + nsExec::ExecToLog 'Reg.exe add "${REGKEY}" /reg:64 /v Arguments /t REG_MULTI_SZ /d "run"\0"$ConfigFlag"\0"--storage.path=$APPDATA\${APPNAME}\data"\0"$DisableReportingFlag$DisableProfilingFlag"' + Pop $0 # Ignore return result + ${EndIf} + + nsExec::ExecToLog 'Reg.exe query "${REGKEY}" /reg:64 /v Environment' + Pop $0 + ${If} $0 == 1 + # Define the environment key, which holds environment variables to pass to the + # service. + nsExec::ExecToLog 'Reg.exe add "${REGKEY}" /reg:64 /v Environment /t REG_MULTI_SZ /d "$Environment"' + Pop $0 # Ignore return result + ${EndIf} + + Return FunctionEnd -# Uninstaller +# Automatically called when uninstalling. Function un.onInit - SetShellVarContext all - IfSilent ThisIsSilent WarnUser - ThisIsSilent: - Return - WarnUser: - #Verify the uninstaller - last chance to back out - MessageBox MB_OKCANCEL "Permanently remove ${APPNAME}? This will remove the WAL and the agent config." IDOK next - Abort - next: - !insertmacro VerifyUserIsAdmin + SetShellVarContext all + + IfSilent Noop WarnUser + Noop: + Return + WarnUser: + MessageBox MB_OKCANCEL "Permanently remove ${APPNAME}? This will remove all data for the application." IDOK Continue + Abort + Continue: + !insertmacro VerifyUserIsAdmin + Return FunctionEnd Section "uninstall" - DetailPrint "Starting Uninstaller" - # Remove Start Menu launcher - delete "$SMPROGRAMS\${APPNAME}\${APPNAME}.lnk" - # Try to remove the Start Menu folder - this will only happen if it is empty - RMDir "$SMPROGRAMS\${APPNAME}" - # This is cleanup on the service and removing the exporter. - nsExec::ExecToLog `sc stop "Grafana Agent"` - Pop $0 - nsExec::ExecToLog `sc delete "Grafana Agent" confirm` - Pop $0 - - # Remove files - delete $INSTDIR\agent-windows-amd64.exe # Old binary name, left in for upgrades - delete $INSTDIR\grafana-agent-windows-amd64.exe # New binary name - delete $INSTDIR\logo.ico - delete $INSTDIR\agent-config.yaml - - # Always delete uninstaller as the last action - delete $INSTDIR\uninstall.exe - - # Try to remove the install directory - this will only happen if it is empty - RMDir $INSTDIR - - RMDir /r $APPDATA\grafana-agent-wal - - # Remove service and uninstaller information from the registry. - nsExec::ExecToLog 'Reg.exe delete "HKLM\SOFTWARE\Microsoft\Windows\CurrentVersion\Uninstall\${APPNAME}" /reg:64 /f' - Pop $0 + DetailPrint "Starting uninstaller." + + # Stop and remove service. 
+ nsExec::ExecToLog 'sc stop "Grafana Agent"' + Pop $0 + nsExec::ExecToLog 'sc delete "Grafana Agent"' + Pop $0 + + RMDir /r "$SMPROGRAMS\${APPNAME}" # Start Menu folder. + RMDir /r "$INSTDIR" # Install directory. + RMDir /r "$APPDATA\${APPNAME}" # Application data. + + # Remove service and uninstaller information from the registry. + nsExec::ExecToLog 'Reg.exe delete "HKLM\SOFTWARE\Grafana\Grafana Agent" /reg:64 /f' + Pop $0 + nsExec::ExecToLog 'Reg.exe delete "HKLM\SOFTWARE\Microsoft\Windows\CurrentVersion\Uninstall\${APPNAME}" /reg:64 /f' + Pop $0 SectionEnd diff --git a/packaging/grafana-agent-flow/windows/macros.nsis b/packaging/grafana-agent/windows/macros.nsis similarity index 100% rename from packaging/grafana-agent-flow/windows/macros.nsis rename to packaging/grafana-agent/windows/macros.nsis diff --git a/tools/ci/docker-containers b/tools/ci/docker-containers index ee01a09051..afc77e207c 100755 --- a/tools/ci/docker-containers +++ b/tools/ci/docker-containers @@ -19,8 +19,6 @@ export DRONE_BRANCH=${DRONE_BRANCH:-} export AGENT_IMAGE=grafana/agent export AGENT_BORINGCRYPTO_IMAGE=grafana/agent-boringcrypto -export AGENTCTL_IMAGE=grafana/agentctl -export OPERATOR_IMAGE=grafana/agent-operator # We need to determine what version to assign to built binaries. If containers # are being built from a Drone tag trigger, we force the version to come from the @@ -80,32 +78,8 @@ case "$TARGET_CONTAINER" in . ;; - - - agentctl) - docker buildx build --push \ - --platform $BUILD_PLATFORMS \ - --build-arg RELEASE_BUILD=1 \ - --build-arg VERSION="$VERSION" \ - -t "$AGENTCTL_IMAGE:$VERSION" \ - -t "$AGENTCTL_IMAGE:$BRANCH_TAG" \ - -f cmd/grafana-agentctl/Dockerfile \ - . - ;; - - agent-operator) - docker buildx build --push \ - --platform $BUILD_PLATFORMS \ - --build-arg RELEASE_BUILD=1 \ - --build-arg VERSION="$VERSION" \ - -t "$OPERATOR_IMAGE:$VERSION" \ - -t "$OPERATOR_IMAGE:$BRANCH_TAG" \ - -f cmd/grafana-agent-operator/Dockerfile \ - . - ;; - *) - echo "Usage: $0 agent|agent-boringcrypto|agentctl|agent-operator" + echo "Usage: $0 agent|agent-boringcrypto" exit 1 ;; esac diff --git a/tools/generate-crds.bash b/tools/generate-crds.bash deleted file mode 100755 index 6bf999ce52..0000000000 --- a/tools/generate-crds.bash +++ /dev/null @@ -1,23 +0,0 @@ -#!/usr/bin/env bash - -ROOT=$(git rev-parse --show-toplevel) - - -# Generate objects and controllers for our CRDs -cd $ROOT/internal/static/operator/apis/monitoring/v1alpha1 -controller-gen object paths=. -controller-gen crd:crdVersions=v1,maxDescLen=0 paths=. output:crd:dir=$ROOT/operations/agent-static-operator/crds - -# Generate CRDs for prometheus-operator. -PROM_OP_DEP_NAME="github.com/prometheus-operator/prometheus-operator/pkg/apis/monitoring/v1" -PROM_OP_DIR=$(go list -f '{{.Dir}}' $PROM_OP_DEP_NAME) - -cd $PROM_OP_DIR -controller-gen crd:crdVersions=v1,maxDescLen=0 paths=. output:crd:dir=$ROOT/operations/agent-static-operator/crds - -# Remove known Prometheus-Operator CRDS we don't generate. (An allowlist would -# be better here, but rfratto's bash skills are bad.) 
-rm -f $ROOT/operations/agent-static-operator/crds/monitoring.coreos.com_alertmanagers.yaml -rm -f $ROOT/operations/agent-static-operator/crds/monitoring.coreos.com_prometheuses.yaml -rm -f $ROOT/operations/agent-static-operator/crds/monitoring.coreos.com_prometheusrules.yaml -rm -f $ROOT/operations/agent-static-operator/crds/monitoring.coreos.com_thanosrulers.yaml diff --git a/tools/make/packaging.mk b/tools/make/packaging.mk index 18c8569c4f..f7ec9d86a1 100644 --- a/tools/make/packaging.mk +++ b/tools/make/packaging.mk @@ -3,7 +3,7 @@ PARENT_MAKEFILE := $(firstword $(MAKEFILE_LIST)) .PHONY: dist clean-dist -dist: dist-agent-binaries dist-agent-flow-binaries dist-agentctl-binaries dist-agent-packages dist-agent-flow-packages dist-agent-installer dist-agent-flow-installer +dist: dist-agent-binaries dist-agent-packages dist-agent-installer clean-dist: rm -rf ./dist/* ./dist.temp/* @@ -89,14 +89,12 @@ dist/grafana-agent-windows-boringcrypto-amd64.exe: GOARCH := amd64 dist/grafana-agent-windows-boringcrypto-amd64.exe: generate-ui $(PACKAGING_VARS) AGENT_BINARY=$@ "$(MAKE)" -f $(PARENT_MAKEFILE) agent - dist/grafana-agent-freebsd-amd64: GO_TAGS += netgo builtinassets dist/grafana-agent-freebsd-amd64: GOOS := freebsd dist/grafana-agent-freebsd-amd64: GOARCH := amd64 dist/grafana-agent-freebsd-amd64: generate-ui $(PACKAGING_VARS) AGENT_BINARY=$@ "$(MAKE)" -f $(PARENT_MAKEFILE) agent - dist/grafana-agent-linux-amd64-boringcrypto: GO_TAGS += netgo builtinassets promtail_journal_enabled dist/grafana-agent-linux-amd64-boringcrypto: GOOS := linux dist/grafana-agent-linux-amd64-boringcrypto: GOARCH := amd64 @@ -111,118 +109,12 @@ dist/grafana-agent-linux-arm64-boringcrypto: GOEXPERIMENT := boringcrypto dist/grafana-agent-linux-arm64-boringcrypto: generate-ui $(PACKAGING_VARS) AGENT_BINARY=$@ "$(MAKE)" -f $(PARENT_MAKEFILE) agent -# -# agentctl release binaries. 
-# - -dist-agentctl-binaries: dist/grafana-agentctl-linux-amd64 \ - dist/grafana-agentctl-linux-arm64 \ - dist/grafana-agentctl-linux-ppc64le \ - dist/grafana-agentctl-linux-s390x \ - dist/grafana-agentctl-darwin-amd64 \ - dist/grafana-agentctl-darwin-arm64 \ - dist/grafana-agentctl-windows-amd64.exe \ - dist/grafana-agentctl-freebsd-amd64 - -dist/grafana-agentctl-linux-amd64: GO_TAGS += netgo promtail_journal_enabled -dist/grafana-agentctl-linux-amd64: GOOS := linux -dist/grafana-agentctl-linux-amd64: GOARCH := amd64 -dist/grafana-agentctl-linux-amd64: - $(PACKAGING_VARS) AGENTCTL_BINARY=$@ "$(MAKE)" -f $(PARENT_MAKEFILE) agentctl - -dist/grafana-agentctl-linux-arm64: GO_TAGS += netgo promtail_journal_enabled -dist/grafana-agentctl-linux-arm64: GOOS := linux -dist/grafana-agentctl-linux-arm64: GOARCH := arm64 -dist/grafana-agentctl-linux-arm64: - $(PACKAGING_VARS) AGENTCTL_BINARY=$@ "$(MAKE)" -f $(PARENT_MAKEFILE) agentctl - -dist/grafana-agentctl-linux-ppc64le: GO_TAGS += netgo promtail_journal_enabled -dist/grafana-agentctl-linux-ppc64le: GOOS := linux -dist/grafana-agentctl-linux-ppc64le: GOARCH := ppc64le -dist/grafana-agentctl-linux-ppc64le: - $(PACKAGING_VARS) AGENTCTL_BINARY=$@ "$(MAKE)" -f $(PARENT_MAKEFILE) agentctl - -dist/grafana-agentctl-linux-s390x: GO_TAGS += netgo promtail_journal_enabled -dist/grafana-agentctl-linux-s390x: GOOS := linux -dist/grafana-agentctl-linux-s390x: GOARCH := s390x -dist/grafana-agentctl-linux-s390x: - $(PACKAGING_VARS) AGENTCTL_BINARY=$@ "$(MAKE)" -f $(PARENT_MAKEFILE) agentctl - -dist/grafana-agentctl-darwin-amd64: GO_TAGS += netgo -dist/grafana-agentctl-darwin-amd64: GOOS := darwin -dist/grafana-agentctl-darwin-amd64: GOARCH := amd64 -dist/grafana-agentctl-darwin-amd64: - $(PACKAGING_VARS) AGENTCTL_BINARY=$@ "$(MAKE)" -f $(PARENT_MAKEFILE) agentctl - -dist/grafana-agentctl-darwin-arm64: GO_TAGS += netgo -dist/grafana-agentctl-darwin-arm64: GOOS := darwin -dist/grafana-agentctl-darwin-arm64: GOARCH := arm64 -dist/grafana-agentctl-darwin-arm64: - $(PACKAGING_VARS) AGENTCTL_BINARY=$@ "$(MAKE)" -f $(PARENT_MAKEFILE) agentctl - -dist/grafana-agentctl-windows-amd64.exe: GOOS := windows -dist/grafana-agentctl-windows-amd64.exe: GOARCH := amd64 -dist/grafana-agentctl-windows-amd64.exe: - $(PACKAGING_VARS) AGENTCTL_BINARY=$@ "$(MAKE)" -f $(PARENT_MAKEFILE) agentctl - -dist/grafana-agentctl-freebsd-amd64: GO_TAGS += netgo -dist/grafana-agentctl-freebsd-amd64: GOOS := freebsd -dist/grafana-agentctl-freebsd-amd64: GOARCH := amd64 -dist/grafana-agentctl-freebsd-amd64: - $(PACKAGING_VARS) AGENTCTL_BINARY=$@ "$(MAKE)" -f $(PARENT_MAKEFILE) agentctl - -# -# agent-flow release binaries -# -# agent-flow release binaries are intermediate build assets used for producing -# Flow-specific system packages. As such, they are built in a dist.temp -# directory instead of the normal dist directory. -# -# Only targets needed for system packages are used here. 
-# - -dist-agent-flow-binaries: dist.temp/grafana-agent-flow-linux-amd64 \ - dist.temp/grafana-agent-flow-linux-arm64 \ - dist.temp/grafana-agent-flow-linux-ppc64le \ - dist.temp/grafana-agent-flow-linux-s390x \ - dist.temp/grafana-agent-flow-windows-amd64.exe - -dist.temp/grafana-agent-flow-linux-amd64: GO_TAGS += netgo builtinassets promtail_journal_enabled -dist.temp/grafana-agent-flow-linux-amd64: GOOS := linux -dist.temp/grafana-agent-flow-linux-amd64: GOARCH := amd64 -dist.temp/grafana-agent-flow-linux-amd64: generate-ui - $(PACKAGING_VARS) FLOW_BINARY=$@ "$(MAKE)" -f $(PARENT_MAKEFILE) agent-flow - -dist.temp/grafana-agent-flow-linux-arm64: GO_TAGS += netgo builtinassets promtail_journal_enabled -dist.temp/grafana-agent-flow-linux-arm64: GOOS := linux -dist.temp/grafana-agent-flow-linux-arm64: GOARCH := arm64 -dist.temp/grafana-agent-flow-linux-arm64: generate-ui - $(PACKAGING_VARS) FLOW_BINARY=$@ "$(MAKE)" -f $(PARENT_MAKEFILE) agent-flow - -dist.temp/grafana-agent-flow-linux-ppc64le: GO_TAGS += netgo builtinassets promtail_journal_enabled -dist.temp/grafana-agent-flow-linux-ppc64le: GOOS := linux -dist.temp/grafana-agent-flow-linux-ppc64le: GOARCH := ppc64le -dist.temp/grafana-agent-flow-linux-ppc64le: generate-ui - $(PACKAGING_VARS) FLOW_BINARY=$@ "$(MAKE)" -f $(PARENT_MAKEFILE) agent-flow - -dist.temp/grafana-agent-flow-linux-s390x: GO_TAGS += netgo builtinassets promtail_journal_enabled -dist.temp/grafana-agent-flow-linux-s390x: GOOS := linux -dist.temp/grafana-agent-flow-linux-s390x: GOARCH := s390x -dist.temp/grafana-agent-flow-linux-s390x: generate-ui - $(PACKAGING_VARS) FLOW_BINARY=$@ "$(MAKE)" -f $(PARENT_MAKEFILE) agent-flow - -dist.temp/grafana-agent-flow-windows-amd64.exe: GO_TAGS += builtinassets -dist.temp/grafana-agent-flow-windows-amd64.exe: GOOS := windows -dist.temp/grafana-agent-flow-windows-amd64.exe: GOARCH := amd64 -dist.temp/grafana-agent-flow-windows-amd64.exe: generate-ui - $(PACKAGING_VARS) FLOW_BINARY=$@ "$(MAKE)" -f $(PARENT_MAKEFILE) agent-flow - # # agent-service release binaries. # # agent-service release binaries are intermediate build assets used for -# producing Flow-specific system packages. As such, they are built in a -# dist.temp directory instead of the normal dist directory. +# producing Windows system packages. As such, they are built in a dist.temp +# directory instead of the normal dist directory. # # Only targets needed for system packages are used here. # @@ -235,7 +127,6 @@ dist.temp/grafana-agent-service-windows-amd64.exe: GOARCH := amd64 dist.temp/grafana-agent-service-windows-amd64.exe: generate-ui $(PACKAGING_VARS) SERVICE_BINARY=$@ "$(MAKE)" -f $(PARENT_MAKEFILE) agent-service - # # DEB and RPM grafana-agent packages. 
#
@@ -255,15 +146,14 @@ define generate_agent_fpm =
 	-t $(1) \
 	--after-install packaging/grafana-agent/$(1)/control/postinst \
 	--before-remove packaging/grafana-agent/$(1)/control/prerm \
-	--config-files /etc/grafana-agent.yaml \
+	--config-files /etc/grafana-agent.river \
 	--config-files $(AGENT_ENVIRONMENT_FILE_$(1)) \
 	--rpm-rpmbuild-define "_build_id_links none" \
 	--package $(4) \
-	dist/grafana-agent-linux-$(3)=/usr/bin/grafana-agent \
-	dist/grafana-agentctl-linux-$(3)=/usr/bin/grafana-agentctl \
-	packaging/grafana-agent/grafana-agent.yaml=/etc/grafana-agent.yaml \
-	packaging/grafana-agent/environment-file=$(AGENT_ENVIRONMENT_FILE_$(1)) \
-	packaging/grafana-agent/$(1)/grafana-agent.service=/usr/lib/systemd/system/grafana-agent.service
+	dist.temp/grafana-agent-linux-$(3)=/usr/bin/grafana-agent \
+	packaging/grafana-agent/grafana-agent.river=/etc/grafana-agent.river \
+	packaging/grafana-agent/environment-file=$(AGENT_ENVIRONMENT_FILE_$(1)) \
+	packaging/grafana-agent/$(1)/grafana-agent.service=/usr/lib/systemd/system/grafana-agent.service
 endef
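(Editor's aside, not part of the patch: to make the template concrete, the amd64 DEB target below effectively expands the call into an fpm invocation along these lines. The version-derived values are illustrative and the argument list is abridged to the parts changed here:

    fpm -s dir -v 0.40.0 -a amd64 -n grafana-agent --iteration 1 -f -t deb \
      --after-install packaging/grafana-agent/deb/control/postinst \
      --config-files /etc/grafana-agent.river \
      --package dist/grafana-agent-0.40.0-1.amd64.deb \
      dist.temp/grafana-agent-linux-amd64=/usr/bin/grafana-agent
)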
 
 AGENT_PACKAGE_VERSION := $(patsubst v%,%,$(VERSION))
@@ -277,7 +167,7 @@ dist-agent-packages: dist-agent-packages-amd64 \
                      dist-agent-packages-s390x
 
 .PHONY: dist-agent-packages-amd64
-dist-agent-packages-amd64: dist/grafana-agent-linux-amd64 dist/grafana-agentctl-linux-amd64
+dist-agent-packages-amd64: dist.temp/grafana-agent-linux-amd64
 ifeq ($(USE_CONTAINER),1)
 	$(RERUN_IN_CONTAINER)
 else
@@ -286,7 +176,7 @@ else
 endif
 
 .PHONY: dist-agent-packages-arm64
-dist-agent-packages-arm64: dist/grafana-agent-linux-arm64 dist/grafana-agentctl-linux-arm64
+dist-agent-packages-arm64: dist.temp/grafana-agent-linux-arm64
 ifeq ($(USE_CONTAINER),1)
 	$(RERUN_IN_CONTAINER)
 else
@@ -295,7 +185,7 @@ else
 endif
 
 .PHONY: dist-agent-packages-ppc64le
-dist-agent-packages-ppc64le: dist/grafana-agent-linux-ppc64le dist/grafana-agentctl-linux-ppc64le
+dist-agent-packages-ppc64le: dist.temp/grafana-agent-linux-ppc64le
 ifeq ($(USE_CONTAINER),1)
 	$(RERUN_IN_CONTAINER)
 else
@@ -304,7 +194,7 @@ else
 endif
 
 .PHONY: dist-agent-packages-s390x
-dist-agent-packages-s390x: dist/grafana-agent-linux-s390x dist/grafana-agentctl-linux-s390x
+dist-agent-packages-s390x: dist.temp/grafana-agent-linux-s390x
 ifeq ($(USE_CONTAINER),1)
 	$(RERUN_IN_CONTAINER)
 else
@@ -312,105 +202,16 @@ else
 	$(call generate_agent_fpm,rpm,s390x,s390x,$(AGENT_PACKAGE_PREFIX).s390x.rpm)
 endif
 
-#
-# DEB and RPM grafana-agent-flow packages.
-#
-
-FLOW_ENVIRONMENT_FILE_rpm := /etc/sysconfig/grafana-agent-flow
-FLOW_ENVIRONMENT_FILE_deb := /etc/default/grafana-agent-flow
-
-# generate_flow_fpm(deb|rpm, package arch, agent arch, output file)
-define generate_flow_fpm =
-	fpm -s dir -v $(FLOW_PACKAGE_VERSION) -a $(2) \
-		-n grafana-agent-flow --iteration $(FLOW_PACKAGE_RELEASE) -f \
-		--log error \
-		--license "Apache 2.0" \
-		--vendor "Grafana Labs" \
-		--url "https://github.com/grafana/agent" \
-		--rpm-digest sha256 \
-		-t $(1) \
-		--after-install packaging/grafana-agent-flow/$(1)/control/postinst \
-		--before-remove packaging/grafana-agent-flow/$(1)/control/prerm \
-		--config-files /etc/grafana-agent-flow.river \
-		--config-files $(FLOW_ENVIRONMENT_FILE_$(1)) \
-		--rpm-rpmbuild-define "_build_id_links none" \
-		--package $(4) \
-		dist.temp/grafana-agent-flow-linux-$(3)=/usr/bin/grafana-agent-flow \
-		packaging/grafana-agent-flow/grafana-agent-flow.river=/etc/grafana-agent-flow.river \
-		packaging/grafana-agent-flow/environment-file=$(FLOW_ENVIRONMENT_FILE_$(1)) \
-		packaging/grafana-agent-flow/$(1)/grafana-agent-flow.service=/usr/lib/systemd/system/grafana-agent-flow.service
-endef
-
-FLOW_PACKAGE_VERSION := $(patsubst v%,%,$(VERSION))
-FLOW_PACKAGE_RELEASE := 1
-FLOW_PACKAGE_PREFIX := dist/grafana-agent-flow-$(AGENT_PACKAGE_VERSION)-$(AGENT_PACKAGE_RELEASE)
-
-.PHONY: dist-agent-flow-packages
-dist-agent-flow-packages: dist-agent-flow-packages-amd64 \
-                          dist-agent-flow-packages-arm64 \
-                          dist-agent-flow-packages-ppc64le \
-                          dist-agent-flow-packages-s390x
-
-.PHONY: dist-agent-flow-packages-amd64
-dist-agent-flow-packages-amd64: dist.temp/grafana-agent-flow-linux-amd64
-ifeq ($(USE_CONTAINER),1)
-	$(RERUN_IN_CONTAINER)
-else
-	$(call generate_flow_fpm,deb,amd64,amd64,$(FLOW_PACKAGE_PREFIX).amd64.deb)
-	$(call generate_flow_fpm,rpm,x86_64,amd64,$(FLOW_PACKAGE_PREFIX).amd64.rpm)
-endif
-
-.PHONY: dist-agent-flow-packages-arm64
-dist-agent-flow-packages-arm64: dist.temp/grafana-agent-flow-linux-arm64
-ifeq ($(USE_CONTAINER),1)
-	$(RERUN_IN_CONTAINER)
-else
-	$(call generate_flow_fpm,deb,arm64,arm64,$(FLOW_PACKAGE_PREFIX).arm64.deb)
-	$(call generate_flow_fpm,rpm,aarch64,arm64,$(FLOW_PACKAGE_PREFIX).arm64.rpm)
-endif
-
-.PHONY: dist-agent-flow-packages-ppc64le
-dist-agent-flow-packages-ppc64le: dist.temp/grafana-agent-flow-linux-ppc64le
-ifeq ($(USE_CONTAINER),1)
-	$(RERUN_IN_CONTAINER)
-else
-	$(call generate_flow_fpm,deb,ppc64el,ppc64le,$(FLOW_PACKAGE_PREFIX).ppc64el.deb)
-	$(call generate_flow_fpm,rpm,ppc64le,ppc64le,$(FLOW_PACKAGE_PREFIX).ppc64le.rpm)
-endif
-
-.PHONY: dist-agent-flow-packages-s390x
-dist-agent-flow-packages-s390x: dist.temp/grafana-agent-flow-linux-s390x
-ifeq ($(USE_CONTAINER),1)
-	$(RERUN_IN_CONTAINER)
-else
-	$(call generate_flow_fpm,deb,s390x,s390x,$(FLOW_PACKAGE_PREFIX).s390x.deb)
-	$(call generate_flow_fpm,rpm,s390x,s390x,$(FLOW_PACKAGE_PREFIX).s390x.rpm)
-endif
-
 #
 # Windows installer
 #
-# TODO(rfratto): update the install_script.nsis so we don't need to copy assets
-# over into the packaging/windows folder.
 .PHONY: dist-agent-installer
-dist-agent-installer: dist/grafana-agent-windows-amd64.exe
+dist-agent-installer: dist.temp/grafana-agent-windows-amd64.exe dist.temp/grafana-agent-service-windows-amd64.exe
ifeq ($(USE_CONTAINER),1)
 	$(RERUN_IN_CONTAINER)
 else
-	cp ./dist/grafana-agent-windows-amd64.exe ./packaging/grafana-agent/windows
-	cp LICENSE ./packaging/grafana-agent/windows
 	# quotes around mkdir are mandatory. ref: https://github.com/grafana/agent/pull/5664#discussion_r1378796371
 	"mkdir" -p dist
 	makensis -V4 -DVERSION=$(VERSION) -DOUT="../../../dist/grafana-agent-installer.exe" ./packaging/grafana-agent/windows/install_script.nsis
 endif
-
-.PHONY: dist-agent-flow-installer
-dist-agent-flow-installer: dist.temp/grafana-agent-flow-windows-amd64.exe dist.temp/grafana-agent-service-windows-amd64.exe
-ifeq ($(USE_CONTAINER),1)
-	$(RERUN_IN_CONTAINER)
-else
-	# quotes around mkdir are manadory. ref: https://github.com/grafana/agent/pull/5664#discussion_r1378796371
-	"mkdir" -p dist
-	makensis -V4 -DVERSION=$(VERSION) -DOUT="../../../dist/grafana-agent-flow-installer.exe" ./packaging/grafana-agent-flow/windows/install_script.nsis
-endif
diff --git a/tools/release b/tools/release
index 1785ffc5bb..dbbf1738dc 100755
--- a/tools/release
+++ b/tools/release
@@ -11,7 +11,6 @@ find dist/ -type f \
 # Sign the RPM packages. DEB packages aren't signed.
 ./packaging/grafana-agent/rpm/gpg-sign.sh
-./packaging/grafana-agent-flow/rpm/gpg-sign.sh
 
 # Get the SHA256SUMS before continuing.
 pushd dist && sha256sum -- * > SHA256SUMS && popd || exit

From cffb19d40465128ff38cc3ec6ea9d5a1f566cdd8 Mon Sep 17 00:00:00 2001
From: Clayton Cornell <131809008+clayton-cornell@users.noreply.github.com>
Date: Fri, 1 Mar 2024 05:11:19 -0800
Subject: [PATCH 005/136] Update product name and doc structure (#9)

* Delete operator doc tree
* Delete static doc tree
* Move concepts up one level
* Move get-started up one level
* Move reference up one level
* Move tasks up one level
* Move tutorials up one level
* Move release notes up one level
* Merge index files
* Delete flow doc root
* Draft fix to index topic generation
* Tweak makefile and variable file for make docs
* Add temp product variables
* Fix up links in root level topics
* Update tutorials with new prod name and structure
* Update reference paths in tasks
* Update clustering topic
* Make root topic alias match child topic aliases
* Update links in cli topics
* Update metadata in compatibility topic
* Update links in config blocks
* Update links in stdlib
* Update links in get started run
* Update get started install topics
* Update links and remove old aliases
* Update concepts links
* Update some of the shared content links
* Update some of the canonical URLs
* Clean up shared linking
* Clean up links and metadata in shared topics
* Update links in discovery components
* Update links and metadata
* Clean up last relrefs
* Fix a typo
* Update the Makefile
* Fix links in dev docs
* Revert some changes to fix conflicts
* Revert changes in Makefile to fix conflicts
* Regenerate index topic
* Remove old files for operator and static
* Update build tests
---
 .../writing-flow-component-documentation.md   |   4 +-
 docs/sources/_index.md                        | 108 +-
 docs/sources/_index.md.t                      | 108 +-
 docs/sources/about.md                         | 158 ++-
 docs/sources/concepts/_index.md               |  12 +
 docs/sources/concepts/clustering.md           |  69 +
 .../concepts/component_controller.md          |  20 +-
 .../sources/{flow => }/concepts/components.md |   8 +-
 .../concepts/config-language/_index.md        |  25 +-
 .../concepts/config-language/components.md    |  25 +-
 .../config-language/expressions/_index.md     |  21 +
 .../expressions/function_calls.md             |  28 +
 .../config-language/expressions/operators.md  |  14 +-
 .../expressions/referencing_exports.md        |  22 +-
 .../expressions/types_and_values.md           |  23 +-
 .../sources/concepts/config-language/files.md |  14 +
 .../concepts/config-language/syntax.md        |  33 +-
 rename docs/sources/{flow => 
}/concepts/custom_components.md | 25 +- docs/sources/{flow => }/concepts/modules.md | 47 +- docs/sources/data-collection.md | 46 +- docs/sources/flow/_index.md | 98 -- docs/sources/flow/concepts/_index.md | 18 - docs/sources/flow/concepts/clustering.md | 83 -- .../config-language/expressions/_index.md | 38 - .../expressions/function_calls.md | 43 - .../flow/concepts/config-language/files.md | 26 - docs/sources/flow/get-started/_index.md | 25 - .../flow/get-started/install/_index.md | 46 - .../flow/get-started/install/kubernetes.md | 75 -- .../sources/flow/get-started/install/macos.md | 85 -- docs/sources/flow/get-started/run/_index.md | 31 - docs/sources/flow/get-started/run/macos.md | 69 - docs/sources/flow/get-started/run/windows.md | 54 - docs/sources/flow/reference/_index.md | 18 - docs/sources/flow/reference/cli/_index.md | 34 - docs/sources/flow/reference/cli/convert.md | 116 -- docs/sources/flow/reference/cli/fmt.md | 45 - .../flow/reference/components/_index.md | 19 - .../reference/components/faro.receiver.md | 286 ----- .../flow/reference/components/local.file.md | 84 -- .../loki.source.azure_event_hubs.md | 151 --- .../components/otelcol.exporter.otlphttp.md | 171 --- ...telcol.extension.jaeger_remote_sampling.md | 309 ----- .../flow/reference/config-blocks/_index.md | 21 - docs/sources/flow/reference/stdlib/_index.md | 23 - .../sources/flow/reference/stdlib/coalesce.md | 28 - docs/sources/flow/reference/stdlib/concat.md | 33 - .../flow/reference/stdlib/constants.md | 34 - docs/sources/flow/reference/stdlib/env.md | 26 - docs/sources/flow/reference/stdlib/join.md | 30 - .../flow/reference/stdlib/json_decode.md | 48 - .../flow/reference/stdlib/json_path.md | 47 - .../flow/reference/stdlib/nonsensitive.md | 34 - docs/sources/flow/reference/stdlib/replace.md | 26 - docs/sources/flow/reference/stdlib/split.md | 32 - .../sources/flow/reference/stdlib/to_lower.md | 22 - .../sources/flow/reference/stdlib/to_upper.md | 22 - docs/sources/flow/reference/stdlib/trim.md | 32 - .../flow/reference/stdlib/trim_prefix.md | 22 - .../flow/reference/stdlib/trim_space.md | 22 - .../flow/reference/stdlib/trim_suffix.md | 22 - docs/sources/flow/release-notes.md | 634 --------- docs/sources/flow/tasks/_index.md | 25 - .../flow/tasks/configure-agent-clustering.md | 74 -- docs/sources/flow/tasks/configure/_index.md | 36 - .../distribute-prometheus-scrape-load.md | 68 - .../flow/tasks/estimate-resource-usage.md | 83 -- docs/sources/flow/tasks/migrate/_index.md | 19 - docs/sources/flow/tasks/monitor/_index.md | 24 - .../flow/tasks/monitor/component_metrics.md | 46 - .../flow/tasks/monitor/controller_metrics.md | 44 - docs/sources/flow/tutorials/_index.md | 17 - .../flow/tutorials/flow-by-example/_index.md | 17 - .../tutorials/flow-by-example/get-started.md | 89 -- docs/sources/get-started/_index.md | 13 + .../deploy-alloy.md} | 33 +- docs/sources/get-started/install/_index.md | 31 + .../{flow => }/get-started/install/ansible.md | 20 +- .../{flow => }/get-started/install/binary.md | 23 +- .../{flow => }/get-started/install/chef.md | 21 +- .../{flow => }/get-started/install/docker.md | 27 +- .../sources/get-started/install/kubernetes.md | 53 + .../{flow => }/get-started/install/linux.md | 26 +- docs/sources/get-started/install/macos.md | 70 + .../{flow => }/get-started/install/puppet.md | 21 +- .../{flow => }/get-started/install/windows.md | 36 +- docs/sources/get-started/run/_index.md | 16 + .../{flow => }/get-started/run/binary.md | 19 +- .../{flow => }/get-started/run/linux.md | 18 +- 
docs/sources/get-started/run/macos.md | 57 + docs/sources/get-started/run/windows.md | 45 + docs/sources/operator/_index.md | 72 -- .../operator/add-custom-scrape-jobs.md | 130 -- docs/sources/operator/api.md | 566 -------- docs/sources/operator/architecture.md | 206 --- .../deploy-agent-operator-resources.md | 435 ------- docs/sources/operator/getting-started.md | 156 --- docs/sources/operator/helm-getting-started.md | 71 - docs/sources/operator/hierarchy.dot | 71 - .../sources/operator/operator-integrations.md | 107 -- docs/sources/operator/release-notes.md | 148 --- docs/sources/reference/_index.md | 15 + docs/sources/reference/cli/_index.md | 29 + docs/sources/reference/cli/convert.md | 107 ++ docs/sources/reference/cli/fmt.md | 37 + docs/sources/{flow => }/reference/cli/run.md | 152 +-- .../sources/{flow => }/reference/cli/tools.md | 34 +- .../reference/compatibility/_index.md | 9 +- docs/sources/reference/components/_index.md | 14 + .../reference/components/discovery.azure.md | 57 +- .../reference/components/discovery.consul.md | 79 +- .../components/discovery.consulagent.md | 7 +- .../components/discovery.digitalocean.md | 35 +- .../reference/components/discovery.dns.md | 25 +- .../reference/components/discovery.docker.md | 112 +- .../components/discovery.dockerswarm.md | 56 +- .../reference/components/discovery.ec2.md | 80 +- .../reference/components/discovery.eureka.md | 62 +- .../reference/components/discovery.file.md | 42 +- .../reference/components/discovery.gce.md | 51 +- .../reference/components/discovery.hetzner.md | 68 +- .../reference/components/discovery.http.md | 83 +- .../reference/components/discovery.ionos.md | 48 +- .../reference/components/discovery.kubelet.md | 121 +- .../components/discovery.kubernetes.md | 335 ++--- .../reference/components/discovery.kuma.md | 74 +- .../components/discovery.lightsail.md | 76 +- .../reference/components/discovery.linode.md | 74 +- .../components/discovery.marathon.md | 55 +- .../reference/components/discovery.nerve.md | 19 +- .../reference/components/discovery.nomad.md | 79 +- .../components/discovery.openstack.md | 64 +- .../components/discovery.ovhcloud.md | 22 +- .../reference/components/discovery.process.md | 35 +- .../components/discovery.puppetdb.md | 47 +- .../reference/components/discovery.relabel.md | 70 +- .../components/discovery.scaleway.md | 80 +- .../components/discovery.serverset.md | 27 +- .../reference/components/discovery.triton.md | 24 +- .../reference/components/discovery.uyuni.md | 58 +- .../reference/components/faro.receiver.md | 258 ++++ .../reference/components/local.file.md | 72 ++ .../reference/components/local.file_match.md | 20 +- .../reference/components/loki.echo.md | 22 +- .../reference/components/loki.process.md | 534 ++++---- .../reference/components/loki.relabel.md | 37 +- .../components/loki.rules.kubernetes.md | 54 +- .../reference/components/loki.source.api.md | 18 +- .../components/loki.source.awsfirehose.md | 44 +- .../loki.source.azure_event_hubs.md | 149 +++ .../components/loki.source.cloudflare.md | 34 +- .../components/loki.source.docker.md | 79 +- .../reference/components/loki.source.file.md | 29 +- .../components/loki.source.gcplog.md | 34 +- .../reference/components/loki.source.gelf.md | 19 +- .../components/loki.source.heroku.md | 14 +- .../components/loki.source.journal.md | 44 +- .../reference/components/loki.source.kafka.md | 20 +- .../components/loki.source.kubernetes.md | 101 +- .../loki.source.kubernetes_events.md | 107 +- .../components/loki.source.podlogs.md | 179 ++- 
.../components/loki.source.syslog.md | 58 +- .../components/loki.source.windowsevent.md | 30 +- .../reference/components/loki.write.md | 120 +- .../components/mimir.rules.kubernetes.md | 51 +- .../reference/components/module.file.md | 44 +- .../reference/components/module.git.md | 53 +- .../reference/components/module.http.md | 47 +- .../reference/components/module.string.md | 29 +- .../components/otelcol.auth.basic.md | 21 +- .../components/otelcol.auth.bearer.md | 36 +- .../components/otelcol.auth.headers.md | 37 +- .../components/otelcol.auth.oauth2.md | 40 +- .../components/otelcol.auth.sigv4.md | 63 +- .../components/otelcol.connector.host_info.md | 9 +- .../otelcol.connector.servicegraph.md | 90 +- .../components/otelcol.connector.spanlogs.md | 34 +- .../otelcol.connector.spanmetrics.md | 16 +- .../otelcol.exporter.loadbalancing.md | 145 +-- .../components/otelcol.exporter.logging.md | 36 +- .../components/otelcol.exporter.loki.md | 54 +- .../components/otelcol.exporter.otlp.md | 93 +- .../components/otelcol.exporter.otlphttp.md | 166 +++ .../components/otelcol.exporter.prometheus.md | 63 +- ...telcol.extension.jaeger_remote_sampling.md | 301 +++++ .../otelcol.processor.attributes.md | 118 +- .../components/otelcol.processor.batch.md | 44 +- .../components/otelcol.processor.discovery.md | 40 +- .../components/otelcol.processor.filter.md | 38 +- .../otelcol.processor.k8sattributes.md | 90 +- .../otelcol.processor.memory_limiter.md | 36 +- ...otelcol.processor.probabilistic_sampler.md | 44 +- .../otelcol.processor.resourcedetection.md | 46 +- .../components/otelcol.processor.span.md | 123 +- .../otelcol.processor.tail_sampling.md | 116 +- .../components/otelcol.processor.transform.md | 36 +- .../components/otelcol.receiver.jaeger.md | 13 +- .../components/otelcol.receiver.kafka.md | 13 +- .../components/otelcol.receiver.loki.md | 11 +- .../components/otelcol.receiver.opencensus.md | 13 +- .../components/otelcol.receiver.otlp.md | 11 +- .../components/otelcol.receiver.prometheus.md | 11 +- .../components/otelcol.receiver.vcenter.md | 14 +- .../components/otelcol.receiver.zipkin.md | 13 +- .../components/prometheus.exporter.apache.md | 11 +- .../components/prometheus.exporter.azure.md | 11 +- .../prometheus.exporter.blackbox.md | 11 +- .../prometheus.exporter.cadvisor.md | 11 +- .../prometheus.exporter.cloudwatch.md | 9 +- .../components/prometheus.exporter.consul.md | 11 +- .../components/prometheus.exporter.dnsmasq.md | 11 +- .../prometheus.exporter.elasticsearch.md | 13 +- .../components/prometheus.exporter.gcp.md | 9 +- .../components/prometheus.exporter.github.md | 11 +- .../components/prometheus.exporter.kafka.md | 11 +- .../prometheus.exporter.memcached.md | 13 +- .../components/prometheus.exporter.mongodb.md | 11 +- .../components/prometheus.exporter.mssql.md | 11 +- .../components/prometheus.exporter.mysql.md | 11 +- .../prometheus.exporter.oracledb.md | 11 +- .../prometheus.exporter.postgres.md | 11 +- .../components/prometheus.exporter.process.md | 11 +- .../components/prometheus.exporter.redis.md | 11 +- .../components/prometheus.exporter.self.md | 11 +- .../components/prometheus.exporter.snmp.md | 11 +- .../prometheus.exporter.snowflake.md | 11 +- .../components/prometheus.exporter.squid.md | 11 +- .../components/prometheus.exporter.statsd.md | 11 +- .../components/prometheus.exporter.unix.md | 11 +- .../components/prometheus.exporter.vsphere.md | 18 +- .../components/prometheus.exporter.windows.md | 11 +- .../prometheus.operator.podmonitors.md | 25 +- 
.../components/prometheus.operator.probes.md | 25 +- .../prometheus.operator.servicemonitors.md | 25 +- .../components/prometheus.receive_http.md | 17 +- .../components/prometheus.relabel.md | 9 +- .../components/prometheus.remote_write.md | 141 +- .../reference/components/prometheus.scrape.md | 99 +- .../reference/components/pyroscope.ebpf.md | 9 +- .../reference/components/pyroscope.java.md | 9 +- .../reference/components/pyroscope.scrape.md | 23 +- .../reference/components/pyroscope.write.md | 19 +- .../reference/components/remote.http.md | 53 +- .../components/remote.kubernetes.configmap.md | 15 +- .../components/remote.kubernetes.secret.md | 17 +- .../reference/components/remote.s3.md | 9 +- .../reference/components/remote.vault.md | 54 +- .../sources/reference/config-blocks/_index.md | 17 + .../reference/config-blocks/argument.md | 17 +- .../reference/config-blocks/declare.md | 22 +- .../reference/config-blocks/export.md | 20 +- .../reference/config-blocks/http.md | 27 +- .../reference/config-blocks/import.file.md | 16 +- .../reference/config-blocks/import.git.md | 29 +- .../reference/config-blocks/import.http.md | 9 +- .../reference/config-blocks/import.string.md | 14 +- .../reference/config-blocks/logging.md | 14 +- .../reference/config-blocks/remotecfg.md | 24 +- .../reference/config-blocks/tracing.md | 72 +- docs/sources/reference/stdlib/_index.md | 19 + docs/sources/reference/stdlib/coalesce.md | 24 + docs/sources/reference/stdlib/concat.md | 29 + docs/sources/reference/stdlib/constants.md | 28 + docs/sources/reference/stdlib/env.md | 22 + .../{flow => }/reference/stdlib/format.md | 32 +- docs/sources/reference/stdlib/join.md | 26 + docs/sources/reference/stdlib/json_decode.md | 41 + docs/sources/reference/stdlib/json_path.md | 43 + docs/sources/reference/stdlib/nonsensitive.md | 30 + docs/sources/reference/stdlib/replace.md | 22 + docs/sources/reference/stdlib/split.md | 28 + docs/sources/reference/stdlib/to_lower.md | 18 + docs/sources/reference/stdlib/to_upper.md | 18 + docs/sources/reference/stdlib/trim.md | 28 + docs/sources/reference/stdlib/trim_prefix.md | 19 + docs/sources/reference/stdlib/trim_space.md | 18 + docs/sources/reference/stdlib/trim_suffix.md | 18 + docs/sources/release-notes.md | 15 + docs/sources/shared/deploy-agent.md | 126 -- docs/sources/shared/deploy-alloy.md | 123 ++ .../reference/components/azuread-block.md | 20 - .../components/exporter-component-exports.md | 24 - .../http-client-proxy-config-description.md | 23 - .../components/otelcol-compression-field.md | 22 - docs/sources/shared/flow/stability/beta.md | 17 - .../shared/flow/stability/experimental.md | 17 - docs/sources/shared/index.md | 5 +- .../components/authorization-block.md | 8 +- .../reference/components/azuread-block.md | 14 + .../reference/components/basic-auth-block.md | 8 +- .../components/exporter-component-exports.md | 18 + .../components/extract-field-block.md | 8 +- .../components/field-filter-block.md | 9 +- .../components/http-client-config-block.md | 10 +- .../http-client-proxy-config-description.md | 14 + .../components/local-file-arguments-text.md | 8 +- .../reference/components/loki-server-grpc.md | 8 +- .../reference/components/loki-server-http.md | 8 +- .../components/managed_identity-block.md | 8 +- .../components/match-properties-block.md | 8 +- .../reference/components/oauth2-block.md | 10 +- .../components/otelcol-compression-field.md | 16 + .../components/otelcol-debug-metrics-block.md | 8 +- .../otelcol-filter-attribute-block.md | 6 - 
.../otelcol-filter-library-block.md | 6 - .../otelcol-filter-log-severity-block.md | 6 - .../components/otelcol-filter-regexp-block.md | 6 - .../otelcol-filter-resource-block.md | 6 - .../components/otelcol-grpc-authority.md | 6 - .../components/otelcol-grpc-balancer-name.md | 6 - .../components/otelcol-queue-block.md | 8 +- .../components/otelcol-retry-block.md | 8 +- .../components/otelcol-tls-config-block.md | 8 +- .../reference/components/output-block-logs.md | 8 +- .../components/output-block-metrics.md | 6 - .../components/output-block-traces.md | 8 +- .../reference/components/output-block.md | 8 +- .../components/prom-operator-scrape.md | 6 +- .../reference/components/rule-block-logs.md | 8 +- .../reference/components/rule-block.md | 8 +- .../reference/components/sigv4-block.md | 8 +- .../reference/components/tls-config-block.md | 8 +- .../components/write_relabel_config.md | 14 +- docs/sources/shared/stability/beta.md | 11 + docs/sources/shared/stability/experimental.md | 11 + docs/sources/shared/wal-data-retention.md | 116 -- docs/sources/stability.md | 30 +- docs/sources/static/_index.md | 97 -- docs/sources/static/api/_index.md | 539 -------- docs/sources/static/configuration/_index.md | 159 --- .../static/configuration/agent-management.md | 171 --- .../configuration/create-config-file.md | 192 --- docs/sources/static/configuration/flags.md | 158 --- .../configuration/integrations/_index.md | 153 --- .../integrations/apache-exporter-config.md | 73 -- .../integrations/azure-exporter-config.md | 299 ----- .../integrations/blackbox-config.md | 120 -- .../integrations/cadvisor-config.md | 121 -- .../cloudwatch-exporter-config.md | 468 ------- .../integrations/consul-exporter-config.md | 107 -- .../integrations/dnsmasq-exporter-config.md | 87 -- .../elasticsearch-exporter-config.md | 127 -- .../integrations/gcp-exporter-config.md | 189 --- .../integrations/github-exporter-config.md | 89 -- .../integrations/integrations-next/_index.md | 217 ---- .../app-agent-receiver-config.md | 124 -- .../integrations-next/blackbox-config.md | 108 -- .../integrations-next/eventhandler-config.md | 251 ---- .../integrations-next/snmp-config.md | 178 --- .../integrations-next/vsphere-config.md | 86 -- .../integrations/kafka-exporter-config.md | 126 -- .../integrations/memcached-exporter-config.md | 110 -- .../integrations/mongodb_exporter-config.md | 92 -- .../integrations/mssql-config.md | 333 ----- .../integrations/mysqld-exporter-config.md | 173 --- .../integrations/node-exporter-config.md | 435 ------- .../integrations/oracledb-config.md | 90 -- .../integrations/postgres-exporter-config.md | 110 -- .../integrations/process-exporter-config.md | 186 --- .../integrations/redis-exporter-config.md | 170 --- .../configuration/integrations/snmp-config.md | 193 --- .../integrations/snowflake-config.md | 88 -- .../integrations/squid-config.md | 86 -- .../integrations/statsd-exporter-config.md | 128 -- .../integrations/windows-exporter-config.md | 175 --- .../static/configuration/logs-config.md | 136 -- .../static/configuration/metrics-config.md | 352 ----- .../static/configuration/scraping-service.md | 193 --- .../static/configuration/server-config.md | 113 -- .../static/configuration/traces-config.md | 482 ------- docs/sources/static/operation-guide/_index.md | 205 --- docs/sources/static/release-notes.md | 1140 ----------------- docs/sources/static/set-up/_index.md | 17 - docs/sources/static/set-up/deploy-agent.md | 393 ------ docs/sources/static/set-up/install/_index.md | 47 - 
.../set-up/install/install-agent-binary.md | 66 - .../set-up/install/install-agent-docker.md | 78 -- .../install/install-agent-kubernetes.md | 63 - .../set-up/install/install-agent-linux.md | 225 ---- .../set-up/install/install-agent-macos.md | 93 -- .../install/install-agent-on-windows.md | 177 --- docs/sources/static/set-up/quick-starts.md | 29 - docs/sources/static/set-up/start-agent.md | 162 --- docs/sources/tasks/_index.md | 13 + .../tasks/collect-opentelemetry-data.md | 37 +- .../tasks/collect-prometheus-metrics.md | 31 +- .../tasks/configure-agent-clustering.md | 60 + docs/sources/tasks/configure/_index.md | 22 + .../tasks/configure/configure-kubernetes.md | 30 +- .../tasks/configure/configure-linux.md | 42 +- .../tasks/configure/configure-macos.md | 34 +- .../tasks/configure/configure-windows.md | 23 +- docs/sources/{flow => }/tasks/debug.md | 52 +- .../distribute-prometheus-scrape-load.md | 51 + docs/sources/tasks/estimate-resource-usage.md | 58 + docs/sources/tasks/migrate/_index.md | 13 + .../{flow => }/tasks/migrate/from-operator.md | 65 +- .../tasks/migrate/from-prometheus.md | 59 +- .../{flow => }/tasks/migrate/from-promtail.md | 54 +- .../{flow => }/tasks/migrate/from-static.md | 131 +- docs/sources/tasks/monitor/_index.md | 13 + .../tasks/monitor/component_metrics.md | 28 + .../tasks/monitor/controller_metrics.md | 27 + .../tasks/opentelemetry-to-lgtm-stack.md | 57 +- docs/sources/tutorials/_index.md | 12 + .../tutorials/assets/docker-compose.yaml | 0 .../tutorials/assets/flow_configs/agent.river | 0 .../assets/flow_configs/example.river | 0 .../assets/flow_configs/multiple-inputs.river | 0 .../assets/flow_configs/relabel.river | 0 .../{flow => }/tutorials/assets/generate.sh | 0 .../assets/grafana/config/grafana.ini | 0 .../dashboards-provisioning/dashboards.yaml | 0 .../assets/grafana/dashboards/agent.json | 0 .../grafana/dashboards/template.jsonnet | 0 .../assets/grafana/datasources/datasource.yml | 0 .../tutorials/assets/mimir/mimir.yaml | 0 .../{flow => }/tutorials/assets/runt.sh | 0 docs/sources/{flow => }/tutorials/chaining.md | 29 +- .../collecting-prometheus-metrics.md | 45 +- .../{flow => }/tutorials/filtering-metrics.md | 23 +- .../tutorials/flow-by-example/_index.md | 12 + .../first-components-and-stdlib/index.md | 108 +- .../tutorials/flow-by-example/get-started.md | 97 ++ .../logs-and-relabeling-basics/index.md | 82 +- .../flow-by-example/processing-logs/index.md | 50 +- docs/variables.mk | 2 +- .../tools/docs_generator/docs_updated_test.go | 2 +- .../tools/docs_generator/links_to_types.go | 2 +- 429 files changed, 6289 insertions(+), 21843 deletions(-) create mode 100644 docs/sources/concepts/_index.md create mode 100644 docs/sources/concepts/clustering.md rename docs/sources/{flow => }/concepts/component_controller.md (81%) rename docs/sources/{flow => }/concepts/components.md (91%) rename docs/sources/{flow => }/concepts/config-language/_index.md (77%) rename docs/sources/{flow => }/concepts/config-language/components.md (67%) create mode 100644 docs/sources/concepts/config-language/expressions/_index.md create mode 100644 docs/sources/concepts/config-language/expressions/function_calls.md rename docs/sources/{flow => }/concepts/config-language/expressions/operators.md (77%) rename docs/sources/{flow => }/concepts/config-language/expressions/referencing_exports.md (51%) rename docs/sources/{flow => }/concepts/config-language/expressions/types_and_values.md (82%) create mode 100644 docs/sources/concepts/config-language/files.md rename docs/sources/{flow => 
}/concepts/config-language/syntax.md (61%) rename docs/sources/{flow => }/concepts/custom_components.md (57%) rename docs/sources/{flow => }/concepts/modules.md (80%) delete mode 100644 docs/sources/flow/_index.md delete mode 100644 docs/sources/flow/concepts/_index.md delete mode 100644 docs/sources/flow/concepts/clustering.md delete mode 100644 docs/sources/flow/concepts/config-language/expressions/_index.md delete mode 100644 docs/sources/flow/concepts/config-language/expressions/function_calls.md delete mode 100644 docs/sources/flow/concepts/config-language/files.md delete mode 100644 docs/sources/flow/get-started/_index.md delete mode 100644 docs/sources/flow/get-started/install/_index.md delete mode 100644 docs/sources/flow/get-started/install/kubernetes.md delete mode 100644 docs/sources/flow/get-started/install/macos.md delete mode 100644 docs/sources/flow/get-started/run/_index.md delete mode 100644 docs/sources/flow/get-started/run/macos.md delete mode 100644 docs/sources/flow/get-started/run/windows.md delete mode 100644 docs/sources/flow/reference/_index.md delete mode 100644 docs/sources/flow/reference/cli/_index.md delete mode 100644 docs/sources/flow/reference/cli/convert.md delete mode 100644 docs/sources/flow/reference/cli/fmt.md delete mode 100644 docs/sources/flow/reference/components/_index.md delete mode 100644 docs/sources/flow/reference/components/faro.receiver.md delete mode 100644 docs/sources/flow/reference/components/local.file.md delete mode 100644 docs/sources/flow/reference/components/loki.source.azure_event_hubs.md delete mode 100644 docs/sources/flow/reference/components/otelcol.exporter.otlphttp.md delete mode 100644 docs/sources/flow/reference/components/otelcol.extension.jaeger_remote_sampling.md delete mode 100644 docs/sources/flow/reference/config-blocks/_index.md delete mode 100644 docs/sources/flow/reference/stdlib/_index.md delete mode 100644 docs/sources/flow/reference/stdlib/coalesce.md delete mode 100644 docs/sources/flow/reference/stdlib/concat.md delete mode 100644 docs/sources/flow/reference/stdlib/constants.md delete mode 100644 docs/sources/flow/reference/stdlib/env.md delete mode 100644 docs/sources/flow/reference/stdlib/join.md delete mode 100644 docs/sources/flow/reference/stdlib/json_decode.md delete mode 100644 docs/sources/flow/reference/stdlib/json_path.md delete mode 100644 docs/sources/flow/reference/stdlib/nonsensitive.md delete mode 100644 docs/sources/flow/reference/stdlib/replace.md delete mode 100644 docs/sources/flow/reference/stdlib/split.md delete mode 100644 docs/sources/flow/reference/stdlib/to_lower.md delete mode 100644 docs/sources/flow/reference/stdlib/to_upper.md delete mode 100644 docs/sources/flow/reference/stdlib/trim.md delete mode 100644 docs/sources/flow/reference/stdlib/trim_prefix.md delete mode 100644 docs/sources/flow/reference/stdlib/trim_space.md delete mode 100644 docs/sources/flow/reference/stdlib/trim_suffix.md delete mode 100644 docs/sources/flow/release-notes.md delete mode 100644 docs/sources/flow/tasks/_index.md delete mode 100644 docs/sources/flow/tasks/configure-agent-clustering.md delete mode 100644 docs/sources/flow/tasks/configure/_index.md delete mode 100644 docs/sources/flow/tasks/distribute-prometheus-scrape-load.md delete mode 100644 docs/sources/flow/tasks/estimate-resource-usage.md delete mode 100644 docs/sources/flow/tasks/migrate/_index.md delete mode 100644 docs/sources/flow/tasks/monitor/_index.md delete mode 100644 docs/sources/flow/tasks/monitor/component_metrics.md delete mode 
100644 docs/sources/flow/tasks/monitor/controller_metrics.md delete mode 100644 docs/sources/flow/tutorials/_index.md delete mode 100644 docs/sources/flow/tutorials/flow-by-example/_index.md delete mode 100644 docs/sources/flow/tutorials/flow-by-example/get-started.md create mode 100644 docs/sources/get-started/_index.md rename docs/sources/{flow/get-started/deploy-agent.md => get-started/deploy-alloy.md} (66%) create mode 100644 docs/sources/get-started/install/_index.md rename docs/sources/{flow => }/get-started/install/ansible.md (70%) rename docs/sources/{flow => }/get-started/install/binary.md (50%) rename docs/sources/{flow => }/get-started/install/chef.md (76%) rename docs/sources/{flow => }/get-started/install/docker.md (66%) create mode 100644 docs/sources/get-started/install/kubernetes.md rename docs/sources/{flow => }/get-started/install/linux.md (65%) create mode 100644 docs/sources/get-started/install/macos.md rename docs/sources/{flow => }/get-started/install/puppet.md (72%) rename docs/sources/{flow => }/get-started/install/windows.md (57%) create mode 100644 docs/sources/get-started/run/_index.md rename docs/sources/{flow => }/get-started/run/binary.md (78%) rename docs/sources/{flow => }/get-started/run/linux.md (63%) create mode 100644 docs/sources/get-started/run/macos.md create mode 100644 docs/sources/get-started/run/windows.md delete mode 100644 docs/sources/operator/_index.md delete mode 100644 docs/sources/operator/add-custom-scrape-jobs.md delete mode 100644 docs/sources/operator/api.md delete mode 100644 docs/sources/operator/architecture.md delete mode 100644 docs/sources/operator/deploy-agent-operator-resources.md delete mode 100644 docs/sources/operator/getting-started.md delete mode 100644 docs/sources/operator/helm-getting-started.md delete mode 100644 docs/sources/operator/hierarchy.dot delete mode 100644 docs/sources/operator/operator-integrations.md delete mode 100644 docs/sources/operator/release-notes.md create mode 100644 docs/sources/reference/_index.md create mode 100644 docs/sources/reference/cli/_index.md create mode 100644 docs/sources/reference/cli/convert.md create mode 100644 docs/sources/reference/cli/fmt.md rename docs/sources/{flow => }/reference/cli/run.md (56%) rename docs/sources/{flow => }/reference/cli/tools.md (69%) rename docs/sources/{flow => }/reference/compatibility/_index.md (97%) create mode 100644 docs/sources/reference/components/_index.md rename docs/sources/{flow => }/reference/components/discovery.azure.md (67%) rename docs/sources/{flow => }/reference/components/discovery.consul.md (51%) rename docs/sources/{flow => }/reference/components/discovery.consulagent.md (94%) rename docs/sources/{flow => }/reference/components/discovery.digitalocean.md (76%) rename docs/sources/{flow => }/reference/components/discovery.dns.md (70%) rename docs/sources/{flow => }/reference/components/discovery.docker.md (59%) rename docs/sources/{flow => }/reference/components/discovery.dockerswarm.md (79%) rename docs/sources/{flow => }/reference/components/discovery.ec2.md (62%) rename docs/sources/{flow => }/reference/components/discovery.eureka.md (61%) rename docs/sources/{flow => }/reference/components/discovery.file.md (71%) rename docs/sources/{flow => }/reference/components/discovery.gce.md (57%) rename docs/sources/{flow => }/reference/components/discovery.hetzner.md (65%) rename docs/sources/{flow => }/reference/components/discovery.http.md (54%) rename docs/sources/{flow => }/reference/components/discovery.ionos.md (73%) rename 
docs/sources/{flow => }/reference/components/discovery.kubelet.md (60%) rename docs/sources/{flow => }/reference/components/discovery.kubernetes.md (61%) rename docs/sources/{flow => }/reference/components/discovery.kuma.md (54%) rename docs/sources/{flow => }/reference/components/discovery.lightsail.md (50%) rename docs/sources/{flow => }/reference/components/discovery.linode.md (65%) rename docs/sources/{flow => }/reference/components/discovery.marathon.md (69%) rename docs/sources/{flow => }/reference/components/discovery.nerve.md (86%) rename docs/sources/{flow => }/reference/components/discovery.nomad.md (53%) rename docs/sources/{flow => }/reference/components/discovery.openstack.md (54%) rename docs/sources/{flow => }/reference/components/discovery.ovhcloud.md (88%) rename docs/sources/{flow => }/reference/components/discovery.process.md (83%) rename docs/sources/{flow => }/reference/components/discovery.puppetdb.md (75%) rename docs/sources/{flow => }/reference/components/discovery.relabel.md (53%) rename docs/sources/{flow => }/reference/components/discovery.scaleway.md (55%) rename docs/sources/{flow => }/reference/components/discovery.serverset.md (76%) rename docs/sources/{flow => }/reference/components/discovery.triton.md (83%) rename docs/sources/{flow => }/reference/components/discovery.uyuni.md (62%) create mode 100644 docs/sources/reference/components/faro.receiver.md create mode 100644 docs/sources/reference/components/local.file.md rename docs/sources/{flow => }/reference/components/local.file_match.md (85%) rename docs/sources/{flow => }/reference/components/loki.echo.md (67%) rename docs/sources/{flow => }/reference/components/loki.process.md (81%) rename docs/sources/{flow => }/reference/components/loki.relabel.md (70%) rename docs/sources/{flow => }/reference/components/loki.rules.kubernetes.md (77%) rename docs/sources/{flow => }/reference/components/loki.source.api.md (83%) rename docs/sources/{flow => }/reference/components/loki.source.awsfirehose.md (63%) create mode 100644 docs/sources/reference/components/loki.source.azure_event_hubs.md rename docs/sources/{flow => }/reference/components/loki.source.cloudflare.md (82%) rename docs/sources/{flow => }/reference/components/loki.source.docker.md (55%) rename docs/sources/{flow => }/reference/components/loki.source.file.md (87%) rename docs/sources/{flow => }/reference/components/loki.source.gcplog.md (84%) rename docs/sources/{flow => }/reference/components/loki.source.gelf.md (69%) rename docs/sources/{flow => }/reference/components/loki.source.heroku.md (84%) rename docs/sources/{flow => }/reference/components/loki.source.journal.md (59%) rename docs/sources/{flow => }/reference/components/loki.source.kafka.md (92%) rename docs/sources/{flow => }/reference/components/loki.source.kubernetes.md (56%) rename docs/sources/{flow => }/reference/components/loki.source.kubernetes_events.md (54%) rename docs/sources/{flow => }/reference/components/loki.source.podlogs.md (53%) rename docs/sources/{flow => }/reference/components/loki.source.syslog.md (68%) rename docs/sources/{flow => }/reference/components/loki.source.windowsevent.md (78%) rename docs/sources/{flow => }/reference/components/loki.write.md (61%) rename docs/sources/{flow => }/reference/components/mimir.rules.kubernetes.md (81%) rename docs/sources/{flow => }/reference/components/module.file.md (73%) rename docs/sources/{flow => }/reference/components/module.git.md (68%) rename docs/sources/{flow => }/reference/components/module.http.md (69%) rename 
docs/sources/{flow => }/reference/components/module.string.md (76%) rename docs/sources/{flow => }/reference/components/otelcol.auth.basic.md (69%) rename docs/sources/{flow => }/reference/components/otelcol.auth.bearer.md (62%) rename docs/sources/{flow => }/reference/components/otelcol.auth.headers.md (71%) rename docs/sources/{flow => }/reference/components/otelcol.auth.oauth2.md (63%) rename docs/sources/{flow => }/reference/components/otelcol.auth.sigv4.md (64%) rename docs/sources/{flow => }/reference/components/otelcol.connector.host_info.md (87%) rename docs/sources/{flow => }/reference/components/otelcol.connector.servicegraph.md (74%) rename docs/sources/{flow => }/reference/components/otelcol.connector.spanlogs.md (88%) rename docs/sources/{flow => }/reference/components/otelcol.connector.spanmetrics.md (96%) rename docs/sources/{flow => }/reference/components/otelcol.exporter.loadbalancing.md (79%) rename docs/sources/{flow => }/reference/components/otelcol.exporter.logging.md (70%) rename docs/sources/{flow => }/reference/components/otelcol.exporter.loki.md (70%) rename docs/sources/{flow => }/reference/components/otelcol.exporter.otlp.md (51%) create mode 100644 docs/sources/reference/components/otelcol.exporter.otlphttp.md rename docs/sources/{flow => }/reference/components/otelcol.exporter.prometheus.md (69%) create mode 100644 docs/sources/reference/components/otelcol.extension.jaeger_remote_sampling.md rename docs/sources/{flow => }/reference/components/otelcol.processor.attributes.md (80%) rename docs/sources/{flow => }/reference/components/otelcol.processor.batch.md (78%) rename docs/sources/{flow => }/reference/components/otelcol.processor.discovery.md (80%) rename docs/sources/{flow => }/reference/components/otelcol.processor.filter.md (89%) rename docs/sources/{flow => }/reference/components/otelcol.processor.k8sattributes.md (72%) rename docs/sources/{flow => }/reference/components/otelcol.processor.memory_limiter.md (69%) rename docs/sources/{flow => }/reference/components/otelcol.processor.probabilistic_sampler.md (70%) rename docs/sources/{flow => }/reference/components/otelcol.processor.resourcedetection.md (95%) rename docs/sources/{flow => }/reference/components/otelcol.processor.span.md (65%) rename docs/sources/{flow => }/reference/components/otelcol.processor.tail_sampling.md (80%) rename docs/sources/{flow => }/reference/components/otelcol.processor.transform.md (94%) rename docs/sources/{flow => }/reference/components/otelcol.receiver.jaeger.md (92%) rename docs/sources/{flow => }/reference/components/otelcol.receiver.kafka.md (93%) rename docs/sources/{flow => }/reference/components/otelcol.receiver.loki.md (80%) rename docs/sources/{flow => }/reference/components/otelcol.receiver.opencensus.md (89%) rename docs/sources/{flow => }/reference/components/otelcol.receiver.otlp.md (92%) rename docs/sources/{flow => }/reference/components/otelcol.receiver.prometheus.md (80%) rename docs/sources/{flow => }/reference/components/otelcol.receiver.vcenter.md (92%) rename docs/sources/{flow => }/reference/components/otelcol.receiver.zipkin.md (84%) rename docs/sources/{flow => }/reference/components/prometheus.exporter.apache.md (81%) rename docs/sources/{flow => }/reference/components/prometheus.exporter.azure.md (95%) rename docs/sources/{flow => }/reference/components/prometheus.exporter.blackbox.md (90%) rename docs/sources/{flow => }/reference/components/prometheus.exporter.cadvisor.md (88%) rename docs/sources/{flow => 
}/reference/components/prometheus.exporter.cloudwatch.md (97%) rename docs/sources/{flow => }/reference/components/prometheus.exporter.consul.md (89%) rename docs/sources/{flow => }/reference/components/prometheus.exporter.dnsmasq.md (81%) rename docs/sources/{flow => }/reference/components/prometheus.exporter.elasticsearch.md (89%) rename docs/sources/{flow => }/reference/components/prometheus.exporter.gcp.md (94%) rename docs/sources/{flow => }/reference/components/prometheus.exporter.github.md (84%) rename docs/sources/{flow => }/reference/components/prometheus.exporter.kafka.md (92%) rename docs/sources/{flow => }/reference/components/prometheus.exporter.memcached.md (79%) rename docs/sources/{flow => }/reference/components/prometheus.exporter.mongodb.md (85%) rename docs/sources/{flow => }/reference/components/prometheus.exporter.mssql.md (95%) rename docs/sources/{flow => }/reference/components/prometheus.exporter.mysql.md (95%) rename docs/sources/{flow => }/reference/components/prometheus.exporter.oracledb.md (83%) rename docs/sources/{flow => }/reference/components/prometheus.exporter.postgres.md (92%) rename docs/sources/{flow => }/reference/components/prometheus.exporter.process.md (90%) rename docs/sources/{flow => }/reference/components/prometheus.exporter.redis.md (93%) rename docs/sources/{flow => }/reference/components/prometheus.exporter.self.md (80%) rename docs/sources/{flow => }/reference/components/prometheus.exporter.snmp.md (91%) rename docs/sources/{flow => }/reference/components/prometheus.exporter.snowflake.md (83%) rename docs/sources/{flow => }/reference/components/prometheus.exporter.squid.md (81%) rename docs/sources/{flow => }/reference/components/prometheus.exporter.statsd.md (90%) rename docs/sources/{flow => }/reference/components/prometheus.exporter.unix.md (98%) rename docs/sources/{flow => }/reference/components/prometheus.exporter.vsphere.md (80%) rename docs/sources/{flow => }/reference/components/prometheus.exporter.windows.md (96%) rename docs/sources/{flow => }/reference/components/prometheus.operator.podmonitors.md (87%) rename docs/sources/{flow => }/reference/components/prometheus.operator.probes.md (87%) rename docs/sources/{flow => }/reference/components/prometheus.operator.servicemonitors.md (87%) rename docs/sources/{flow => }/reference/components/prometheus.receive_http.md (87%) rename docs/sources/{flow => }/reference/components/prometheus.relabel.md (92%) rename docs/sources/{flow => }/reference/components/prometheus.remote_write.md (71%) rename docs/sources/{flow => }/reference/components/prometheus.scrape.md (63%) rename docs/sources/{flow => }/reference/components/pyroscope.ebpf.md (95%) rename docs/sources/{flow => }/reference/components/pyroscope.java.md (92%) rename docs/sources/{flow => }/reference/components/pyroscope.scrape.md (95%) rename docs/sources/{flow => }/reference/components/pyroscope.write.md (84%) rename docs/sources/{flow => }/reference/components/remote.http.md (54%) rename docs/sources/{flow => }/reference/components/remote.kubernetes.configmap.md (87%) rename docs/sources/{flow => }/reference/components/remote.kubernetes.secret.md (87%) rename docs/sources/{flow => }/reference/components/remote.s3.md (87%) rename docs/sources/{flow => }/reference/components/remote.vault.md (82%) create mode 100644 docs/sources/reference/config-blocks/_index.md rename docs/sources/{flow => }/reference/config-blocks/argument.md (71%) rename docs/sources/{flow => }/reference/config-blocks/declare.md (56%) rename docs/sources/{flow => 
}/reference/config-blocks/export.md (57%) rename docs/sources/{flow => }/reference/config-blocks/http.md (94%) rename docs/sources/{flow => }/reference/config-blocks/import.file.md (63%) rename docs/sources/{flow => }/reference/config-blocks/import.git.md (71%) rename docs/sources/{flow => }/reference/config-blocks/import.http.md (72%) rename docs/sources/{flow => }/reference/config-blocks/import.string.md (65%) rename docs/sources/{flow => }/reference/config-blocks/logging.md (85%) rename docs/sources/{flow => }/reference/config-blocks/remotecfg.md (71%) rename docs/sources/{flow => }/reference/config-blocks/tracing.md (60%) create mode 100644 docs/sources/reference/stdlib/_index.md create mode 100644 docs/sources/reference/stdlib/coalesce.md create mode 100644 docs/sources/reference/stdlib/concat.md create mode 100644 docs/sources/reference/stdlib/constants.md create mode 100644 docs/sources/reference/stdlib/env.md rename docs/sources/{flow => }/reference/stdlib/format.md (66%) create mode 100644 docs/sources/reference/stdlib/join.md create mode 100644 docs/sources/reference/stdlib/json_decode.md create mode 100644 docs/sources/reference/stdlib/json_path.md create mode 100644 docs/sources/reference/stdlib/nonsensitive.md create mode 100644 docs/sources/reference/stdlib/replace.md create mode 100644 docs/sources/reference/stdlib/split.md create mode 100644 docs/sources/reference/stdlib/to_lower.md create mode 100644 docs/sources/reference/stdlib/to_upper.md create mode 100644 docs/sources/reference/stdlib/trim.md create mode 100644 docs/sources/reference/stdlib/trim_prefix.md create mode 100644 docs/sources/reference/stdlib/trim_space.md create mode 100644 docs/sources/reference/stdlib/trim_suffix.md create mode 100644 docs/sources/release-notes.md delete mode 100644 docs/sources/shared/deploy-agent.md create mode 100644 docs/sources/shared/deploy-alloy.md delete mode 100644 docs/sources/shared/flow/reference/components/azuread-block.md delete mode 100644 docs/sources/shared/flow/reference/components/exporter-component-exports.md delete mode 100644 docs/sources/shared/flow/reference/components/http-client-proxy-config-description.md delete mode 100644 docs/sources/shared/flow/reference/components/otelcol-compression-field.md delete mode 100644 docs/sources/shared/flow/stability/beta.md delete mode 100644 docs/sources/shared/flow/stability/experimental.md rename docs/sources/shared/{flow => }/reference/components/authorization-block.md (53%) create mode 100644 docs/sources/shared/reference/components/azuread-block.md rename docs/sources/shared/{flow => }/reference/components/basic-auth-block.md (52%) create mode 100644 docs/sources/shared/reference/components/exporter-component-exports.md rename docs/sources/shared/{flow => }/reference/components/extract-field-block.md (78%) rename docs/sources/shared/{flow => }/reference/components/field-filter-block.md (58%) rename docs/sources/shared/{flow => }/reference/components/http-client-config-block.md (67%) create mode 100644 docs/sources/shared/reference/components/http-client-proxy-config-description.md rename docs/sources/shared/{flow => }/reference/components/local-file-arguments-text.md (64%) rename docs/sources/shared/{flow => }/reference/components/loki-server-grpc.md (80%) rename docs/sources/shared/{flow => }/reference/components/loki-server-http.md (73%) rename docs/sources/shared/{flow => }/reference/components/managed_identity-block.md (55%) rename docs/sources/shared/{flow => }/reference/components/match-properties-block.md (71%) 
rename docs/sources/shared/{flow => }/reference/components/oauth2-block.md (72%) create mode 100644 docs/sources/shared/reference/components/otelcol-compression-field.md rename docs/sources/shared/{flow => }/reference/components/otelcol-debug-metrics-block.md (59%) rename docs/sources/shared/{flow => }/reference/components/otelcol-filter-attribute-block.md (59%) rename docs/sources/shared/{flow => }/reference/components/otelcol-filter-library-block.md (59%) rename docs/sources/shared/{flow => }/reference/components/otelcol-filter-log-severity-block.md (72%) rename docs/sources/shared/{flow => }/reference/components/otelcol-filter-regexp-block.md (64%) rename docs/sources/shared/{flow => }/reference/components/otelcol-filter-resource-block.md (58%) rename docs/sources/shared/{flow => }/reference/components/otelcol-grpc-authority.md (59%) rename docs/sources/shared/{flow => }/reference/components/otelcol-grpc-balancer-name.md (60%) rename docs/sources/shared/{flow => }/reference/components/otelcol-queue-block.md (72%) rename docs/sources/shared/{flow => }/reference/components/otelcol-retry-block.md (77%) rename docs/sources/shared/{flow => }/reference/components/otelcol-tls-config-block.md (77%) rename docs/sources/shared/{flow => }/reference/components/output-block-logs.md (55%) rename docs/sources/shared/{flow => }/reference/components/output-block-metrics.md (56%) rename docs/sources/shared/{flow => }/reference/components/output-block-traces.md (55%) rename docs/sources/shared/{flow => }/reference/components/output-block.md (63%) rename docs/sources/shared/{flow => }/reference/components/prom-operator-scrape.md (68%) rename docs/sources/shared/{flow => }/reference/components/rule-block-logs.md (87%) rename docs/sources/shared/{flow => }/reference/components/rule-block.md (87%) rename docs/sources/shared/{flow => }/reference/components/sigv4-block.md (65%) rename docs/sources/shared/{flow => }/reference/components/tls-config-block.md (77%) rename docs/sources/shared/{flow => }/reference/components/write_relabel_config.md (84%) create mode 100644 docs/sources/shared/stability/beta.md create mode 100644 docs/sources/shared/stability/experimental.md delete mode 100644 docs/sources/shared/wal-data-retention.md delete mode 100644 docs/sources/static/_index.md delete mode 100644 docs/sources/static/api/_index.md delete mode 100644 docs/sources/static/configuration/_index.md delete mode 100644 docs/sources/static/configuration/agent-management.md delete mode 100644 docs/sources/static/configuration/create-config-file.md delete mode 100644 docs/sources/static/configuration/flags.md delete mode 100644 docs/sources/static/configuration/integrations/_index.md delete mode 100644 docs/sources/static/configuration/integrations/apache-exporter-config.md delete mode 100644 docs/sources/static/configuration/integrations/azure-exporter-config.md delete mode 100644 docs/sources/static/configuration/integrations/blackbox-config.md delete mode 100644 docs/sources/static/configuration/integrations/cadvisor-config.md delete mode 100644 docs/sources/static/configuration/integrations/cloudwatch-exporter-config.md delete mode 100644 docs/sources/static/configuration/integrations/consul-exporter-config.md delete mode 100644 docs/sources/static/configuration/integrations/dnsmasq-exporter-config.md delete mode 100644 docs/sources/static/configuration/integrations/elasticsearch-exporter-config.md delete mode 100644 docs/sources/static/configuration/integrations/gcp-exporter-config.md delete mode 100644 
docs/sources/static/configuration/integrations/github-exporter-config.md delete mode 100644 docs/sources/static/configuration/integrations/integrations-next/_index.md delete mode 100644 docs/sources/static/configuration/integrations/integrations-next/app-agent-receiver-config.md delete mode 100644 docs/sources/static/configuration/integrations/integrations-next/blackbox-config.md delete mode 100644 docs/sources/static/configuration/integrations/integrations-next/eventhandler-config.md delete mode 100644 docs/sources/static/configuration/integrations/integrations-next/snmp-config.md delete mode 100644 docs/sources/static/configuration/integrations/integrations-next/vsphere-config.md delete mode 100644 docs/sources/static/configuration/integrations/kafka-exporter-config.md delete mode 100644 docs/sources/static/configuration/integrations/memcached-exporter-config.md delete mode 100644 docs/sources/static/configuration/integrations/mongodb_exporter-config.md delete mode 100644 docs/sources/static/configuration/integrations/mssql-config.md delete mode 100644 docs/sources/static/configuration/integrations/mysqld-exporter-config.md delete mode 100644 docs/sources/static/configuration/integrations/node-exporter-config.md delete mode 100644 docs/sources/static/configuration/integrations/oracledb-config.md delete mode 100644 docs/sources/static/configuration/integrations/postgres-exporter-config.md delete mode 100644 docs/sources/static/configuration/integrations/process-exporter-config.md delete mode 100644 docs/sources/static/configuration/integrations/redis-exporter-config.md delete mode 100644 docs/sources/static/configuration/integrations/snmp-config.md delete mode 100644 docs/sources/static/configuration/integrations/snowflake-config.md delete mode 100644 docs/sources/static/configuration/integrations/squid-config.md delete mode 100644 docs/sources/static/configuration/integrations/statsd-exporter-config.md delete mode 100644 docs/sources/static/configuration/integrations/windows-exporter-config.md delete mode 100644 docs/sources/static/configuration/logs-config.md delete mode 100644 docs/sources/static/configuration/metrics-config.md delete mode 100644 docs/sources/static/configuration/scraping-service.md delete mode 100644 docs/sources/static/configuration/server-config.md delete mode 100644 docs/sources/static/configuration/traces-config.md delete mode 100644 docs/sources/static/operation-guide/_index.md delete mode 100644 docs/sources/static/release-notes.md delete mode 100644 docs/sources/static/set-up/_index.md delete mode 100644 docs/sources/static/set-up/deploy-agent.md delete mode 100644 docs/sources/static/set-up/install/_index.md delete mode 100644 docs/sources/static/set-up/install/install-agent-binary.md delete mode 100644 docs/sources/static/set-up/install/install-agent-docker.md delete mode 100644 docs/sources/static/set-up/install/install-agent-kubernetes.md delete mode 100644 docs/sources/static/set-up/install/install-agent-linux.md delete mode 100644 docs/sources/static/set-up/install/install-agent-macos.md delete mode 100644 docs/sources/static/set-up/install/install-agent-on-windows.md delete mode 100644 docs/sources/static/set-up/quick-starts.md delete mode 100644 docs/sources/static/set-up/start-agent.md create mode 100644 docs/sources/tasks/_index.md rename docs/sources/{flow => }/tasks/collect-opentelemetry-data.md (81%) rename docs/sources/{flow => }/tasks/collect-prometheus-metrics.md (87%) create mode 100644 docs/sources/tasks/configure-agent-clustering.md create 
mode 100644 docs/sources/tasks/configure/_index.md rename docs/sources/{flow => }/tasks/configure/configure-kubernetes.md (55%) rename docs/sources/{flow => }/tasks/configure/configure-linux.md (56%) rename docs/sources/{flow => }/tasks/configure/configure-macos.md (58%) rename docs/sources/{flow => }/tasks/configure/configure-windows.md (66%) rename docs/sources/{flow => }/tasks/debug.md (61%) create mode 100644 docs/sources/tasks/distribute-prometheus-scrape-load.md create mode 100644 docs/sources/tasks/estimate-resource-usage.md create mode 100644 docs/sources/tasks/migrate/_index.md rename docs/sources/{flow => }/tasks/migrate/from-operator.md (69%) rename docs/sources/{flow => }/tasks/migrate/from-prometheus.md (71%) rename docs/sources/{flow => }/tasks/migrate/from-promtail.md (73%) rename docs/sources/{flow => }/tasks/migrate/from-static.md (60%) create mode 100644 docs/sources/tasks/monitor/_index.md create mode 100644 docs/sources/tasks/monitor/component_metrics.md create mode 100644 docs/sources/tasks/monitor/controller_metrics.md rename docs/sources/{flow => }/tasks/opentelemetry-to-lgtm-stack.md (76%) create mode 100644 docs/sources/tutorials/_index.md rename docs/sources/{flow => }/tutorials/assets/docker-compose.yaml (100%) rename docs/sources/{flow => }/tutorials/assets/flow_configs/agent.river (100%) rename docs/sources/{flow => }/tutorials/assets/flow_configs/example.river (100%) rename docs/sources/{flow => }/tutorials/assets/flow_configs/multiple-inputs.river (100%) rename docs/sources/{flow => }/tutorials/assets/flow_configs/relabel.river (100%) rename docs/sources/{flow => }/tutorials/assets/generate.sh (100%) rename docs/sources/{flow => }/tutorials/assets/grafana/config/grafana.ini (100%) rename docs/sources/{flow => }/tutorials/assets/grafana/dashboards-provisioning/dashboards.yaml (100%) rename docs/sources/{flow => }/tutorials/assets/grafana/dashboards/agent.json (100%) rename docs/sources/{flow => }/tutorials/assets/grafana/dashboards/template.jsonnet (100%) rename docs/sources/{flow => }/tutorials/assets/grafana/datasources/datasource.yml (100%) rename docs/sources/{flow => }/tutorials/assets/mimir/mimir.yaml (100%) rename docs/sources/{flow => }/tutorials/assets/runt.sh (100%) rename docs/sources/{flow => }/tutorials/chaining.md (64%) rename docs/sources/{flow => }/tutorials/collecting-prometheus-metrics.md (60%) rename docs/sources/{flow => }/tutorials/filtering-metrics.md (64%) create mode 100644 docs/sources/tutorials/flow-by-example/_index.md rename docs/sources/{flow => }/tutorials/flow-by-example/first-components-and-stdlib/index.md (58%) create mode 100644 docs/sources/tutorials/flow-by-example/get-started.md rename docs/sources/{flow => }/tutorials/flow-by-example/logs-and-relabeling-basics/index.md (64%) rename docs/sources/{flow => }/tutorials/flow-by-example/processing-logs/index.md (81%) diff --git a/docs/developer/writing-flow-component-documentation.md b/docs/developer/writing-flow-component-documentation.md index ddaf6466e0..ab1d304990 100644 --- a/docs/developer/writing-flow-component-documentation.md +++ b/docs/developer/writing-flow-component-documentation.md @@ -113,13 +113,13 @@ If documenting a beta component, include the following after the header, but before the description of the component: ```markdown -{{< docs/shared lookup="flow/stability/beta.md" source="agent" version="" >}} +{{< docs/shared lookup="stability/beta.md" source="alloy" version="" >}} ``` If documenting an experimental component, include the following instead: 
```markdown -{{< docs/shared lookup="flow/stability/experimental.md" source="agent" version="" >}} +{{< docs/shared lookup="stability/experimental.md" source="alloy" version="" >}} ``` ### Usage diff --git a/docs/sources/_index.md b/docs/sources/_index.md index 605655ef6a..6cf14905df 100644 --- a/docs/sources/_index.md +++ b/docs/sources/_index.md @@ -1,73 +1,53 @@ --- -aliases: -- /docs/grafana-cloud/agent/ -- /docs/grafana-cloud/monitor-infrastructure/agent/ -- /docs/grafana-cloud/monitor-infrastructure/integrations/agent/ -- /docs/grafana-cloud/send-data/agent/ -canonical: https://grafana.com/docs/agent/latest/ -title: Grafana Agent -description: Grafana Agent is a flexible, performant, vendor-neutral, telemetry collector +canonical: https://grafana.com/docs/alloy/latest/ +title: Grafana Alloy +description: Grafana Alloy is a flexible, performant, vendor-neutral, telemetry collector weight: 350 cascade: - AGENT_RELEASE: v0.40.0 + ALLOY_RELEASE: $ALLOY_VERSION OTEL_VERSION: v0.87.0 + PRODUCT_NAME: Grafana Alloy + PRODUCT_ROOT_NAME: Alloy --- -# Grafana Agent +# {{% param "PRODUCT_NAME" %}} -Grafana Agent is a vendor-neutral, batteries-included telemetry collector with -configuration inspired by [Terraform][]. It is designed to be flexible, -performant, and compatible with multiple ecosystems such as Prometheus and -OpenTelemetry. +{{< param "PRODUCT_NAME" >}} is a vendor-neutral, batteries-included telemetry collector with configuration inspired by [Terraform][]. +It is designed to be flexible, performant, and compatible with multiple ecosystems such as Prometheus and OpenTelemetry. -Grafana Agent is based around **components**. Components are wired together to -form programmable observability **pipelines** for telemetry collection, -processing, and delivery. +{{< param "PRODUCT_NAME" >}} is based around **components**. Components are wired together to form programmable observability **pipelines** for telemetry collection, processing, and delivery. -{{< admonition type="note" >}} -This page focuses mainly on [Flow mode](https://grafana.com/docs/agent//flow/), the Terraform-inspired variant of Grafana Agent. - -For information on other variants of Grafana Agent, refer to [Introduction to Grafana Agent]({{< relref "./about.md" >}}). -{{< /admonition >}} - -Grafana Agent can collect, transform, and send data to: +{{< param "PRODUCT_NAME" >}} can collect, transform, and send data to: * The [Prometheus][] ecosystem * The [OpenTelemetry][] ecosystem * The Grafana open source ecosystem ([Loki][], [Grafana][], [Tempo][], [Mimir][], [Pyroscope][]) -[Terraform]: https://terraform.io -[Prometheus]: https://prometheus.io -[OpenTelemetry]: https://opentelemetry.io -[Loki]: https://github.com/grafana/loki -[Grafana]: https://github.com/grafana/grafana -[Tempo]: https://github.com/grafana/tempo -[Mimir]: https://github.com/grafana/mimir -[Pyroscope]: https://github.com/grafana/pyroscope +## Why use {{< param "PRODUCT_NAME" >}}? -## Why use Grafana Agent? - -* **Vendor-neutral**: Fully compatible with the Prometheus, OpenTelemetry, and - Grafana open source ecosystems. -* **Every signal**: Collect telemetry data for metrics, logs, traces, and - continuous profiles. -* **Scalable**: Deploy on any number of machines to collect millions of active - series and terabytes of logs. -* **Battle-tested**: Grafana Agent extends the existing battle-tested code from - the Prometheus and OpenTelemetry Collector projects. 
-* **Powerful**: Write programmable pipelines with ease, and debug them using a - [built-in UI][UI]. -* **Batteries included**: Integrate with systems like MySQL, Kubernetes, and - Apache to get telemetry that's immediately useful. +* **Vendor-neutral**: Fully compatible with the Prometheus, OpenTelemetry, and Grafana open source ecosystems. +* **Every signal**: Collect telemetry data for metrics, logs, traces, and continuous profiles. +* **Scalable**: Deploy on any number of machines to collect millions of active series and terabytes of logs. +* **Battle-tested**: {{< param "PRODUCT_NAME" >}} extends the existing battle-tested code from the Prometheus and OpenTelemetry Collector projects. +* **Powerful**: Write programmable pipelines with ease, and debug them using a [built-in UI][UI]. +* **Batteries included**: Integrate with systems like MySQL, Kubernetes, and Apache to get telemetry that's immediately useful. + + ## Supported platforms * Linux @@ -92,29 +72,19 @@ Grafana Agent can collect, transform, and send data to: ## Release cadence -A new minor release is planned every six weeks for the entire Grafana Agent -project, including Static mode, the Static mode Kubernetes operator, and Flow -mode. +A new minor release is planned every six weeks for {{< param "PRODUCT_NAME" >}}. -The release cadence is best-effort: releases may be moved forwards or backwards -if needed. The planned release dates for future minor releases do not change if -one minor release is moved. +The release cadence is best-effort: releases may be moved forwards or backwards if needed. +The planned release dates for future minor releases do not change if one minor release is moved. Patch and security releases may be created at any time. -{{% docs/reference %}} -[variants]: "/docs/agent/ -> /docs/agent//about" -[variants]: "/docs/grafana-cloud/ -> /docs/grafana-cloud/send-data/agent/about" - -[Static mode]: "/docs/agent/ -> /docs/agent//static" -[Static mode]: "/docs/grafana-cloud/ -> /docs/grafana-cloud/send-data/agent/static" - -[Static mode Kubernetes operator]: "/docs/agent/ -> /docs/agent//operator" -[Static mode Kubernetes operator]: "/docs/grafana-cloud/ -> /docs/grafana-cloud/send-data/agent/operator" - -[Flow mode]: "/docs/agent/ -> /docs/agent//flow" -[Flow mode]: "/docs/grafana-cloud/ -> /docs/agent//flow" - -[UI]: "/docs/agent/ -> /docs/agent//flow/tasks/debug.md#grafana-agent-flow-ui" -[UI]: "/docs/grafana-cloud/ -> /docs/agent//flow/tasks/debug.md#grafana-agent-flow-ui" -{{% /docs/reference %}} +[Terraform]: https://terraform.io +[Prometheus]: https://prometheus.io +[OpenTelemetry]: https://opentelemetry.io +[Loki]: https://github.com/grafana/loki +[Grafana]: https://github.com/grafana/grafana +[Tempo]: https://github.com/grafana/tempo +[Mimir]: https://github.com/grafana/mimir +[Pyroscope]: https://github.com/grafana/pyroscope +[UI]: ./tasks/debug/#grafana-alloy-ui diff --git a/docs/sources/_index.md.t b/docs/sources/_index.md.t index daf939a62a..6cf14905df 100644 --- a/docs/sources/_index.md.t +++ b/docs/sources/_index.md.t @@ -1,73 +1,53 @@ --- -aliases: -- /docs/grafana-cloud/agent/ -- /docs/grafana-cloud/monitor-infrastructure/agent/ -- /docs/grafana-cloud/monitor-infrastructure/integrations/agent/ -- /docs/grafana-cloud/send-data/agent/ -canonical: https://grafana.com/docs/agent/latest/ -title: Grafana Agent -description:
Grafana Alloy is a flexible, performant, vendor-neutral, telemetry collector weight: 350 cascade: - AGENT_RELEASE: $AGENT_VERSION + ALLOY_RELEASE: $ALLOY_VERSION OTEL_VERSION: v0.87.0 + PRODUCT_NAME: Grafana Alloy + PRODUCT_ROOT_NAME: Alloy --- -# Grafana Agent +# {{% param "PRODUCT_NAME" %}} -Grafana Agent is a vendor-neutral, batteries-included telemetry collector with -configuration inspired by [Terraform][]. It is designed to be flexible, -performant, and compatible with multiple ecosystems such as Prometheus and -OpenTelemetry. +{{< param "PRODUCT_NAME" >}} is a vendor-neutral, batteries-included telemetry collector with configuration inspired by [Terraform][]. +It is designed to be flexible, performant, and compatible with multiple ecosystems such as Prometheus and OpenTelemetry. -Grafana Agent is based around **components**. Components are wired together to -form programmable observability **pipelines** for telemetry collection, -processing, and delivery. +{{< param "PRODUCT_NAME" >}} is based around **components**. Components are wired together to form programmable observability **pipelines** for telemetry collection, processing, and delivery. -{{< admonition type="note" >}} -This page focuses mainly on [Flow mode](https://grafana.com/docs/agent//flow/), the Terraform-inspired variant of Grafana Agent. - -For information on other variants of Grafana Agent, refer to [Introduction to Grafana Agent]({{< relref "./about.md" >}}). -{{< /admonition >}} - -Grafana Agent can collect, transform, and send data to: +{{< param "PRODUCT_NAME" >}} can collect, transform, and send data to: * The [Prometheus][] ecosystem * The [OpenTelemetry][] ecosystem * The Grafana open source ecosystem ([Loki][], [Grafana][], [Tempo][], [Mimir][], [Pyroscope][]) -[Terraform]: https://terraform.io -[Prometheus]: https://prometheus.io -[OpenTelemetry]: https://opentelemetry.io -[Loki]: https://github.com/grafana/loki -[Grafana]: https://github.com/grafana/grafana -[Tempo]: https://github.com/grafana/tempo -[Mimir]: https://github.com/grafana/mimir -[Pyroscope]: https://github.com/grafana/pyroscope +## Why use {{< param "PRODUCT_NAME" >}}? -## Why use Grafana Agent? - -* **Vendor-neutral**: Fully compatible with the Prometheus, OpenTelemetry, and - Grafana open source ecosystems. -* **Every signal**: Collect telemetry data for metrics, logs, traces, and - continuous profiles. -* **Scalable**: Deploy on any number of machines to collect millions of active - series and terabytes of logs. -* **Battle-tested**: Grafana Agent extends the existing battle-tested code from - the Prometheus and OpenTelemetry Collector projects. -* **Powerful**: Write programmable pipelines with ease, and debug them using a - [built-in UI][UI]. -* **Batteries included**: Integrate with systems like MySQL, Kubernetes, and - Apache to get telemetry that's immediately useful. +* **Vendor-neutral**: Fully compatible with the Prometheus, OpenTelemetry, and Grafana open source ecosystems. +* **Every signal**: Collect telemetry data for metrics, logs, traces, and continuous profiles. +* **Scalable**: Deploy on any number of machines to collect millions of active series and terabytes of logs. +* **Battle-tested**: {{< param "PRODUCT_NAME" >}} extends the existing battle-tested code from the Prometheus and OpenTelemetry Collector projects. +* **Powerful**: Write programmable pipelines with ease, and debug them using a [built-in UI][UI]. 
+* **Batteries included**: Integrate with systems like MySQL, Kubernetes, and Apache to get telemetry that's immediately useful. + + ## Supported platforms * Linux @@ -92,29 +72,19 @@ Grafana Agent can collect, transform, and send data to: ## Release cadence -A new minor release is planned every six weeks for the entire Grafana Agent -project, including Static mode, the Static mode Kubernetes operator, and Flow -mode. +A new minor release is planned every six weeks for {{< param "PRODUCT_NAME" >}}. -The release cadence is best-effort: releases may be moved forwards or backwards -if needed. The planned release dates for future minor releases do not change if -one minor release is moved. +The release cadence is best-effort: releases may be moved forwards or backwards if needed. +The planned release dates for future minor releases do not change if one minor release is moved. Patch and security releases may be created at any time. -{{% docs/reference %}} -[variants]: "/docs/agent/ -> /docs/agent//about" -[variants]: "/docs/grafana-cloud/ -> /docs/grafana-cloud/send-data/agent/about" - -[Static mode]: "/docs/agent/ -> /docs/agent//static" -[Static mode]: "/docs/grafana-cloud/ -> /docs/grafana-cloud/send-data/agent/static" - -[Static mode Kubernetes operator]: "/docs/agent/ -> /docs/agent//operator" -[Static mode Kubernetes operator]: "/docs/grafana-cloud/ -> /docs/grafana-cloud/send-data/agent/operator" - -[Flow mode]: "/docs/agent/ -> /docs/agent//flow" -[Flow mode]: "/docs/grafana-cloud/ -> /docs/agent//flow" - -[UI]: "/docs/agent/ -> /docs/agent//flow/tasks/debug.md#grafana-agent-flow-ui" -[UI]: "/docs/grafana-cloud/ -> /docs/agent//flow/tasks/debug.md#grafana-agent-flow-ui" -{{% /docs/reference %}} +[Terraform]: https://terraform.io +[Prometheus]: https://prometheus.io +[OpenTelemetry]: https://opentelemetry.io +[Loki]: https://github.com/grafana/loki +[Grafana]: https://github.com/grafana/grafana +[Tempo]: https://github.com/grafana/tempo +[Mimir]: https://github.com/grafana/mimir +[Pyroscope]: https://github.com/grafana/pyroscope +[UI]: ./tasks/debug/#grafana-alloy-ui diff --git a/docs/sources/about.md b/docs/sources/about.md index eca262408d..a2df0de45c 100644 --- a/docs/sources/about.md +++ b/docs/sources/about.md @@ -1,53 +1,66 @@ --- -aliases: -- ./about-agent/ -- /docs/grafana-cloud/agent/about/ -- /docs/grafana-cloud/monitor-infrastructure/agent/about/ -- /docs/grafana-cloud/monitor-infrastructure/integrations/agent/about/ -- /docs/grafana-cloud/send-data/agent/about/ -canonical: https://grafana.com/docs/agent/latest/about/ -description: Grafana Agent is a flexible, performant, vendor-neutral, telemetry collector +canonical: https://grafana.com/docs/alloy/latest/about/ +description: Grafana Alloy is a flexible, performant, vendor-neutral, telemetry collector menuTitle: Introduction -title: Introduction to Grafana Agent -weight: 100 +title: Introduction to Grafana Alloy +weight: 10 --- -# Introduction to Grafana Agent - -Grafana Agent is a flexible, high performance, vendor-neutral telemetry collector. It's fully compatible with the most popular open source observability standards such as OpenTelemetry (OTel) and Prometheus. - -Grafana Agent is available in three different variants: - -- [Static mode][]: The original Grafana Agent. -- [Static mode Kubernetes operator][]: The Kubernetes operator for Static mode. -- [Flow mode][]: The new, component-based Grafana Agent.
- -{{% docs/reference %}} -[Static mode]: "/docs/agent/ -> /docs/agent//static" -[Static mode]: "/docs/grafana-cloud/ -> /docs/grafana-cloud/send-data/agent/static" -[Static mode Kubernetes operator]: "/docs/agent/ -> /docs/agent//operator" -[Static mode Kubernetes operator]: "/docs/grafana-cloud/ -> /docs/grafana-cloud/send-data/agent/operator" -[Flow mode]: "/docs/agent/ -> /docs/agent//flow" -[Flow mode]: "/docs/grafana-cloud/ -> /docs/agent//flow" -[Prometheus]: "/docs/agent/ -> /docs/agent//flow/tasks/collect-prometheus-metrics.md" -[Prometheus]: "/docs/grafana-cloud/ -> /docs/agent//flow/tasks/collect-prometheus-metrics.md" -[OTel]: "/docs/agent/ -> /docs/agent//flow/tasks/collect-opentelemetry-data.md" -[OTel]: "/docs/grafana-cloud/ -> /docs/agent//flow/tasks/collect-opentelemetry-data.md" -[Loki]: "/docs/agent/ -> /docs/agent//flow/tasks/migrate/from-promtail.md" -[Loki]: "/docs/grafana-cloud/ -> /docs/agent//flow/tasks/migrate/from-promtail.md" -[clustering]: "/docs/agent/ -> /docs/agent//flow/concepts/clustering/_index.md" -[clustering]: "/docs/grafana-cloud/ -> /docs/agent//flow/concepts/clustering/_index.md" -[rules]: "/docs/agent/ -> /docs/agent/latest/flow/reference/components/mimir.rules.kubernetes.md" -[rules]: "/docs/grafana-cloud/ -> /docs/agent/latest/flow/reference/components/mimir.rules.kubernetes.md" -[vault]: "/docs/agent/ -> /docs/agent//flow/reference/components/remote.vault.md" -[vault]: "/docs/grafana-cloud/ -> /docs/agent//flow/reference/components/remote.vault.md" -{{% /docs/reference %}} +# Introduction to {{% param "PRODUCT_NAME" %}} -[Pyroscope]: https://grafana.com/docs/pyroscope/latest/configure-client/grafana-agent/go_pull -[helm chart]: https://grafana.com/docs/grafana-cloud/monitor-infrastructure/kubernetes-monitoring/configuration/config-k8s-helmchart -[sla]: https://grafana.com/legal/grafana-cloud-sla -[observability]: https://grafana.com/docs/grafana-cloud/monitor-applications/application-observability/setup#send-telemetry +{{< param "PRODUCT_NAME" >}} is a flexible, high-performance, vendor-neutral telemetry collector. It's fully compatible with the most popular open source observability standards such as OpenTelemetry (OTel) and Prometheus. + +{{< param "PRODUCT_NAME" >}} is a _component-based_ revision of {{< param "PRODUCT_ROOT_NAME" >}} with a focus on ease-of-use, +debuggability, and the ability to adapt to the needs of power users. + +Components allow for reusability, composability, and focus on a single task. + +* **Reusability** allows for the output of components to be reused as the input for multiple other components. +* **Composability** allows for components to be chained together to form a pipeline. +* **Single task** means the scope of a component is limited to one narrow task, so it has fewer side effects. + +## Features + +* Write declarative configurations with a Terraform-inspired configuration language. +* Declare components to configure parts of a pipeline. +* Use expressions to bind components together to build a programmable pipeline. +* Use the built-in UI to debug the state of a pipeline. + +## Example + +```river +// Discover Kubernetes pods to collect metrics from +discovery.kubernetes "pods" { + role = "pod" +} + +// Scrape metrics from Kubernetes pods and send to a prometheus.remote_write +// component. +prometheus.scrape "default" { + targets = discovery.kubernetes.pods.targets + forward_to = [prometheus.remote_write.default.receiver] +} + +// Get an API key from disk.
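+// The is_secret flag below marks the file contents as a secret, so the
+// exported value is masked in the UI and can only be passed to arguments
+// that accept secrets.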
+local.file "apikey" { + filename = "/var/data/my-api-key.txt" + is_secret = true +} + +// Collect and send metrics to a Prometheus remote_write endpoint. +prometheus.remote_write "default" { + endpoint { + url = "http://localhost:9009/api/prom/push" + + basic_auth { + username = "MY_USERNAME" + password = local.file.apikey.content + } + } +} +``` + -## Choose which variant of Grafana Agent to run +## {{% param "PRODUCT_NAME" %}} configuration generator + +The {{< param "PRODUCT_NAME" >}} [configuration generator][] helps you get a head start on creating flow code. + +{{< admonition type="note" >}} +This feature is experimental, and it doesn't support all River components. +{{< /admonition >}} + +## Next steps + +* [Install][] {{< param "PRODUCT_NAME" >}}. +* Learn about the core [Concepts][] of {{< param "PRODUCT_NAME" >}}. +* Follow the [Tutorials][] for hands-on learning of {{< param "PRODUCT_NAME" >}}. +* Consult the [Tasks][] instructions to accomplish common objectives with {{< param "PRODUCT_NAME" >}}. +* Check out the [Reference][] documentation to find specific information you might be looking for. + +[configuration generator]: https://grafana.github.io/agent-configurator/ +[Install]: ../get-started/install/ +[Concepts]: ../concepts/ +[Tasks]: ../tasks/ +[Tutorials]: ../tutorials/ +[Reference]: ../reference/ + + ### BoringCrypto -[BoringCrypto](https://pkg.go.dev/crypto/internal/boring) is an **EXPERIMENTAL** feature for building Grafana Agent +[BoringCrypto][] is an **EXPERIMENTAL** feature for building {{< param "PRODUCT_NAME" >}} binaries and images with BoringCrypto enabled. Builds and Docker images for Linux arm64/amd64 are made available. -{{% docs/reference %}} -[integrations]: "/docs/agent/ -> /docs/agent//static/configuration/integrations" -[integrations]: "/docs/grafana-cloud/ -> /docs/grafana-cloud/send-data/agent/static/configuration/integrations" - -[components]: "/docs/agent/ -> /docs/agent//flow/reference/components" -[components]: "/docs/grafana-cloud/ -> /docs/agent//flow/reference/components" -{{% /docs/reference %}} +[BoringCrypto]: https://pkg.go.dev/crypto/internal/boring diff --git a/docs/sources/concepts/_index.md b/docs/sources/concepts/_index.md new file mode 100644 index 0000000000..02cc0534d1 --- /dev/null +++ b/docs/sources/concepts/_index.md @@ -0,0 +1,12 @@ +--- +canonical: https://grafana.com/docs/alloy/latest/concepts/ +description: Learn about the Grafana Alloy concepts +title: Concepts +weight: 100 +--- + +# Concepts + +This section explains the primary concepts of {{< param "PRODUCT_NAME" >}}. + +{{< section >}} diff --git a/docs/sources/concepts/clustering.md b/docs/sources/concepts/clustering.md new file mode 100644 index 0000000000..1d930287f9 --- /dev/null +++ b/docs/sources/concepts/clustering.md @@ -0,0 +1,69 @@ +--- +canonical: https://grafana.com/docs/alloy/latest/concepts/clustering/ +description: Learn about Grafana Alloy clustering concepts +labels: + stage: beta +menuTitle: Clustering +title: Clustering (beta) +weight: 500 +--- + +# Clustering (beta) + +Clustering enables a fleet of {{< param "PRODUCT_ROOT_NAME" >}}s to work together for workload distribution and high availability. +It helps create horizontally scalable deployments with minimal resource and operational overhead. + +To achieve this, {{< param "PRODUCT_NAME" >}} makes use of an eventually consistent model that assumes all participating {{< param "PRODUCT_ROOT_NAME" >}}s are interchangeable and converge on using the same configuration file. 
+ +The behavior of a standalone, non-clustered {{< param "PRODUCT_ROOT_NAME" >}} is the same as if it were a single-node cluster. + +You configure clustering by passing `cluster` command-line flags to the [run][] command. + +## Use cases + +### Target auto-distribution + +Target auto-distribution is the most basic use case of clustering. +It allows scraping components running on all peers to distribute the scrape load between themselves. +Target auto-distribution requires that all {{< param "PRODUCT_ROOT_NAME" >}}s in the same cluster can reach the same service discovery APIs and scrape the same targets. + +You must explicitly enable target auto-distribution on components by defining a `clustering` block. + +```river +prometheus.scrape "default" { + clustering { + enabled = true + } + + ... +} +``` + +A cluster state change is detected when a new node joins or an existing node leaves. +All participating components locally recalculate target ownership and re-balance the number of targets they’re scraping without explicitly communicating ownership over the network. + +Target auto-distribution allows you to dynamically scale the number of {{< param "PRODUCT_ROOT_NAME" >}}s to distribute workload during peaks. +It also provides resiliency because targets are automatically picked up by one of the node peers if a node leaves. + +{{< param "PRODUCT_NAME" >}} uses a local consistent hashing algorithm to distribute targets, meaning that, on average, only ~1/N of the targets are redistributed when a node joins or leaves the cluster. + +Refer to a component's reference documentation to discover whether it supports clustering. Components that support clustering include: + +- [prometheus.scrape][] +- [pyroscope.scrape][] +- [prometheus.operator.podmonitors][] +- [prometheus.operator.servicemonitors][] + +## Cluster monitoring and troubleshooting + +You can use the {{< param "PRODUCT_NAME" >}} UI [clustering page][] to monitor your cluster status. +Refer to [Debugging clustering issues][debugging] for additional troubleshooting information.
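+The same `clustering` block shown above applies to the other components in this list. As a minimal sketch, assuming `discovery.kubernetes.pods` and `pyroscope.write.default` components are defined elsewhere in the same configuration, clustered profile scraping might look like this:
+
+```river
+pyroscope.scrape "default" {
+  // Distribute the discovered targets across all cluster peers.
+  clustering {
+    enabled = true
+  }
+
+  targets    = discovery.kubernetes.pods.targets
+  forward_to = [pyroscope.write.default.receiver]
+}
+```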
+ + +[run]: ../../reference/cli/run/#clustering-beta +[prometheus.scrape]: ../../reference/components/prometheus.scrape/#clustering-beta +[pyroscope.scrape]: ../../reference/components/pyroscope.scrape/#clustering-beta +[prometheus.operator.podmonitors]: ../../reference/components/prometheus.operator.podmonitors/#clustering-beta +[prometheus.operator.servicemonitors]: ../../reference/components/prometheus.operator.servicemonitors/#clustering-beta +[clustering page]: ../../tasks/debug/#clustering-page +[debugging]: ../../tasks/debug/#debugging-clustering-issues diff --git a/docs/sources/flow/concepts/component_controller.md b/docs/sources/concepts/component_controller.md similarity index 81% rename from docs/sources/flow/concepts/component_controller.md rename to docs/sources/concepts/component_controller.md index 1a19e13b49..b1474bbe23 100644 --- a/docs/sources/flow/concepts/component_controller.md +++ b/docs/sources/concepts/component_controller.md @@ -1,11 +1,5 @@ --- -aliases: -- ../../concepts/component-controller/ -- /docs/grafana-cloud/agent/flow/concepts/component_controller/ -- /docs/grafana-cloud/monitor-infrastructure/agent/flow/concepts/component_controller/ -- /docs/grafana-cloud/monitor-infrastructure/integrations/agent/flow/concepts/component_controller/ -- /docs/grafana-cloud/send-data/agent/flow/concepts/component_controller/ -canonical: https://grafana.com/docs/agent/latest/flow/concepts/component_controller/ +canonical: https://grafana.com/docs/alloy/latest/concepts/component_controller/ description: Learn about the component controller title: Component controller weight: 200 @@ -114,12 +108,6 @@ removing components no longer defined in the configuration file and creating new All components managed by the controller are reevaluated after reloading. 
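+
+For example, assuming the default HTTP listen address, a reload could be requested over HTTP. This is a sketch; the host and port depend on the flags passed to [run][].
+
+```shell
+curl -X POST http://127.0.0.1:12345/-/reload
+```
+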
[DAG]: https://en.wikipedia.org/wiki/Directed_acyclic_graph - -{{% docs/reference %}} -[prometheus.exporter.unix]: "/docs/agent/ -> /docs/agent//flow/reference/components/prometheus.exporter.unix.md" -[prometheus.exporter.unix]: "/docs/grafana-cloud/ -> /docs/grafana-cloud/send-data/agent/flow/reference/components/prometheus.exporter.unix.md" -[run]: "/docs/agent/ -> /docs/agent//flow/reference/cli/run.md" -[run]: "/docs/grafana-cloud/ -> /docs/grafana-cloud/send-data/agent/flow/reference/cli/run.md" -[Components]: "/docs/agent/ -> /docs/agent//flow/concepts/components.md" -[Components]: "/docs/grafana-cloud/ -> /docs/grafana-cloud/send-data/agent/flow/concepts/components.md" -{{% /docs/reference %}} +[prometheus.exporter.unix]: ../../reference/components/prometheus.exporter.unix +[run]: ../../reference/cli/run/ +[Components]: ../components/ diff --git a/docs/sources/flow/concepts/components.md b/docs/sources/concepts/components.md similarity index 91% rename from docs/sources/flow/concepts/components.md rename to docs/sources/concepts/components.md index 1f93d76811..88b9ce223d 100644 --- a/docs/sources/flow/concepts/components.md +++ b/docs/sources/concepts/components.md @@ -1,11 +1,5 @@ --- -aliases: -- ../../concepts/components/ -- /docs/grafana-cloud/agent/flow/concepts/components/ -- /docs/grafana-cloud/monitor-infrastructure/agent/flow/concepts/components/ -- /docs/grafana-cloud/monitor-infrastructure/integrations/agent/flow/concepts/components/ -- /docs/grafana-cloud/send-data/agent/flow/concepts/components/ -canonical: https://grafana.com/docs/agent/latest/flow/concepts/components/ +canonical: https://grafana.com/docs/alloy/latest/concepts/components/ description: Learn about components title: Components weight: 100 diff --git a/docs/sources/flow/concepts/config-language/_index.md b/docs/sources/concepts/config-language/_index.md similarity index 77% rename from docs/sources/flow/concepts/config-language/_index.md rename to docs/sources/concepts/config-language/_index.md index 80699732f3..799b4586fc 100644 --- a/docs/sources/flow/concepts/config-language/_index.md +++ b/docs/sources/concepts/config-language/_index.md @@ -1,22 +1,5 @@ --- -aliases: -- /docs/grafana-cloud/agent/flow/concepts/config-language/ -- /docs/grafana-cloud/monitor-infrastructure/agent/flow/concepts/config-language/ -- /docs/grafana-cloud/monitor-infrastructure/integrations/agent/flow/concepts/config-language/ -- /docs/grafana-cloud/send-data/agent/flow/concepts/config-language/ -- configuration-language/ # /docs/agent/latest/flow/concepts/configuration-language/ -# Previous page aliases for backwards compatibility: -- /docs/grafana-cloud/agent/flow/config-language/ -- /docs/grafana-cloud/monitor-infrastructure/agent/flow/config-language/ -- /docs/grafana-cloud/monitor-infrastructure/integrations/agent/flow/config-language/ -- /docs/grafana-cloud/send-data/agent/flow/config-language/ -- ../configuration-language/ # /docs/agent/latest/flow/configuration-language/ -- ../concepts/configuration_language/ # /docs/agent/latest/flow/concepts/configuration_language/ -- /docs/grafana-cloud/agent/flow/concepts/configuration_language/ -- /docs/grafana-cloud/monitor-infrastructure/agent/flow/concepts/configuration_language/ -- /docs/grafana-cloud/monitor-infrastructure/integrations/agent/flow/concepts/configuration_language/ -- /docs/grafana-cloud/send-data/agent/flow/concepts/configuration_language/ -canonical: https://grafana.com/docs/agent/latest/flow/concepts/config-language/ +canonical: 
https://grafana.com/docs/alloy/latest/concepts/config-language/ description: Learn about the configuration language title: Configuration language weight: 10 @@ -140,8 +123,4 @@ You can also start developing your own tooling using the {{< param "PRODUCT_ROOT [VSCode]: https://github.com/rfratto/vscode-river [river-mode]: https://github.com/jdbaldry/river-mode [tree-sitter grammar]: https://github.com/grafana/tree-sitter-river - -{{% docs/reference %}} -[fmt]: "/docs/agent/ -> /docs/agent//flow/reference/cli/fmt" -[fmt]: "/docs/grafana-cloud/ -> /docs/grafana-cloud/send-data/agent/flow/reference/cli/fmt" -{{% /docs/reference %}} \ No newline at end of file +[fmt]: ../../reference/cli/fmt/ diff --git a/docs/sources/flow/concepts/config-language/components.md b/docs/sources/concepts/config-language/components.md similarity index 67% rename from docs/sources/flow/concepts/config-language/components.md rename to docs/sources/concepts/config-language/components.md index 967d2437da..bb2e609031 100644 --- a/docs/sources/flow/concepts/config-language/components.md +++ b/docs/sources/concepts/config-language/components.md @@ -1,17 +1,5 @@ --- -aliases: -- ../configuration-language/components/ # /docs/agent/latest/flow/concepts/configuration-language/components/ -- /docs/grafana-cloud/agent/flow/concepts/config-language/components/ -- /docs/grafana-cloud/monitor-infrastructure/agent/flow/concepts/config-language/components/ -- /docs/grafana-cloud/monitor-infrastructure/integrations/agent/flow/concepts/config-language/components/ -- /docs/grafana-cloud/send-data/agent/flow/concepts/config-language/components/ -# Previous page aliases for backwards compatibility: -- ../../configuration-language/components/ # /docs/agent/latest/flow/configuration-language/components/ -- /docs/grafana-cloud/agent/flow/config-language/components/ -- /docs/grafana-cloud/monitor-infrastructure/agent/flow/config-language/components/ -- /docs/grafana-cloud/monitor-infrastructure/integrations/agent/flow/config-language/components/ -- /docs/grafana-cloud/send-data/agent/flow/config-language/components/ -canonical: https://grafana.com/docs/agent/latest/flow/concepts/config-language/components/ +canonical: https://grafana.com/docs/alloy/latest/concepts/config-language/components/ description: Learn about the components configuration language title: Components configuration language weight: 300 @@ -94,11 +82,6 @@ The documentation of each [component][components] provides more information abou In the previous example, the contents of the `local.file.targets.content` expression is evaluated to a concrete value. The value is type-checked and substituted into `prometheus.scrape.default`, where you can configure it. 
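+
+As a condensed sketch of that wiring (the file path and the `json_decode` call are illustrative assumptions; the example on this page may differ):
+
+```river
+local.file "targets" {
+  filename = "/etc/agent/targets.json"
+}
+
+prometheus.scrape "default" {
+  // The file's exported content is decoded and used as scrape targets.
+  targets    = json_decode(local.file.targets.content)
+  forward_to = [prometheus.remote_write.default.receiver]
+}
+
+prometheus.remote_write "default" {
+  endpoint {
+    url = "http://localhost:9009/api/prom/push"
+  }
+}
+```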
-{{% docs/reference %}} -[components]: "/docs/agent/ -> /docs/agent//flow/reference/components" -[components]: "/docs/grafana-cloud/ -> /docs/grafana-cloud/send-data/agent/flow/reference/components" -[controller]: "/docs/agent/ -> /docs/agent//flow/concepts/component_controller" -[controller]: "/docs/grafana-cloud/ -> /docs/grafana-cloud/send-data/agent/flow/concepts/component_controller" -[type]: "/docs/agent/ -> /docs/agent//flow/concepts/config-language/expressions/types_and_values" -[type]: "/docs/grafana-cloud/ -> /docs/grafana-cloud/send-data/agent/flow/concepts/config-language/expressions/types_and_values" -{{% /docs/reference %}} \ No newline at end of file +[components]: ../../../reference/components/ +[controller]: ../../component_controller/ +[type]: ../expressions/types_and_values/ diff --git a/docs/sources/concepts/config-language/expressions/_index.md b/docs/sources/concepts/config-language/expressions/_index.md new file mode 100644 index 0000000000..f91c8aaa2f --- /dev/null +++ b/docs/sources/concepts/config-language/expressions/_index.md @@ -0,0 +1,21 @@ +--- +canonical: https://grafana.com/docs/alloy/latest/concepts/config-language/expressions/ +description: Learn about expressions +title: Expressions +weight: 400 +--- + +# Expressions + +Expressions represent or compute values you can assign to attributes within a configuration. + +Basic expressions are literal values, like `"Hello, world!"` or `true`. +Expressions may also do things like [refer to values][] exported by components, perform arithmetic, or [call functions][]. + +You use expressions when you configure any component. +All component arguments have an underlying [type][]. +River checks the expression type before assigning the result to an attribute. + +[refer to values]: ./referencing_exports/ +[call functions]: ./function_calls/ +[type]: ./types_and_values/ diff --git a/docs/sources/concepts/config-language/expressions/function_calls.md b/docs/sources/concepts/config-language/expressions/function_calls.md new file mode 100644 index 0000000000..da05a2c8f4 --- /dev/null +++ b/docs/sources/concepts/config-language/expressions/function_calls.md @@ -0,0 +1,28 @@ +--- +canonical: https://grafana.com/docs/alloy/latest/concepts/config-language/expressions/function_calls/ +description: Learn about function calls +title: Function calls +weight: 400 +--- + +# Function calls + +You can use River function calls to build richer expressions. + +Functions take zero or more arguments as their input and always return a single value as their output. +You can't construct functions. You can call functions from River's standard library or export them from a component. + +If a function fails, the expression isn't evaluated, and an error is reported. + +## Standard library functions + +River contains a [standard library][] of functions. +Some functions enable interaction with the host system, for example, reading from an environment variable. +Some functions allow for more complex expressions, for example, concatenating arrays or decoding JSON strings into objects. 
+ +```river +env("HOME") +json_decode(local.file.cfg.content)["namespace"] +``` + +[standard library]:../../../../reference/stdlib/ diff --git a/docs/sources/flow/concepts/config-language/expressions/operators.md b/docs/sources/concepts/config-language/expressions/operators.md similarity index 77% rename from docs/sources/flow/concepts/config-language/expressions/operators.md rename to docs/sources/concepts/config-language/expressions/operators.md index 19bb003f74..a554345f04 100644 --- a/docs/sources/flow/concepts/config-language/expressions/operators.md +++ b/docs/sources/concepts/config-language/expressions/operators.md @@ -1,17 +1,5 @@ --- -aliases: -- ../../configuration-language/expressions/operators/ # /docs/agent/latest/flow/concepts/configuration-language/expressions/operators/ -- /docs/grafana-cloud/agent/flow/concepts/config-language/expressions/operators/ -- /docs/grafana-cloud/monitor-infrastructure/agent/flow/concepts/config-language/expressions/operators/ -- /docs/grafana-cloud/monitor-infrastructure/integrations/agent/flow/concepts/config-language/expressions/operators/ -- /docs/grafana-cloud/send-data/agent/flow/concepts/config-language/expressions/operators/ -# Previous page aliases for backwards compatibility: -- ../../../configuration-language/expressions/operators/ # /docs/agent/latest/flow/configuration-language/expressions/operators/ -- /docs/grafana-cloud/agent/flow/config-language/expressions/operators/ -- /docs/grafana-cloud/monitor-infrastructure/agent/flow/config-language/expressions/operators/ -- /docs/grafana-cloud/monitor-infrastructure/integrations/agent/flow/config-language/expressions/operators/ -- /docs/grafana-cloud/send-data/agent/flow/config-language/expressions/operators/ -canonical: https://grafana.com/docs/agent/latest/flow/concepts/config-language/expressions/operators/ +canonical: https://grafana.com/docs/alloy/latest/concepts/config-language/expressions/operators/ description: Learn about operators title: Operators weight: 300 diff --git a/docs/sources/flow/concepts/config-language/expressions/referencing_exports.md b/docs/sources/concepts/config-language/expressions/referencing_exports.md similarity index 51% rename from docs/sources/flow/concepts/config-language/expressions/referencing_exports.md rename to docs/sources/concepts/config-language/expressions/referencing_exports.md index 2cc7a8ca5b..00f7030f80 100644 --- a/docs/sources/flow/concepts/config-language/expressions/referencing_exports.md +++ b/docs/sources/concepts/config-language/expressions/referencing_exports.md @@ -1,17 +1,5 @@ --- -aliases: -- ../../configuration-language/expressions/referencing-exports/ # /docs/agent/latest/flow/concepts/configuration-language/expressions/referencing-exports/ -- /docs/grafana-cloud/agent/flow/concepts/config-language/expressions/referencing_exports/ -- /docs/grafana-cloud/monitor-infrastructure/agent/flow/concepts/config-language/expressions/referencing_exports/ -- /docs/grafana-cloud/monitor-infrastructure/integrations/agent/flow/concepts/config-language/expressions/referencing_exports/ -- /docs/grafana-cloud/send-data/agent/flow/concepts/config-language/expressions/referencing_exports/ -# Previous page aliases for backwards compatibility: -- ../../../configuration-language/expressions/referencing-exports/ # /docs/agent/latest/flow/configuration-language/expressions/referencing-exports/ -- /docs/grafana-cloud/agent/flow/config-language/expressions/referencing_exports/ -- 
/docs/grafana-cloud/monitor-infrastructure/agent/flow/config-language/expressions/referencing_exports/ -- /docs/grafana-cloud/monitor-infrastructure/integrations/agent/flow/config-language/expressions/referencing_exports/ -- /docs/grafana-cloud/send-data/agent/flow/config-language/expressions/referencing_exports/ -canonical: https://grafana.com/docs/agent/latest/flow/concepts/config-language/expressions/referencing_exports/ +canonical: https://grafana.com/docs/alloy/latest/concepts/config-language/expressions/referencing_exports/ description: Learn about referencing component exports title: Referencing component exports weight: 200 @@ -20,8 +8,7 @@ weight: 200 # Referencing component exports Referencing exports enables River to configure and connect components dynamically using expressions. -While components can work in isolation, they're more useful when one component's behavior and data flow are bound to the exports of another, -building a dependency relationship between the two. +While components can work in isolation, they're more useful when one component's behavior and data flow are bound to the exports of another, building a dependency relationship between the two. Such references can only appear as part of another component's arguments or a configuration block's fields. Components can't reference themselves. @@ -60,7 +47,4 @@ After the value is resolved, it must match the [type][] of the attribute it is a While you can only configure attributes using the basic River types, the exports of components can take on special internal River types, such as Secrets or Capsules, which expose different functionality. -{{% docs/reference %}} -[type]: "/docs/agent/ -> /docs/agent//flow/concepts/config-language/expressions/types_and_values" -[type]: "/docs/grafana-cloud/ -> /docs/grafana-cloud/send-data/agent/flow/concepts/config-language/expressions/types_and_values" -{{% /docs/reference %}} \ No newline at end of file +[type]: ../types_and_values/ diff --git a/docs/sources/flow/concepts/config-language/expressions/types_and_values.md b/docs/sources/concepts/config-language/expressions/types_and_values.md similarity index 82% rename from docs/sources/flow/concepts/config-language/expressions/types_and_values.md rename to docs/sources/concepts/config-language/expressions/types_and_values.md index 70afaf7904..88c5046084 100644 --- a/docs/sources/flow/concepts/config-language/expressions/types_and_values.md +++ b/docs/sources/concepts/config-language/expressions/types_and_values.md @@ -1,17 +1,5 @@ --- -aliases: -- ../../configuration-language/expressions/types-and-values/ # /docs/agent/latest/flow/concepts/configuration-language/expressions/types-and-values/ -- /docs/grafana-cloud/agent/flow/concepts/config-language/expressions/types_and_values/ -- /docs/grafana-cloud/monitor-infrastructure/agent/flow/concepts/config-language/expressions/types_and_values/ -- /docs/grafana-cloud/monitor-infrastructure/integrations/agent/flow/concepts/config-language/expressions/types_and_values/ -- /docs/grafana-cloud/send-data/agent/flow/concepts/config-language/expressions/types_and_values/ -# Previous page aliases for backwards compatibility: -- ../../../configuration-language/expressions/types-and-values/ # /docs/agent/latest/flow/configuration-language/expressions/types-and-values/ -- /docs/grafana-cloud/agent/flow/config-language/expressions/types_and_values/ -- /docs/grafana-cloud/monitor-infrastructure/agent/flow/config-language/expressions/types_and_values/ -- 
/docs/grafana-cloud/monitor-infrastructure/integrations/agent/flow/config-language/expressions/types_and_values/ -- /docs/grafana-cloud/send-data/agent/flow/config-language/expressions/types_and_values/ -canonical: https://grafana.com/docs/agent/latest/flow/concepts/config-language/expressions/types_and_values/ +canonical: https://grafana.com/docs/alloy/latest/concepts/config-language/expressions/types_and_values/ description: Learn about the River types and values title: Types and values weight: 100 @@ -173,8 +161,8 @@ Don't confuse objects with blocks. * An _object_ is a value assigned to an [Attribute][]. You **must** use commas between key-value pairs on separate lines. * A [Block][] is a named structural element composed of multiple attributes. You **must not** use commas between attributes. -[Attribute]: {{< relref "../syntax.md#Attributes" >}} -[Block]: {{< relref "../syntax.md#Blocks" >}} +[Attribute]: ../../syntax/#attributes +[Block]: ../../syntax/#blocks {{< /admonition >}} ## Functions @@ -218,7 +206,4 @@ prometheus.scrape "default" { } ``` -{{% docs/reference %}} -[type]: "/docs/agent/ -> /docs/agent//flow/reference/components" -[type]: "/docs/grafana-cloud/ -> /docs/grafana-cloud/send-data/agent/flow/reference/components" -{{% /docs/reference %}} \ No newline at end of file +[component reference]: ../../../../reference/components/ diff --git a/docs/sources/concepts/config-language/files.md b/docs/sources/concepts/config-language/files.md new file mode 100644 index 0000000000..ec2bb8689d --- /dev/null +++ b/docs/sources/concepts/config-language/files.md @@ -0,0 +1,14 @@ +--- +canonical: https://grafana.com/docs/alloy/latest/concepts/config-language/files/ +description: Learn about River files +title: Files +weight: 100 +--- + +# Files + +River files are plain text files with the `.river` file extension. +You can refer to each River file as a "configuration file" or a "River configuration." + +River files must be UTF-8 encoded and can contain Unicode characters. +River files can use Unix-style line endings (LF) and Windows-style line endings (CRLF), but formatters may replace all line endings with Unix-style ones. 
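+
+As an illustrative sketch, a minimal River configuration file could contain a single block; the `logging` block is real, the values shown are assumptions:
+
+```river
+// example.river
+logging {
+  level  = "info"
+  format = "logfmt"
+}
+```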
diff --git a/docs/sources/flow/concepts/config-language/syntax.md b/docs/sources/concepts/config-language/syntax.md similarity index 61% rename from docs/sources/flow/concepts/config-language/syntax.md rename to docs/sources/concepts/config-language/syntax.md index 6f55701dab..8cfb860241 100644 --- a/docs/sources/flow/concepts/config-language/syntax.md +++ b/docs/sources/concepts/config-language/syntax.md @@ -1,17 +1,5 @@ --- -aliases: -- ../configuration-language/syntax/ # /docs/agent/latest/flow/concepts/configuration-language/syntax/ -- /docs/grafana-cloud/agent/flow/concepts/config-language/syntax/ -- /docs/grafana-cloud/monitor-infrastructure/agent/flow/concepts/config-language/syntax/ -- /docs/grafana-cloud/monitor-infrastructure/integrations/agent/flow/concepts/config-language/syntax/ -- /docs/grafana-cloud/send-data/agent/flow/concepts/config-language/syntax/ -# Previous page aliases for backwards compatibility: -- ../../configuration-language/syntax/ # /docs/agent/latest/flow/configuration-language/syntax/ -- /docs/grafana-cloud/agent/flow/config-language/syntax/ -- /docs/grafana-cloud/monitor-infrastructure/agent/flow/config-language/syntax/ -- /docs/grafana-cloud/monitor-infrastructure/integrations/agent/flow/config-language/syntax/ -- /docs/grafana-cloud/send-data/agent/flow/config-language/syntax/ -canonical: https://grafana.com/docs/agent/latest/flow/concepts/config-language/syntax/ +canonical: https://grafana.com/docs/alloy/latest/concepts/config-language/syntax/ description: Learn about the River syntax title: Syntax weight: 200 @@ -50,13 +38,11 @@ log_level = "debug" The `ATTRIBUTE_NAME` must be a valid River [identifier][]. -The `ATTRIBUTE_VALUE` can be either a constant value of a valid River [type][] (for example, a string, boolean, number), -or an [_expression_][expression] to represent or compute more complex attribute values. +The `ATTRIBUTE_VALUE` can be either a constant value of a valid River [type][] (for example, a string, boolean, number), or an [_expression_][expression] to represent or compute more complex attribute values. ### Blocks -You use _Blocks_ to configure the {{< param "PRODUCT_ROOT_NAME" >}}'s behavior as well as {{< param "PRODUCT_NAME" >}} -components by grouping any number of attributes or nested blocks using curly braces. +You use _Blocks_ to configure the {{< param "PRODUCT_ROOT_NAME" >}}'s behavior as well as {{< param "PRODUCT_NAME" >}} components by grouping any number of attributes or nested blocks using curly braces. Blocks have a _name_, an optional _label_ and a body that contains any number of arguments and nested unlabeled blocks. Some blocks can be defined more than once. @@ -97,8 +83,7 @@ If the `BLOCK_LABEL` must be set, it must be a valid River [identifier][] wrappe In these cases, you use the label to disambiguate between multiple top-level blocks of the same name. The following snippet defines a block named `local.file` with its label set to "token". -The block's body sets `filename` to the content of the `TOKEN_FILE_PATH` environment variable by using an expression, -and the `is_secret` attribute is set to the boolean `true`, marking the file content as sensitive. +The block's body sets `filename` to the content of the `TOKEN_FILE_PATH` environment variable by using an expression, and the `is_secret` attribute is set to the boolean `true`, marking the file content as sensitive. ```river local.file "token" { @@ -116,10 +101,6 @@ River ignores other newlines and you can can enter as many newlines as you want. 
[identifier]: #identifiers
-
-{{% docs/reference %}}
-[expression]: "/docs/agent/ -> /docs/agent//flow/concepts/config-language/expressions"
-[expression]: "/docs/grafana-cloud/ -> /docs/grafana-cloud/send-data/agent/flow/concepts/config-language/expressions"
-[type]: "/docs/agent/ -> /docs/agent//flow/concepts/config-language/expressions/types_and_values"
-[type]: "/docs/grafana-cloud/ -> /docs/grafana-cloud/send-data/agent/flow/concepts/config-language/expressions/types_and_values"
-{{% /docs/reference %}} \ No newline at end of file
+[expression]: ../expressions/
+[type]: ../expressions/types_and_values \ No newline at end of file
diff --git a/docs/sources/flow/concepts/custom_components.md b/docs/sources/concepts/custom_components.md
similarity index 57%
rename from docs/sources/flow/concepts/custom_components.md
rename to docs/sources/concepts/custom_components.md
index 8d7fff13f6..6b70b5a269 100644
--- a/docs/sources/flow/concepts/custom_components.md
+++ b/docs/sources/concepts/custom_components.md
@@ -1,11 +1,5 @@
 ---
-aliases:
-- ../../concepts/custom-components/
-- /docs/grafana-cloud/agent/flow/concepts/custom-components/
-- /docs/grafana-cloud/monitor-infrastructure/agent/flow/concepts/custom-components/
-- /docs/grafana-cloud/monitor-infrastructure/integrations/agent/flow/concepts/custom-components/
-- /docs/grafana-cloud/send-data/agent/flow/concepts/custom-components/
-canonical: https://grafana.com/docs/agent/latest/flow/concepts/custom-components/
+canonical: https://grafana.com/docs/alloy/latest/concepts/custom-components/
 description: Learn about custom components
 title: Custom components
 weight: 300
@@ -23,20 +17,17 @@ A custom component is composed of:
 
 ## Creating custom components
 
-You can create a new custom component using [the `declare` configuration block][declare]. 
+You can create a new custom component using [the `declare` configuration block][declare].
 The label of the block determines the name of the custom component.
 
 The following custom configuration blocks can be used inside a `declare` block:
 
-* [argument][]: Create a new named argument, whose current value can be referenced using the expression `argument.NAME.value`. Argument values are determined by the user of a custom component.
+* [argument][]: Create a new named argument, whose current value can be referenced using the expression `argument.NAME.value`.
+  Argument values are determined by the user of a custom component.
 * [export][]: Expose a new named value to custom component users.
 
-Custom components are useful for reusing a common pipeline multiple times. To learn how to share custom components across multiple files, refer to [Modules][].
-
-[declare]: {{< relref "../reference/config-blocks/declare.md" >}}
-[argument]: {{< relref "../reference/config-blocks/argument.md" >}}
-[export]: {{< relref "../reference/config-blocks/export.md" >}}
-[Modules]: {{< relref "./modules.md" >}}
+Custom components are useful for reusing a common pipeline multiple times.
+To learn how to share custom components across multiple files, refer to [Modules][].
## Example @@ -59,3 +50,7 @@ add "example" { // add.example.sum == 32 ``` +[declare]: ../../reference/config-blocks/declare/ +[argument]: ../../reference/config-blocks/argument/ +[export]: ../../reference/config-blocks/export/ +[Modules]: ../modules/ diff --git a/docs/sources/flow/concepts/modules.md b/docs/sources/concepts/modules.md similarity index 80% rename from docs/sources/flow/concepts/modules.md rename to docs/sources/concepts/modules.md index 3302d5fa54..37853be78f 100644 --- a/docs/sources/flow/concepts/modules.md +++ b/docs/sources/concepts/modules.md @@ -1,11 +1,5 @@ --- -aliases: -- ../../concepts/modules/ -- /docs/grafana-cloud/agent/flow/concepts/modules/ -- /docs/grafana-cloud/monitor-infrastructure/agent/flow/concepts/modules/ -- /docs/grafana-cloud/monitor-infrastructure/integrations/agent/flow/concepts/modules/ -- /docs/grafana-cloud/send-data/agent/flow/concepts/modules/ -canonical: https://grafana.com/docs/agent/latest/flow/concepts/modules/ +canonical: https://grafana.com/docs/alloy/latest/concepts/modules/ description: Learn about modules title: Modules weight: 400 @@ -18,23 +12,15 @@ The module passed as an argument to [the `run` command][run] is called the _main Modules can be [imported](#importing-modules) to enable the reuse of [custom components][] defined by that module. -[custom components]: {{< relref "./custom_components.md" >}} -[run]: {{< relref "../reference/cli/run.md" >}} - ## Importing modules A module can be _imported_, allowing the custom components defined by that module to be used by other modules, called the _importing module_. Modules can be imported from multiple locations using one of the `import` configuration blocks: -* [import.file]: Imports a module from a file on disk. -* [import.git]: Imports a module from a file located in a Git repository. -* [import.http]: Imports a module from the response of an HTTP request. -* [import.string]: Imports a module from a string. - -[import.file]: {{< relref "../reference/config-blocks/import.file.md" >}} -[import.git]: {{< relref "../reference/config-blocks/import.git.md" >}} -[import.http]: {{< relref "../reference/config-blocks/import.http.md" >}} -[import.string]: {{< relref "../reference/config-blocks/import.string.md" >}} +* [import.file][]: Imports a module from a file on disk. +* [import.git][]: Imports a module from a file located in a Git repository. +* [import.http][]: Imports a module from the response of an HTTP request. +* [import.string][]: Imports a module from a string. {{< admonition type="warning" >}} You can't import a module that contains top-level blocks other than `declare` or `import`. @@ -42,7 +28,8 @@ You can't import a module that contains top-level blocks other than `declare` or Modules are imported into a _namespace_ where the top-level custom components of the imported module are exposed to the importing module. The label of the import block specifies the namespace of an import. -For example, if a configuration contains a block called `import.file "my_module"`, then custom components defined by that module are exposed as `my_module.CUSTOM_COMPONENT_NAME`. Imported namespaces must be unique across a given importing module. +For example, if a configuration contains a block called `import.file "my_module"`, then custom components defined by that module are exposed as `my_module.CUSTOM_COMPONENT_NAME`. +Imported namespaces must be unique across a given importing module. 
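+
+For instance, the following sketch imports a module and calls a custom component through its namespace; the module path and the `add` component (borrowed from the custom components example) are illustrative assumptions:
+
+```river
+import.file "my_module" {
+  filename = "/etc/agent/modules/math.river"
+}
+
+// Custom components from the module are used through the import's namespace.
+my_module.add "default" {
+  a = 15
+  b = 17
+}
+```
+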
If an import namespace matches the name of a built-in component namespace, such as `prometheus`, the built-in namespace is hidden from the importing module, and only components defined in the imported module may be used. @@ -115,10 +102,11 @@ loki.write "default" { # Classic modules (deprecated) {{< admonition type="caution" >}} -Modules were redesigned in v0.40 to simplify concepts. This section outlines the design of the original modules prior to v0.40. Classic modules are scheduled to be removed in the release after v0.40. +Modules were redesigned in v0.40 to simplify concepts. +This section outlines the design of the original modules prior to v0.40. +Classic modules are scheduled to be removed in the release after v0.40. {{< /admonition >}} - You use _Modules_ to create {{< param "PRODUCT_NAME" >}} configurations that you can load as a component. Modules are a great way to parameterize a configuration to create reusable pipelines. @@ -240,8 +228,15 @@ loki.write "default" { ``` [Module loader]: #module-loaders -[argument block]: https://grafana.com/docs/agent//flow/reference/config-blocks/argument -[export block]: https://grafana.com/docs/agent//flow/reference/config-blocks/export -[Component controller]: https://grafana.com/docs/agent//flow/concepts/component_controller -[Components]: https://grafana.com/docs/agent//flow/reference/components +[argument block]: ../../reference/config-blocks/argument/ +[export block]: ../../reference/config-blocks/export/ +[Component controller]: ../component_controller/ +[Components]: ../../reference/components/ {{< /collapse >}} + +[custom components]: ../custom_components/ +[run]: ../../reference/cli/run/ +[import.file]: ../../reference/config-blocks/import.file/ +[import.git]: ../../reference/config-blocks/import.git/ +[import.http]: ../../reference/config-blocks/import.http/ +[import.string]: ../../reference/config-blocks/import.string/ diff --git a/docs/sources/data-collection.md b/docs/sources/data-collection.md index 80fbd874cd..21d2655b00 100644 --- a/docs/sources/data-collection.md +++ b/docs/sources/data-collection.md @@ -1,36 +1,28 @@ --- -aliases: -- ./data-collection/ -- /docs/grafana-cloud/agent/data-collection/ -- /docs/grafana-cloud/monitor-infrastructure/agent/data-collection/ -- /docs/grafana-cloud/monitor-infrastructure/integrations/agent/data-collection/ -- /docs/grafana-cloud/send-data/agent/data-collection/ -canonical: https://grafana.com/docs/agent/latest/data-collection/ -description: Grafana Agent data collection +canonical: https://grafana.com/docs/alloy/latest/data-collection/ +description: Grafana Alloy data collection menuTitle: Data collection -title: Grafana Agent data collection -weight: 500 +title: Grafana Alloy data collection +weight: 900 --- -# Grafana Agent Data collection +# {{% param "PRODUCT_NAME" %}} Data collection -By default, Grafana Agent sends anonymous but uniquely identifiable usage information from -your Grafana Agent instance to Grafana Labs. These statistics are sent to `stats.grafana.org`. +By default, {{< param "PRODUCT_NAME" >}} sends anonymous but uniquely identifiable usage information from your {{< param "PRODUCT_NAME" >}} instance to Grafana Labs. +These statistics are sent to `stats.grafana.org`. -Statistics help us better understand how Grafana Agent is used. This helps us prioritize features and documentation. +Statistics help us better understand how {{< param "PRODUCT_NAME" >}} is used. This helps us prioritize features and documentation. 
The usage information includes the following details: * A randomly generated, anonymous unique ID (UUID). * Timestamp of when the UID was first generated. * Timestamp of when the report was created (by default, every four hours). -* Version of running Grafana Agent. -* Operating system Grafana Agent is running on. -* System architecture Grafana Agent is running on. -* List of enabled feature flags ([Static] mode only). -* List of enabled integrations ([Static] mode only). -* List of enabled [components][] ([Flow] mode only). -* Method used to deploy Grafana Agent, for example Docker, Helm, RPM, or Operator. +* Version of running {{< param "PRODUCT_NAME" >}}. +* Operating system {{< param "PRODUCT_NAME" >}} is running on. +* System architecture {{< param "PRODUCT_NAME" >}} is running on. +* List of enabled [components][] +* Method used to deploy {{< param "PRODUCT_NAME" >}}, for example Docker, Helm, RPM, or Operator. This list may change over time. All newly reported data is documented in the CHANGELOG. @@ -38,13 +30,5 @@ This list may change over time. All newly reported data is documented in the CHA You can use the `-disable-reporting` [command line flag][] to disable the reporting and opt-out of the data collection. -{{% docs/reference %}} -[command line flag]: "/docs/agent/ -> /docs/agent//flow/reference/cli/run.md" -[command line flag]: "/docs/grafana-cloud/ -> /docs/agent//flow/reference/cli/run.md" -[components]: "/docs/agent/ -> /docs/agent//flow/concepts/components.md" -[components]: "/docs/grafana-cloud/ -> /docs/agent//flow/reference/cli/run.md" -[Static]: "/docs/agent/ -> /docs/agent//static" -[Static]: "/docs/grafana-cloud/ -> /docs/grafana-cloud/send-data/agent/static -[Flow]: "/docs/agent/ -> /docs/agent//flow" -[Flow]: "/docs/grafana-cloud/ -> /docs/agent//flow" -{{% /docs/reference %}} \ No newline at end of file +[components]: ../concepts/components +[command line flag]: ../reference/cli/run diff --git a/docs/sources/flow/_index.md b/docs/sources/flow/_index.md deleted file mode 100644 index 1840476a07..0000000000 --- a/docs/sources/flow/_index.md +++ /dev/null @@ -1,98 +0,0 @@ ---- -aliases: -- /docs/grafana-cloud/agent/flow/ -- /docs/grafana-cloud/monitor-infrastructure/agent/flow/ -- /docs/grafana-cloud/monitor-infrastructure/integrations/agent/flow/ -- /docs/grafana-cloud/send-data/agent/flow/ -canonical: https://grafana.com/docs/agent/latest/flow/ -description: Grafana Agent Flow is a component-based revision of Grafana Agent with - a focus on ease-of-use, debuggability, and adaptability -title: Flow mode -weight: 400 -cascade: - PRODUCT_NAME: Grafana Agent Flow - PRODUCT_ROOT_NAME: Grafana Agent ---- - -# {{% param "PRODUCT_NAME" %}} - -{{< param "PRODUCT_NAME" >}} is a _component-based_ revision of {{< param "PRODUCT_ROOT_NAME" >}} with a focus on ease-of-use, -debuggability, and ability to adapt to the needs of power users. - -Components allow for reusability, composability, and focus on a single task. - -* **Reusability** allows for the output of components to be reused as the input for multiple other components. -* **Composability** allows for components to be chained together to form a pipeline. -* **Single task** means the scope of a component is limited to one narrow task and thus has fewer side effects. - -## Features - -* Write declarative configurations with a Terraform-inspired configuration - language. -* Declare components to configure parts of a pipeline. -* Use expressions to bind components together to build a programmable pipeline. 
-* Includes a UI for debugging the state of a pipeline. - -## Example - -```river -// Discover Kubernetes pods to collect metrics from -discovery.kubernetes "pods" { - role = "pod" -} - -// Scrape metrics from Kubernetes pods and send to a prometheus.remote_write -// component. -prometheus.scrape "default" { - targets = discovery.kubernetes.pods.targets - forward_to = [prometheus.remote_write.default.receiver] -} - -// Get an API key from disk. -local.file "apikey" { - filename = "/var/data/my-api-key.txt" - is_secret = true -} - -// Collect and send metrics to a Prometheus remote_write endpoint. -prometheus.remote_write "default" { - endpoint { - url = "http://localhost:9009/api/prom/push" - - basic_auth { - username = "MY_USERNAME" - password = local.file.apikey.content - } - } -} -``` - - -## {{% param "PRODUCT_NAME" %}} configuration generator - -The {{< param "PRODUCT_NAME" >}} [configuration generator](https://grafana.github.io/agent-configurator/) helps you get a head start on creating flow code. - -{{< admonition type="note" >}} -This feature is experimental, and it doesn't support all River components. -{{< /admonition >}} - -## Next steps - -* [Install][] {{< param "PRODUCT_NAME" >}}. -* Learn about the core [Concepts][] of {{< param "PRODUCT_NAME" >}}. -* Follow the [Tutorials][] for hands-on learning of {{< param "PRODUCT_NAME" >}}. -* Consult the [Tasks][] instructions to accomplish common objectives with {{< param "PRODUCT_NAME" >}}. -* Check out the [Reference][] documentation to find specific information you might be looking for. - -{{% docs/reference %}} -[Install]: "/docs/agent/ -> /docs/agent//flow/get-started/install/" -[Install]: "/docs/grafana-cloud/ -> /docs/grafana-cloud/send-data/agent/flow/get-started/install/" -[Concepts]: "/docs/agent/ -> /docs/agent//flow/concepts/" -[Concepts]: "/docs/grafana-cloud/ -> /docs/grafana-cloud/send-data/agent/flow/concepts/" -[Tasks]: "/docs/agent/ -> /docs/agent//flow/tasks/" -[Tasks]: "/docs/grafana-cloud/ -> /docs/grafana-cloud/send-data/agent/flow/tasks/" -[Tutorials]: "/docs/agent/ -> /docs/agent//flow/tutorials/" -[Tutorials]: "/docs/grafana-cloud/ -> /docs/grafana-cloud/send-data/agent/flow/tutorials/ -[Reference]: "/docs/agent/ -> /docs/agent//flow/reference/" -[Reference]: "/docs/grafana-cloud/ -> /docs/grafana-cloud/send-data/agent/flow/reference/ -{{% /docs/reference %}} diff --git a/docs/sources/flow/concepts/_index.md b/docs/sources/flow/concepts/_index.md deleted file mode 100644 index 786af8e546..0000000000 --- a/docs/sources/flow/concepts/_index.md +++ /dev/null @@ -1,18 +0,0 @@ ---- -aliases: -- ../concepts/ -- /docs/grafana-cloud/agent/flow/concepts/ -- /docs/grafana-cloud/monitor-infrastructure/agent/flow/concepts/ -- /docs/grafana-cloud/monitor-infrastructure/integrations/agent/flow/concepts/ -- /docs/grafana-cloud/send-data/agent/flow/concepts/ -canonical: https://grafana.com/docs/agent/latest/flow/concepts/ -description: Learn about the Grafana Agent Flow concepts -title: Concepts -weight: 100 ---- - -# Concepts - -This section explains the primary concepts of {{< param "PRODUCT_NAME" >}}. 
- -{{< section >}} diff --git a/docs/sources/flow/concepts/clustering.md b/docs/sources/flow/concepts/clustering.md deleted file mode 100644 index e02a6131d4..0000000000 --- a/docs/sources/flow/concepts/clustering.md +++ /dev/null @@ -1,83 +0,0 @@ ---- -aliases: -- /docs/grafana-cloud/agent/flow/concepts/clustering/ -- /docs/grafana-cloud/monitor-infrastructure/agent/flow/concepts/clustering/ -- /docs/grafana-cloud/monitor-infrastructure/integrations/agent/flow/concepts/clustering/ -- /docs/grafana-cloud/send-data/agent/flow/concepts/clustering/ -canonical: https://grafana.com/docs/agent/latest/flow/concepts/clustering/ -description: Learn about Grafana Agent clustering concepts -labels: - stage: beta -menuTitle: Clustering -title: Clustering (beta) -weight: 500 ---- - -# Clustering (beta) - -Clustering enables a fleet of {{< param "PRODUCT_ROOT_NAME" >}}s to work together for workload distribution and high availability. -It helps create horizontally scalable deployments with minimal resource and operational overhead. - -To achieve this, {{< param "PRODUCT_NAME" >}} makes use of an eventually consistent model that assumes all participating -{{< param "PRODUCT_ROOT_NAME" >}}s are interchangeable and converge on using the same configuration file. - -The behavior of a standalone, non-clustered {{< param "PRODUCT_ROOT_NAME" >}} is the same as if it were a single-node cluster. - -You configure clustering by passing `cluster` command-line flags to the [run][] command. - -## Use cases - -### Target auto-distribution - -Target auto-distribution is the most basic use case of clustering. -It allows scraping components running on all peers to distribute the scrape load between themselves. -Target auto-distribution requires that all {{< param "PRODUCT_ROOT_NAME" >}} in the same cluster can reach the same service discovery APIs and scrape the same targets. - -You must explicitly enable target auto-distribution on components by defining a `clustering` block. - -```river -prometheus.scrape "default" { - clustering { - enabled = true - } - - ... -} -``` - -A cluster state change is detected when a new node joins or an existing node leaves. -All participating components locally recalculate target ownership and re-balance the number of targets they’re scraping without explicitly communicating ownership over the network. - -Target auto-distribution allows you to dynamically scale the number of {{< param "PRODUCT_ROOT_NAME" >}}s to distribute workload during peaks. -It also provides resiliency because targets are automatically picked up by one of the node peers if a node leaves. - -{{< param "PRODUCT_NAME" >}} uses a local consistent hashing algorithm to distribute targets, meaning that, on average, only ~1/N of the targets are redistributed. - -Refer to component reference documentation to discover whether it supports clustering, such as: - -- [prometheus.scrape][] -- [pyroscope.scrape][] -- [prometheus.operator.podmonitors][] -- [prometheus.operator.servicemonitors][] - -## Cluster monitoring and troubleshooting - -You can use the {{< param "PRODUCT_NAME" >}} UI [clustering page][] to monitor your cluster status. -Refer to [Debugging clustering issues][debugging] for additional troubleshooting information. 
- -{{% docs/reference %}} -[run]: "/docs/agent/ -> /docs/agent//flow/reference/cli/run.md#clustering-beta" -[run]: "/docs/grafana-cloud/ -> /docs/grafana-cloud/send-data/agent/flow/reference/cli/run.md#clustering-beta" -[prometheus.scrape]: "/docs/agent/ -> /docs/agent//flow/reference/components/prometheus.scrape.md#clustering-beta" -[prometheus.scrape]: "/docs/grafana-cloud/ -> /docs/grafana-cloud/send-data/agent/flow/reference/components/prometheus.scrape.md#clustering-beta" -[pyroscope.scrape]: "/docs/agent/ -> /docs/agent//flow/reference/components/pyroscope.scrape.md#clustering-beta" -[pyroscope.scrape]: "/docs/grafana-cloud/ -> /docs/grafana-cloud/send-data/agent/flow/reference/components/pyroscope.scrape.md#clustering-beta" -[prometheus.operator.podmonitors]: "/docs/agent/ -> /docs/agent//flow/reference/components/prometheus.operator.podmonitors.md#clustering-beta" -[prometheus.operator.podmonitors]: "/docs/grafana-cloud/ -> /docs/grafana-cloud/send-data/agent/flow/reference/components/prometheus.operator.podmonitors.md#clustering-beta" -[prometheus.operator.servicemonitors]: "/docs/agent/ -> /docs/agent//flow/reference/components/prometheus.operator.servicemonitors.md#clustering-beta" -[prometheus.operator.servicemonitors]: "/docs/grafana-cloud/ -> /docs/grafana-cloud/send-data/agent/flow/reference/components/prometheus.operator.servicemonitors.md#clustering-beta" -[clustering page]: "/docs/agent/ -> /docs/agent//flow/tasks/debug.md#clustering-page" -[clustering page]: "/docs/grafana-cloud/ -> /docs/grafana-cloud/send-data/agent/flow/tasks/debug.md#clustering-page" -[debugging]: "/docs/agent/ -> /docs/agent//flow/tasks/debug.md#debugging-clustering-issues" -[debugging]: "/docs/grafana-cloud/ -> /docs/grafana-cloud/send-data/agent/flow/tasks/debug.md#debugging-clustering-issues" -{{% /docs/reference %}} \ No newline at end of file diff --git a/docs/sources/flow/concepts/config-language/expressions/_index.md b/docs/sources/flow/concepts/config-language/expressions/_index.md deleted file mode 100644 index 56dc4c1ee4..0000000000 --- a/docs/sources/flow/concepts/config-language/expressions/_index.md +++ /dev/null @@ -1,38 +0,0 @@ ---- -aliases: -- ../configuration-language/expressions/ # /docs/agent/latest/flow/concepts/configuration-language/expressions/ -- /docs/grafana-cloud/agent/flow/concepts/config-language/expressions/ -- /docs/grafana-cloud/monitor-infrastructure/agent/flow/concepts/config-language/expressions/ -- /docs/grafana-cloud/monitor-infrastructure/integrations/agent/flow/concepts/config-language/expressions/ -- /docs/grafana-cloud/send-data/agent/flow/concepts/config-language/expressions/ -# Previous page aliases for backwards compatibility: -- ../../configuration-language/expressions/ # /docs/agent/latest/flow/configuration-language/expressions/ -- /docs/grafana-cloud/agent/flow/config-language/expressions/ -- /docs/grafana-cloud/monitor-infrastructure/agent/flow/config-language/expressions/ -- /docs/grafana-cloud/monitor-infrastructure/integrations/agent/flow/config-language/expressions/ -- /docs/grafana-cloud/send-data/agent/flow/config-language/expressions/ -canonical: https://grafana.com/docs/agent/latest/flow/concepts/config-language/expressions/ -description: Learn about expressions -title: Expressions -weight: 400 ---- - -# Expressions - -Expressions represent or compute values you can assign to attributes within a configuration. - -Basic expressions are literal values, like `"Hello, world!"` or `true`. 
-Expressions may also do things like [refer to values][] exported by components, perform arithmetic, or [call functions][]. - -You use expressions when you configure any component. -All component arguments have an underlying [type][]. -River checks the expression type before assigning the result to an attribute. - -{{% docs/reference %}} -[refer to values]: "/docs/agent/ -> /docs/agent//flow/concepts/config-language/expressions/referencing_exports" -[refer to values]: "/docs/grafana-cloud/ -> /docs/grafana-cloud/send-data/agent/flow/concepts/config-language/expressions/referencing_exports" -[call functions]: "/docs/agent/ -> /docs/agent//flow/concepts/config-language/expressions/function_calls" -[call functions]: "/docs/grafana-cloud/ -> /docs/grafana-cloud/send-data/agent/flow/concepts/config-language/expressions/function_calls" -[type]: "/docs/agent/ -> /docs/agent//flow/concepts/config-language/expressions/types_and_values" -[type]: "/docs/grafana-cloud/ -> /docs/grafana-cloud/send-data/agent/flow/concepts/config-language/expressions/types_and_values" -{{% /docs/reference %}} diff --git a/docs/sources/flow/concepts/config-language/expressions/function_calls.md b/docs/sources/flow/concepts/config-language/expressions/function_calls.md deleted file mode 100644 index b9598fea91..0000000000 --- a/docs/sources/flow/concepts/config-language/expressions/function_calls.md +++ /dev/null @@ -1,43 +0,0 @@ ---- -aliases: -- ../../configuration-language/expressions/function-calls/ # /docs/agent/latest/flow/concepts/configuration-language/expressions/function-calls/ -- /docs/grafana-cloud/agent/flow/concepts/config-language/expressions/function_calls/ -- /docs/grafana-cloud/monitor-infrastructure/agent/flow/concepts/config-language/expressions/function_calls/ -- /docs/grafana-cloud/monitor-infrastructure/integrations/agent/flow/concepts/config-language/expressions/function_calls/ -- /docs/grafana-cloud/send-data/agent/flow/concepts/config-language/expressions/function_calls/ -# Previous page aliases for backwards compatibility: -- ../../../configuration-language/expressions/function-calls/ # /docs/agent/latest/flow/configuration-language/expressions/function-calls/ -- /docs/grafana-cloud/agent/flow/config-language/expressions/function_calls/ -- /docs/grafana-cloud/monitor-infrastructure/agent/flow/config-language/expressions/function_calls/ -- /docs/grafana-cloud/monitor-infrastructure/integrations/agent/flow/config-language/expressions/function_calls/ -- /docs/grafana-cloud/send-data/agent/flow/config-language/expressions/function_calls/ -canonical: https://grafana.com/docs/agent/latest/flow/concepts/config-language/expressions/function_calls/ -description: Learn about function calls -title: Function calls -weight: 400 ---- - -# Function calls - -You can use River function calls to build richer expressions. - -Functions take zero or more arguments as their input and always return a single value as their output. -You can't construct functions. You can call functions from River's standard library or export them from a component. - -If a function fails, the expression isn't evaluated, and an error is reported. - -## Standard library functions - -River contains a [standard library][] of functions. -Some functions enable interaction with the host system, for example, reading from an environment variable. -Some functions allow for more complex expressions, for example, concatenating arrays or decoding JSON strings into objects. 
- -```river -env("HOME") -json_decode(local.file.cfg.content)["namespace"] -``` - -{{% docs/reference %}} -[standard library]: "/docs/agent/ -> /docs/agent//flow/reference/stdlib" -[standard library]: "/docs/grafana-cloud/ -> /docs/grafana-cloud/send-data/agent/flow/reference/stdlib" -{{% /docs/reference %}} \ No newline at end of file diff --git a/docs/sources/flow/concepts/config-language/files.md b/docs/sources/flow/concepts/config-language/files.md deleted file mode 100644 index bd5565635f..0000000000 --- a/docs/sources/flow/concepts/config-language/files.md +++ /dev/null @@ -1,26 +0,0 @@ ---- -aliases: -- ../configuration-language/files/ # /docs/agent/latest/flow/concepts/configuration-language/files/ -- /docs/grafana-cloud/agent/flow/concepts/config-language/files/ -- /docs/grafana-cloud/monitor-infrastructure/agent/flow/concepts/config-language/files/ -- /docs/grafana-cloud/monitor-infrastructure/integrations/agent/flow/concepts/config-language/files/ -- /docs/grafana-cloud/send-data/agent/flow/concepts/config-language/files/ -# Previous page aliases for backwards compatibility: -- ../../configuration-language/files/ # /docs/agent/latest/flow/configuration-language/files/ -- /docs/grafana-cloud/agent/flow/config-language/files/ -- /docs/grafana-cloud/monitor-infrastructure/agent/flow/config-language/files/ -- /docs/grafana-cloud/monitor-infrastructure/integrations/agent/flow/config-language/files/ -- /docs/grafana-cloud/send-data/agent/flow/config-language/files/ -canonical: https://grafana.com/docs/agent/latest/flow/concepts/config-language/files/ -description: Learn about River files -title: Files -weight: 100 ---- - -# Files - -River files are plain text files with the `.river` file extension. -You can refer to each River file as a "configuration file" or a "River configuration." - -River files must be UTF-8 encoded and can contain Unicode characters. -River files can use Unix-style line endings (LF) and Windows-style line endings (CRLF), but formatters may replace all line endings with Unix-style ones. diff --git a/docs/sources/flow/get-started/_index.md b/docs/sources/flow/get-started/_index.md deleted file mode 100644 index 444b64f5af..0000000000 --- a/docs/sources/flow/get-started/_index.md +++ /dev/null @@ -1,25 +0,0 @@ ---- -aliases: -- /docs/grafana-cloud/agent/flow/get-started/ -- /docs/grafana-cloud/monitor-infrastructure/agent/flow/get-started/ -- /docs/grafana-cloud/monitor-infrastructure/integrations/agent/flow/get-started/ -- /docs/grafana-cloud/send-data/agent/flow/get-started/ -# Previous docs aliases for backwards compatibility: -- /docs/grafana-cloud/agent/flow/setup/ -- /docs/grafana-cloud/monitor-infrastructure/agent/flow/setup/ -- /docs/grafana-cloud/monitor-infrastructure/integrations/agent/flow/setup/ -- /docs/grafana-cloud/send-data/agent/flow/setup/ -- ./setup/ # /docs/agent/latest/flow/setup/ -canonical: https://grafana.com/docs/agent/latest/flow/get-started/ -description: Learn how to install and use Grafana Agent Flow -menuTitle: Get started -title: Get started with Grafana Agent Flow -weight: 50 ---- - -# Get started with {{% param "PRODUCT_NAME" %}} - -This section covers topics that help you get started with {{< param "PRODUCT_NAME" >}}, -including installation, running {{< param "PRODUCT_NAME" >}}, overview of deployment topologies, and more. 
- -{{< section >}} diff --git a/docs/sources/flow/get-started/install/_index.md b/docs/sources/flow/get-started/install/_index.md deleted file mode 100644 index 25b9a5b2f1..0000000000 --- a/docs/sources/flow/get-started/install/_index.md +++ /dev/null @@ -1,46 +0,0 @@ ---- -aliases: -- /docs/grafana-cloud/agent/flow/get-started/install/ -- /docs/grafana-cloud/monitor-infrastructure/agent/flow/get-started/install/ -- /docs/grafana-cloud/monitor-infrastructure/integrations/agent/flow/get-started/install/ -- /docs/grafana-cloud/send-data/agent/flow/get-started/install/ -# Previous docs aliases for backwards compatibility: -- /docs/grafana-cloud/agent/flow/setup/install/ -- /docs/grafana-cloud/monitor-infrastructure/agent/flow/setup/install/ -- /docs/grafana-cloud/monitor-infrastructure/integrations/agent/flow/setup/install/ -- /docs/grafana-cloud/send-data/agent/flow/setup/install/ -- /docs/sources/flow/install/ -- ../setup/install/ # /docs/agent/latest/flow/setup/install/ -canonical: https://grafana.com/docs/agent/latest/flow/get-started/install/ -description: Learn how to install Grafana Agent Flow -menuTitle: Install -title: Install Grafana Agent Flow -weight: 50 ---- - -# Install {{% param "PRODUCT_NAME" %}} - -You can install {{< param "PRODUCT_NAME" >}} on Docker, Kubernetes, Linux, macOS, or Windows. - -The following architectures are supported: - -- Linux: AMD64, ARM64 -- Windows: AMD64 -- macOS: AMD64 (Intel), ARM64 (Apple Silicon) -- FreeBSD: AMD64 - -{{< admonition type="note" >}} -Installing {{< param "PRODUCT_NAME" >}} on other operating systems is possible, but isn't recommended or supported. -{{< /admonition >}} - -{{< section >}} - -## Data collection - -By default, {{< param "PRODUCT_NAME" >}} sends anonymous usage information to Grafana Labs. Refer to [data collection][] for more information -about what data is collected and how you can opt-out. 
- -{{% docs/reference %}} -[data collection]: "/docs/agent/ -> /docs/agent//data-collection.md" -[data collection]: "/docs/grafana-cloud/ -> /docs/grafana-cloud/send-data/agent/data-collection.md" -{{% /docs/reference %}} diff --git a/docs/sources/flow/get-started/install/kubernetes.md b/docs/sources/flow/get-started/install/kubernetes.md deleted file mode 100644 index d045c7b5ce..0000000000 --- a/docs/sources/flow/get-started/install/kubernetes.md +++ /dev/null @@ -1,75 +0,0 @@ ---- -aliases: -- /docs/grafana-cloud/agent/flow/get-started/install/kubernetes/ -- /docs/grafana-cloud/monitor-infrastructure/agent/flow/get-started/install/kubernetes/ -- /docs/grafana-cloud/monitor-infrastructure/integrations/agent/flow/get-started/install/kubernetes/ -- /docs/grafana-cloud/send-data/agent/flow/get-started/install/kubernetes/ -# Previous docs aliases for backwards compatibility: -- ../../install/kubernetes/ # /docs/agent/latest/flow/install/kubernetes/ -- /docs/grafana-cloud/agent/flow/setup/install/kubernetes/ -- /docs/grafana-cloud/monitor-infrastructure/agent/flow/setup/install/kubernetes/ -- /docs/grafana-cloud/monitor-infrastructure/integrations/agent/flow/setup/install/kubernetes/ -- /docs/grafana-cloud/send-data/agent/flow/setup/install/kubernetes/ -- ../../setup/install/kubernetes/ # /docs/agent/latest/flow/setup/install/kubernetes/ -canonical: https://grafana.com/docs/agent/latest/flow/get-started/install/kubernetes/ -description: Learn how to deploy Grafana Agent Flow on Kubernetes -menuTitle: Kubernetes -title: Deploy Grafana Agent Flow on Kubernetes -weight: 200 ---- - -# Deploy {{% param "PRODUCT_NAME" %}} on Kubernetes - -{{< param "PRODUCT_NAME" >}} can be deployed on Kubernetes by using the Helm chart for {{< param "PRODUCT_ROOT_NAME" >}}. - -## Before you begin - -* Install [Helm][] on your computer. -* Configure a Kubernetes cluster that you can use for {{< param "PRODUCT_NAME" >}}. -* Configure your local Kubernetes context to point to the cluster. - -## Deploy - -{{< admonition type="note" >}} -These instructions show you how to install the generic [Helm chart](https://github.com/grafana/agent/tree/main/operations/helm/charts/grafana-agent) for {{< param "PRODUCT_NAME" >}}. -You can deploy {{< param "PRODUCT_ROOT_NAME" >}} either in static mode or flow mode. The Helm chart deploys {{< param "PRODUCT_NAME" >}} by default. -{{< /admonition >}} - -To deploy {{< param "PRODUCT_ROOT_NAME" >}} on Kubernetes using Helm, run the following commands in a terminal window: - -1. Add the Grafana Helm chart repository: - - ```shell - helm repo add grafana https://grafana.github.io/helm-charts - ``` - -1. Update the Grafana Helm chart repository: - - ```shell - helm repo update - ``` - -1. Install {{< param "PRODUCT_ROOT_NAME" >}}: - - ```shell - helm install grafana/grafana-agent - ``` - - Replace the following: - - - _``_: The name to use for your {{< param "PRODUCT_ROOT_NAME" >}} installation, such as `grafana-agent-flow`. - -For more information on the {{< param "PRODUCT_ROOT_NAME" >}} Helm chart, refer to the Helm chart documentation on [Artifact Hub][]. 
- -[Artifact Hub]: https://artifacthub.io/packages/helm/grafana/grafana-agent - -## Next steps - -- [Configure {{< param "PRODUCT_NAME" >}}][Configure] - -[Helm]: https://helm.sh - -{{% docs/reference %}} -[Configure]: "/docs/agent/ -> /docs/agent//flow/tasks/configure/configure-kubernetes.md" -[Configure]: "/docs/grafana-cloud/ -> /docs/grafana-cloud/send-data/agent/flow/tasks/configure/configure-kubernetes.md" -{{% /docs/reference %}} diff --git a/docs/sources/flow/get-started/install/macos.md b/docs/sources/flow/get-started/install/macos.md deleted file mode 100644 index c16f70e6d9..0000000000 --- a/docs/sources/flow/get-started/install/macos.md +++ /dev/null @@ -1,85 +0,0 @@ ---- -aliases: -- /docs/grafana-cloud/agent/flow/get-started/install/macos/ -- /docs/grafana-cloud/monitor-infrastructure/agent/flow/get-started/install/macos/ -- /docs/grafana-cloud/monitor-infrastructure/integrations/agent/flow/get-started/install/macos/ -- /docs/grafana-cloud/send-data/agent/flow/get-started/install/macos/ -# Previous docs aliases for backwards compatibility: -- ../../install/macos/ # /docs/agent/latest/flow/install/macos/ -- /docs/grafana-cloud/agent/flow/setup/install/macos/ -- /docs/grafana-cloud/monitor-infrastructure/agent/flow/setup/install/macos/ -- /docs/grafana-cloud/monitor-infrastructure/integrations/agent/flow/setup/install/macos/ -- /docs/grafana-cloud/send-data/agent/flow/setup/install/macos/ -- ../../setup/install/macos/ # /docs/agent/latest/flow/setup/install/macos/ -canonical: https://grafana.com/docs/agent/latest/flow/get-started/install/macos/ -description: Learn how to install Grafana Agent Flow on macOS -menuTitle: macOS -title: Install Grafana Agent Flow on macOS -weight: 400 ---- - -# Install {{% param "PRODUCT_NAME" %}} on macOS - -You can install {{< param "PRODUCT_NAME" >}} on macOS with Homebrew. - -{{< admonition type="note" >}} -The default prefix for Homebrew on Intel is `/usr/local`. The default prefix for Homebrew on Apple Silicon is `/opt/homebrew`. To verify the default prefix for Homebrew on your computer, open a terminal window and type `brew --prefix`. -{{< /admonition >}} - -## Before you begin - -* Install [Homebrew][] on your computer. - -## Install - -To install {{< param "PRODUCT_NAME" >}} on macOS, run the following commands in a terminal window. - -1. Add the Grafana Homebrew tap: - - ```shell - brew tap grafana/grafana - ``` - -1. Install {{< param "PRODUCT_NAME" >}}: - - ```shell - brew install grafana-agent-flow - ``` - -## Upgrade - -To upgrade {{< param "PRODUCT_NAME" >}} on macOS, run the following commands in a terminal window. - -1. Upgrade {{< param "PRODUCT_NAME" >}}: - - ```shell - brew upgrade grafana-agent-flow - ``` - -1.
Restart {{< param "PRODUCT_NAME" >}}: - - ```shell - brew services restart grafana-agent-flow - ``` - -## Uninstall - -To uninstall {{< param "PRODUCT_NAME" >}} on macOS, run the following command in a terminal window: - -```shell -brew uninstall grafana-agent-flow -``` - -## Next steps - -- [Run {{< param "PRODUCT_NAME" >}}][Run] -- [Configure {{< param "PRODUCT_NAME" >}}][Configure] - -[Homebrew]: https://brew.sh - -{{% docs/reference %}} -[Run]: "/docs/agent/ -> /docs/agent//flow/get-started/run/macos.md" -[Run]: "/docs/grafana-cloud/ -> /docs/grafana-cloud/send-data/agent/flow/get-started/run/macos.md" -[Configure]: "/docs/agent/ -> /docs/agent//flow/tasks/configure/configure-macos.md" -[Configure]: "/docs/grafana-cloud/ -> /docs/grafana-cloud/send-data/agent/flow/tasks/configure/configure-macos.md" -{{% /docs/reference %}} diff --git a/docs/sources/flow/get-started/run/_index.md b/docs/sources/flow/get-started/run/_index.md deleted file mode 100644 index f98f870735..0000000000 --- a/docs/sources/flow/get-started/run/_index.md +++ /dev/null @@ -1,31 +0,0 @@ ---- -aliases: -- /docs/grafana-cloud/agent/flow/get-started/run/ -- /docs/grafana-cloud/monitor-infrastructure/agent/flow/get-started/run/ -- /docs/grafana-cloud/monitor-infrastructure/integrations/agent/flow/get-started/run/ -- /docs/grafana-cloud/send-data/agent/flow/get-started/run/ -- /docs/sources/flow/run/ -# Previous pages aliases for backwards compatibility: -- /docs/grafana-cloud/agent/flow/setup/start-agent/ -- /docs/grafana-cloud/monitor-infrastructure/agent/flow/setup/start-agent/ -- /docs/grafana-cloud/monitor-infrastructure/integrations/agent/flow/setup/start-agent/ -- /docs/grafana-cloud/send-data/agent/flow/setup/start-agent/ -- ../setup/start-agent/ # /docs/agent/latest/flow/setup/start-agent/ -canonical: https://grafana.com/docs/agent/latest/flow/get-started/run/ -description: Learn how to run Grafana Agent Flow -menuTitle: Run -title: Run Grafana Agent Flow -weight: 50 ---- - -# Run {{% param "PRODUCT_NAME" %}} - -Use the following pages to learn how to start, restart, and stop {{< param "PRODUCT_NAME" >}} after it is installed. -For installation instructions, refer to [Install {{< param "PRODUCT_NAME" >}}][Install]. - -{{< section >}} - -{{% docs/reference %}} -[Install]: "/docs/agent/ -> /docs/agent//flow/get-started/install/" -[Install]: "/docs/grafana-cloud/ -> /docs/grafana-cloud/send-data/agent/flow/get-started/install/" -{{% /docs/reference %}} diff --git a/docs/sources/flow/get-started/run/macos.md b/docs/sources/flow/get-started/run/macos.md deleted file mode 100644 index 8c7a055dd8..0000000000 --- a/docs/sources/flow/get-started/run/macos.md +++ /dev/null @@ -1,69 +0,0 @@ ---- -aliases: - - /docs/grafana-cloud/agent/flow/get-started/run/macos/ - - /docs/grafana-cloud/monitor-infrastructure/agent/flow/get-started/run/macos/ - - /docs/grafana-cloud/monitor-infrastructure/integrations/agent/flow/get-started/run/macos/ - - /docs/grafana-cloud/send-data/agent/flow/get-started/run/macos/ -canonical: https://grafana.com/docs/agent/latest/flow/get-started/run/macos/ -description: Learn how to run Grafana Agent Flow on macOS -menuTitle: macOS -title: Run Grafana Agent Flow on macOS -weight: 400 ---- - -# Run {{% param "PRODUCT_NAME" %}} on macOS - -{{< param "PRODUCT_NAME" >}} is [installed][InstallMacOS] as a launchd service on macOS. 
- -## Start {{% param "PRODUCT_NAME" %}} - -To start {{< param "PRODUCT_NAME" >}}, run the following command in a terminal window: - -```shell -brew services start grafana-agent-flow -``` - -{{< param "PRODUCT_NAME" >}} automatically runs when the system starts. - -(Optional) To verify that the service is running, run the following command in a terminal window: - -```shell -brew services info grafana-agent-flow -``` - -## Restart {{% param "PRODUCT_NAME" %}} - -To restart {{< param "PRODUCT_NAME" >}}, run the following command in a terminal window: - -```shell -brew services restart grafana-agent-flow -``` - -## Stop {{% param "PRODUCT_NAME" %}} - -To stop {{< param "PRODUCT_NAME" >}}, run the following command in a terminal window: - -```shell -brew services stop grafana-agent-flow -``` - -## View {{% param "PRODUCT_NAME" %}} logs on macOS - -By default, logs are written to `$(brew --prefix)/var/log/grafana-agent-flow.log` and -`$(brew --prefix)/var/log/grafana-agent-flow.err.log`. - -If you followed [Configure the {{< param "PRODUCT_NAME" >}} service][ConfigureService] and changed the path where logs are written, -refer to your current copy of the {{< param "PRODUCT_NAME" >}} formula to locate your log files. - -## Next steps - -- [Configure {{< param "PRODUCT_NAME" >}}][ConfigureMacOS] - -{{% docs/reference %}} -[InstallMacOS]: "/docs/agent/ -> /docs/agent//flow/get-started/install/macos.md" -[InstallMacOS]: "/docs/grafana-cloud/ -> /docs/grafana-cloud/send-data/agent/flow/get-started/install/macos.md" -[ConfigureMacOS]: "/docs/agent/ -> /docs/agent//flow/tasks/configure/configure-macos.md" -[ConfigureMacOS]: "/docs/grafana-cloud/ -> /docs/grafana-cloud/send-data/agent/flow/tasks/configure/configure-macos.md" -[ConfigureService]: "/docs/agent/ -> /docs/agent//flow/tasks/configure/configure-macos.md#configure-the-grafana-agent-flow-service" -[ConfigureService]: "/docs/grafana-cloud/ -> /docs/grafana-cloud/send-data/agent/flow/tasks/configure/configure-macos.md#configure-the-grafana-agent-flow-service" -{{% /docs/reference %}} diff --git a/docs/sources/flow/get-started/run/windows.md b/docs/sources/flow/get-started/run/windows.md deleted file mode 100644 index 2ee89710b0..0000000000 --- a/docs/sources/flow/get-started/run/windows.md +++ /dev/null @@ -1,54 +0,0 @@ ---- -aliases: - - /docs/grafana-cloud/agent/flow/get-started/run/windows/ - - /docs/grafana-cloud/monitor-infrastructure/agent/flow/get-started/run/windows/ - - /docs/grafana-cloud/monitor-infrastructure/integrations/agent/flow/get-started/run/windows/ - - /docs/grafana-cloud/send-data/agent/flow/get-started/run/windows/ -canonical: https://grafana.com/docs/agent/latest/flow/get-started/run/windows/ -description: Learn how to run Grafana Agent Flow on Windows -menuTitle: Windows -title: Run Grafana Agent Flow on Windows -weight: 500 ---- - -# Run {{% param "PRODUCT_NAME" %}} on Windows - -{{< param "PRODUCT_NAME" >}} is [installed][InstallWindows] as a Windows Service. The service is configured to automatically run on startup. - -To verify that {{< param "PRODUCT_NAME" >}} is running as a Windows Service: - -1. Open the Windows Services manager (services.msc): - - 1. Right-click the Start Menu and select **Run**. - - 1. Type `services.msc` and click **OK**. - -1. Scroll down to find the **{{< param "PRODUCT_NAME" >}}** service and verify that the **Status** is **Running**.
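You can also check the service state from PowerShell. The following is a minimal sketch; the service display name is an assumption and may differ depending on how the service was installed:

```shell
# Query the Windows service by its display name (assumed here to be "Grafana Agent Flow").
Get-Service -DisplayName "Grafana Agent Flow"
```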
- -## View {{% param "PRODUCT_NAME" %}} logs - -When running on Windows, {{< param "PRODUCT_NAME" >}} writes its logs to Windows Event -Logs with an event source name of **{{< param "PRODUCT_NAME" >}}**. - -To view the logs, perform the following steps: - -1. Open the Event Viewer: - - 1. Right-click the Start Menu and select **Run**. - - 1. Type `eventvwr` and click **OK**. - -1. In the Event Viewer, click on **Windows Logs > Application**. - -1. Search for events with the source **{{< param "PRODUCT_NAME" >}}**. - -## Next steps - -- [Configure {{< param "PRODUCT_NAME" >}}][Configure] - -{{% docs/reference %}} -[InstallWindows]: "/docs/agent/ -> /docs/agent//flow/get-started/install/windows.md" -[InstallWindows]: "/docs/grafana-cloud/ -> /docs/grafana-cloud/send-data/agent/flow/get-started/install/windows.md" -[Configure]: "/docs/agent/ -> /docs/agent//flow/tasks/configure/configure-windows.md" -[Configure]: "/docs/grafana-cloud/ -> /docs/grafana-cloud/send-data/agent/flow/tasks/configure/configure-windows.md" -{{% /docs/reference %}} diff --git a/docs/sources/flow/reference/_index.md b/docs/sources/flow/reference/_index.md deleted file mode 100644 index 5c4e88aac9..0000000000 --- a/docs/sources/flow/reference/_index.md +++ /dev/null @@ -1,18 +0,0 @@ ---- -aliases: -- /docs/grafana-cloud/agent/flow/reference/ -- /docs/grafana-cloud/monitor-infrastructure/agent/flow/reference/ -- /docs/grafana-cloud/monitor-infrastructure/integrations/agent/flow/reference/ -- /docs/grafana-cloud/send-data/agent/flow/reference/ -canonical: https://grafana.com/docs/agent/latest/flow/reference/ -description: The reference-level documentation for Grafana Agent -menuTitle: Reference -title: Grafana Agent Flow Reference -weight: 600 ---- - -# {{% param "PRODUCT_NAME" %}} Reference - -This section provides reference-level documentation for the various parts of {{< param "PRODUCT_NAME" >}}: - -{{< section >}} diff --git a/docs/sources/flow/reference/cli/_index.md b/docs/sources/flow/reference/cli/_index.md deleted file mode 100644 index 43fa4be774..0000000000 --- a/docs/sources/flow/reference/cli/_index.md +++ /dev/null @@ -1,34 +0,0 @@ ---- -aliases: -- /docs/grafana-cloud/agent/flow/reference/cli/ -- /docs/grafana-cloud/monitor-infrastructure/agent/flow/reference/cli/ -- /docs/grafana-cloud/monitor-infrastructure/integrations/agent/flow/reference/cli/ -- /docs/grafana-cloud/send-data/agent/flow/reference/cli/ -canonical: https://grafana.com/docs/agent/latest/flow/reference/cli/ -description: Learn about the Grafana Agent command line interface -menuTitle: Command-line interface -title: The Grafana Agent command-line interface -weight: 100 ---- - -# The {{% param "PRODUCT_ROOT_NAME" %}} command-line interface - -When in Flow mode, the `grafana-agent` binary exposes a command-line interface with -subcommands to perform various operations. - -The most common subcommand is [`run`][run], which accepts a configuration file and -starts {{< param "PRODUCT_NAME" >}}. - -Available commands: - -* [`convert`][convert]: Convert a {{< param "PRODUCT_ROOT_NAME" >}} configuration file. -* [`fmt`][fmt]: Format a {{< param "PRODUCT_NAME" >}} configuration file. -* [`run`][run]: Start {{< param "PRODUCT_NAME" >}}, given a configuration file. -* [`tools`][tools]: Read the WAL and provide statistical information. -* `completion`: Generate shell completion for the `grafana-agent-flow` CLI. -* `help`: Print help for supported commands.
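For example, the following sketch generates Bash completions and installs them system-wide; the destination path is an assumption and varies by distribution:

```shell
# Write the generated Bash completion script to the conventional completions directory.
grafana-agent-flow completion bash > /etc/bash_completion.d/grafana-agent-flow
```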
- -[run]: {{< relref "./run.md" >}} -[fmt]: {{< relref "./fmt.md" >}} -[convert]: {{< relref "./convert.md" >}} -[tools]: {{< relref "./tools.md" >}} diff --git a/docs/sources/flow/reference/cli/convert.md b/docs/sources/flow/reference/cli/convert.md deleted file mode 100644 index 3b44d662e8..0000000000 --- a/docs/sources/flow/reference/cli/convert.md +++ /dev/null @@ -1,116 +0,0 @@ ---- -aliases: -- /docs/grafana-cloud/agent/flow/reference/cli/convert/ -- /docs/grafana-cloud/monitor-infrastructure/agent/flow/reference/cli/convert/ -- /docs/grafana-cloud/monitor-infrastructure/integrations/agent/flow/reference/cli/convert/ -- /docs/grafana-cloud/send-data/agent/flow/reference/cli/convert/ -canonical: https://grafana.com/docs/agent/latest/flow/reference/cli/convert/ -description: Learn about the convert command -labels: - stage: beta -menuTitle: convert -title: The convert command -weight: 100 ---- - -# The convert command - -The `convert` command converts a supported configuration format to {{< param "PRODUCT_NAME" >}} River format. - -## Usage - -Usage: - -* `AGENT_MODE=flow grafana-agent convert [<FLAG> ...] <FILE_NAME>` -* `grafana-agent-flow convert [<FLAG> ...] <FILE_NAME>` - - Replace the following: - - * _`<FLAG>`_: One or more flags that define the input and output of the command. - * _`<FILE_NAME>`_: The {{< param "PRODUCT_ROOT_NAME" >}} configuration file. - -If the `FILE_NAME` argument isn't provided or if the `FILE_NAME` argument is -equal to `-`, `convert` converts the contents of standard input. Otherwise, -`convert` reads and converts the file from disk specified by the argument. - -Several flags are available for the `convert` command. You can use the `--output` flag to write the converted configuration to a specified path. You can use the `--report` flag to generate a diagnostic report. The `--bypass-errors` flag allows you to bypass any [errors] generated during the file conversion. - -The command fails if the source configuration is syntactically incorrect or can't be converted to {{< param "PRODUCT_NAME" >}} River format. - -The following flags are supported: - -* `--output`, `-o`: The filepath and filename where the output is written. - -* `--report`, `-r`: The filepath and filename where the report is written. - -* `--source-format`, `-f`: Required. The format of the source file. Supported formats: [prometheus], [promtail], [static]. - -* `--bypass-errors`, `-b`: Enable bypassing errors when converting. - -* `--extra-args`, `-e`: Extra arguments from the original format used by the converter. - -[prometheus]: #prometheus -[promtail]: #promtail -[static]: #static -[errors]: #errors - -### Defaults - -{{< param "PRODUCT_NAME" >}} defaults are managed as follows: -* If a provided source configuration value matches a {{< param "PRODUCT_NAME" >}} default value, the property is left off the output. -* If a non-provided source configuration value default matches a {{< param "PRODUCT_NAME" >}} default value, the property is left off the output. -* If a non-provided source configuration value default doesn't match a {{< param "PRODUCT_NAME" >}} default value, the default value is included in the output. - -### Errors - -Errors are defined as non-critical issues identified during the conversion -where an output can still be generated. These can be bypassed using the -`--bypass-errors` flag.
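For example, the following sketch converts a hypothetical Prometheus configuration and writes both the converted output and a diagnostic report; the file names are placeholders:

```shell
# Convert prometheus.yml to River format, writing the result and a report to disk.
grafana-agent-flow convert --source-format=prometheus --output=agent.river --report=report.txt prometheus.yml
```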
- -### Prometheus - -Using `--source-format=prometheus` converts the source configuration from -[Prometheus v2.45](https://prometheus.io/docs/prometheus/2.45/configuration/configuration/) -to {{< param "PRODUCT_NAME" >}} configuration. - -This includes Prometheus features such as -[scrape_config](https://prometheus.io/docs/prometheus/2.45/configuration/configuration/#scrape_config), -[relabel_config](https://prometheus.io/docs/prometheus/2.45/configuration/configuration/#relabel_config), -[metric_relabel_configs](https://prometheus.io/docs/prometheus/2.45/configuration/configuration/#metric_relabel_configs), -[remote_write](https://prometheus.io/docs/prometheus/2.45/configuration/configuration/#remote_write), -and many supported `*_sd_configs`. Unsupported features in a source configuration result -in [errors]. - -Refer to [Migrate from Prometheus to {{< param "PRODUCT_NAME" >}}]({{< relref "../../tasks/migrate/from-prometheus/" >}}) for a detailed migration guide. - -### Promtail - -Using `--source-format=promtail` converts the source configuration from -[Promtail v2.8.x](/docs/loki/v2.8.x/clients/promtail/) -to {{< param "PRODUCT_NAME" >}} configuration. - -Nearly all [Promtail features](/docs/loki/v2.8.x/clients/promtail/configuration/) -are supported and can be converted to {{< param "PRODUCT_NAME" >}} configuration. - -If you have unsupported features in a source configuration, you will receive [errors] when you convert to a Flow mode configuration. The converter will -also raise warnings for configuration options that may require your attention. - -Refer to [Migrate from Promtail to {{< param "PRODUCT_NAME" >}}]({{< relref "../../tasks/migrate/from-promtail/" >}}) for a detailed migration guide. - -### Static - -Using `--source-format=static` converts the source configuration from a -[Grafana Agent Static]({{< relref "../../../static" >}}) configuration to a {{< param "PRODUCT_NAME" >}} configuration. - -Include `--extra-args` for passing additional command line flags from the original format. -For example, `--extra-args="-enable-features=integrations-next"` will convert a Grafana Agent Static -[integrations-next]({{< relref "../../../static/configuration/integrations/integrations-next/" >}}) -configuration to a {{< param "PRODUCT_NAME" >}} configuration. You can also -expand environment variables with `--extra-args="-config.expand-env"`. You can combine multiple command line -flags with a space between each flag, for example `--extra-args="-enable-features=integrations-next -config.expand-env"`. - -If you have unsupported features in a Static mode source configuration, you will receive [errors][] when you convert to a Flow mode configuration. The converter will -also raise warnings for configuration options that may require your attention. - -Refer to [Migrate from Grafana Agent Static to {{< param "PRODUCT_NAME" >}}]({{< relref "../../tasks/migrate/from-static/" >}}) for a detailed migration guide.
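As a concrete sketch of the Static mode conversion described above (file names are placeholders):

```shell
# Convert a Static mode configuration that relied on integrations-next and environment variable expansion.
AGENT_MODE=flow grafana-agent convert --source-format=static --extra-args="-enable-features=integrations-next -config.expand-env" --output=agent.river agent-static.yaml
```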
\ No newline at end of file diff --git a/docs/sources/flow/reference/cli/fmt.md b/docs/sources/flow/reference/cli/fmt.md deleted file mode 100644 index 7a266921d3..0000000000 --- a/docs/sources/flow/reference/cli/fmt.md +++ /dev/null @@ -1,45 +0,0 @@ ---- -aliases: -- /docs/grafana-cloud/agent/flow/reference/cli/fmt/ -- /docs/grafana-cloud/monitor-infrastructure/agent/flow/reference/cli/fmt/ -- /docs/grafana-cloud/monitor-infrastructure/integrations/agent/flow/reference/cli/fmt/ -- /docs/grafana-cloud/send-data/agent/flow/reference/cli/fmt/ -canonical: https://grafana.com/docs/agent/latest/flow/reference/cli/fmt/ -description: Learn about the fmt command -menuTitle: fmt -title: The fmt command -weight: 200 ---- - -# The fmt command - -The `fmt` command formats a given {{< param "PRODUCT_NAME" >}} configuration file. - -## Usage - -Usage: - -* `AGENT_MODE=flow grafana-agent fmt [FLAG ...] FILE_NAME` -* `grafana-agent-flow fmt [FLAG ...] FILE_NAME` - - Replace the following: - - * `FLAG`: One or more flags that define the input and output of the command. - * `FILE_NAME`: The {{< param "PRODUCT_NAME" >}} configuration file. - -If the `FILE_NAME` argument is not provided or if the `FILE_NAME` argument is -equal to `-`, `fmt` formats the contents of standard input. Otherwise, -`fmt` reads and formats the file from disk specified by the argument. - -The `--write` flag can be specified to replace the contents of the original -file on disk with the formatted results. `--write` can only be provided when -`fmt` is not reading from standard input. - -The command fails if the file being formatted has syntactically incorrect River -configuration, but does not validate whether Flow components are configured -properly. - -The following flags are supported: - -* `--write`, `-w`: Write the formatted file back to disk when not reading from - standard input. diff --git a/docs/sources/flow/reference/components/_index.md b/docs/sources/flow/reference/components/_index.md deleted file mode 100644 index 3eafecb3c1..0000000000 --- a/docs/sources/flow/reference/components/_index.md +++ /dev/null @@ -1,19 +0,0 @@ ---- -aliases: -- /docs/grafana-cloud/agent/flow/reference/components/ -- /docs/grafana-cloud/monitor-infrastructure/agent/flow/reference/components/ -- /docs/grafana-cloud/monitor-infrastructure/integrations/agent/flow/reference/components/ -- /docs/grafana-cloud/send-data/agent/flow/reference/components/ -canonical: https://grafana.com/docs/agent/latest/flow/reference/components/ -description: Learn about the components in Grafana Agent Flow -title: Components reference -weight: 300 ---- - -# Components reference - -This section contains reference documentation for all recognized [components][]. 
- -{{< section >}} - -[components]: {{< relref "../../concepts/components.md" >}} diff --git a/docs/sources/flow/reference/components/faro.receiver.md b/docs/sources/flow/reference/components/faro.receiver.md deleted file mode 100644 index 36e37fa5fc..0000000000 --- a/docs/sources/flow/reference/components/faro.receiver.md +++ /dev/null @@ -1,286 +0,0 @@ ---- -aliases: -- /docs/grafana-cloud/agent/flow/reference/components/faro.receiver/ -- /docs/grafana-cloud/monitor-infrastructure/agent/flow/reference/components/faro.receiver/ -- /docs/grafana-cloud/monitor-infrastructure/integrations/agent/flow/reference/components/faro.receiver/ -- /docs/grafana-cloud/send-data/agent/flow/reference/components/faro.receiver/ -canonical: https://grafana.com/docs/agent/latest/flow/reference/components/faro.receiver/ -description: Learn about the faro.receiver -title: faro.receiver ---- - -# faro.receiver - -`faro.receiver` accepts web application telemetry data from the [Grafana Faro Web SDK][faro-sdk] -and forwards it to other components for future processing. - -[faro-sdk]: https://github.com/grafana/faro-web-sdk - -## Usage - -```river -faro.receiver "LABEL" { - output { - logs = [LOKI_RECEIVERS] - traces = [OTELCOL_COMPONENTS] - } -} -``` - -## Arguments - -The following arguments are supported: - -Name | Type | Description | Default | Required ----- | ---- | ----------- | ------- | -------- -`extra_log_labels` | `map(string)` | Extra labels to attach to emitted log lines. | `{}` | no - -## Blocks - -The following blocks are supported inside the definition of `faro.receiver`: - -Hierarchy | Block | Description | Required ---------- | ----- | ----------- | -------- -server | [server][] | Configures the HTTP server. | no -server > rate_limiting | [rate_limiting][] | Configures rate limiting for the HTTP server. | no -sourcemaps | [sourcemaps][] | Configures sourcemap retrieval. | no -sourcemaps > location | [location][] | Configures on-disk location for sourcemap retrieval. | no -output | [output][] | Configures where to send collected telemetry data. | yes - -[server]: #server-block -[rate_limiting]: #rate_limiting-block -[sourcemaps]: #sourcemaps-block -[location]: #location-block -[output]: #output-block - -### server block - -The `server` block configures the HTTP server managed by the `faro.receiver` -component. Clients using the [Grafana Faro Web SDK][faro-sdk] forward telemetry -data to this HTTP server for processing. - -Name | Type | Description | Default | Required ----- | ---- | ----------- | ------- | -------- -`listen_address` | `string` | Address to listen for HTTP traffic on. | `127.0.0.1` | no -`listen_port` | `number` | Port to listen for HTTP traffic on. | `12347` | no -`cors_allowed_origins` | `list(string)` | Origins for which cross-origin requests are permitted. | `[]` | no -`api_key` | `secret` | Optional API key to validate client requests with. | `""` | no -`max_allowed_payload_size` | `string` | Maximum size (in bytes) for client requests. | `"5MiB"` | no - -By default, telemetry data is only accepted from applications on the same local -network as the browser. To accept telemetry data from a wider set of clients, -modify the `listen_address` attribute to the IP address of the appropriate -network interface to use. - -The `cors_allowed_origins` argument determines what origins browser requests -may come from. The default value, `[]`, disables CORS support. To support -requests from all origins, set `cors_allowed_origins` to `["*"]`. The `*` -character indicates a wildcard. 
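For example, a minimal sketch of a server block that listens on all interfaces and permits one hypothetical origin:

```river
faro.receiver "example" {
  server {
    // Accept telemetry from any network interface, not just localhost.
    listen_address       = "0.0.0.0"
    // Allow cross-origin requests from the (hypothetical) app origin only.
    cors_allowed_origins = ["https://app.example.com"]
  }

  output {
    // Forward nothing in this sketch; wire up loki/otelcol receivers in practice.
    logs = []
  }
}
```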
- -When the `api_key` argument is non-empty, client requests must have an HTTP -header called `X-API-Key` matching the value of the `api_key` argument. -Requests that are missing the header or have the wrong value are rejected with -an `HTTP 401 Unauthorized` status code. If the `api_key` argument is empty, no -authentication checks are performed, and the `X-API-Key` HTTP header is -ignored. - -### rate_limiting block - -The `rate_limiting` block configures rate limiting for client requests. - -Name | Type | Description | Default | Required ----- | ---- | ----------- | ------- | -------- -`enabled` | `bool` | Whether to enable rate limiting. | `true` | no -`rate` | `number` | Rate of allowed requests per second. | `50` | no -`burst_size` | `number` | Allowed burst size of requests. | `100` | no - -Rate limiting functions as a [token bucket algorithm][token-bucket], where -a bucket has a maximum capacity for up to `burst_size` requests and refills at a -rate of `rate` per second. - -Each HTTP request drains the capacity of the bucket by one. Once the bucket is -empty, HTTP requests are rejected with an `HTTP 429 Too Many Requests` status -code until the bucket has more available capacity. - -Configuring the `rate` argument determines how fast the bucket refills, and -configuring the `burst_size` argument determines how many requests can be -received in a burst before the bucket is empty and starts rejecting requests. - -[token-bucket]: https://en.wikipedia.org/wiki/Token_bucket - -### sourcemaps block - -The `sourcemaps` block configures how to retrieve sourcemaps. Sourcemaps are -then used to transform file and line information from minified code into the -file and line information from the original source code. - -Name | Type | Description | Default | Required ----- | ---- | ----------- | ------- | -------- -`download` | `bool` | Whether to download sourcemaps. | `true` | no -`download_from_origins` | `list(string)` | Which origins to download sourcemaps from. | `["*"]` | no -`download_timeout` | `duration` | Timeout when downloading sourcemaps. | `"1s"` | no - -When exceptions are sent to the `faro.receiver` component, it can download -sourcemaps from the web application. You can disable this behavior by setting -the `download` argument to `false`. - -The `download_from_origins` argument determines which origins a sourcemap may -be downloaded from. The origin is attached to the URL that a browser is sending -telemetry data from. The default value, `["*"]`, enables downloading sourcemaps -from all origins. The `*` character indicates a wildcard. - -By default, sourcemap downloads are subject to a timeout of `"1s"`, specified -by the `download_timeout` argument. Setting `download_timeout` to `"0s"` -disables timeouts. - -To retrieve sourcemaps from disk instead of the network, specify one or more -[`location` blocks][location]. When `location` blocks are provided, they are -checked first for sourcemaps before falling back to downloading. - -### location block - -The `location` block declares a location where sourcemaps are stored on the -filesystem. The `location` block can be specified multiple times to declare -multiple locations where sourcemaps are stored. - -Name | Type | Description | Default | Required ----- | ---- | ----------- | ------- | -------- -`path` | `string` | The path on disk where sourcemaps are stored. | | yes -`minified_path_prefix` | `string` | The prefix of the minified path sent from browsers. 
| | yes - -The `minified_path_prefix` argument determines the prefix of paths to -JavaScript files, such as `http://example.com/`. The `path` argument then -determines where to find the sourcemap for the file. - -For example, given the following location block: - -``` -location { - path = "/var/my-app/build" - minified_path_prefix = "http://example.com/" -} -``` - -To look up the sourcemaps for a file hosted at `http://example.com/foo.js`, the -`faro.receiver` component will: - -1. Remove the minified path prefix to extract the path to the file (`foo.js`). -2. Search for that file path with a `.map` extension (`foo.js.map`) in `path` - (`/var/my-app/build/foo.js.map`). - -Optionally, the value for the `path` argument may contain `{{ .Release }}` as a -template value, such as `/var/my-app/{{ .Release }}/build`. The template value -will be replaced with the release value provided by the [Faro Web SDK][faro-sdk]. - -### output block - -The `output` block specifies where to forward collected logs and traces. - -Name | Type | Description | Default | Required ----- | ---- | ----------- | ------- | -------- -`logs` | `list(LogsReceiver)` | A list of `loki` components to forward logs to. | `[]` | no -`traces` | `list(otelcol.Consumer)` | A list of `otelcol` components to forward traces to. | `[]` | no - -## Exported fields - -`faro.receiver` does not export any fields. - -## Component health - -`faro.receiver` is reported as unhealthy when the integrated server fails to -start. - -## Debug information - -`faro.receiver` does not expose any component-specific debug information. - -## Debug metrics - -`faro.receiver` exposes the following metrics for monitoring the component: - -* `faro_receiver_logs_total` (counter): Total number of ingested logs. -* `faro_receiver_measurements_total` (counter): Total number of ingested measurements. -* `faro_receiver_exceptions_total` (counter): Total number of ingested exceptions. -* `faro_receiver_events_total` (counter): Total number of ingested events. -* `faro_receiver_exporter_errors_total` (counter): Total number of errors produced by an internal exporter. -* `faro_receiver_request_duration_seconds` (histogram): Time (in seconds) spent serving HTTP requests. -* `faro_receiver_request_message_bytes` (histogram): Size (in bytes) of HTTP requests received from clients. -* `faro_receiver_response_message_bytes` (histogram): Size (in bytes) of HTTP responses sent to clients. -* `faro_receiver_inflight_requests` (gauge): Current number of inflight requests. -* `faro_receiver_sourcemap_cache_size` (counter): Number of items in sourcemap cache per origin. -* `faro_receiver_sourcemap_downloads_total` (counter): Total number of sourcemap downloads performed per origin and status. -* `faro_receiver_sourcemap_file_reads_total` (counter): Total number of sourcemap retrievals using the filesystem per origin and status. - -## Example - -```river -faro.receiver "default" { - server { - listen_address = "NETWORK_ADDRESS" - } - - sourcemaps { - location { - path = "PATH_TO_SOURCEMAPS" - minified_path_prefix = "WEB_APP_PREFIX" - } - } - - output { - logs = [loki.write.default.receiver] - traces = [otelcol.exporter.otlp.traces.input] - } -} - -loki.write "default" { - endpoint { - url = "https://LOKI_ADDRESS/api/v1/push" - } -} - -otelcol.exporter.otlp "traces" { - client { - endpoint = "OTLP_ADDRESS" - } -} -``` - -Replace the following: - -* `NETWORK_ADDRESS`: IP address of the network interface to listen to traffic - on.
This IP address must be reachable by browsers using the web application - to instrument. - -* `PATH_TO_SOURCEMAPS`: Path on disk where sourcemaps are located. - -* `WEB_APP_PREFIX`: Prefix of the web application being instrumented. - -* `LOKI_ADDRESS`: Address of the Loki server to send logs to. - - * If authentication is required to send logs to the Loki server, refer to the - documentation of [loki.write][] for more information. - -* `OTLP_ADDRESS`: The address of the OTLP-compatible server to send traces to. - - * If authentication is required to send traces to the OTLP-compatible server, refer to the - documentation of [otelcol.exporter.otlp][] for more information. - -[loki.write]: {{< relref "./loki.write.md" >}} -[otelcol.exporter.otlp]: {{< relref "./otelcol.exporter.otlp.md" >}} - - - -## Compatible components - -`faro.receiver` can accept arguments from the following components: - -- Components that export [Loki `LogsReceiver`](../../compatibility/#loki-logsreceiver-exporters) -- Components that export [OpenTelemetry `otelcol.Consumer`](../../compatibility/#opentelemetry-otelcolconsumer-exporters) - - -{{< admonition type="note" >}} -Connecting some components may not be sensible or components may require further configuration to make the connection work correctly. -Refer to the linked documentation for more details. -{{< /admonition >}} - - diff --git a/docs/sources/flow/reference/components/local.file.md b/docs/sources/flow/reference/components/local.file.md deleted file mode 100644 index 5e935a0bbb..0000000000 --- a/docs/sources/flow/reference/components/local.file.md +++ /dev/null @@ -1,84 +0,0 @@ ---- -aliases: -- /docs/grafana-cloud/agent/flow/reference/components/local.file/ -- /docs/grafana-cloud/monitor-infrastructure/agent/flow/reference/components/local.file/ -- /docs/grafana-cloud/monitor-infrastructure/integrations/agent/flow/reference/components/local.file/ -- /docs/grafana-cloud/send-data/agent/flow/reference/components/local.file/ -canonical: https://grafana.com/docs/agent/latest/flow/reference/components/local.file/ -description: Learn about local.file -title: local.file ---- - -# local.file - -`local.file` exposes the contents of a file on disk to other components. The -file will be watched for changes so that its latest content is always exposed. - -The most common use of `local.file` is to load secrets (e.g., API keys) from -files. - -Multiple `local.file` components can be specified by giving them different -labels.
- -## Usage - -```river -local.file "LABEL" { - filename = FILE_NAME -} -``` - -## Arguments - -The following arguments are supported: - -Name | Type | Description | Default | Required ----- | ---- | ----------- | ------- | -------- -`filename` | `string` | Path of the file on disk to watch | | yes -`detector` | `string` | Which file change detector to use (fsnotify, poll) | `"fsnotify"` | no -`poll_frequency` | `duration` | How often to poll for file changes | `"1m"` | no -`is_secret` | `bool` | Marks the file as containing a [secret][] | `false` | no - -[secret]: {{< relref "../../concepts/config-language/expressions/types_and_values.md#secrets" >}} - -{{< docs/shared lookup="flow/reference/components/local-file-arguments-text.md" source="agent" version="" >}} - -## Exported fields - -The following fields are exported and can be referenced by other components: - -Name | Type | Description ----- | ---- | ----------- -`content` | `string` or `secret` | The contents of the file from the most recent read - -The `content` field will have the `secret` type only if the `is_secret` -argument was true. - -## Component health - -`local.file` is reported as healthy if the watched file was read -successfully. - -Failing to read the file whenever an update is detected (or after the poll -period elapses) will cause the component to be reported as unhealthy. When -unhealthy, exported fields will be kept at the last healthy value. The read -error will be exposed as a log message and in the debug information for the -component. - -## Debug information - -`local.file` does not expose any component-specific debug information. - -## Debug metrics - -* `agent_local_file_timestamp_last_accessed_unix_seconds` (gauge): The - timestamp, in Unix seconds, that the file was last successfully accessed. - -## Example - -```river
local.file "secret_key" { - filename = "/var/secrets/password.txt" - is_secret = true -} -``` diff --git a/docs/sources/flow/reference/components/loki.source.azure_event_hubs.md b/docs/sources/flow/reference/components/loki.source.azure_event_hubs.md deleted file mode 100644 index 8a5c8fdfaa..0000000000 --- a/docs/sources/flow/reference/components/loki.source.azure_event_hubs.md +++ /dev/null @@ -1,151 +0,0 @@ ---- -aliases: -- /docs/grafana-cloud/agent/flow/reference/components/loki.source.azure_event_hubs/ -- /docs/grafana-cloud/monitor-infrastructure/agent/flow/reference/components/loki.source.azure_event_hubs/ -- /docs/grafana-cloud/monitor-infrastructure/integrations/agent/flow/reference/components/loki.source.azure_event_hubs/ -- /docs/grafana-cloud/send-data/agent/flow/reference/components/loki.source.azure_event_hubs/ -canonical: https://grafana.com/docs/agent/latest/flow/reference/components/loki.source.azure_event_hubs/ -description: Learn about loki.source.azure_event_hubs -title: loki.source.azure_event_hubs ---- - -# loki.source.azure_event_hubs - -`loki.source.azure_event_hubs` receives Azure Event Hubs messages by making use of an Apache Kafka -endpoint on Event Hubs. For more information, see -the [Azure Event Hubs documentation](https://learn.microsoft.com/en-us/azure/event-hubs/azure-event-hubs-kafka-overview). - -To learn more about streaming Azure logs to an Azure Event Hubs, refer to -Microsoft's tutorial on how to [Stream Azure Active Directory logs to an Azure event hub](https://learn.microsoft.com/en-us/azure/active-directory/reports-monitoring/tutorial-azure-monitor-stream-logs-to-event-hub).
- -Note that an Apache Kafka endpoint is not available within the Basic pricing plan. For more information, see -the [Event Hubs pricing page](https://azure.microsoft.com/en-us/pricing/details/event-hubs/). - -Multiple `loki.source.azure_event_hubs` components can be specified by giving them -different labels. - -## Usage - -```river -loki.source.azure_event_hubs "LABEL" { - fully_qualified_namespace = "HOST:PORT" - event_hubs = EVENT_HUB_LIST - forward_to = RECEIVER_LIST - - authentication { - mechanism = "AUTHENTICATION_MECHANISM" - } -} -``` - -## Arguments - -`loki.source.azure_event_hubs` supports the following arguments: - -Name | Type | Description | Default | Required ----- | ---- | ----------- | ------- | -------- -`fully_qualified_namespace` | `string` | Event hub namespace. | | yes -`event_hubs` | `list(string)` | Event Hubs to consume. | | yes -`group_id` | `string` | The Kafka consumer group id. | `"loki.source.azure_event_hubs"` | no -`assignor` | `string` | The consumer group rebalancing strategy to use. | `"range"` | no -`use_incoming_timestamp` | `bool` | Whether or not to use the timestamp received from Azure Event Hub. | `false` | no -`labels` | `map(string)` | The labels to associate with each received event. | `{}` | no -`forward_to` | `list(LogsReceiver)` | List of receivers to send log entries to. | | yes -`disallow_custom_messages` | `bool` | Whether to ignore messages that don't match the [schema](https://learn.microsoft.com/en-us/azure/azure-monitor/essentials/resource-logs-schema) for Azure resource logs. | `false` | no -`relabel_rules` | `RelabelRules` | Relabeling rules to apply on log entries. | `{}` | no - -The `fully_qualified_namespace` argument must refer to a full `HOST:PORT` that points to your event hub, such as `NAMESPACE.servicebus.windows.net:9093`. -The `assignor` argument must be set to one of `"range"`, `"roundrobin"`, or `"sticky"`. - -The `relabel_rules` field can make use of the `rules` export value from a -`loki.relabel` component to apply one or more relabeling rules to log entries -before they're forwarded to the list of receivers in `forward_to`. - -### Labels - -The `labels` map is applied to every message that the component reads. - -The following internal labels prefixed with `__` are available but are discarded if not relabeled: - -- `__meta_kafka_message_key` -- `__meta_kafka_topic` -- `__meta_kafka_partition` -- `__meta_kafka_member_id` -- `__meta_kafka_group_id` -- `__azure_event_hubs_category` - -## Blocks - -The following blocks are supported inside the definition of `loki.source.azure_event_hubs`: - -Hierarchy | Name | Description | Required ---------- | ---- | ----------- | -------- -authentication | [authentication] | Authentication configuration with Azure Event Hub. | yes - -[authentication]: #authentication-block - -### authentication block - -The `authentication` block defines the authentication method when communicating with Azure Event Hub.
- -Name | Type | Description | Default | Required ----- | ---- | ----------- | ------- | -------- -`mechanism` | `string` | Authentication mechanism. | | yes -`connection_string` | `string` | Event Hubs ConnectionString for authentication on Azure Cloud. | | no -`scopes` | `list(string)` | Access token scopes. Default is `fully_qualified_namespace` without port. | | no - -`mechanism` supports the values `"connection_string"` and `"oauth"`. If `"connection_string"` is used, -you must set the `connection_string` attribute. If `"oauth"` is used, you must configure one of the -supported [credential types](https://github.com/Azure/azure-sdk-for-go/blob/main/sdk/azidentity/README.md#credential-types) -via environment variables or the Azure CLI. - -## Exported fields - -`loki.source.azure_event_hubs` does not export any fields. - -## Component health - -`loki.source.azure_event_hubs` is only reported as unhealthy if given an invalid -configuration. - -## Debug information - -`loki.source.azure_event_hubs` does not expose additional debug info. - -## Example - -This example consumes messages from Azure Event Hub and uses OAuth to authenticate itself. - -```river -loki.source.azure_event_hubs "example" { - fully_qualified_namespace = "my-ns.servicebus.windows.net:9093" - event_hubs = ["gw-logs"] - forward_to = [loki.write.example.receiver] - - authentication { - mechanism = "oauth" - } -} - -loki.write "example" { - endpoint { - url = "http://loki:3100/loki/api/v1/push" - } -} -``` - -## Compatible components - -`loki.source.azure_event_hubs` can accept arguments from the following components: - -- Components that export [Loki `LogsReceiver`](../../compatibility/#loki-logsreceiver-exporters) - - -{{< admonition type="note" >}} -Connecting some components may not be sensible or components may require further configuration to make the connection work correctly. -Refer to the linked documentation for more details. -{{< /admonition >}} - - diff --git a/docs/sources/flow/reference/components/otelcol.exporter.otlphttp.md b/docs/sources/flow/reference/components/otelcol.exporter.otlphttp.md deleted file mode 100644 index eccaf51f9f..0000000000 --- a/docs/sources/flow/reference/components/otelcol.exporter.otlphttp.md +++ /dev/null @@ -1,171 +0,0 @@ ---- -aliases: -- /docs/grafana-cloud/agent/flow/reference/components/otelcol.exporter.otlphttp/ -- /docs/grafana-cloud/monitor-infrastructure/agent/flow/reference/components/otelcol.exporter.otlphttp/ -- /docs/grafana-cloud/monitor-infrastructure/integrations/agent/flow/reference/components/otelcol.exporter.otlphttp/ -- /docs/grafana-cloud/send-data/agent/flow/reference/components/otelcol.exporter.otlphttp/ -canonical: https://grafana.com/docs/agent/latest/flow/reference/components/otelcol.exporter.otlphttp/ -description: Learn about otelcol.exporter.otlphttp -title: otelcol.exporter.otlphttp ---- - -# otelcol.exporter.otlphttp - -`otelcol.exporter.otlphttp` accepts telemetry data from other `otelcol` -components and writes them over the network using the OTLP HTTP protocol. - -> **NOTE**: `otelcol.exporter.otlphttp` is a wrapper over the upstream -> OpenTelemetry Collector `otlphttp` exporter. Bug reports or feature requests -> will be redirected to the upstream repository, if necessary. - -Multiple `otelcol.exporter.otlphttp` components can be specified by giving them -different labels.
- -## Usage - -```river -otelcol.exporter.otlphttp "LABEL" { - client { - endpoint = "HOST:PORT" - } -} -``` - -## Arguments - -`otelcol.exporter.otlphttp` supports the following arguments: - -Name | Type | Description | Default | Required ----- | ---- | ----------- | ------- | -------- -`metrics_endpoint` | `string` | The endpoint to send metrics to. | `client.endpoint + "/v1/metrics"` | no -`logs_endpoint` | `string` | The endpoint to send logs to. | `client.endpoint + "/v1/logs"` | no -`traces_endpoint` | `string` | The endpoint to send traces to. | `client.endpoint + "/v1/traces"` | no - -The default value depends on the `endpoint` field set in the required `client` -block. If set, these arguments override the `client.endpoint` field for the -corresponding signal. - -## Blocks - -The following blocks are supported inside the definition of -`otelcol.exporter.otlphttp`: - -Hierarchy | Block | Description | Required ---------- | ----- | ----------- | -------- -client | [client][] | Configures the HTTP server to send telemetry data to. | yes -client > tls | [tls][] | Configures TLS for the HTTP client. | no -sending_queue | [sending_queue][] | Configures batching of data before sending. | no -retry_on_failure | [retry_on_failure][] | Configures retry mechanism for failed requests. | no -debug_metrics | [debug_metrics][] | Configures the metrics that this component generates to monitor its state. | no - -The `>` symbol indicates deeper levels of nesting. For example, `client > tls` -refers to a `tls` block defined inside a `client` block. - -[client]: #client-block -[tls]: #tls-block -[sending_queue]: #sending_queue-block -[retry_on_failure]: #retry_on_failure-block -[debug_metrics]: #debug_metrics-block - -### client block - -The `client` block configures the HTTP client used by the component. - -The following arguments are supported: - -Name | Type | Description | Default | Required ----- | ---- | ----------- | ------- | -------- -`endpoint` | `string` | The target URL to send telemetry data to. | | yes -`read_buffer_size` | `string` | Size of the read buffer the HTTP client uses for reading server responses. | `0` | no -`write_buffer_size` | `string` | Size of the write buffer the HTTP client uses for writing requests. | `"512KiB"` | no -`timeout` | `duration` | Time to wait before marking a request as failed. | `"30s"` | no -`headers` | `map(string)` | Additional headers to send with the request. | `{}` | no -`compression` | `string` | Compression mechanism to use for requests. | `"gzip"` | no -`max_idle_conns` | `int` | Limits the number of idle HTTP connections the client can keep open. | `100` | no -`max_idle_conns_per_host` | `int` | Limits the number of idle HTTP connections the host can keep open. | `0` | no -`max_conns_per_host` | `int` | Limits the total (dialing, active, and idle) number of connections per host. | `0` | no -`idle_conn_timeout` | `duration` | Time to wait before an idle connection closes itself. | `"90s"` | no -`disable_keep_alives`| `bool` | Disable HTTP keep-alive. | `false` | no -`auth` | `capsule(otelcol.Handler)` | Handler from an `otelcol.auth` component to use for authenticating requests. | | no - -Setting `disable_keep_alives` to `true` results in significant overhead, as a new HTTP(S) connection must be established for every request. -Before enabling this option, consider whether changes to idle connection settings can achieve your goal.
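For instance, a sketch of a client block that tunes idle connection reuse instead of disabling keep-alives; the endpoint and values are illustrative only:

```river
otelcol.exporter.otlphttp "example" {
  client {
    // Hypothetical OTLP/HTTP endpoint.
    endpoint          = "https://otlp.example.com:4318"
    // Keep more idle connections around for longer instead of disabling keep-alives.
    max_idle_conns    = 200
    idle_conn_timeout = "5m"
  }
}
```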
- -{{< docs/shared lookup="flow/reference/components/otelcol-compression-field.md" source="agent" version="" >}} - -### tls block - -The `tls` block configures TLS settings used for the connection to the HTTP -server. - -{{< docs/shared lookup="flow/reference/components/otelcol-tls-config-block.md" source="agent" version="" >}} - -### sending_queue block - -The `sending_queue` block configures an in-memory buffer of batches before data is sent -to the HTTP server. - -{{< docs/shared lookup="flow/reference/components/otelcol-queue-block.md" source="agent" version="" >}} - -### retry_on_failure block - -The `retry_on_failure` block configures how failed requests to the HTTP server are -retried. - -{{< docs/shared lookup="flow/reference/components/otelcol-retry-block.md" source="agent" version="" >}} - -### debug_metrics block - -{{< docs/shared lookup="flow/reference/components/otelcol-debug-metrics-block.md" source="agent" version="" >}} - -## Exported fields - -The following fields are exported and can be referenced by other components: - -Name | Type | Description ----- | ---- | ----------- -`input` | `otelcol.Consumer` | A value that other components can use to send telemetry data to. - -`input` accepts `otelcol.Consumer` data for any telemetry signal (metrics, -logs, or traces). - -## Component health - -`otelcol.exporter.otlphttp` is only reported as unhealthy if given an invalid -configuration. - -## Debug information - -`otelcol.exporter.otlphttp` does not expose any component-specific debug -information. - -## Example - -This example creates an exporter to send data to a locally running Grafana -Tempo without TLS: - -```river -otelcol.exporter.otlphttp "tempo" { - client { - endpoint = "http://tempo:4318" - tls { - insecure = true - insecure_skip_verify = true - } - } -} -``` - - -## Compatible components - -`otelcol.exporter.otlphttp` has exports that can be consumed by the following components: - -- Components that consume [OpenTelemetry `otelcol.Consumer`](../../compatibility/#opentelemetry-otelcolconsumer-consumers) - -{{< admonition type="note" >}} -Connecting some components may not be sensible or components may require further configuration to make the connection work correctly. -Refer to the linked documentation for more details.
-{{< /admonition >}} - - \ No newline at end of file diff --git a/docs/sources/flow/reference/components/otelcol.extension.jaeger_remote_sampling.md b/docs/sources/flow/reference/components/otelcol.extension.jaeger_remote_sampling.md deleted file mode 100644 index 893d38b591..0000000000 --- a/docs/sources/flow/reference/components/otelcol.extension.jaeger_remote_sampling.md +++ /dev/null @@ -1,309 +0,0 @@ ---- -aliases: -- /docs/grafana-cloud/agent/flow/reference/components/otelcol.extension.jaeger_remote_sampling/ -- /docs/grafana-cloud/monitor-infrastructure/agent/flow/reference/components/otelcol.extension.jaeger_remote_sampling/ -- /docs/grafana-cloud/monitor-infrastructure/integrations/agent/flow/reference/components/otelcol.extension.jaeger_remote_sampling/ -- /docs/grafana-cloud/send-data/agent/flow/reference/components/otelcol.extension.jaeger_remote_sampling/ -canonical: https://grafana.com/docs/agent/latest/flow/reference/components/otelcol.extension.jaeger_remote_sampling/ -description: Learn about otelcol.extension.jaeger_remote_sampling -labels: - stage: experimental -title: otelcol.extension.jaeger_remote_sampling ---- - -# otelcol.extension.jaeger_remote_sampling - -{{< docs/shared lookup="flow/stability/experimental.md" source="agent" version="" >}} - -`otelcol.extension.jaeger_remote_sampling` serves a specified Jaeger remote sampling -document. - -> **NOTE**: `otelcol.extension.jaeger_remote_sampling` is a wrapper over the upstream OpenTelemetry -> Collector `jaegerremotesampling` extension. Bug reports or feature requests will be -> redirected to the upstream repository, if necessary. - -Multiple `otelcol.extension.jaeger_remote_sampling` components can be specified by giving them -different labels. - -## Usage - -```river -otelcol.extension.jaeger_remote_sampling "LABEL" { - source { - } -} -``` - -## Arguments - -`otelcol.extension.jaeger_remote_sampling` doesn't support any arguments and is configured fully -through inner blocks. - -## Blocks - -The following blocks are supported inside the definition of -`otelcol.extension.jaeger_remote_sampling`: - -Hierarchy | Block | Description | Required ---------- | ----- | ----------- | -------- -http | [http][] | Configures the http server to serve Jaeger remote sampling. | no -http > tls | [tls][] | Configures TLS for the HTTP server. | no -http > cors | [cors][] | Configures CORS for the HTTP server. | no -grpc | [grpc][] | Configures the grpc server to serve Jaeger remote sampling. | no -grpc > tls | [tls][] | Configures TLS for the gRPC server. | no -grpc > keepalive | [keepalive][] | Configures keepalive settings for the configured server. | no -grpc > keepalive > server_parameters | [server_parameters][] | Server parameters used to configure keepalive settings. | no -grpc > keepalive > enforcement_policy | [enforcement_policy][] | Enforcement policy for keepalive settings. | no -source | [source][] | Configures the Jaeger remote sampling document. | yes -source > remote | [remote][] | Configures the gRPC client used to retrieve the Jaeger remote sampling document. | no -source > remote > tls | [tls][] | Configures TLS for the gRPC client. | no -source > remote > keepalive | [keepalive][] | Configures keepalive settings for the gRPC client. | no - -The `>` symbol indicates deeper levels of nesting. For example, `grpc > tls` -refers to a `tls` block defined inside a `grpc` block.
- -[http]: #http-block -[tls]: #tls-block -[cors]: #cors-block -[grpc]: #grpc-block -[keepalive]: #keepalive-block -[server_parameters]: #server_parameters-block -[enforcement_policy]: #enforcement_policy-block -[source]: #source-block -[remote]: #remote-block -[tls_client]: #tls-client-block -[keepalive_client]: #keepalive-client-block - -### http block - -The `http` block configures an HTTP server which serves the Jaeger remote -sampling document. - -The following arguments are supported: - -Name | Type | Description | Default | Required ----- | ---- | ----------- | ------- | -------- -`endpoint` | `string` | `host:port` to listen for traffic on. | `"0.0.0.0:5778"` | no -`max_request_body_size` | `string` | Maximum request body size the server will allow. No limit when unset. | | no -`include_metadata` | `boolean` | Propagate incoming connection metadata to downstream consumers. | | no - -### tls block - -The `tls` block configures TLS settings used for a server. If the `tls` block -isn't provided, TLS won't be used for connections to the server. - -The following arguments are supported: - -Name | Type | Description | Default | Required ----- | ---- | ----------- | ------- | -------- -`ca_file` | `string` | Path to the CA file. | | no -`cert_file` | `string` | Path to the TLS certificate. | | no -`key_file` | `string` | Path to the TLS certificate key. | | no -`min_version` | `string` | Minimum acceptable TLS version for connections. | `"TLS 1.2"` | no -`max_version` | `string` | Maximum acceptable TLS version for connections. | `"TLS 1.3"` | no -`reload_interval` | `duration` | Frequency to reload the certificates. | | no -`client_ca_file` | `string` | Path to the CA file used to authenticate client certificates. | | no - -### cors block - -The `cors` block configures CORS settings for an HTTP server. - -The following arguments are supported: - -Name | Type | Description | Default | Required ----- | ---- | ----------- | ------- | -------- -`allowed_origins` | `list(string)` | Allowed values for the `Origin` header. | | no -`allowed_headers` | `list(string)` | Accepted headers from CORS requests. | `["X-Requested-With"]` | no -`max_age` | `number` | Configures the `Access-Control-Max-Age` response header. | | no - -The `allowed_headers` specifies which headers are acceptable from a CORS -request. The following headers are always implicitly allowed: - -* `Accept` -* `Accept-Language` -* `Content-Type` -* `Content-Language` - -If `allowed_headers` includes `"*"`, all headers will be permitted. - -### grpc block - -The `grpc` block configures a gRPC server which serves the Jaeger remote - sampling document. - -The following arguments are supported: - -Name | Type | Description | Default | Required ----- | ---- | ----------- | ------- | -------- -`endpoint` | `string` | `host:port` to listen for traffic on. | `"0.0.0.0:14250"` | no -`transport` | `string` | Transport to use for the gRPC server. | `"tcp"` | no -`max_recv_msg_size` | `string` | Maximum size of messages the server will accept. 0 disables a limit. | | no -`max_concurrent_streams` | `number` | Limit the number of concurrent streaming RPC calls. | | no -`read_buffer_size` | `string` | Size of the read buffer the gRPC server will use for reading from clients. | `"512KiB"` | no -`write_buffer_size` | `string` | Size of the write buffer the gRPC server will use for writing to clients. | | no -`include_metadata` | `boolean` | Propagate incoming connection metadata to downstream consumers. 
| | no - -### keepalive block - -The `keepalive` block configures keepalive settings for connections to a gRPC -server. - -`keepalive` doesn't support any arguments and is configured fully through inner -blocks. - -### server_parameters block - -The `server_parameters` block controls keepalive and maximum age settings for gRPC -servers. - -The following arguments are supported: - -Name | Type | Description | Default | Required ----- | ---- | ----------- | ------- | -------- -`max_connection_idle` | `duration` | Maximum age for idle connections. | `"infinity"` | no -`max_connection_age` | `duration` | Maximum age for non-idle connections. | `"infinity"` | no -`max_connection_age_grace` | `duration` | Time to wait before forcibly closing connections. | `"infinity"` | no -`time` | `duration` | How often to ping inactive clients to check for liveness. | `"2h"` | no -`timeout` | `duration` | Time to wait before closing inactive clients that do not respond to liveness checks. | `"20s"` | no - -### enforcement_policy block - -The `enforcement_policy` block configures the keepalive enforcement policy for -gRPC servers. The server will close connections from clients that violate the -configured policy. - -The following arguments are supported: - -Name | Type | Description | Default | Required ----- | ---- | ----------- | ------- | -------- -`min_time` | `duration` | Minimum time clients should wait before sending a keepalive ping. | `"5m"` | no -`permit_without_stream` | `boolean` | Allow clients to send keepalive pings when there are no active streams. | `false` | no - -### source block - -The `source` block configures the method of retrieving the Jaeger remote sampling document -that is served by the servers specified in the `grpc` and `http` blocks. - -The following arguments are supported: - -Name | Type | Description | Default | Required ----- | ---- | ----------- | ------- | -------- -`file` | `string` | A local file containing a Jaeger remote sampling document. | `""` | no -`reload_interval` | `duration` | The interval at which to reload the specified file. Leave at 0 to never reload. | `0` | no -`content` | `string` | A string containing the Jaeger remote sampling contents directly. | `""` | no - -Exactly one of the `file` argument, `content` argument or `remote` block must be specified. - -### remote block - -The `remote` block configures the gRPC client used by the component. - -The following arguments are supported: - -Name | Type | Description | Default | Required ----- | ---- | ----------- | ------- | -------- -`endpoint` | `string` | `host:port` to send telemetry data to. | | yes -`compression` | `string` | Compression mechanism to use for requests. | `"gzip"` | no -`read_buffer_size` | `string` | Size of the read buffer the gRPC client to use for reading server responses. | | no -`write_buffer_size` | `string` | Size of the write buffer the gRPC client to use for writing requests. | `"512KiB"` | no -`wait_for_ready` | `boolean` | Waits for gRPC connection to be in the `READY` state before sending data. | `false` | no -`headers` | `map(string)` | Additional headers to send with the request. | `{}` | no -`balancer_name` | `string` | Which gRPC client-side load balancer to use for requests. | `pick_first` | no -`authority` | `string` | Overrides the default `:authority` header in gRPC requests from the gRPC client. | | no -`auth` | `capsule(otelcol.Handler)` | Handler from an `otelcol.auth` component to use for authenticating requests. 
 | | no
-
-{{< docs/shared lookup="flow/reference/components/otelcol-compression-field.md" source="agent" version="" >}}
-
-{{< docs/shared lookup="flow/reference/components/otelcol-grpc-balancer-name.md" source="agent" version="" >}}
-
-{{< docs/shared lookup="flow/reference/components/otelcol-grpc-authority.md" source="agent" version="" >}}
-
-An HTTP proxy can be configured through the following environment variables:
-
-* `HTTPS_PROXY`
-* `NO_PROXY`
-
-The `HTTPS_PROXY` environment variable specifies a URL to use for proxying
-requests. Connections to the proxy are established via [the `HTTP CONNECT`
-method][HTTP CONNECT].
-
-The `NO_PROXY` environment variable is an optional list of comma-separated
-hostnames for which the HTTPS proxy should _not_ be used. Each hostname can be
-provided as an IP address (`1.2.3.4`), an IP address in CIDR notation
-(`1.2.3.4/8`), a domain name (`example.com`), or `*`. A domain name matches
-that domain and all subdomains. A domain name with a leading "."
-(`.example.com`) matches subdomains only. `NO_PROXY` is only read when
-`HTTPS_PROXY` is set.
-
-Because `otelcol.extension.jaeger_remote_sampling` uses gRPC, the configured proxy server must be
-able to handle and proxy HTTP/2 traffic.
-
-[HTTP CONNECT]: https://developer.mozilla.org/en-US/docs/Web/HTTP/Methods/CONNECT
-
-### tls client block
-
-The `tls` block configures TLS settings used for the connection to the gRPC
-server.
-
-{{< docs/shared lookup="flow/reference/components/otelcol-tls-config-block.md" source="agent" version="" >}}
-
-### keepalive client block
-
-The `keepalive` block configures keepalive settings for gRPC client
-connections.
-
-The following arguments are supported:
-
-Name | Type | Description | Default | Required
----- | ---- | ----------- | ------- | --------
-`ping_wait` | `duration` | How often to ping the server after no activity. | | no
-`ping_response_timeout` | `duration` | Time to wait before closing inactive connections if the server does not respond to a ping. | | no
-`ping_without_stream` | `boolean` | Send pings even if there is no active stream request. | | no
-
-## Component health
-
-`otelcol.extension.jaeger_remote_sampling` is only reported as unhealthy if given an invalid
-configuration.
-
-## Debug information
-
-`otelcol.extension.jaeger_remote_sampling` does not expose any component-specific debug information.
-
-## Examples
-
-### Serving from a file
-
-This example configures the Jaeger remote sampling extension to load a local JSON document and
-serve it over the default HTTP port of 5778. This configuration style currently exists for consistency
-with upstream OpenTelemetry Collector components and may be removed.
- -```river -otelcol.extension.jaeger_remote_sampling "example" { - http { - } - source { - file = "/path/to/jaeger-sampling.json" - reload_interval = "10s" - } -} -``` - -### Serving from another component - - -This example uses the output of a component to determine what sampling -rules to serve: - -```river -local.file "sampling" { - filename = "/path/to/jaeger-sampling.json" -} - -otelcol.extension.jaeger_remote_sampling "example" { - http { - } - source { - content = local.file.sampling.content - } -} -``` diff --git a/docs/sources/flow/reference/config-blocks/_index.md b/docs/sources/flow/reference/config-blocks/_index.md deleted file mode 100644 index bf528e3a16..0000000000 --- a/docs/sources/flow/reference/config-blocks/_index.md +++ /dev/null @@ -1,21 +0,0 @@ ---- -aliases: -- /docs/grafana-cloud/agent/flow/reference/config-blocks/ -- /docs/grafana-cloud/monitor-infrastructure/agent/flow/reference/config-blocks/ -- /docs/grafana-cloud/monitor-infrastructure/integrations/agent/flow/reference/config-blocks/ -- /docs/grafana-cloud/send-data/agent/flow/reference/config-blocks/ -canonical: https://grafana.com/docs/agent/latest/flow/reference/config-blocks/ -description: Learn about configuration blocks -title: Configuration blocks -weight: 200 ---- - -# Configuration blocks - -Configuration blocks are optional top-level blocks that can be used to -configure various parts of the {{< param "PRODUCT_NAME" >}} process. Each configuration block can -only be defined once. - -Configuration blocks are _not_ components, so they have no exports. - -{{< section >}} diff --git a/docs/sources/flow/reference/stdlib/_index.md b/docs/sources/flow/reference/stdlib/_index.md deleted file mode 100644 index 8f42f4bc28..0000000000 --- a/docs/sources/flow/reference/stdlib/_index.md +++ /dev/null @@ -1,23 +0,0 @@ ---- -aliases: -- /docs/grafana-cloud/agent/flow/reference/stdlib/ -- /docs/grafana-cloud/monitor-infrastructure/agent/flow/reference/stdlib/ -- /docs/grafana-cloud/monitor-infrastructure/integrations/agent/flow/reference/stdlib/ -- /docs/grafana-cloud/send-data/agent/flow/reference/stdlib/ -- standard-library/ -canonical: https://grafana.com/docs/agent/latest/flow/reference/stdlib/ -description: The standard library is a list of functions used in expressions when - assigning values to attributes -title: Standard library -weight: 400 ---- - -# Standard library - -The standard library is a list of functions which can be used in expressions -when assigning values to attributes. - -All standard library functions are [pure functions](https://en.wikipedia.org/wiki/Pure_function): they will always return the same -output if given the same input. 
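-
-For example, a sketch using the `to_upper` function documented in this section: repeated calls with the same input always produce the same result.
-
-```
-> to_upper("hello")
-"HELLO"
-
-> to_upper("hello")
-"HELLO"
-```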
- -{{< section >}} diff --git a/docs/sources/flow/reference/stdlib/coalesce.md b/docs/sources/flow/reference/stdlib/coalesce.md deleted file mode 100644 index 73f5cd4448..0000000000 --- a/docs/sources/flow/reference/stdlib/coalesce.md +++ /dev/null @@ -1,28 +0,0 @@ ---- -aliases: -- ../../configuration-language/standard-library/coalesce/ -- /docs/grafana-cloud/agent/flow/reference/stdlib/coalesce/ -- /docs/grafana-cloud/monitor-infrastructure/agent/flow/reference/stdlib/coalesce/ -- /docs/grafana-cloud/monitor-infrastructure/integrations/agent/flow/reference/stdlib/coalesce/ -- /docs/grafana-cloud/send-data/agent/flow/reference/stdlib/coalesce/ -canonical: https://grafana.com/docs/agent/latest/flow/reference/stdlib/coalesce/ -description: Learn about coalesce -title: coalesce ---- - -# coalesce - -`coalesce` takes any number of arguments and returns the first one that isn't null, an empty string, empty list, or an empty object. -It is useful for obtaining a default value, such as if an environment variable isn't defined. -If no argument is non-empty or non-zero, the last argument is returned. - -## Examples - -``` -> coalesce("a", "b") -a -> coalesce("", "b") -b -> coalesce(env("DOES_NOT_EXIST"), "c") -c -``` diff --git a/docs/sources/flow/reference/stdlib/concat.md b/docs/sources/flow/reference/stdlib/concat.md deleted file mode 100644 index 36e7eba906..0000000000 --- a/docs/sources/flow/reference/stdlib/concat.md +++ /dev/null @@ -1,33 +0,0 @@ ---- -aliases: -- ../../configuration-language/standard-library/concat/ -- /docs/grafana-cloud/agent/flow/reference/stdlib/concat/ -- /docs/grafana-cloud/monitor-infrastructure/agent/flow/reference/stdlib/concat/ -- /docs/grafana-cloud/monitor-infrastructure/integrations/agent/flow/reference/stdlib/concat/ -- /docs/grafana-cloud/send-data/agent/flow/reference/stdlib/concat/ -canonical: https://grafana.com/docs/agent/latest/flow/reference/stdlib/concat/ -description: Learn about concat -title: concat ---- - -# concat - -The `concat` function concatenates one or more lists of values into a single -list. Each argument to `concat` must be a list value. Elements within the list -can be any type. - -## Examples - -``` -> concat([]) -[] - -> concat([1, 2], [3, 4]) -[1, 2, 3, 4] - -> concat([1, 2], [], [bool, null]) -[1, 2, bool, null] - -> concat([[1, 2], [3, 4]], [[5, 6]]) -[[1, 2], [3, 4], [5, 6]] -``` diff --git a/docs/sources/flow/reference/stdlib/constants.md b/docs/sources/flow/reference/stdlib/constants.md deleted file mode 100644 index 3caf5c336a..0000000000 --- a/docs/sources/flow/reference/stdlib/constants.md +++ /dev/null @@ -1,34 +0,0 @@ ---- -aliases: -- ../../configuration-language/standard-library/constants/ -- /docs/grafana-cloud/agent/flow/reference/stdlib/constants/ -- /docs/grafana-cloud/monitor-infrastructure/agent/flow/reference/stdlib/constants/ -- /docs/grafana-cloud/monitor-infrastructure/integrations/agent/flow/reference/stdlib/constants/ -- /docs/grafana-cloud/send-data/agent/flow/reference/stdlib/constants/ -canonical: https://grafana.com/docs/agent/latest/flow/reference/stdlib/constants/ -description: Learn about constants -title: constants ---- - -# constants - -The `constants` object exposes a list of constant values about the system -{{< param "PRODUCT_NAME" >}} is running on: - -* `constants.hostname`: The hostname of the machine {{< param "PRODUCT_NAME" >}} is running - on. -* `constants.os`: The operating system {{< param "PRODUCT_NAME" >}} is running on. 
-* `constants.arch`: The architecture of the system {{< param "PRODUCT_NAME" >}} is running on.
-
-## Examples
-
-```
-> constants.hostname
-"my-hostname"
-
-> constants.os
-"linux"
-
-> constants.arch
-"amd64"
-```
diff --git a/docs/sources/flow/reference/stdlib/env.md b/docs/sources/flow/reference/stdlib/env.md
deleted file mode 100644
index 49a65d1a6a..0000000000
--- a/docs/sources/flow/reference/stdlib/env.md
+++ /dev/null
@@ -1,26 +0,0 @@
----
-aliases:
-- ../../configuration-language/standard-library/env/
-- /docs/grafana-cloud/agent/flow/reference/stdlib/env/
-- /docs/grafana-cloud/monitor-infrastructure/agent/flow/reference/stdlib/env/
-- /docs/grafana-cloud/monitor-infrastructure/integrations/agent/flow/reference/stdlib/env/
-- /docs/grafana-cloud/send-data/agent/flow/reference/stdlib/env/
-canonical: https://grafana.com/docs/agent/latest/flow/reference/stdlib/env/
-description: Learn about env
-title: env
----
-
-# env
-
-The `env` function gets the value of an environment variable from the system {{< param "PRODUCT_NAME" >}} is running on.
-If the environment variable does not exist, `env` returns an empty string.
-
-## Examples
-
-```
-> env("HOME")
-"/home/grafana-agent"
-
-> env("DOES_NOT_EXIST")
-""
-```
diff --git a/docs/sources/flow/reference/stdlib/join.md b/docs/sources/flow/reference/stdlib/join.md
deleted file mode 100644
index 3203585c81..0000000000
--- a/docs/sources/flow/reference/stdlib/join.md
+++ /dev/null
@@ -1,30 +0,0 @@
----
-aliases:
-- ../../configuration-language/standard-library/join/
-- /docs/grafana-cloud/agent/flow/reference/stdlib/join/
-- /docs/grafana-cloud/monitor-infrastructure/agent/flow/reference/stdlib/join/
-- /docs/grafana-cloud/monitor-infrastructure/integrations/agent/flow/reference/stdlib/join/
-- /docs/grafana-cloud/send-data/agent/flow/reference/stdlib/join/
-canonical: https://grafana.com/docs/agent/latest/flow/reference/stdlib/join/
-description: Learn about join
-title: join
----
-
-# join
-
-`join` concatenates all items in an array into a string, using the given separator between items.
-
-```river
-join(list, separator)
-```
-
-## Examples
-
-```river
-> join(["foo", "bar", "baz"], "-")
-"foo-bar-baz"
-> join(["foo", "bar", "baz"], ", ")
-"foo, bar, baz"
-> join(["foo"], ", ")
-"foo"
-```
diff --git a/docs/sources/flow/reference/stdlib/json_decode.md b/docs/sources/flow/reference/stdlib/json_decode.md
deleted file mode 100644
index d56fc45dab..0000000000
--- a/docs/sources/flow/reference/stdlib/json_decode.md
+++ /dev/null
@@ -1,48 +0,0 @@
----
-aliases:
-- ../../configuration-language/standard-library/json_decode/
-- /docs/grafana-cloud/agent/flow/reference/stdlib/json_decode/
-- /docs/grafana-cloud/monitor-infrastructure/agent/flow/reference/stdlib/json_decode/
-- /docs/grafana-cloud/monitor-infrastructure/integrations/agent/flow/reference/stdlib/json_decode/
-- /docs/grafana-cloud/send-data/agent/flow/reference/stdlib/json_decode/
-canonical: https://grafana.com/docs/agent/latest/flow/reference/stdlib/json_decode/
-description: Learn about json_decode
-title: json_decode
----
-
-# json_decode
-
-The `json_decode` function decodes a string representing JSON into a River
-value. `json_decode` fails if the string argument provided cannot be parsed as
-JSON.
-
-A common use case of `json_decode` is to decode the output of a
-[`local.file`][] component to a River value.
-
-> Remember to escape double quotes when passing JSON string literals to `json_decode`.
->
-> For example, the JSON value `{"key": "value"}` is properly represented by the
-> string `"{\"key\": \"value\"}"`.
-
-## Examples
-
-```
-> json_decode("15")
-15
-
-> json_decode("[1, 2, 3]")
-[1, 2, 3]
-
-> json_decode("null")
-null
-
-> json_decode("{\"key\": \"value\"}")
-{
-  key = "value",
-}
-
-> json_decode(local.file.some_file.content)
-"Hello, world!"
-```
-
-[`local.file`]: {{< relref "../components/local.file.md" >}}
diff --git a/docs/sources/flow/reference/stdlib/json_path.md b/docs/sources/flow/reference/stdlib/json_path.md
deleted file mode 100644
index 91058e6e31..0000000000
--- a/docs/sources/flow/reference/stdlib/json_path.md
+++ /dev/null
@@ -1,47 +0,0 @@
----
-aliases:
-- ../../configuration-language/standard-library/json_path/
-- /docs/grafana-cloud/agent/flow/reference/stdlib/json_path/
-- /docs/grafana-cloud/monitor-infrastructure/agent/flow/reference/stdlib/json_path/
-- /docs/grafana-cloud/monitor-infrastructure/integrations/agent/flow/reference/stdlib/json_path/
-- /docs/grafana-cloud/send-data/agent/flow/reference/stdlib/json_path/
-canonical: https://grafana.com/docs/agent/latest/flow/reference/stdlib/json_path/
-description: Learn about json_path
-title: json_path
----
-
-# json_path
-
-The `json_path` function looks up values using [jsonpath](https://goessner.net/articles/JsonPath/) syntax.
-
-The function expects two strings. The first string is the JSON string used to look up values. The second string is the jsonpath expression.
-
-`json_path` always returns a list of values. If the jsonpath expression does not match any values, an empty list is returned.
-
-A common use case of `json_path` is to decode and filter the output of a [`local.file`][] or [`remote.http`][] component to a River value.
-
-> Remember to escape double quotes when passing JSON string literals to `json_path`.
->
-> For example, the JSON value `{"key": "value"}` is properly represented by the
-> string `"{\"key\": \"value\"}"`.
-
-## Examples
-
-```
-> json_path("{\"key\": \"value\"}", ".key")
-["value"]
-
-> json_path("[{\"name\": \"Department\",\"value\": \"IT\"},{\"name\":\"TestStatus\",\"value\":\"Pending\"}]", "[?(@.name == \"Department\")].value")
-["IT"]
-
-> json_path("{\"key\": \"value\"}", ".nonexists")
-[]
-
-> json_path("{\"key\": \"value\"}", ".key")[0]
-value
-```
-
-[`local.file`]: {{< relref "../components/local.file.md" >}}
-[`remote.http`]: {{< relref "../components/remote.http.md" >}}
diff --git a/docs/sources/flow/reference/stdlib/nonsensitive.md b/docs/sources/flow/reference/stdlib/nonsensitive.md
deleted file mode 100644
index a2bb0bd31d..0000000000
--- a/docs/sources/flow/reference/stdlib/nonsensitive.md
+++ /dev/null
@@ -1,34 +0,0 @@
----
-aliases:
-- ../../configuration-language/standard-library/nonsensitive/
-- /docs/grafana-cloud/agent/flow/reference/stdlib/nonsensitive/
-- /docs/grafana-cloud/monitor-infrastructure/agent/flow/reference/stdlib/nonsensitive/
-- /docs/grafana-cloud/monitor-infrastructure/integrations/agent/flow/reference/stdlib/nonsensitive/
-- /docs/grafana-cloud/send-data/agent/flow/reference/stdlib/nonsensitive/
-canonical: https://grafana.com/docs/agent/latest/flow/reference/stdlib/nonsensitive/
-description: Learn about nonsensitive
-title: nonsensitive
----
-
-# nonsensitive
-
-`nonsensitive` converts a [secret][] value back into a string.
-
-> **WARNING**: Only use `nonsensitive` when you are positive that the value
-> being converted back to a string is not a sensitive value.
->
-> Strings resulting from calls to `nonsensitive` will be displayed in plaintext
-> in the UI and internal API calls.
-
-[secret]: {{< relref "../../concepts/config-language/expressions/types_and_values.md#secrets" >}}
-
-## Examples
-
-```
-// Assuming `sensitive_value` is a secret:
-
-> sensitive_value
-(secret)
-> nonsensitive(sensitive_value)
-"Hello, world!"
-```
diff --git a/docs/sources/flow/reference/stdlib/replace.md b/docs/sources/flow/reference/stdlib/replace.md
deleted file mode 100644
index 2c1eb383f3..0000000000
--- a/docs/sources/flow/reference/stdlib/replace.md
+++ /dev/null
@@ -1,26 +0,0 @@
----
-aliases:
-- ../../configuration-language/standard-library/replace/
-- /docs/grafana-cloud/agent/flow/reference/stdlib/replace/
-- /docs/grafana-cloud/monitor-infrastructure/agent/flow/reference/stdlib/replace/
-- /docs/grafana-cloud/monitor-infrastructure/integrations/agent/flow/reference/stdlib/replace/
-- /docs/grafana-cloud/send-data/agent/flow/reference/stdlib/replace/
-canonical: https://grafana.com/docs/agent/latest/flow/reference/stdlib/replace/
-description: Learn about replace
-title: replace
----
-
-# replace
-
-`replace` searches a string for a substring, and replaces each occurrence of the substring with a replacement string.
-
-```river
-replace(string, substring, replacement)
-```
-
-## Examples
-
-```river
-> replace("1 + 2 + 3", "+", "-")
-"1 - 2 - 3"
-```
diff --git a/docs/sources/flow/reference/stdlib/split.md b/docs/sources/flow/reference/stdlib/split.md
deleted file mode 100644
index 3087ca1536..0000000000
--- a/docs/sources/flow/reference/stdlib/split.md
+++ /dev/null
@@ -1,32 +0,0 @@
----
-aliases:
-- ../../configuration-language/standard-library/split/
-- /docs/grafana-cloud/agent/flow/reference/stdlib/split/
-- /docs/grafana-cloud/monitor-infrastructure/agent/flow/reference/stdlib/split/
-- /docs/grafana-cloud/monitor-infrastructure/integrations/agent/flow/reference/stdlib/split/
-- /docs/grafana-cloud/send-data/agent/flow/reference/stdlib/split/
-canonical: https://grafana.com/docs/agent/latest/flow/reference/stdlib/split/
-description: Learn about split
-title: split
----
-
-# split
-
-`split` produces a list by dividing a string at all occurrences of a separator.
-
-```river
-split(string, separator)
-```
-
-## Examples
-
-```river
-> split("foo,bar,baz", ",")
-["foo", "bar", "baz"]
-
-> split("foo", ",")
-["foo"]
-
-> split("", ",")
-[""]
-```
diff --git a/docs/sources/flow/reference/stdlib/to_lower.md b/docs/sources/flow/reference/stdlib/to_lower.md
deleted file mode 100644
index 8c252fb354..0000000000
--- a/docs/sources/flow/reference/stdlib/to_lower.md
+++ /dev/null
@@ -1,22 +0,0 @@
----
-aliases:
-- ../../configuration-language/standard-library/to_lower/
-- /docs/grafana-cloud/agent/flow/reference/stdlib/to_lower/
-- /docs/grafana-cloud/monitor-infrastructure/agent/flow/reference/stdlib/to_lower/
-- /docs/grafana-cloud/monitor-infrastructure/integrations/agent/flow/reference/stdlib/to_lower/
-- /docs/grafana-cloud/send-data/agent/flow/reference/stdlib/to_lower/
-canonical: https://grafana.com/docs/agent/latest/flow/reference/stdlib/to_lower/
-description: Learn about to_lower
-title: to_lower
----
-
-# to_lower
-
-`to_lower` converts all uppercase letters in a string to lowercase.
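-
-For consistency with the other string functions, a usage sketch follows. The parameter name is illustrative:
-
-```river
-to_lower(string)
-```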
- -## Examples - -```river -> to_lower("HELLO") -"hello" -``` diff --git a/docs/sources/flow/reference/stdlib/to_upper.md b/docs/sources/flow/reference/stdlib/to_upper.md deleted file mode 100644 index aef26d5ff6..0000000000 --- a/docs/sources/flow/reference/stdlib/to_upper.md +++ /dev/null @@ -1,22 +0,0 @@ ---- -aliases: -- ../../configuration-language/standard-library/to_upper/ -- /docs/grafana-cloud/agent/flow/reference/stdlib/to_upper/ -- /docs/grafana-cloud/monitor-infrastructure/agent/flow/reference/stdlib/to_upper/ -- /docs/grafana-cloud/monitor-infrastructure/integrations/agent/flow/reference/stdlib/to_upper/ -- /docs/grafana-cloud/send-data/agent/flow/reference/stdlib/to_upper/ -canonical: https://grafana.com/docs/agent/latest/flow/reference/stdlib/to_upper/ -description: Learn about to_upper -title: to_upper ---- - -# to_upper - -`to_upper` converts all lowercase letters in a string to uppercase. - -## Examples - -```river -> to_upper("hello") -"HELLO" -``` diff --git a/docs/sources/flow/reference/stdlib/trim.md b/docs/sources/flow/reference/stdlib/trim.md deleted file mode 100644 index 5023d1f213..0000000000 --- a/docs/sources/flow/reference/stdlib/trim.md +++ /dev/null @@ -1,32 +0,0 @@ ---- -aliases: -- ../../configuration-language/standard-library/trim/ -- /docs/grafana-cloud/agent/flow/reference/stdlib/trim/ -- /docs/grafana-cloud/monitor-infrastructure/agent/flow/reference/stdlib/trim/ -- /docs/grafana-cloud/monitor-infrastructure/integrations/agent/flow/reference/stdlib/trim/ -- /docs/grafana-cloud/send-data/agent/flow/reference/stdlib/trim/ -canonical: https://grafana.com/docs/agent/latest/flow/reference/stdlib/trim/ -description: Learn about trim -title: trim ---- - -# trim - -`trim` removes the specified set of characters from the start and end of a string. - -```river -trim(string, str_character_set) -``` - -## Examples - -```river -> trim("?!hello?!", "!?") -"hello" - -> trim("foobar", "far") -"oob" - -> trim(" hello! world.! ", "! ") -"hello! world." -``` diff --git a/docs/sources/flow/reference/stdlib/trim_prefix.md b/docs/sources/flow/reference/stdlib/trim_prefix.md deleted file mode 100644 index 33d716f133..0000000000 --- a/docs/sources/flow/reference/stdlib/trim_prefix.md +++ /dev/null @@ -1,22 +0,0 @@ ---- -aliases: -- ../../configuration-language/standard-library/trim_prefix/ -- /docs/grafana-cloud/agent/flow/reference/stdlib/trim_prefix/ -- /docs/grafana-cloud/monitor-infrastructure/agent/flow/reference/stdlib/trim_prefix/ -- /docs/grafana-cloud/monitor-infrastructure/integrations/agent/flow/reference/stdlib/trim_prefix/ -- /docs/grafana-cloud/send-data/agent/flow/reference/stdlib/trim_prefix/ -canonical: https://grafana.com/docs/agent/latest/flow/reference/stdlib/trim_prefix/ -description: Learn about trim_prefix -title: trim_prefix ---- - -# trim_prefix - -`trim_prefix` removes the prefix from the start of a string. If the string does not start with the prefix, the string is returned unchanged. 
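-
-For consistency with the other string functions, a usage sketch follows. The parameter names are illustrative:
-
-```river
-trim_prefix(string, prefix)
-```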
- -## Examples - -```river -> trim_prefix("helloworld", "hello") -"world" -``` diff --git a/docs/sources/flow/reference/stdlib/trim_space.md b/docs/sources/flow/reference/stdlib/trim_space.md deleted file mode 100644 index 5e13e0ba0d..0000000000 --- a/docs/sources/flow/reference/stdlib/trim_space.md +++ /dev/null @@ -1,22 +0,0 @@ ---- -aliases: -- ../../configuration-language/standard-library/trim_space/ -- /docs/grafana-cloud/agent/flow/reference/stdlib/trim_space/ -- /docs/grafana-cloud/monitor-infrastructure/agent/flow/reference/stdlib/trim_space/ -- /docs/grafana-cloud/monitor-infrastructure/integrations/agent/flow/reference/stdlib/trim_space/ -- /docs/grafana-cloud/send-data/agent/flow/reference/stdlib/trim_space/ -canonical: https://grafana.com/docs/agent/latest/flow/reference/stdlib/trim_space/ -description: Learn about trim_space -title: trim_space ---- - -# trim_space - -`trim_space` removes any whitespace characters from the start and end of a string. - -## Examples - -```river -> trim_space(" hello\n\n") -"hello" -``` diff --git a/docs/sources/flow/reference/stdlib/trim_suffix.md b/docs/sources/flow/reference/stdlib/trim_suffix.md deleted file mode 100644 index 4741007ebe..0000000000 --- a/docs/sources/flow/reference/stdlib/trim_suffix.md +++ /dev/null @@ -1,22 +0,0 @@ ---- -aliases: -- ../../configuration-language/standard-library/trim_suffix/ -- /docs/grafana-cloud/agent/flow/reference/stdlib/trim_suffix/ -- /docs/grafana-cloud/monitor-infrastructure/agent/flow/reference/stdlib/trim_suffix/ -- /docs/grafana-cloud/monitor-infrastructure/integrations/agent/flow/reference/stdlib/trim_suffix/ -- /docs/grafana-cloud/send-data/agent/flow/reference/stdlib/trim_suffix/ -canonical: https://grafana.com/docs/agent/latest/flow/reference/stdlib/trim_suffix/ -description: Learn about trim_suffix -title: trim_suffix ---- - -# trim_suffix - -`trim_suffix` removes the suffix from the end of a string. - -## Examples - -```river -> trim_suffix("helloworld", "world") -"hello" -``` diff --git a/docs/sources/flow/release-notes.md b/docs/sources/flow/release-notes.md deleted file mode 100644 index 12d1578685..0000000000 --- a/docs/sources/flow/release-notes.md +++ /dev/null @@ -1,634 +0,0 @@ ---- -aliases: -- ./upgrade-guide/ -- /docs/grafana-cloud/agent/flow/release-notes/ -- /docs/grafana-cloud/monitor-infrastructure/agent/flow/release-notes/ -- /docs/grafana-cloud/monitor-infrastructure/integrations/agent/flow/release-notes/ -- /docs/grafana-cloud/send-data/agent/flow/release-notes/ -canonical: https://grafana.com/docs/agent/latest/flow/release-notes/ -description: Release notes for Grafana Agent Flow -menuTitle: Release notes -title: Release notes for Grafana Agent Flow -weight: 999 ---- - -# Release notes for {{% param "PRODUCT_NAME" %}} - -The release notes provide information about deprecations and breaking changes in {{< param "PRODUCT_NAME" >}}. - -For a complete list of changes to {{< param "PRODUCT_ROOT_NAME" >}}, with links to pull requests and related issues when available, refer to the [Changelog](https://github.com/grafana/agent/blob/main/CHANGELOG.md). - -{{< admonition type="note" >}} -These release notes are specific to {{< param "PRODUCT_NAME" >}}. 
-Other release notes for the different {{< param "PRODUCT_ROOT_NAME" >}} variants are contained on separate pages:
-
-* [Static mode release notes][release-notes-static]
-* [Static mode Kubernetes operator release notes][release-notes-operator]
-
-[release-notes-static]: {{< relref "../static/release-notes.md" >}}
-[release-notes-operator]: {{< relref "../operator/release-notes.md" >}}
-{{< /admonition >}}
-
-## v0.40
-
-### Breaking change: Prohibit the configuration of services within modules.
-
-Previously it was possible to configure the HTTP service via the [HTTP config block](https://grafana.com/docs/agent/v0.39/flow/reference/config-blocks/http/) inside a module.
-This functionality is now only available in the main configuration.
-
-### Breaking change: Change the default value of `disable_high_cardinality_metrics` to `true`.
-
-The `disable_high_cardinality_metrics` configuration argument is used by `otelcol.exporter` components such as `otelcol.exporter.otlp`.
-If you need to see high cardinality metrics containing labels such as IP addresses and port numbers, you now have to explicitly set `disable_high_cardinality_metrics` to `false`.
-
-### Breaking change: Rename component `prometheus.exporter.agent` to `prometheus.exporter.self`
-
-The name `prometheus.exporter.agent` is potentially ambiguous and can be misinterpreted as an exporter for Prometheus Agent.
-The new name reflects the component's true purpose as an exporter of the process's own metrics.
-
-## v0.39
-
-### Breaking change: `otelcol.receiver.prometheus` will drop all `otel_scope_info` metrics when converting them to OTLP
-
-* If the `otel_scope_info` metric has the `otel_scope_name` and `otel_scope_version` labels,
-  their values are used to set the OTLP Instrumentation Scope name and version, respectively.
-* Labels for `otel_scope_info` metrics other than `otel_scope_name` and `otel_scope_version`
-  are added as scope attributes with the matching name and version.
-
-### Breaking change: label for `target` block in `prometheus.exporter.blackbox` is removed
-
-Previously in `prometheus.exporter.blackbox`, the `target` block required a label, which was used in the job's name.
-In this version, you must specify a `name` attribute instead, which allows less restrictive naming.
-
-Old configuration example:
-
-```river
-prometheus.exporter.blackbox "example" {
-  config_file = "blackbox_modules.yml"
-
-  target "grafana" {
-    address = "http://grafana.com"
-    module = "http_2xx"
-    labels = {
-      "env" = "dev",
-    }
-  }
-}
-```
-
-New configuration example:
-
-```river
-prometheus.exporter.blackbox "example" {
-  config_file = "blackbox_modules.yml"
-
-  target {
-    name = "grafana"
-    address = "http://grafana.com"
-    module = "http_2xx"
-    labels = {
-      "env" = "dev",
-    }
-  }
-}
-```
-
-## v0.38
-
-### Breaking change: `otelcol.exporter.jaeger` component removed
-
-The deprecated `otelcol.exporter.jaeger` component has been removed. To send
-traces to Jaeger, use `otelcol.exporter.otlp` and a version of Jaeger that
-supports OTLP.
-
-## v0.37
-
-### Breaking change: Renamed `non_indexed_labels` Loki processing stage to `structured_metadata`.
-
-If you use the Loki processing stage in your {{< param "PRODUCT_NAME" >}} configuration, you must rename the `non_indexed_labels` pipeline stage definition to `structured_metadata`.
-
-Old configuration example:
-
-```river
-stage.non_indexed_labels {
-    values = {"app" = ""}
-}
-```
-
-New configuration example:
-
-```river
-stage.structured_metadata {
-    values = {"app" = ""}
-}
-```
-
-### Breaking change: `otelcol.exporter.prometheus` scope labels updated
-
-There are two changes to the way scope labels work for this component.
-
-* Previously, the `include_scope_info` argument would trigger including
-`otel_scope_name` and `otel_scope_version` in metrics. This now defaults
-to `true` and is controlled via the `include_scope_labels` argument.
-
-* A bugfix was made to rename `otel_scope_info` metric labels from
-`name` to `otel_scope_name` and `version` to `otel_scope_version`. This now
-conforms to the OTLP Instrumentation Scope specification.
-
-### Breaking change: `prometheus.exporter.unix` now requires a label.
-
-Previously the exporter was a singleton and did not require a label. The exporter can now be used multiple times and
-needs a label.
-
-Old configuration example:
-
-```river
-prometheus.exporter.unix { /* ... */ }
-```
-
-New configuration example:
-
-```river
-prometheus.exporter.unix "example" { /* ... */ }
-```
-
-## v0.36
-
-### Breaking change: The default value of `retry_on_http_429` is changed to `true` for the `queue_config` in `prometheus.remote_write`
-
-The default value of `retry_on_http_429` is changed from `false` to `true` for the `queue_config` block in `prometheus.remote_write`
-so that {{< param "PRODUCT_ROOT_NAME" >}} retries sending by default and avoids losing data in metric pipelines.
-
-* If you set `retry_on_http_429` explicitly, no action is required.
-* If you do not set `retry_on_http_429` explicitly and you do *not* want to retry on HTTP 429, make sure you set it to `false` as you upgrade to this new version.
-
-### Breaking change: `loki.source.file` no longer automatically extracts logs from compressed files
-
-The `loki.source.file` component no longer automatically detects and decompresses
-logs from compressed files (this was undocumented behavior).
-
-This file-extension-based detection of compressed files has been replaced by a
-new configuration block that explicitly enables and specifies the compression
-format. By default, the decompression of files is entirely disabled.
-
-How to migrate:
-
-* If {{< param "PRODUCT_NAME" >}} never reads logs from files with
-  extensions `.gz`, `.tar.gz`, `.z`, or `.bz2`, then no action is required.
-  > You can check which file extensions {{< param "PRODUCT_NAME" >}} reads from by looking
-  at the `path` label on the `loki_source_file_file_bytes_total` metric.
-
-* If {{< param "PRODUCT_NAME" >}} extracts data from compressed files, add the following
-  configuration block to your `loki.source.file` component:
-
-  ```river
-  loki.source.file "example" {
-    ...
-    decompression {
-      enabled = true
-      format = "<FORMAT>"
-    }
-  }
-  ```
-
-  where `<FORMAT>` is the appropriate compression format.
-  See the [`loki.source.file` documentation][loki-source-file-docs] for details.
-
-  [loki-source-file-docs]: {{< relref "./reference/components/loki.source.file.md" >}}
-
-## v0.35
-
-### Breaking change: `auth` and `version` attributes from `walk_params` block of `prometheus.exporter.snmp` have been removed
-
-The `prometheus.exporter.snmp` flow component wraps a new version of the SNMP exporter, which introduces a new configuration file format.
-This new format separates the walk and metric mappings from the connection and authentication settings. This allows easier configuration of different
-auth params without having to duplicate the full walk and metric mapping.
-
-Old configuration example:
-
-```river
-prometheus.exporter.snmp "example" {
-  config_file = "snmp_modules.yml"
-
-  target "network_switch_1" {
-    address = "192.168.1.2"
-    module = "if_mib"
-    walk_params = "public"
-  }
-
-  walk_param "public" {
-    retries = "2"
-    version = "2"
-    auth {
-      community = "public"
-    }
-  }
-}
-```
-
-New configuration example:
-
-```river
-prometheus.exporter.snmp "example" {
-  config_file = "snmp_modules.yml"
-
-  target "network_switch_1" {
-    address = "192.168.1.2"
-    module = "if_mib"
-    walk_params = "public"
-    auth = "public_v2"
-  }
-
-  walk_param "public" {
-    retries = "2"
-  }
-}
-```
-
-See [Module and Auth Split Migration](https://github.com/prometheus/snmp_exporter/blob/main/auth-split-migration.md) for more details.
-
-### Breaking change: `discovery.file` has been renamed to `local.file_match`
-
-The `discovery.file` component has been renamed to `local.file_match` to make
-its purpose clearer: to find files on the local filesystem matching a
-pattern.
-
-Renaming `discovery.file` to `local.file_match` also resolves a point of
-confusion where `discovery.file` was thought to implement Prometheus' file
-service discovery.
-
-Old configuration example:
-
-```river
-discovery.kubernetes "k8s" {
-  role = "pod"
-}
-
-discovery.relabel "k8s" {
-  targets = discovery.kubernetes.k8s.targets
-
-  rule {
-    source_labels = ["__meta_kubernetes_namespace", "__meta_kubernetes_pod_label_name"]
-    target_label = "job"
-    separator = "/"
-  }
-
-  rule {
-    source_labels = ["__meta_kubernetes_pod_uid", "__meta_kubernetes_pod_container_name"]
-    target_label = "__path__"
-    separator = "/"
-    replacement = "/var/log/pods/*$1/*.log"
-  }
-}
-
-discovery.file "pods" {
-  path_targets = discovery.relabel.k8s.output
-}
-```
-
-New configuration example:
-
-```river
-discovery.kubernetes "k8s" {
-  role = "pod"
-}
-
-discovery.relabel "k8s" {
-  targets = discovery.kubernetes.k8s.targets
-
-  rule {
-    source_labels = ["__meta_kubernetes_namespace", "__meta_kubernetes_pod_label_name"]
-    target_label = "job"
-    separator = "/"
-  }
-
-  rule {
-    source_labels = ["__meta_kubernetes_pod_uid", "__meta_kubernetes_pod_container_name"]
-    target_label = "__path__"
-    separator = "/"
-    replacement = "/var/log/pods/*$1/*.log"
-  }
-}
-
-local.file_match "pods" {
-  path_targets = discovery.relabel.k8s.output
-}
-```
-
-### Breaking change: `discovery_target_decode` has been removed from the River standard library
-
-The `discovery_target_decode` function was initially added to the River
-standard library as an equivalent to Prometheus' file-based discovery and
-HTTP-based discovery methods.
-
-However, the Prometheus discovery mechanisms have more functionality than
-`discovery_target_decode`:
-
-* Prometheus' `file_sd_configs` can use many files based on pattern matching.
-* Prometheus' `http_sd_configs` also support YAML files.
-
-Additionally, it is no longer an accepted pattern to have component-specific
-functions in the River standard library.
-
-As a result, `discovery_target_decode` has been removed in favor of using
-components.
-
-Old configuration example:
-
-```river
-remote.http "example" {
-  url = URL_CONTAINING_TARGETS
-}
-
-prometheus.scrape "example" {
-  targets    = discovery_target_decode(remote.http.example.content)
-  forward_to = FORWARD_LIST
-}
-```
-
-New configuration example:
-
-```river
-discovery.http "example" {
-  url = URL_CONTAINING_TARGETS
-}
-
-prometheus.scrape "example" {
-  targets    = discovery.http.example.targets
-  forward_to = FORWARD_LIST
-}
-```
-
-### Breaking change: The algorithm for the "hash" action of `otelcol.processor.attributes` has changed
-
-The hash produced when using `action = "hash"` in the `otelcol.processor.attributes` flow component now uses the more secure SHA-256 algorithm.
-The change was made in PR [#22831](https://github.com/open-telemetry/opentelemetry-collector-contrib/pull/22831) of opentelemetry-collector-contrib.
-
-### Breaking change: `otelcol.exporter.loki` now includes instrumentation scope in its output
-
-Additional `instrumentation_scope` information will be added to the OTLP log signal, like this:
-
-```
-{
-  "body": "Example log",
-  "traceid": "01020304000000000000000000000000",
-  "spanid": "0506070800000000",
-  "severity": "error",
-  "attributes": {
-    "attr1": "1",
-    "attr2": "2"
-  },
-  "resources": {
-    "host.name": "something"
-  },
-  "instrumentation_scope": {
-    "name": "example-logger-name",
-    "version": "v1"
-  }
-}
-```
-
-### Breaking change: `otelcol.extension.jaeger_remote_sampling` removes the `/` HTTP endpoint
-
-The `/` HTTP endpoint was the same as the `/sampling` endpoint. The `/sampling` endpoint is still functional.
-The change was made in PR [#18070](https://github.com/open-telemetry/opentelemetry-collector-contrib/pull/18070) of opentelemetry-collector-contrib.
-
-### Breaking change: The `remote_sampling` block has been removed from `otelcol.receiver.jaeger`
-
-The `remote_sampling` block in `otelcol.receiver.jaeger` has been an undocumented no-op configuration for some time, and has now been removed.
-Customers are advised to use `otelcol.extension.jaeger_remote_sampling` instead.
-
-### Deprecation: `otelcol.exporter.jaeger` has been deprecated and will be removed in {{% param "PRODUCT_NAME" %}} v0.38.0.
-
-This is because Jaeger supports OTLP directly and OpenTelemetry Collector is also removing its
-[Jaeger exporter](https://github.com/open-telemetry/opentelemetry-collector-contrib/tree/main/exporter/jaegerexporter).
-
-## v0.34
-
-### Breaking change: `phlare.scrape` and `phlare.write` have been renamed to `pyroscope.scrape` and `pyroscope.write`
-
-Old configuration example:
-
-```river
-phlare.write "staging" {
-  endpoint {
-    url = "http://phlare:4100"
-  }
-}
-
-phlare.scrape "default" {
-  targets = [
-    {"__address__" = "agent:12345", "app"="agent"},
-  ]
-  forward_to = [phlare.write.staging.receiver]
-}
-```
-
-New configuration example:
-
-```river
-pyroscope.write "staging" {
-  endpoint {
-    url = "http://pyroscope:4100"
-  }
-}
-
-pyroscope.scrape "default" {
-  targets = [
-    {"__address__" = "agent:12345", "app"="agent"},
-  ]
-  forward_to = [pyroscope.write.staging.receiver]
-}
-```
-
-## v0.33
-
-### Symbolic links in Docker containers removed
-
-We've removed the deprecated symbolic links to `/bin/agent*` in Docker
-containers, as planned in v0.31. In case you're setting a custom entrypoint,
-use the new binaries that are prefixed with `/bin/grafana*`.
- -## v0.32 - -### Breaking change: `http_client_config` Flow blocks merged with parent blocks - -To reduce the amount of typing required to write Flow components, the arguments -and subblocks found in `http_client_config` have been merged with their parent -blocks: - -- `discovery.docker > http_client_config` is merged into the `discovery.docker` block. -- `discovery.kubernetes > http_client_config` is merged into the `discovery.kubernetes` block. -- `loki.source.kubernetes > client > http_client_config` is merged into the `client` block. -- `loki.source.podlogs > client > http_client_config` is merged into the `client` block. -- `loki.write > endpoint > http_client_config` is merged into the `endpoint` block. -- `mimir.rules.kubernetes > http_client_config` is merged into the `mimir.rules.kubernetes` block. -- `otelcol.receiver.opencensus > grpc` is merged into the `otelcol.receiver.opencensus` block. -- `otelcol.receiver.zipkin > http` is merged into the `otelcol.receiver.zipkin` block. -- `phlare.scrape > http_client_config` is merged into the `phlare.scrape` block. -- `phlare.write > endpoint > http_client_config` is merged into the `endpoint` block. -- `prometheus.remote_write > endpoint > http_client_config` is merged into the `endpoint` block. -- `prometheus.scrape > http_client_config` is merged into the `prometheus.scrape` block. - -Old configuration example: - -```river -prometheus.remote_write "example" { - endpoint { - url = URL - - http_client_config { - basic_auth { - username = BASIC_AUTH_USERNAME - password = BASIC_AUTH_PASSWORD - } - } - } -} -``` - -New configuration example: - -```river -prometheus.remote_write "example" { - endpoint { - url = URL - - basic_auth { - username = BASIC_AUTH_USERNAME - password = BASIC_AUTH_PASSWORD - } - } -} -``` - -### Breaking change: `loki.process` stage blocks combined into new blocks - -Previously, to add a stage to `loki.process`, two blocks were needed: a block -called `stage`, then an inner block for the stage being written. Stage blocks -are now a single block called `stage.STAGENAME`. - -Old configuration example: - -```river -loki.process "example" { - forward_to = RECEIVER_LIST - - stage { - docker {} - } - - stage { - json { - expressions = { output = "log", extra = "" } - } - } -} -``` - -New configuration example: - -```river -loki.process "example" { - forward_to = RECEIVER_LIST - - stage.docker {} - - stage.json { - expressions = { output = "log", extra = "" } - } -} -``` - -### Breaking change: `client_options` block renamed in `remote.s3` component - -To synchronize naming conventions between `remote.s3` and `remote.http`, the -`client_options` block has been renamed `client`. - -Old configuration example: - -```river -remote.s3 "example" { - path = S3_PATH - - client_options { - key = ACCESS_KEY - secret = KEY_SECRET - } -} -``` - -New configuration example: - -```river -remote.s3 "example" { - path = S3_PATH - - client { - key = ACCESS_KEY - secret = KEY_SECRET - } -} -``` - -### Breaking change: `prometheus.integration.node_exporter` component name changed - -The `prometheus.integration.node_exporter` component has been renamed to -`prometheus.exporter.unix`. `unix` was chosen as a name to approximate the -\*nix-like systems the exporter supports. 
- -Old configuration example: - -```river -prometheus.integration.node_exporter { } -``` - -New configuration example: - -```river -prometheus.exporter.unix { } -``` - -### Breaking change: support for `EXPERIMENTAL_ENABLE_FLOW` environment variable removed - -As first announced in v0.30.0, support for using the `EXPERIMENTAL_ENABLE_FLOW` -environment variable to enable Flow mode has been removed. - -To enable {{< param "PRODUCT_NAME" >}}, set the `AGENT_MODE` environment variable to `flow`. - -## v0.31 - -### Breaking change: binary names are now prefixed with `grafana-` - -As first announced in v0.29, the `agent` release binary name is now prefixed -with `grafana-`: - -- `agent` is now `grafana-agent`. - -For the `grafana/agent` Docker container, the entrypoint is now -`/bin/grafana-agent`. A symbolic link from `/bin/agent` to the new binary has -been added. - -Symbolic links will be removed in v0.33. Custom entrypoints must be -updated prior to v0.33 to use the new binaries before the symbolic links get -removed. - -## v0.30 - -### Deprecation: `EXPERIMENTAL_ENABLE_FLOW` environment variable changed - -As part of graduating {{< param "PRODUCT_NAME" >}} to beta, the -`EXPERIMENTAL_ENABLE_FLOW` environment variable is replaced by setting -`AGENT_MODE` to `flow`. - -Setting `EXPERIMENTAL_ENABLE_FLOW` to `1` or `true` is now deprecated and -support for it will be removed for the v0.32 release. - -## v0.29 - -### Deprecation: binary names will be prefixed with `grafana-` in v0.31.0 - -The binary name `agent` has been deprecated and will be renamed to -`grafana-agent` in the v0.31.0 release. - -As part of this change, the Docker containers for the v0.31.0 release will -include symbolic links from the old binary names to the new binary names. - -There is no action to take at this time. diff --git a/docs/sources/flow/tasks/_index.md b/docs/sources/flow/tasks/_index.md deleted file mode 100644 index 4ca62e8c13..0000000000 --- a/docs/sources/flow/tasks/_index.md +++ /dev/null @@ -1,25 +0,0 @@ ---- -aliases: -- /docs/grafana-cloud/agent/flow/tasks/ -- /docs/grafana-cloud/monitor-infrastructure/agent/flow/tasks/ -- /docs/grafana-cloud/monitor-infrastructure/integrations/agent/flow/tasks/ -- /docs/grafana-cloud/send-data/agent/flow/tasks/ -# Previous page aliases for backwards compatibility: -- /docs/grafana-cloud/agent/flow/getting-started/ -- /docs/grafana-cloud/monitor-infrastructure/agent/flow/getting-started/ -- /docs/grafana-cloud/monitor-infrastructure/integrations/agent/flow/getting-started/ -- /docs/grafana-cloud/send-data/agent/flow/getting-started/ -- getting_started/ # /docs/agent/latest/flow/getting_started/ -- getting-started/ # /docs/agent/latest/flow/getting-started/ -canonical: https://grafana.com/docs/agent/latest/flow/tasks/ -description: How to perform common tasks with Grafana Agent Flow -menuTitle: Tasks -title: Tasks with Grafana Agent Flow -weight: 200 ---- - -# Tasks with {{% param "PRODUCT_NAME" %}} - -This section details how to perform common tasks with {{< param "PRODUCT_NAME" >}}. 
-
-{{< section >}}
diff --git a/docs/sources/flow/tasks/configure-agent-clustering.md b/docs/sources/flow/tasks/configure-agent-clustering.md
deleted file mode 100644
index d8539914fc..0000000000
--- a/docs/sources/flow/tasks/configure-agent-clustering.md
+++ /dev/null
@@ -1,74 +0,0 @@
----
-aliases:
-- /docs/grafana-cloud/agent/flow/tasks/configure-agent-clustering/
-- /docs/grafana-cloud/monitor-infrastructure/agent/flow/tasks/configure-agent-clustering/
-- /docs/grafana-cloud/monitor-infrastructure/integrations/agent/flow/tasks/configure-agent-clustering/
-- /docs/grafana-cloud/send-data/agent/flow/tasks/configure-agent-clustering/
-# Previous page aliases for backwards compatibility:
-- /docs/grafana-cloud/agent/flow/getting-started/configure-agent-clustering/
-- /docs/grafana-cloud/monitor-infrastructure/agent/flow/getting-started/configure-agent-clustering/
-- /docs/grafana-cloud/monitor-infrastructure/integrations/agent/flow/getting-started/configure-agent-clustering/
-- /docs/grafana-cloud/send-data/agent/flow/getting-started/configure-agent-clustering/
-- ../getting-started/configure-agent-clustering/ # /docs/agent/latest/flow/getting-started/configure-agent-clustering/
-canonical: https://grafana.com/docs/agent/latest/flow/tasks/configure-agent-clustering/
-description: Learn how to configure Grafana Agent clustering in an existing installation
-menuTitle: Configure Grafana Agent clustering
-title: Configure Grafana Agent clustering in an existing installation
-weight: 400
----
-
-# Configure {{% param "PRODUCT_NAME" %}} clustering in an existing installation
-
-You can configure {{< param "PRODUCT_NAME" >}} to run with [clustering][] so that individual {{< param "PRODUCT_ROOT_NAME" >}}s can work together for workload distribution and high availability.
-
-> **Note:** Clustering is a [beta][] feature. Beta features are subject to breaking
-> changes and may be replaced with equivalent functionality that covers the same use case.
-
-This topic describes how to add clustering to an existing installation.
-
-## Configure {{% param "PRODUCT_NAME" %}} clustering with the Helm chart
-
-This section guides you through enabling clustering when {{< param "PRODUCT_NAME" >}} is installed on Kubernetes using the {{< param "PRODUCT_ROOT_NAME" >}} [Helm chart][install-helm].
-
-### Before you begin
-
-- Ensure that your `values.yaml` file has `controller.type` set to `statefulset`.
-
-### Steps
-
-To configure clustering:
-
-1. Amend your existing `values.yaml` file to add `clustering.enabled=true` inside the `agent` block.
-
-   ```yaml
-   agent:
-     clustering:
-       enabled: true
-   ```
-
-1. Upgrade your installation to use the new `values.yaml` file:
-
-   ```bash
-   helm upgrade <RELEASE_NAME> -f values.yaml
-   ```
-
-   Replace the following:
-
-   - _`<RELEASE_NAME>`_: The name of the installation you chose when you installed the Helm chart.
-
-1. Use the {{< param "PRODUCT_NAME" >}} [UI][] to verify the cluster status:
-
-   1. Click **Clustering** in the navigation bar.
-
-   1. Ensure that all expected nodes appear in the resulting table.
- -{{% docs/reference %}} -[clustering]: "/docs/agent/ -> /docs/agent//flow/concepts/clustering.md" -[clustering]: "/docs/grafana-cloud/ -> /docs/grafana-cloud/send-data/agent/flow/concepts/clustering.md" -[beta]: "/docs/agent/ -> /docs/agent//stability.md#beta" -[beta]: "/docs/grafana-cloud/ -> /docs/grafana-cloud/send-data/agent/stability.md#beta" -[install-helm]: "/docs/agent/ -> /docs/agent//flow/get-started/install/kubernetes.md" -[install-helm]: "/docs/grafana-cloud/ -> /docs/grafana-cloud/send-data/agent/flow/get-started/install/kubernetes.md" -[UI]: "/docs/agent/ -> /docs/agent//flow/tasks/debug.md#component-detail-page" -[UI]: "/docs/grafana-cloud/ -> /docs/grafana-cloud/send-data/agent/flow/tasks/debug.md#component-detail-page" -{{% /docs/reference %}} diff --git a/docs/sources/flow/tasks/configure/_index.md b/docs/sources/flow/tasks/configure/_index.md deleted file mode 100644 index c44ea3dc02..0000000000 --- a/docs/sources/flow/tasks/configure/_index.md +++ /dev/null @@ -1,36 +0,0 @@ ---- -aliases: -- /docs/grafana-cloud/agent/flow/tasks/configure/ -- /docs/grafana-cloud/monitor-infrastructure/agent/flow/tasks/configure/ -- /docs/grafana-cloud/monitor-infrastructure/integrations/agent/flow/tasks/configure/ -- /docs/grafana-cloud/send-data/agent/flow/tasks/configure/ -# Previous page aliases for backwards compatibility: -- /docs/grafana-cloud/agent/flow/setup/configure/ -- /docs/grafana-cloud/monitor-infrastructure/agent/flow/setup/configure/ -- /docs/grafana-cloud/monitor-infrastructure/integrations/agent/flow/setup/configure/ -- /docs/grafana-cloud/send-data/agent/flow/setup/configure/ -- ../setup/configure/ # /docs/agent/latest/flow/setup/configure/ -canonical: https://grafana.com/docs/agent/latest/flow/tasks/configure/ -description: Configure Grafana Agent Flow after it is installed -menuTitle: Configure -title: Configure Grafana Agent Flow -weight: 90 ---- - -# Configure {{% param "PRODUCT_NAME" %}} - -You can configure {{< param "PRODUCT_NAME" >}} after it is [installed][Install]. -The default River configuration file for {{< param "PRODUCT_NAME" >}} is located at: - -* Linux: `/etc/grafana-agent-flow.river` -* macOS: `$(brew --prefix)/etc/grafana-agent-flow/config.river` -* Windows: `C:\Program Files\Grafana Agent Flow\config.river` - -This section includes information that helps you configure {{< param "PRODUCT_NAME" >}}. 
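-
-As a minimal illustrative sketch, a configuration file wires components together like this. The component labels, target address, and endpoint URL are placeholders, not defaults:
-
-```river
-prometheus.scrape "default" {
-  targets    = [{"__address__" = "127.0.0.1:12345"}]
-  forward_to = [prometheus.remote_write.default.receiver]
-}
-
-prometheus.remote_write "default" {
-  endpoint {
-    url = "https://prometheus.example.com/api/v1/write"
-  }
-}
-```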
-
-{{< section >}}
diff --git a/docs/sources/flow/tasks/distribute-prometheus-scrape-load.md b/docs/sources/flow/tasks/distribute-prometheus-scrape-load.md
deleted file mode 100644
index ee3a3fd982..0000000000
--- a/docs/sources/flow/tasks/distribute-prometheus-scrape-load.md
+++ /dev/null
@@ -1,68 +0,0 @@
----
-aliases:
-- /docs/grafana-cloud/agent/flow/tasks/distribute-prometheus-scrape-load/
-- /docs/grafana-cloud/monitor-infrastructure/agent/flow/tasks/distribute-prometheus-scrape-load/
-- /docs/grafana-cloud/monitor-infrastructure/integrations/agent/flow/tasks/distribute-prometheus-scrape-load/
-- /docs/grafana-cloud/send-data/agent/flow/tasks/distribute-prometheus-scrape-load/
-# Previous page aliases for backwards compatibility:
-- /docs/grafana-cloud/agent/flow/getting-started/distribute-prometheus-scrape-load/
-- /docs/grafana-cloud/monitor-infrastructure/agent/flow/getting-started/distribute-prometheus-scrape-load/
-- /docs/grafana-cloud/monitor-infrastructure/integrations/agent/flow/getting-started/distribute-prometheus-scrape-load/
-- /docs/grafana-cloud/send-data/agent/flow/getting-started/distribute-prometheus-scrape-load/
-- ../getting-started/distribute-prometheus-scrape-load/ # /docs/agent/latest/flow/getting-started/distribute-prometheus-scrape-load/
-canonical: https://grafana.com/docs/agent/latest/flow/tasks/distribute-prometheus-scrape-load/
-description: Learn how to distribute your Prometheus metrics scrape load
-menuTitle: Distribute Prometheus metrics scrape load
-title: Distribute Prometheus metrics scrape load
-weight: 500
----
-
-# Distribute Prometheus metrics scrape load
-
-A good predictor for the size of an {{< param "PRODUCT_NAME" >}} deployment is the number of Prometheus targets each {{< param "PRODUCT_ROOT_NAME" >}} scrapes.
-[Clustering][] with target auto-distribution allows a fleet of {{< param "PRODUCT_ROOT_NAME" >}}s to work together to dynamically distribute their scrape load, providing high availability.
-
-> **Note:** Clustering is a [beta][] feature. Beta features are subject to breaking
-> changes and may be replaced with equivalent functionality that covers the same use case.
-
-## Before you begin
-
-- Familiarize yourself with how to [configure existing {{< param "PRODUCT_NAME" >}} installations][configure-grafana-agent].
-- [Configure Prometheus metrics collection][].
-- [Configure clustering][].
-- Ensure that all of your clustered {{< param "PRODUCT_ROOT_NAME" >}}s have the same configuration file.
-
-## Steps
-
-To distribute Prometheus metrics scrape load with clustering:
-
-1. Add the following block to all `prometheus.scrape` components that should use auto-distribution:
-
-   ```river
-   clustering {
-     enabled = true
-   }
-   ```
-
-1. Restart or reload {{< param "PRODUCT_ROOT_NAME" >}}s for them to use the new configuration.
-
-1. Validate that auto-distribution is functioning:
-
-   1. Using the {{< param "PRODUCT_ROOT_NAME" >}} [UI][] on each {{< param "PRODUCT_ROOT_NAME" >}}, navigate to the details page for one of the `prometheus.scrape` components you modified.
-
-   1. Compare the Debug Info sections between two different {{< param "PRODUCT_ROOT_NAME" >}}s to ensure that they're not scraping the same sets of targets.
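-
-Putting the steps together, a sketch of a clustered scrape pipeline might look like the following. The target address and remote write URL are placeholders:
-
-```river
-prometheus.scrape "default" {
-  clustering {
-    enabled = true
-  }
-
-  targets    = [{"__address__" = "app:9100"}]
-  forward_to = [prometheus.remote_write.default.receiver]
-}
-
-prometheus.remote_write "default" {
-  endpoint {
-    url = "https://prometheus.example.com/api/v1/write"
-  }
-}
-```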
- -{{% docs/reference %}} -[Clustering]: "/docs/agent/ -> /docs/agent//flow/concepts/clustering.md" -[Clustering]: "/docs/grafana-cloud/ -> /docs/grafana-cloud/send-data/agent/flow/concepts/clustering.md" -[beta]: "/docs/agent/ -> /docs/agent//stability.md#beta" -[beta]: "/docs/grafana-cloud/ -> /docs/grafana-cloud/send-data/agent/stability.md#beta" -[configure-grafana-agent]: "/docs/agent/ -> /docs/agent//flow/tasks/configure" -[configure-grafana-agent]: "/docs/grafana-cloud/ -> /docs/grafana-cloud/send-data/agent/flow/tasks/configure" -[Configure Prometheus metrics collection]: "/docs/agent/ -> /docs/agent//flow/tasks/collect-prometheus-metrics.md" -[Configure Prometheus metrics collection]: "/docs/grafana-cloud/ -> /docs/grafana-cloud/send-data/agent/flow/tasks/collect-prometheus-metrics.md" -[Configure clustering]: "/docs/agent/ -> /docs/agent//flow/tasks/configure-agent-clustering.md" -[Configure clustering]: "/docs/grafana-cloud/ -> /docs/grafana-cloud/send-data/agent/flow/tasks/configure-agent-clustering.md" -[UI]: "/docs/agent/ -> /docs/agent//flow/tasks/debug.md#component-detail-page" -[UI]: "/docs/grafana-cloud/ -> /docs/grafana-cloud/send-data/agent/flow/tasks/debug.md#component-detail-page" -{{% /docs/reference %}} diff --git a/docs/sources/flow/tasks/estimate-resource-usage.md b/docs/sources/flow/tasks/estimate-resource-usage.md deleted file mode 100644 index f3ed1b7aed..0000000000 --- a/docs/sources/flow/tasks/estimate-resource-usage.md +++ /dev/null @@ -1,83 +0,0 @@ ---- -aliases: - - /docs/agent/flow/tasks/estimate-resource-usage/ - - /docs/grafana-cloud/agent/flow/tasks/estimate-resource-usage/ - - /docs/grafana-cloud/monitor-infrastructure/agent/flow/tasks/estimate-resource-usage/ - - /docs/grafana-cloud/monitor-infrastructure/integrations/agent/flow/tasks/estimate-resource-usage/ - - /docs/grafana-cloud/send-data/agent/flow/tasks/estimate-resource-usage/ - # Previous page aliases for backwards compatibility: - - /docs/agent/flow/monitoring/resource-usage/ - - /docs/grafana-cloud/agent/flow/monitoring/resource-usage/ - - /docs/grafana-cloud/monitor-infrastructure/agent/flow/monitoring/resource-usage/ - - /docs/grafana-cloud/monitor-infrastructure/integrations/agent/flow/monitoring/resource-usage/ - - /docs/grafana-cloud/send-data/agent/flow/monitoring/resource-usage/ - - ../monitoring/resource-usage/ # /docs/agent/latest/flow/monitoring/resource-usage/ -canonical: https://grafana.com/docs/agent/latest/flow/monitoring/resource-usage/ -description: Estimate expected Grafana Agent resource usage -headless: true -title: Estimate resource usage -menuTitle: Estimate resource usage -weight: 190 ---- - -# Estimate {{% param "PRODUCT_NAME" %}} resource usage - -This page provides guidance for expected resource usage of -{{< param "PRODUCT_NAME" >}} for each telemetry type, based on operational -experience of some of the {{< param "PRODUCT_NAME" >}} maintainers. - -{{< admonition type="note" >}} -The resource usage depends on the workload, hardware, and the configuration used. -The information on this page is a good starting point for most users, but your -actual usage may be different. -{{< /admonition >}} - -## Prometheus metrics - -The Prometheus metrics resource usage depends mainly on the number of active -series that need to be scraped and the scrape interval. 
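
Of these two factors, the scrape interval is the easier one to tune. As an illustration, the
following sketch lowers the scrape frequency for a set of targets; the target address and the
receiver are placeholders, and `scrape_interval` is the only setting of interest here:

```river
prometheus.scrape "low_frequency" {
  // Placeholder target and receiver, shown only to illustrate the setting.
  targets    = [{"__address__" = "localhost:9090"}]
  forward_to = [prometheus.remote_write.default.receiver]

  // Scrape half as often as the 60s default to reduce CPU and bandwidth.
  scrape_interval = "120s"
}
```

A longer interval reduces CPU and network usage, while memory usage scales mostly with the
number of active series, which the interval doesn't change.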
-
-As a rule of thumb, **for each 1 million active series** and with the default
-scrape interval, you can expect to use approximately:
-
-* 0.4 CPU cores
-* 11 GiB of memory
-* 1.5 MiB/s of total network bandwidth, send and receive
-
-These recommendations are based on deployments that use [clustering][], but they
-will broadly apply to other deployment modes. For more information on how to
-deploy {{< param "PRODUCT_NAME" >}}, see [deploying grafana agent][].
-
-[deploying grafana agent]: {{< relref "../get-started/deploy-agent.md" >}}
-[clustering]: {{< relref "../concepts/clustering.md" >}}
-
-## Loki logs
-
-Loki logs resource usage depends mainly on the volume of logs ingested.
-
-As a rule of thumb, **for each 1 MiB/second of logs ingested**, you can expect
-to use approximately:
-
-* 1 CPU core
-* 120 MiB of memory
-
-These recommendations are based on Kubernetes DaemonSet deployments on clusters
-with a relatively small number of nodes and a high log volume on each. The resource
-usage per 1 MiB/second of logs can be higher if you have a large number of small
-nodes, due to the constant overhead of running {{< param "PRODUCT_NAME" >}} on each node.
-
-Additionally, factors such as the number of labels, the number of files, and the
-average log line length may all play a role in the resource usage.
-
-## Pyroscope profiles
-
-Pyroscope profiles resource usage depends mainly on the volume of profiles.
-
-As a rule of thumb, **for each 100 profiles/second**, you can expect to use
-approximately:
-
-* 1 CPU core
-* 10 GiB of memory
-
-Factors such as the size of each profile and the frequency of fetching them also
-play a role in the overall resource usage.
diff --git a/docs/sources/flow/tasks/migrate/_index.md b/docs/sources/flow/tasks/migrate/_index.md
deleted file mode 100644
index a0c98966dc..0000000000
--- a/docs/sources/flow/tasks/migrate/_index.md
+++ /dev/null
@@ -1,19 +0,0 @@
----
-aliases:
-- /docs/grafana-cloud/agent/flow/tasks/migrate/
-- /docs/grafana-cloud/monitor-infrastructure/agent/flow/tasks/migrate/
-- /docs/grafana-cloud/monitor-infrastructure/integrations/agent/flow/tasks/migrate/
-- /docs/grafana-cloud/send-data/agent/flow/tasks/migrate/
-canonical: https://grafana.com/docs/agent/latest/flow/tasks/migrate/
-description: How to migrate to Grafana Agent Flow
-menuTitle: Migrate
-title: Migrate to Grafana Agent Flow
-weight: 100
----
-
-# How to migrate to {{% param "PRODUCT_NAME" %}}
-
-This section details how to migrate to {{< param "PRODUCT_NAME" >}} from other
-common solutions.
- -{{< section >}} diff --git a/docs/sources/flow/tasks/monitor/_index.md b/docs/sources/flow/tasks/monitor/_index.md deleted file mode 100644 index ac23db2607..0000000000 --- a/docs/sources/flow/tasks/monitor/_index.md +++ /dev/null @@ -1,24 +0,0 @@ ---- -aliases: -- /docs/grafana-cloud/agent/flow/tasks/monitor/ -- /docs/grafana-cloud/monitor-infrastructure/agent/flow/tasks/monitor/ -- /docs/grafana-cloud/monitor-infrastructure/integrations/agent/flow/tasks/monitor/ -- /docs/grafana-cloud/send-data/agent/flow/tasks/monitor/ -# Previous page aliases for backwards compatibility: -- /docs/grafana-cloud/agent/flow/monitoring/ -- /docs/grafana-cloud/monitor-infrastructure/agent/flow/monitoring/ -- /docs/grafana-cloud/monitor-infrastructure/integrations/agent/flow/monitoring/ -- /docs/grafana-cloud/send-data/agent/flow/monitoring/ -- ../monitoring/ # /docs/agent/latest/flow/monitoring/ -canonical: https://grafana.com/docs/agent/latest/flow/tasks/monitor/ -description: Learn about monitoring Grafana Agent Flow -title: Monitor Grafana Agent Flow -menuTitle: Monitor -weight: 110 ---- - -# How to monitor {{% param "PRODUCT_NAME" %}} - -This section details various ways to monitor and debug {{< param "PRODUCT_NAME" >}}. - -{{< section >}} diff --git a/docs/sources/flow/tasks/monitor/component_metrics.md b/docs/sources/flow/tasks/monitor/component_metrics.md deleted file mode 100644 index 5b3693a1f1..0000000000 --- a/docs/sources/flow/tasks/monitor/component_metrics.md +++ /dev/null @@ -1,46 +0,0 @@ ---- -aliases: -- /docs/grafana-cloud/agent/flow/tasks/monitor/component_metrics/ -- /docs/grafana-cloud/monitor-infrastructure/agent/flow/tasks/monitor/component_metrics/ -- /docs/grafana-cloud/monitor-infrastructure/integrations/agent/flow/tasks/monitor/component_metrics/ -- /docs/grafana-cloud/send-data/agent/flow/tasks/monitor/component_metrics/ -- component-metrics/ # /docs/agent/latest/flow/tasks/monitor/component-metrics/ -# Previous page aliases for backwards compatibility: -- /docs/grafana-cloud/agent/flow/monitoring/component_metrics/ -- /docs/grafana-cloud/monitor-infrastructure/agent/flow/monitoring/component_metrics/ -- /docs/grafana-cloud/monitor-infrastructure/integrations/agent/flow/monitoring/component_metrics/ -- /docs/grafana-cloud/send-data/agent/flow/monitoring/component_metrics/ -- ../../monitoring/component-metrics/ # /docs/agent/latest/flow/monitoring/component-metrics/ -- ../../monitoring/component_metrics/ # /docs/agent/latest/flow/monitoring/component_metrics/ -canonical: https://grafana.com/docs/agent/latest/flow/monitoring/component_metrics/ -description: Learn how to monitor component metrics -title: Monitor components -weight: 200 ---- - -# How to monitor components - -{{< param "PRODUCT_NAME" >}} [components][] may optionally expose Prometheus metrics which can be used to investigate the behavior of that component. -These component-specific metrics are only generated when an instance of that component is running. - -> Component-specific metrics are different than any metrics being processed by the component. -> Component-specific metrics are used to expose the state of a component for observability, alerting, and debugging. - -Component-specific metrics are exposed at the `/metrics` HTTP endpoint of the {{< param "PRODUCT_NAME" >}} HTTP server, which defaults to listening on `http://localhost:12345`. - -> The documentation for the [`grafana-agent run`][grafana-agent run] command describes how to > modify the address {{< param "PRODUCT_NAME" >}} listens on for HTTP traffic. 
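
Because it's a plain Prometheus endpoint, one way to keep an eye on these metrics is to have {{< param "PRODUCT_NAME" >}} scrape itself.
The following sketch assumes the default listen address and an existing `prometheus.remote_write.default` component; both are placeholders:

```river
prometheus.scrape "self" {
  // Scrape the agent's own HTTP server on its default listen address.
  targets    = [{"__address__" = "127.0.0.1:12345"}]
  forward_to = [prometheus.remote_write.default.receiver]
}
```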
-
-Component-specific metrics have a `component_id` label matching the component ID generating those metrics.
-For example, component-specific metrics for a `prometheus.remote_write` component labeled `production` will have a `component_id` label with the value `prometheus.remote_write.production`.
-
-The [reference documentation][] for each component describes the list of component-specific metrics that the component exposes.
-Not all components expose metrics.
-
-{{% docs/reference %}}
-[components]: "/docs/agent/ -> /docs/agent//flow/concepts/components.md"
-[components]: "/docs/grafana-cloud/ -> /docs/grafana-cloud/send-data/agent/flow/concepts/components.md"
-[grafana-agent run]: "/docs/agent/ -> /docs/agent//flow/reference/cli/run.md"
-[grafana-agent run]: "/docs/grafana-cloud/ -> /docs/grafana-cloud/send-data/agent/flow/reference/cli/run.md"
-[reference documentation]: "/docs/agent/ -> /docs/agent//flow/reference/components"
-[reference documentation]: "/docs/grafana-cloud/ -> /docs/grafana-cloud/send-data/agent/flow/reference/components"
-{{% /docs/reference %}}
\ No newline at end of file
diff --git a/docs/sources/flow/tasks/monitor/controller_metrics.md b/docs/sources/flow/tasks/monitor/controller_metrics.md
deleted file mode 100644
index 0ba7617032..0000000000
--- a/docs/sources/flow/tasks/monitor/controller_metrics.md
+++ /dev/null
@@ -1,44 +0,0 @@
----
-aliases:
-- /docs/grafana-cloud/agent/flow/tasks/monitor/controller_metrics/
-- /docs/grafana-cloud/monitor-infrastructure/agent/flow/tasks/monitor/controller_metrics/
-- /docs/grafana-cloud/monitor-infrastructure/integrations/agent/flow/tasks/monitor/controller_metrics/
-- /docs/grafana-cloud/send-data/agent/flow/tasks/monitor/controller_metrics/
-- controller-metrics/ # /docs/agent/latest/flow/tasks/monitor/controller-metrics/
-# Previous page aliases for backwards compatibility:
-- /docs/grafana-cloud/agent/flow/monitoring/controller_metrics/
-- /docs/grafana-cloud/monitor-infrastructure/agent/flow/monitoring/controller_metrics/
-- /docs/grafana-cloud/monitor-infrastructure/integrations/agent/flow/monitoring/controller_metrics/
-- /docs/grafana-cloud/send-data/agent/flow/monitoring/controller_metrics/
-- ../../monitoring/controller-metrics/ # /docs/agent/latest/flow/monitoring/controller-metrics/
-- ../../monitoring/controller_metrics/ # /docs/agent/latest/flow/monitoring/controller_metrics/
-canonical: https://grafana.com/docs/agent/latest/flow/monitoring/controller_metrics/
-description: Learn how to monitor controller metrics
-title: Monitor controller
-weight: 100
----
-
-# How to monitor the controller
-
-The {{< param "PRODUCT_NAME" >}} [component controller][] exposes Prometheus metrics which you can use to investigate the controller state.
-
-Metrics for the controller are exposed at the `/metrics` HTTP endpoint of the {{< param "PRODUCT_NAME" >}} HTTP server, which defaults to listening on `http://localhost:12345`.
-
-> The documentation for the [`grafana-agent run`][grafana-agent run] command describes how to modify the address {{< param "PRODUCT_NAME" >}} listens on for HTTP traffic.
-
-The controller exposes the following metrics:
-
-* `agent_component_controller_evaluating` (Gauge): Set to `1` whenever the component controller is currently evaluating components.
-  This value may be misleading depending on how quickly evaluations complete or how often they occur.
-* `agent_component_controller_running_components` (Gauge): The current number of running components by health.
- The health is represented in the `health_type` label. -* `agent_component_evaluation_seconds` (Histogram): The time it takes to evaluate components after one of their dependencies is updated. -* `agent_component_dependencies_wait_seconds` (Histogram): Time spent by components waiting to be evaluated after one of their dependencies is updated. -* `agent_component_evaluation_queue_size` (Gauge): The current number of component evaluations waiting to be performed. - -{{% docs/reference %}} -[component controller]: "/docs/agent/ -> /docs/agent//flow/concepts/component_controller.md" -[component controller]: "/docs/grafana-cloud/ -> /docs/grafana-cloud/send-data/agent/flow/concepts/component_controller.md" -[grafana-agent run]: "/docs/agent/ -> /docs/agent//flow/reference/cli/run.md" -[grafana-agent run]: "/docs/grafana-cloud/ -> /docs/grafana-cloud/send-data/agent/flow/reference/cli/run.md" -{{% /docs/reference %}} diff --git a/docs/sources/flow/tutorials/_index.md b/docs/sources/flow/tutorials/_index.md deleted file mode 100644 index d695d7fb13..0000000000 --- a/docs/sources/flow/tutorials/_index.md +++ /dev/null @@ -1,17 +0,0 @@ ---- -aliases: -- /docs/grafana-cloud/agent/flow/tutorials/ -- /docs/grafana-cloud/monitor-infrastructure/agent/flow/tutorials/ -- /docs/grafana-cloud/monitor-infrastructure/integrations/agent/flow/tutorials/ -- /docs/grafana-cloud/send-data/agent/flow/tutorials/ -canonical: https://grafana.com/docs/agent/latest/flow/tutorials/ -description: Learn how to use Grafana Agent Flow -title: Tutorials -weight: 300 ---- - -# Tutorials - -This section provides tutorials for learning how to use {{< param "PRODUCT_NAME" >}}. - -{{< section >}} diff --git a/docs/sources/flow/tutorials/flow-by-example/_index.md b/docs/sources/flow/tutorials/flow-by-example/_index.md deleted file mode 100644 index d9b0373502..0000000000 --- a/docs/sources/flow/tutorials/flow-by-example/_index.md +++ /dev/null @@ -1,17 +0,0 @@ ---- -aliases: -- /docs/grafana-cloud/agent/flow/tutorials/flow-by-example/ -- /docs/grafana-cloud/monitor-infrastructure/agent/flow/tutorials/flow-by-example/ -- /docs/grafana-cloud/monitor-infrastructure/integrations/agent/flow/tutorials/flow-by-example/ -- /docs/grafana-cloud/send-data/agent/flow/tutorials/flow-by-example/ -canonical: https://grafana.com/docs/agent/latest/flow/tutorials/flow-by-example/ -description: Learn how to use Grafana Agent Flow -title: Flow by example -weight: 100 ---- - -# Flow by example - -This section provides a set of step-by-step tutorials that show how to use {{< param "PRODUCT_NAME" >}}. - -{{< section >}} diff --git a/docs/sources/flow/tutorials/flow-by-example/get-started.md b/docs/sources/flow/tutorials/flow-by-example/get-started.md deleted file mode 100644 index 5fa1bbd5b5..0000000000 --- a/docs/sources/flow/tutorials/flow-by-example/get-started.md +++ /dev/null @@ -1,89 +0,0 @@ ---- -aliases: -- /docs/grafana-cloud/agent/flow/tutorials/flow-by-example/faq/ -- /docs/grafana-cloud/monitor-infrastructure/agent/flow/tutorials/flow-by-example/faq/ -- /docs/grafana-cloud/monitor-infrastructure/integrations/agent/flow/tutorials/flow-by-example/faq/ -- /docs/grafana-cloud/send-data/agent/flow/tutorials/flow-by-example/faq/ -canonical: https://grafana.com/docs/agent/latest/flow/tutorials/flow-by-example/faq/ -description: Getting started with Flow-by-Example Tutorials -title: Get started -weight: 10 ---- - -## Who is this for? 
-
-This set of tutorials contains a collection of examples that build on each other to demonstrate how to configure and use [{{< param "PRODUCT_NAME" >}}][flow]. It assumes a basic understanding of what {{< param "PRODUCT_ROOT_NAME" >}} is and of telemetry collection in general. It also assumes a base level of familiarity with Prometheus and PromQL, Loki and LogQL, and basic Grafana navigation. It assumes no knowledge of {{< param "PRODUCT_NAME" >}} or River concepts.
-
-[flow]: https://grafana.com/docs/agent/latest/flow
-
-## What is Flow?
-
-Flow is a new way to configure {{< param "PRODUCT_ROOT_NAME" >}}. It uses a declarative configuration language that allows you to define a pipeline of telemetry collection, processing, and output. It is built on top of the [River](https://github.com/grafana/river) configuration language, which is designed to be fast, simple, and debuggable.
-
-## What do I need to get started?
-
-You will need a Linux or Unix environment with Docker installed. The examples are designed to be run on a single host so that you can run them on your laptop or in a VM. You are encouraged to follow along with the examples using a `config.river` file and experiment with the examples yourself.
-
-To run the examples, you should have a Grafana Agent binary available. You can follow the instructions on how to [Install Grafana Agent as a Standalone Binary](https://grafana.com/docs/agent/latest/flow/setup/install/binary/#install-grafana-agent-in-flow-mode-as-a-standalone-binary) to get a binary.
-
-## How should I follow along?
-
-You can use the following docker-compose file to set up a local Grafana instance alongside Loki and Prometheus, pre-configured as datasources. The examples are designed to be run locally, so you can follow along and experiment with them yourself.
-
-```yaml
-version: '3'
-services:
-  loki:
-    image: grafana/loki:2.9.0
-    ports:
-      - "3100:3100"
-    command: -config.file=/etc/loki/local-config.yaml
-  prometheus:
-    image: prom/prometheus:v2.47.0
-    command:
-      - --web.enable-remote-write-receiver
-      - --config.file=/etc/prometheus/prometheus.yml
-    ports:
-      - "9090:9090"
-  grafana:
-    environment:
-      - GF_PATHS_PROVISIONING=/etc/grafana/provisioning
-      - GF_AUTH_ANONYMOUS_ENABLED=true
-      - GF_AUTH_ANONYMOUS_ORG_ROLE=Admin
-    entrypoint:
-      - sh
-      - -euc
-      - |
-        mkdir -p /etc/grafana/provisioning/datasources
-        cat <<EOF > /etc/grafana/provisioning/datasources/ds.yaml
-        apiVersion: 1
-        datasources:
-        - name: Loki
-          type: loki
-          access: proxy
-          orgId: 1
-          url: http://loki:3100
-          basicAuth: false
-          isDefault: false
-          version: 1
-          editable: false
-        - name: Prometheus
-          type: prometheus
-          orgId: 1
-          url: http://prometheus:9090
-          basicAuth: false
-          isDefault: true
-          version: 1
-          editable: false
-        EOF
-        /run.sh
-    image: grafana/grafana:latest
-    ports:
-      - "3000:3000"
-```
-
-After running `docker-compose up`, open [http://localhost:3000](http://localhost:3000) in your browser to view the Grafana UI.
-
-The tutorials are designed to be followed in order and generally build on each other. Each example explains what it does and how it works.
-
-The Recommended Reading sections in each tutorial provide a list of documentation topics. To help you understand the concepts used in the example, read the recommended topics in the order given.
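
If you want a starting point for your `config.river` file, the following sketch forwards data to the Loki and Prometheus containers defined in the docker-compose file above.
It's an illustrative skeleton, not part of the tutorials, and the component labels are arbitrary:

```river
// Send metrics to the local Prometheus remote-write receiver.
prometheus.remote_write "local" {
  endpoint {
    url = "http://localhost:9090/api/v1/write"
  }
}

// Send logs to the local Loki instance.
loki.write "local" {
  endpoint {
    url = "http://localhost:3100/loki/api/v1/push"
  }
}
```

Collection components introduced in the tutorials can then forward to `prometheus.remote_write.local.receiver` and `loki.write.local.receiver`.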
diff --git a/docs/sources/get-started/_index.md b/docs/sources/get-started/_index.md new file mode 100644 index 0000000000..217738d064 --- /dev/null +++ b/docs/sources/get-started/_index.md @@ -0,0 +1,13 @@ +--- +canonical: https://grafana.com/docs/alloy/latest/get-started/ +description: Learn how to install and use Grafana Alloy +menuTitle: Get started +title: Get started with Grafana Alloy +weight: 50 +--- + +# Get started with {{% param "PRODUCT_NAME" %}} + +This section covers topics that help you get started with {{< param "PRODUCT_NAME" >}}, including installation, running {{< param "PRODUCT_NAME" >}}, overview of deployment topologies, and more. + +{{< section >}} diff --git a/docs/sources/flow/get-started/deploy-agent.md b/docs/sources/get-started/deploy-alloy.md similarity index 66% rename from docs/sources/flow/get-started/deploy-agent.md rename to docs/sources/get-started/deploy-alloy.md index 0a76e62c42..cd3fcb4e1a 100644 --- a/docs/sources/flow/get-started/deploy-agent.md +++ b/docs/sources/get-started/deploy-alloy.md @@ -1,27 +1,16 @@ --- -aliases: -- /docs/grafana-cloud/agent/flow/get-started/deploy-agent/ -- /docs/grafana-cloud/monitor-infrastructure/agent/flow/get-started/deploy-agent/ -- /docs/grafana-cloud/monitor-infrastructure/integrations/agent/flow/get-started/deploy-agent/ -- /docs/grafana-cloud/send-data/agent/flow/get-started/deploy-agent/ -# Previous docs aliases for backwards compatibility: -- /docs/grafana-cloud/agent/flow/setup/deploy-agent/ -- /docs/grafana-cloud/monitor-infrastructure/agent/flow/setup/deploy-agent/ -- /docs/grafana-cloud/monitor-infrastructure/integrations/agent/flow/setup/deploy-agent/ -- /docs/grafana-cloud/send-data/agent/flow/setup/deploy-agent/ -- ../setup/deploy-agent/ # /docs/agent/latest/flow/setup/deploy-agent/ -canonical: https://grafana.com/docs/agent/latest/flow/get-started/deploy-agent/ -description: Learn about possible deployment topologies for Grafana Agent Flow +canonical: https://grafana.com/docs/alloy/latest/get-started/deploy-alloy/ +description: Learn about possible deployment topologies for Grafana Alloy menuTitle: Deploy -title: Grafana Agent Flow deployment topologies +title: Grafana Alloy deployment topologies weight: 900 --- -{{< docs/shared source="agent" lookup="/deploy-agent.md" version="" >}} +{{< docs/shared source="alloy" lookup="/deploy-alloy.md" version="" >}} ## Processing different types of telemetry in different {{< param "PRODUCT_ROOT_NAME" >}} instances -If the load on {{< param "PRODUCT_ROOT_NAME" >}} is small, it is recommended to process all necessary telemetry signals in the same {{< param "PRODUCT_ROOT_NAME" >}} process. +If the load on {{< param "PRODUCT_ROOT_NAME" >}} is small, it is recommended to process all necessary telemetry signals in the same {{< param "PRODUCT_ROOT_NAME" >}} process. For example, a single {{< param "PRODUCT_ROOT_NAME" >}} can process all of the incoming metrics, logs, traces, and profiles. However, if the load on the {{< param "PRODUCT_ROOT_NAME" >}}s is big, it may be beneficial to process different telemetry signals in different deployments of {{< param "PRODUCT_ROOT_NAME" >}}s. @@ -30,7 +19,7 @@ This provides better stability due to the isolation between processes. For example, an overloaded {{< param "PRODUCT_ROOT_NAME" >}} processing traces won't impact an {{< param "PRODUCT_ROOT_NAME" >}} processing metrics. 
 Different types of signal collection require different methods for scaling:
 
-* "Pull" components such as `prometheus.scrape` and `pyroscope.scrape` are scaled using hashmod sharing or clustering. 
+* "Pull" components such as `prometheus.scrape` and `pyroscope.scrape` are scaled using hashmod sharding or clustering.
 * "Push" components such as `otelcol.receiver.otlp` are scaled by placing a load balancer in front of them.
 
 ### Traces
 
@@ -49,12 +38,10 @@ To decide whether scaling is necessary, check metrics such as:
 
 #### Stateful and stateless components
 
-In the context of tracing, a "stateful component" is a component
-that needs to aggregate certain spans to work correctly.
+In the context of tracing, a "stateful component" is a component that needs to aggregate certain spans to work correctly.
 A "stateless {{< param "PRODUCT_ROOT_NAME" >}}" is a {{< param "PRODUCT_ROOT_NAME" >}} which does not contain stateful components.
 
-Scaling stateful {{< param "PRODUCT_ROOT_NAME" >}}s is more difficult, because spans must be forwarded to a
-specific {{< param "PRODUCT_ROOT_NAME" >}} according to a span property such as trace ID or a `service.name` attribute.
+Scaling stateful {{< param "PRODUCT_ROOT_NAME" >}}s is more difficult, because spans must be forwarded to a specific {{< param "PRODUCT_ROOT_NAME" >}} according to a span property such as trace ID or a `service.name` attribute.
 You can forward spans with `otelcol.exporter.loadbalancing`.
 
 Examples of stateful components:
@@ -65,8 +52,8 @@ Examples of stateful components:
 
 
 
-A "stateless component" does not need to aggregate specific spans to work correctly -
-it can work correctly even if it only has some of the spans of a trace.
+A "stateless component" doesn't need to aggregate specific spans to work correctly.
+It can work correctly even if it only has some of the spans of a trace.
 
 A stateless {{< param "PRODUCT_ROOT_NAME" >}} can be scaled without using `otelcol.exporter.loadbalancing`.
 For example, you could use an off-the-shelf load balancer to do round-robin load balancing.
diff --git a/docs/sources/get-started/install/_index.md b/docs/sources/get-started/install/_index.md
new file mode 100644
index 0000000000..4ccae7825e
--- /dev/null
+++ b/docs/sources/get-started/install/_index.md
@@ -0,0 +1,31 @@
+---
+canonical: https://grafana.com/docs/alloy/latest/get-started/install/
+description: Learn how to install Grafana Agent Flow
+menuTitle: Install
+title: Install Grafana Agent Flow
+weight: 50
+---
+
+# Install {{% param "PRODUCT_NAME" %}}
+
+You can install {{< param "PRODUCT_NAME" >}} on Docker, Kubernetes, Linux, macOS, or Windows.
+
+The following architectures are supported:
+
+- Linux: AMD64, ARM64
+- Windows: AMD64
+- macOS: AMD64 (Intel), ARM64 (Apple Silicon)
+- FreeBSD: AMD64
+
+{{< admonition type="note" >}}
+Installing {{< param "PRODUCT_NAME" >}} on other operating systems is possible, but isn't recommended or supported.
+{{< /admonition >}}
+
+{{< section >}}
+
+## Data collection
+
+By default, {{< param "PRODUCT_NAME" >}} sends anonymous usage information to Grafana Labs.
+Refer to [data collection][] for more information about what data is collected and how you can opt out.
+
+[data collection]: ../../../data-collection/
diff --git a/docs/sources/flow/get-started/install/ansible.md b/docs/sources/get-started/install/ansible.md
similarity index 70%
rename from docs/sources/flow/get-started/install/ansible.md
rename to docs/sources/get-started/install/ansible.md
index 837f5553eb..bbd8209f89 100644
--- a/docs/sources/flow/get-started/install/ansible.md
+++ b/docs/sources/get-started/install/ansible.md
@@ -1,13 +1,8 @@
 ---
-aliases:
-- /docs/grafana-cloud/agent/flow/get-started/install/ansible/
-- /docs/grafana-cloud/monitor-infrastructure/agent/flow/get-started/install/ansible/
-- /docs/grafana-cloud/monitor-infrastructure/integrations/agent/flow/get-started/install/ansible/
-- /docs/grafana-cloud/send-data/agent/flow/get-started/install/ansible/
-canonical: https://grafana.com/docs/agent/latest/flow/get-started/install/ansible/
-description: Learn how to install Grafana Agent Flow with Ansible
+canonical: https://grafana.com/docs/alloy/latest/get-started/install/ansible/
+description: Learn how to install Grafana Alloy with Ansible
 menuTitle: Ansible
-title: Install Grafana Agent Flow with Ansible
+title: Install Grafana Alloy with Ansible
 weight: 550
 ---
 
@@ -17,7 +12,7 @@ You can use Ansible to install and manage {{< param "PRODUCT_NAME" >}} on Linux
 
 ## Before you begin
 
-- These steps assume you already have a working [Ansible](https://www.ansible.com/) setup and a pre-existing inventory.
+- These steps assume you already have a working [Ansible][] setup and a pre-existing inventory.
 - You can add the tasks below to any new or existing role.
 
 ## Steps
@@ -45,7 +40,6 @@ To add {{% param "PRODUCT_NAME" %}} to a host:
    ```
 
   Replace the following:
-
   - _``_: The path to the River configuration file on the Ansible Controller (Localhost).
 
 1. Run the Ansible playbook. Open a terminal window and run the following command from the Ansible playbook directory.
@@ -80,7 +74,5 @@ Main PID: 3176 (agent-linux-amd) - [Configure {{< param "PRODUCT_NAME" >}}][Configure] -{{% docs/reference %}} -[Configure]: "/docs/agent/ -> /docs/agent//flow/tasks/configure/configure-linux.md" -[Configure]: "/docs/grafana-cloud/ -> /docs/grafana-cloud/send-data/agent/flow/tasks/configure/configure-linux.md" -{{% /docs/reference %}} +[Ansible]: https://www.ansible.com/ +[Configure]: ../../../tasks/configure/configure-linux/ diff --git a/docs/sources/flow/get-started/install/binary.md b/docs/sources/get-started/install/binary.md similarity index 50% rename from docs/sources/flow/get-started/install/binary.md rename to docs/sources/get-started/install/binary.md index fa304df0ac..d58d142742 100644 --- a/docs/sources/flow/get-started/install/binary.md +++ b/docs/sources/get-started/install/binary.md @@ -1,17 +1,5 @@ --- -aliases: -- /docs/grafana-cloud/agent/flow/get-started/install/binary/ -- /docs/grafana-cloud/monitor-infrastructure/agent/flow/get-started/install/binary/ -- /docs/grafana-cloud/monitor-infrastructure/integrations/agent/flow/get-started/install/binary/ -- /docs/grafana-cloud/send-data/agent/flow/get-started/install/binary/ -# Previous docs aliases for backwards compatibility: -- ../../install/binary/ # /docs/agent/latest/flow/install/binary/ -- /docs/grafana-cloud/agent/flow/setup/install/binary/ -- /docs/grafana-cloud/monitor-infrastructure/agent/flow/setup/install/binary/ -- /docs/grafana-cloud/monitor-infrastructure/integrations/agent/flow/setup/install/binary/ -- /docs/grafana-cloud/send-data/agent/flow/setup/install/binary/ -- ../../setup/install/binary/ # /docs/agent/latest/flow/setup/install/binary/ -canonical: https://grafana.com/docs/agent/latest/flow/get-started/install/binary/ +canonical: https://grafana.com/docs/alloy/latest/get-started/install/binary/ description: Learn how to install Grafana Agent Flow as a standalone binary menuTitle: Standalone title: Install Grafana Agent Flow as a standalone binary @@ -31,7 +19,7 @@ weight: 600 To download {{< param "PRODUCT_NAME" >}} as a standalone binary, perform the following steps. -1. Navigate to the current {{< param "PRODUCT_ROOT_NAME" >}} [release](https://github.com/grafana/agent/releases) page. +1. Navigate to the current {{< param "PRODUCT_ROOT_NAME" >}} [release][] page. 1. Scroll down to the **Assets** section. @@ -46,14 +34,11 @@ To download {{< param "PRODUCT_NAME" >}} as a standalone binary, perform the fol ``` Replace the following: - - _``_: The path to the extracted binary. 
## Next steps - [Run {{< param "PRODUCT_NAME" >}}][Run] -{{% docs/reference %}} -[Run]: "/docs/agent/ -> /docs/agent//flow/get-started/run/binary.md" -[Run]: "/docs/grafana-cloud/ -> /docs/grafana-cloud/send-data/agent/flow/get-started/run/binary.md" -{{% /docs/reference %}} +[release]: https://github.com/grafana/alloy/releases +[Run]: ../../run/binary/ diff --git a/docs/sources/flow/get-started/install/chef.md b/docs/sources/get-started/install/chef.md similarity index 76% rename from docs/sources/flow/get-started/install/chef.md rename to docs/sources/get-started/install/chef.md index ef348384a5..1f17d1c569 100644 --- a/docs/sources/flow/get-started/install/chef.md +++ b/docs/sources/get-started/install/chef.md @@ -1,14 +1,8 @@ --- -aliases: -- /docs/grafana-cloud/agent/flow/get-started/install/chef/ -- /docs/grafana-cloud/monitor-infrastructure/agent/flow/get-started/install/chef/ -- /docs/grafana-cloud/monitor-infrastructure/integrations/agent/flow/get-started/install/chef/ -- /docs/grafana-cloud/send-data/agent/flow/get-started/install/chef/ - -canonical: https://grafana.com/docs/agent/latest/flow/get-started/install/chef/ -description: Learn how to install Grafana Agent Flow with Chef +canonical: https://grafana.com/docs/alloy/latest/get-started/install/chef/ +description: Learn how to install Grafana Alloy with Chef menuTitle: Chef -title: Install Grafana Agent Flow with Chef +title: Install Grafana Alloy with Chef weight: 550 --- @@ -20,7 +14,8 @@ You can use Chef to install and manage {{< param "PRODUCT_NAME" >}}. - These steps assume you already have a working [Chef][] setup. - You can add the following resources to any new or existing recipe. -- These tasks install {{< param "PRODUCT_NAME" >}} from the package repositories. The tasks target Linux systems from the following families: +- These tasks install {{< param "PRODUCT_NAME" >}} from the package repositories. + The tasks target Linux systems from the following families: - Debian (including Ubuntu) - RedHat Enterprise Linux - Amazon Linux @@ -97,8 +92,4 @@ The default configuration file location is `/etc/grafana-agent-flow.river`. 
You - [Configure {{< param "PRODUCT_NAME" >}}][Configure] [Chef]: https://www.chef.io/products/chef-infrastructure-management/ - -{{% docs/reference %}} -[Configure]: "/docs/agent/ -> /docs/agent//flow/tasks/configure/configure-linux.md" -[Configure]: "/docs/grafana-cloud/ -> /docs/grafana-cloud/send-data/agent/flow/tasks/configure/configure-linux.md" -{{% /docs/reference %}} +[Configure]: ../../../tasks/configure/configure-linux/ diff --git a/docs/sources/flow/get-started/install/docker.md b/docs/sources/get-started/install/docker.md similarity index 66% rename from docs/sources/flow/get-started/install/docker.md rename to docs/sources/get-started/install/docker.md index c7e07b1b3b..8db56e706b 100644 --- a/docs/sources/flow/get-started/install/docker.md +++ b/docs/sources/get-started/install/docker.md @@ -1,20 +1,8 @@ --- -aliases: -- /docs/grafana-cloud/agent/flow/get-started/install/docker/ -- /docs/grafana-cloud/monitor-infrastructure/agent/flow/get-started/install/docker/ -- /docs/grafana-cloud/monitor-infrastructure/integrations/agent/flow/get-started/install/docker/ -- /docs/grafana-cloud/send-data/agent/flow/get-started/install/docker/ -# Previous docs aliases for backwards compatibility: -- ../../install/docker/ # /docs/agent/latest/flow/install/docker/ -- /docs/grafana-cloud/agent/flow/setup/install/docker/ -- /docs/grafana-cloud/monitor-infrastructure/agent/flow/setup/install/docker/ -- /docs/grafana-cloud/monitor-infrastructure/integrations/agent/flow/setup/install/docker/ -- /docs/grafana-cloud/send-data/agent/flow/setup/install/docker/ -- ../../setup/install/docker/ # /docs/agent/latest/flow/setup/install/docker/ -canonical: https://grafana.com/docs/agent/latest/flow/get-started/install/docker/ -description: Learn how to install Grafana Agent Flow on Docker +canonical: https://grafana.com/docs/alloy/latest/get-started/install/docker/ +description: Learn how to install Grafana Alloy on Docker menuTitle: Docker -title: Run Grafana Agent Flow in a Docker container +title: Run Grafana Alloy in a Docker container weight: 100 --- @@ -94,10 +82,5 @@ To verify that {{< param "PRODUCT_NAME" >}} is running successfully, navigate to [Linux containers]: #run-a-linux-docker-container [Windows containers]: #run-a-windows-docker-container [Docker]: https://docker.io - -{{% docs/reference %}} -[run]: "/docs/agent/ -> /docs/agent//flow/reference/cli/run.md" -[run]: "/docs/grafana-cloud/ -> /docs/grafana-cloud/send-data/agent/flow/reference/cli/run.md" -[UI]: "/docs/agent/ -> /docs/agent//flow/tasks/debug.md#grafana-agent-flow-ui" -[UI]: "/docs/grafana-cloud/ -> /docs/grafana-cloud/send-data/agent/flow/tasks/debug.md#grafana-agent-flow-ui" -{{% /docs/reference %}} +[run]: ../../../reference/cli/run/ +[UI]: ../../../tasks/debug/#grafana-agent-flow-ui diff --git a/docs/sources/get-started/install/kubernetes.md b/docs/sources/get-started/install/kubernetes.md new file mode 100644 index 0000000000..68f93fb150 --- /dev/null +++ b/docs/sources/get-started/install/kubernetes.md @@ -0,0 +1,53 @@ +--- +canonical: https://grafana.com/docs/alloy/latest/get-started/install/kubernetes/ +description: Learn how to deploy Grafana Agent Flow on Kubernetes +menuTitle: Kubernetes +title: Deploy Grafana Agent Flow on Kubernetes +weight: 200 +--- + +# Deploy {{% param "PRODUCT_NAME" %}} on Kubernetes + +{{< param "PRODUCT_NAME" >}} can be deployed on Kubernetes by using the Helm chart for {{< param "PRODUCT_ROOT_NAME" >}}. + +## Before you begin + +* Install [Helm][] on your computer. 
+* Configure a Kubernetes cluster that you can use for {{< param "PRODUCT_NAME" >}}. +* Configure your local Kubernetes context to point at the cluster. + +## Deploy + +To deploy {{< param "PRODUCT_ROOT_NAME" >}} on Kubernetes using Helm, run the following commands in a terminal window: + +1. Add the Grafana Helm chart repository: + + ```shell + helm repo add grafana https://grafana.github.io/helm-charts + ``` + +1. Update the Grafana Helm chart repository: + + ```shell + helm repo update + ``` + +1. Install {{< param "PRODUCT_ROOT_NAME" >}}: + + ```shell + helm install grafana/grafana-agent + ``` + + Replace the following: + + - _``_: The name to use for your {{< param "PRODUCT_ROOT_NAME" >}} installation, such as `grafana-agent-flow`. + +For more information on the {{< param "PRODUCT_ROOT_NAME" >}} Helm chart, refer to the Helm chart documentation on [Artifact Hub][]. + +## Next steps + +- [Configure {{< param "PRODUCT_NAME" >}}][Configure] + +[Helm]: https://helm.sh +[Artifact Hub]: https://artifacthub.io/packages/helm/grafana/grafana-agent +[Configure]: ../../../tasks/configure/configure-kubernetes/ diff --git a/docs/sources/flow/get-started/install/linux.md b/docs/sources/get-started/install/linux.md similarity index 65% rename from docs/sources/flow/get-started/install/linux.md rename to docs/sources/get-started/install/linux.md index 2241aeb78d..a2ab220a67 100644 --- a/docs/sources/flow/get-started/install/linux.md +++ b/docs/sources/get-started/install/linux.md @@ -1,20 +1,8 @@ --- -aliases: -- /docs/grafana-cloud/agent/flow/get-started/install/linux/ -- /docs/grafana-cloud/monitor-infrastructure/agent/flow/get-started/install/linux/ -- /docs/grafana-cloud/monitor-infrastructure/integrations/agent/flow/get-started/install/linux/ -- /docs/grafana-cloud/send-data/agent/flow/get-started/install/linux/ -# Previous docs aliases for backwards compatibility: -- ../../install/linux/ # /docs/agent/latest/flow/install/linux/ -- /docs/grafana-cloud/agent/flow/setup/install/linux/ -- /docs/grafana-cloud/monitor-infrastructure/agent/flow/setup/install/linux/ -- /docs/grafana-cloud/monitor-infrastructure/integrations/agent/flow/setup/install/linux/ -- /docs/grafana-cloud/send-data/agent/flow/setup/install/linux/ -- ../../setup/install/linux/ # /docs/agent/latest/flow/setup/install/linux/ -canonical: https://grafana.com/docs/agent/latest/flow/get-started/install/linux/ -description: Learn how to install Grafana Agent Flow on Linux +canonical: https://grafana.com/docs/alloy/latest/get-started/install/linux/ +description: Learn how to install Grafana Alloy on Linux menuTitle: Linux -title: Install Grafana Agent Flow on Linux +title: Install Grafana Alloy on Linux weight: 300 --- @@ -128,9 +116,5 @@ To uninstall {{< param "PRODUCT_NAME" >}} on Linux, run the following commands i - [Run {{< param "PRODUCT_NAME" >}}][Run] - [Configure {{< param "PRODUCT_NAME" >}}][Configure] -{{% docs/reference %}} -[Run]: "/docs/agent/ -> /docs/agent//flow/get-started/run/linux.md" -[Run]: "/docs/grafana-cloud/ -> /docs/grafana-cloud/send-data/agent/flow/get-started/run/linux.md" -[Configure]: "/docs/agent/ -> /docs/agent//flow/tasks/configure/configure-linux.md" -[Configure]: "/docs/grafana-cloud/ -> /docs/grafana-cloud/send-data/agent/flow/tasks/configure/configure-linux.md" -{{% /docs/reference %}} +[Run]: ../../run/linux/ +[Configure]: ../../../tasks/configure/configure-linux/ diff --git a/docs/sources/get-started/install/macos.md b/docs/sources/get-started/install/macos.md new file mode 100644 index 
0000000000..f151f0fd6a
--- /dev/null
+++ b/docs/sources/get-started/install/macos.md
@@ -0,0 +1,70 @@
+---
+canonical: https://grafana.com/docs/alloy/latest/get-started/install/macos/
+description: Learn how to install Grafana Alloy on macOS
+menuTitle: macOS
+title: Install Grafana Alloy on macOS
+weight: 400
+---
+
+# Install {{% param "PRODUCT_NAME" %}} on macOS
+
+You can install {{< param "PRODUCT_NAME" >}} on macOS with Homebrew.
+
+{{< admonition type="note" >}}
+The default prefix for Homebrew on Intel is `/usr/local`.
+The default prefix for Homebrew on Apple Silicon is `/opt/homebrew`.
+To verify the default prefix for Homebrew on your computer, open a terminal window and type `brew --prefix`.
+{{< /admonition >}}
+
+## Before you begin
+
+* Install [Homebrew][] on your computer.
+
+## Install
+
+To install {{< param "PRODUCT_NAME" >}} on macOS, run the following commands in a terminal window.
+
+1. Add the Grafana Homebrew tap:
+
+   ```shell
+   brew tap grafana/grafana
+   ```
+
+1. Install {{< param "PRODUCT_NAME" >}}:
+
+   ```shell
+   brew install grafana-agent-flow
+   ```
+
+## Upgrade
+
+To upgrade {{< param "PRODUCT_NAME" >}} on macOS, run the following commands in a terminal window.
+
+1. Upgrade {{< param "PRODUCT_NAME" >}}:
+
+   ```shell
+   brew upgrade grafana-agent-flow
+   ```
+
+1. Restart {{< param "PRODUCT_NAME" >}}:
+
+   ```shell
+   brew services restart grafana-agent-flow
+   ```
+
+## Uninstall
+
+To uninstall {{< param "PRODUCT_NAME" >}} on macOS, run the following command in a terminal window:
+
+```shell
+brew uninstall grafana-agent-flow
+```
+
+## Next steps
+
+- [Run {{< param "PRODUCT_NAME" >}}][Run]
+- [Configure {{< param "PRODUCT_NAME" >}}][Configure]
+
+[Homebrew]: https://brew.sh
+[Run]: ../../run/macos/
+[Configure]: ../../../tasks/configure/configure-macos/
diff --git a/docs/sources/flow/get-started/install/puppet.md b/docs/sources/get-started/install/puppet.md
similarity index 72%
rename from docs/sources/flow/get-started/install/puppet.md
rename to docs/sources/get-started/install/puppet.md
index db3fb2b488..021221ab45 100644
--- a/docs/sources/flow/get-started/install/puppet.md
+++ b/docs/sources/get-started/install/puppet.md
@@ -1,11 +1,5 @@
 ---
-aliases:
-- /docs/grafana-cloud/agent/flow/get-started/install/puppet/
-- /docs/grafana-cloud/monitor-infrastructure/agent/flow/get-started/install/puppet/
-- /docs/grafana-cloud/monitor-infrastructure/integrations/agent/flow/get-started/install/puppet/
-- /docs/grafana-cloud/send-data/agent/flow/get-started/install/puppet/
-
-canonical: https://grafana.com/docs/agent/latest/flow/get-started/install/puppet/
+canonical: https://grafana.com/docs/alloy/latest/get-started/install/puppet/
 description: Learn how to install Grafana Agent Flow with Puppet
 menuTitle: Puppet
 title: Install Grafana Agent Flow with Puppet
@@ -41,7 +35,7 @@ To add {{< param "PRODUCT_NAME" >}} to a host:
 }
 ```
 
-1. Create a new [Puppet][] manifest with the following class to add the Grafana package repositories, install the `grafana-agent-flow` package, and run the service:
+1. Create a new [Puppet][] manifest with the following class to add the Grafana package repositories, install the `grafana-alloy` package, and run the service:
 
 ```ruby
 class grafana_agent::grafana_agent_flow () {
@@ -97,17 +91,14 @@ To add {{< param "PRODUCT_NAME" >}} to a host:
 
 ## Configuration
 
-The `grafana-agent-flow` package installs a default configuration file that doesn't send telemetry anywhere.
+The `grafana-alloy` package installs a default configuration file that doesn't send telemetry anywhere. -The default configuration file location is `/etc/grafana-agent-flow.river`. You can replace this file with your own configuration, or create a new configuration file for the service to use. +The default configuration file location is `/etc/grafana-alloy.river`. +You can replace this file with your own configuration, or create a new configuration file for the service to use. ## Next steps - [Configure {{< param "PRODUCT_NAME" >}}][Configure] [Puppet]: https://www.puppet.com/ - -{{% docs/reference %}} -[Configure]: "/docs/agent/ -> /docs/agent//flow/tasks/configure/configure-linux.md" -[Configure]: "/docs/grafana-cloud/ -> /docs/grafana-cloud/send-data/agent/flow/tasks/configure/configure-linux.md" -{{% /docs/reference %}} +[Configure]: ../../../tasks/configure/configure-linux/ diff --git a/docs/sources/flow/get-started/install/windows.md b/docs/sources/get-started/install/windows.md similarity index 57% rename from docs/sources/flow/get-started/install/windows.md rename to docs/sources/get-started/install/windows.md index a20ed34497..ba827e3c46 100644 --- a/docs/sources/flow/get-started/install/windows.md +++ b/docs/sources/get-started/install/windows.md @@ -1,20 +1,8 @@ --- -aliases: -- /docs/grafana-cloud/agent/flow/get-started/install/windows/ -- /docs/grafana-cloud/monitor-infrastructure/agent/flow/get-started/install/windows/ -- /docs/grafana-cloud/monitor-infrastructure/integrations/agent/flow/get-started/install/windows/ -- /docs/grafana-cloud/send-data/agent/flow/get-started/install/windows/ -# Previous docs aliases for backwards compatibility: -- ../../install/windows/ # /docs/agent/latest/flow/install/windows/ -- /docs/grafana-cloud/agent/flow/setup/install/windows/ -- /docs/grafana-cloud/monitor-infrastructure/agent/flow/setup/install/windows/ -- /docs/grafana-cloud/monitor-infrastructure/integrations/agent/flow/setup/install/windows/ -- /docs/grafana-cloud/send-data/agent/flow/setup/install/windows/ -- ../../setup/install/windows/ # /docs/agent/latest/flow/setup/install/windows/ -canonical: https://grafana.com/docs/agent/latest/flow/get-started/install/windows/ -description: Learn how to install Grafana Agent Flow on Windows +canonical: https://grafana.com/docs/alloy/latest/get-started/install/windows/ +description: Learn how to install Grafana Alloy on Windows menuTitle: Windows -title: Install Grafana Agent Flow on Windows +title: Install Grafana Alloy on Windows weight: 500 --- @@ -76,7 +64,7 @@ To do a silent install of {{< param "PRODUCT_NAME" >}} on Windows, perform the f ## Uninstall -You can uninstall {{< param "PRODUCT_NAME" >}} with Windows Remove Programs or `C:\Program Files\Grafana Agent\uninstaller.exe`. +You can uninstall {{< param "PRODUCT_NAME" >}} with Windows Remove Programs or `C:\Program Files\Grafana Alloy\uninstaller.exe`. Uninstalling {{< param "PRODUCT_NAME" >}} stops the service and removes it from disk. This includes any configuration files in the installation directory. @@ -84,16 +72,10 @@ This includes any configuration files in the installation directory. 
## Next steps -- [Run {{< param "PRODUCT_NAME" >}}][Start] +- [Run {{< param "PRODUCT_NAME" >}}][Run] - [Configure {{< param "PRODUCT_NAME" >}}][Configure] -[latest]: https://github.com/grafana/agent/releases/latest - -{{% docs/reference %}} -[Run]: "/docs/agent/ -> /docs/agent//flow/get-started/run/windows.md" -[Run]: "/docs/grafana-cloud/ -> /docs/grafana-cloud/send-data/agent/flow/get-started/run/windows.md" -[Configure]: "/docs/agent/ -> /docs/agent//flow/tasks/configure/configure-windows.md" -[Configure]: "/docs/grafana-cloud/ -> /docs/grafana-cloud/send-data/agent/flow/tasks/configure/configure-windows.md" -[data collection]: "/docs/agent/ -> /docs/agent//data-collection.md" -[data collection]: "/docs/grafana-cloud/ -> /docs/grafana-cloud/send-data/agent/data-collection.md" -{{% /docs/reference %}} +[latest]: https://github.com/grafana/alloy/releases/latest +[data collection]: ../../../data-collection/ +[Run]: ../../run/windows/ +[Configure]: ../../../tasks/configure/configure-windows/ diff --git a/docs/sources/get-started/run/_index.md b/docs/sources/get-started/run/_index.md new file mode 100644 index 0000000000..90dbc4192d --- /dev/null +++ b/docs/sources/get-started/run/_index.md @@ -0,0 +1,16 @@ +--- +canonical: https://grafana.com/docs/alloy/latest/get-started/run/ +description: Learn how to run Grafana Alloy +menuTitle: Run +title: Run Grafana Alloy +weight: 50 +--- + +# Run {{% param "PRODUCT_NAME" %}} + +Use the following pages to learn how to start, restart, and stop {{< param "PRODUCT_NAME" >}} after it's installed. +For installation instructions, refer to [Install {{< param "PRODUCT_NAME" >}}][Install]. + +{{< section >}} + +[Install]: ../install/ diff --git a/docs/sources/flow/get-started/run/binary.md b/docs/sources/get-started/run/binary.md similarity index 78% rename from docs/sources/flow/get-started/run/binary.md rename to docs/sources/get-started/run/binary.md index 0b9ac5b7d7..8000ec6786 100644 --- a/docs/sources/flow/get-started/run/binary.md +++ b/docs/sources/get-started/run/binary.md @@ -1,13 +1,8 @@ --- -aliases: - - /docs/grafana-cloud/agent/flow/get-started/run/binary/ - - /docs/grafana-cloud/monitor-infrastructure/agent/flow/get-started/run/binary/ - - /docs/grafana-cloud/monitor-infrastructure/integrations/agent/flow/get-started/run/binary/ - - /docs/grafana-cloud/send-data/agent/flow/get-started/run/binary/ -canonical: https://grafana.com/docs/agent/latest/flow/get-started/run/binary/ -description: Learn how to run Grafana Agent Flow as a standalone binary +canonical: https://grafana.com/docs/alloy/latest/flow/get-started/run/binary/ +description: Learn how to run Grafana Alloy as a standalone binary menuTitle: Standalone -title: Run Grafana Agent Flow as a standalone binary +title: Run Grafana Alloy as a standalone binary weight: 600 --- @@ -118,9 +113,5 @@ These steps assume you have a default systemd and {{< param "PRODUCT_NAME" >}} c 1. Use the [Linux][StartLinux] systemd commands to manage your standalone Linux installation of {{< param "PRODUCT_NAME" >}}. 
-{{% docs/reference %}} -[InstallBinary]: "/docs/agent/ -> /docs/agent//flow/get-started/install/binary.md" -[InstallBinary]: "/docs/grafana-cloud/ -> /docs/grafana-cloud/send-data/agent/flow/flow/get-started/install/binary.md" -[StartLinux]: "/docs/agent/ -> /docs/agent//flow/get-started/run/linux.md" -[StartLinux]: "/docs/grafana-cloud/ -> /docs/grafana-cloud/send-data/agent/flow/flow/get-started/run/linux.md" -{{% /docs/reference %}} +[InstallBinary]: ../../install/binary/ +[StartLinux]: ../linux/ diff --git a/docs/sources/flow/get-started/run/linux.md b/docs/sources/get-started/run/linux.md similarity index 63% rename from docs/sources/flow/get-started/run/linux.md rename to docs/sources/get-started/run/linux.md index 1085aaabdf..0fb7873d69 100644 --- a/docs/sources/flow/get-started/run/linux.md +++ b/docs/sources/get-started/run/linux.md @@ -1,10 +1,5 @@ --- -aliases: - - /docs/grafana-cloud/agent/flow/get-started/run/linux/ - - /docs/grafana-cloud/monitor-infrastructure/agent/flow/get-started/run/linux/ - - /docs/grafana-cloud/monitor-infrastructure/integrations/agent/flow/get-started/run/linux/ - - /docs/grafana-cloud/send-data/agent/flow/get-started/run/linux/ -canonical: https://grafana.com/docs/agent/latest/flow/get-started/run/linux/ +canonical: https://grafana.com/docs/alloy/latest/get-started/run/linux/ description: Learn how to run Grafana Agent Flow on Linux menuTitle: Linux title: Run Grafana Agent Flow on Linux @@ -15,8 +10,6 @@ weight: 300 {{< param "PRODUCT_NAME" >}} is [installed][InstallLinux] as a [systemd][] service on Linux. -[systemd]: https://systemd.io/ - ## Start {{% param "PRODUCT_NAME" %}} To start {{< param "PRODUCT_NAME" >}}, run the following command in a terminal window: @@ -67,9 +60,6 @@ sudo journalctl -u grafana-agent-flow - [Configure {{< param "PRODUCT_NAME" >}}][Configure] -{{% docs/reference %}} -[InstallLinux]: "/docs/agent/ -> /docs/agent//flow/get-started/install/linux.md" -[InstallLinux]: "/docs/grafana-cloud/ -> /docs/grafana-cloud/send-data/agent/flow/flow/get-started/install/linux.md" -[Configure]: "/docs/agent/ -> /docs/agent//flow/tasks/configure/configure-linux.md" -[Configure]: "/docs/grafana-cloud/ -> /docs/grafana-cloud/send-data/agent/flow/tasks/configure/configure-linux.md" -{{% /docs/reference %}} +[InstallLinux]: ../../install/linux/ +[systemd]: https://systemd.io/ +[Configure]: ../../../tasks/configure/configure-linux/ diff --git a/docs/sources/get-started/run/macos.md b/docs/sources/get-started/run/macos.md new file mode 100644 index 0000000000..df3ef5537c --- /dev/null +++ b/docs/sources/get-started/run/macos.md @@ -0,0 +1,57 @@ +--- +canonical: https://grafana.com/docs/alloy/latest/get-started/run/macos/ +description: Learn how to run Grafana Alloy on macOS +menuTitle: macOS +title: Run Grafana Alloy on macOS +weight: 400 +--- + +# Run {{% param "PRODUCT_NAME" %}} on macOS + +{{< param "PRODUCT_NAME" >}} is [installed][InstallMacOS] as a launchd service on macOS. + +## Start {{% param "PRODUCT_NAME" %}} + +To start {{< param "PRODUCT_NAME" >}}, run the following command in a terminal window: + +```shell +brew services start grafana-agent-flow +``` + +{{< param "PRODUCT_NAME" >}} automatically runs when the system starts. 
+ +(Optional) To verify that the service is running, run the following command in a terminal window: + +```shell +brew services info grafana-agent-flow +``` + +## Restart {{% param "PRODUCT_NAME" %}} + +To restart {{< param "PRODUCT_NAME" >}}, run the following command in a terminal window: + +```shell +brew services restart grafana-agent-flow +``` + +## Stop {{% param "PRODUCT_NAME" %}} + +To stop {{< param "PRODUCT_NAME" >}}, run the following command in a terminal window: + +```shell +brew services stop grafana-agent-flow +``` + +## View {{% param "PRODUCT_NAME" %}} logs on macOS + +By default, logs are written to `$(brew --prefix)/var/log/grafana-agent-flow.log` and `$(brew --prefix)/var/log/grafana-agent-flow.err.log`. + +If you followed [Configure the {{< param "PRODUCT_NAME" >}} service][ConfigureService] and changed the path where logs are written, refer to your current copy of the {{< param "PRODUCT_NAME" >}} formula to locate your log files. + +## Next steps + +- [Configure {{< param "PRODUCT_NAME" >}}][ConfigureMacOS] + +[InstallMacOS]: ../../install/macos/ +[ConfigureMacOS]: ../../../tasks/configure/configure-macos/ +[ConfigureService]: ../../../tasks/configure/configure-macos/#configure-the-grafana-alloy-service diff --git a/docs/sources/get-started/run/windows.md b/docs/sources/get-started/run/windows.md new file mode 100644 index 0000000000..1943d3fe28 --- /dev/null +++ b/docs/sources/get-started/run/windows.md @@ -0,0 +1,45 @@ +--- +canonical: https://grafana.com/docs/alloy/latest/get-started/run/windows/ +description: Learn how to run Grafana Alloy on Windows +menuTitle: Windows +title: Run Grafana Alloy on Windows +weight: 500 +--- + +# Run {{% param "PRODUCT_NAME" %}} on Windows + +{{< param "PRODUCT_NAME" >}} is [installed][InstallWindows] as a Windows Service. +The service is configured to automatically run on startup. + +To verify that {{< param "PRODUCT_NAME" >}} is running as a Windows Service: + +1. Open the Windows Services manager (services.msc): + + 1. Right click on the Start Menu and select **Run**. + + 1. Type: `services.msc` and click **OK**. + +1. Scroll down to find the **{{< param "PRODUCT_NAME" >}}** service and verify that the **Status** is **Running**. + +## View {{% param "PRODUCT_NAME" %}} logs + +When running on Windows, {{< param "PRODUCT_NAME" >}} writes its logs to Windows Event Logs with an event source name of **{{< param "PRODUCT_NAME" >}}**. + +To view the logs, perform the following steps: + +1. Open the Event Viewer: + + 1. Right click on the Start Menu and select **Run**. + + 1. Type `eventvwr` and click **OK**. + +1. In the Event Viewer, click on **Windows Logs > Application**. + +1. Search for events with the source **{{< param "PRODUCT_NAME" >}}**. 
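
The amount of detail written to the Event Log follows the `logging` block in the {{< param "PRODUCT_NAME" >}} configuration file.
As an illustrative sketch (the level shown is an example, not a recommendation), you can lower the verbosity of what appears in Event Viewer like this:

```river
// Only warnings and errors are written to the Windows Event Log.
logging {
  level  = "warn"
  format = "logfmt"
}
```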
+ +## Next steps + +- [Configure {{< param "PRODUCT_NAME" >}}][Configure] + +[InstallWindows]: ../../install/windows/ +[Configure]: ../../../tasks/configure/configure-windows/ diff --git a/docs/sources/operator/_index.md b/docs/sources/operator/_index.md deleted file mode 100644 index a39241c87a..0000000000 --- a/docs/sources/operator/_index.md +++ /dev/null @@ -1,72 +0,0 @@ ---- -aliases: -- /docs/grafana-cloud/agent/operator/ -- /docs/grafana-cloud/monitor-infrastructure/agent/operator/ -- /docs/grafana-cloud/monitor-infrastructure/integrations/agent/operator/ -- /docs/grafana-cloud/send-data/agent/operator/ -canonical: https://grafana.com/docs/agent/latest/operator/ -description: Learn about the static mode Kubernetes operator -menuTitle: Static mode Kubernetes operator -title: Static mode Kubernetes operator (Beta) -weight: 300 ---- - -# Static mode Kubernetes operator (Beta) - -Grafana Agent Operator is a [Kubernetes operator][] for the [static mode][] of -Grafana Agent. It makes it easier to deploy and configure static mode to -collect telemetry data from Kubernetes resources. - -Grafana Agent Operator supports consuming various [custom resources][] for -telemetry collection: - -* Prometheus Operator [ServiceMonitor][] resources for collecting metrics from Kubernetes [Services][]. -* Prometheus Operator [PodMonitor][] resources for collecting metrics from Kubernetes [Pods][]. -* Prometheus Operator [Probe][] resources for collecting metrics from Kubernetes [Ingresses][]. -* Custom [PodLogs][] resources for collecting logs. - -{{< admonition type="note" >}} -Grafana Agent Operator does not collect traces. -{{< /admonition >}} - -Grafana Agent Operator is currently in [Beta][], and is subject to change or -being removed with functionality which covers the same use case. - -{{< admonition type="note" >}} -If you are shipping your data to Grafana Cloud, use [Kubernetes Monitoring](/docs/grafana-cloud/kubernetes-monitoring/) to set up Agent Operator. -Kubernetes Monitoring provides a simplified approach and preconfigured dashboards and alerts. -{{< /admonition >}} - -Grafana Agent Operator uses additional custom resources to manage the deployment -and configuration of Grafana Agents running in static mode. In addition to the -supported custom resources, you can also provide your own Service Discovery -(SD) configurations to collect metrics from other types of sources. - -Grafana Agent Operator is particularly useful for Helm users, where manually -writing generic service discovery to match all of your chart installations can -be difficult, or where manually writing a specific SD for each chart -installation can be tedious. - -The following sections describe how to use Grafana Agent Operator: - -| Topic | Describes | -|---|---| -| [Configure Kubernetes Monitoring using Agent Operator](/docs/grafana-cloud/monitor-infrastructure/kubernetes-monitoring/configuration/configure-infrastructure-manually/k8s-agent-operator/) | Use the Kubernetes Monitoring solution to set up monitoring of your Kubernetes cluster and to install preconfigured dashboards and alerts. | -| [Install Grafana Agent Operator with Helm]({{< relref "./helm-getting-started" >}}) | How to deploy the Grafana Agent Operator into your Kubernetes cluster using the grafana-agent-operator Helm chart. | -| [Install Grafana Agent Operator]({{< relref "./getting-started" >}}) | How to deploy the Grafana Agent Operator into your Kubernetes cluster without using Helm. 
-| [Deploy the Grafana Agent Operator resources]({{< relref "./deploy-agent-operator-resources" >}}) | How to roll out the Grafana Agent Operator custom resources needed to begin monitoring your cluster. Complete this procedure *after* installing Grafana Agent Operator—either with or without Helm. |
-| [Grafana Agent Operator architecture]({{< relref "./architecture" >}}) | Learn about the resources used by Agent Operator to collect telemetry data and how it discovers the hierarchy of custom resources, continually reconciling the hierarchy. |
-| [Set up Agent Operator integrations]({{< relref "./operator-integrations" >}}) | Learn how to set up node-exporter and mysqld-exporter integrations. |
-
-[Kubernetes operator]: https://www.cncf.io/blog/2022/06/15/kubernetes-operators-what-are-they-some-examples/
-[static mode]: {{< relref "../static/" >}}
-[Services]: https://kubernetes.io/docs/concepts/services-networking/service/
-[Pods]: https://kubernetes.io/docs/concepts/workloads/pods/
-[Ingresses]: https://kubernetes.io/docs/concepts/services-networking/ingress/
-[custom resources]: https://kubernetes.io/docs/concepts/extend-kubernetes/api-extension/custom-resources/
-[Beta]: {{< relref "../stability.md#beta" >}}
-[ServiceMonitor]: https://prometheus-operator.dev/docs/operator/api/#monitoring.coreos.com/v1.ServiceMonitor
-[PodMonitor]: https://prometheus-operator.dev/docs/operator/api/#monitoring.coreos.com/v1.PodMonitor
-[Probe]: https://prometheus-operator.dev/docs/operator/api/#monitoring.coreos.com/v1.Probe
-[PodLogs]: {{< relref "./api.md#podlogs-a-namemonitoringgrafanacomv1alpha1podlogsa">}}
-[Prometheus Operator]: https://github.com/prometheus-operator/prometheus-operator
diff --git a/docs/sources/operator/add-custom-scrape-jobs.md b/docs/sources/operator/add-custom-scrape-jobs.md
deleted file mode 100644
index 6f4fb9cc02..0000000000
--- a/docs/sources/operator/add-custom-scrape-jobs.md
+++ /dev/null
@@ -1,130 +0,0 @@
----
-aliases:
-- /docs/grafana-cloud/agent/operator/add-custom-scrape-jobs/
-- /docs/grafana-cloud/monitor-infrastructure/agent/operator/add-custom-scrape-jobs/
-- /docs/grafana-cloud/monitor-infrastructure/integrations/agent/operator/add-custom-scrape-jobs/
-- /docs/grafana-cloud/send-data/agent/operator/add-custom-scrape-jobs/
-canonical: https://grafana.com/docs/agent/latest/operator/add-custom-scrape-jobs/
-description: Learn how to add custom scrape jobs
-title: Add custom scrape jobs
-weight: 400
----
-
-# Add custom scrape jobs
-
-Sometimes you want to add a scrape job for something that isn't supported by the
-standard set of Prometheus Operator CRDs. A common example of this is node-level
-metrics.
-
-To do this, you'll need to write custom scrape configs and store them in a
-Kubernetes Secret:
-
-```yaml
-apiVersion: v1
-kind: Secret
-metadata:
-  name: extra-jobs
-  namespace: operator
-stringData:
-  jobs.yaml: |
-    <array of jobs>
-```
-
-Replace `<array of jobs>` above with the array of Prometheus scrape jobs to include.
-
-For example, to collect metrics from Kubelet and cAdvisor, use the following:
-
-```yaml
-apiVersion: v1
-kind: Secret
-metadata:
-  name: extra-jobs
-  namespace: operator
-stringData:
-  jobs.yaml: |
-    - bearer_token_file: /var/run/secrets/kubernetes.io/serviceaccount/token
-      job_name: integrations/kubernetes/kubelet
-      kubernetes_sd_configs:
-      - role: node
-      relabel_configs:
-      - replacement: kubernetes.default.svc:443
-        target_label: __address__
-      - regex: (.+)
-        source_labels: [__meta_kubernetes_node_name]
-        replacement: /api/v1/nodes/$1/proxy/metrics
-        target_label: __metrics_path__
-      - action: hashmod
-        modulus: $(SHARDS)
-        source_labels:
-        - __address__
-        target_label: __tmp_hash
-      - action: keep
-        regex: $(SHARD)
-        source_labels:
-        - __tmp_hash
-      scheme: https
-      tls_config:
-        ca_file: /var/run/secrets/kubernetes.io/serviceaccount/ca.crt
-    - bearer_token_file: /var/run/secrets/kubernetes.io/serviceaccount/token
-      job_name: integrations/kubernetes/cadvisor
-      kubernetes_sd_configs:
-      - role: node
-      relabel_configs:
-      - replacement: kubernetes.default.svc:443
-        target_label: __address__
-      - regex: (.+)
-        replacement: /api/v1/nodes/$1/proxy/metrics/cadvisor
-        source_labels:
-        - __meta_kubernetes_node_name
-        target_label: __metrics_path__
-      - action: hashmod
-        modulus: $(SHARDS)
-        source_labels:
-        - __address__
-        target_label: __tmp_hash
-      - action: keep
-        regex: $(SHARD)
-        source_labels:
-        - __tmp_hash
-      scheme: https
-      tls_config:
-        ca_file: /var/run/secrets/kubernetes.io/serviceaccount/ca.crt
-```
-
-Note that you **should** always add these two relabel_configs for each custom job:
-
-```yaml
-- action: hashmod
-  modulus: $(SHARDS)
-  source_labels:
-  - __address__
-  target_label: __tmp_hash
-- action: keep
-  regex: $(SHARD)
-  source_labels:
-  - __tmp_hash
-```
-
-These rules ensure that if your GrafanaAgent has multiple metrics shards, only
-one pod per replica collects metrics for each job.
-
-Once your Secret is defined, add an `additionalScrapeConfigs` field to your
-MetricsInstance:
-
-```yaml
-apiVersion: monitoring.grafana.com/v1alpha1
-kind: MetricsInstance
-metadata:
-  labels:
-    name: grafana-agent
-  name: primary
-  namespace: operator
-spec:
-  additionalScrapeConfigs:
-    name: extra-jobs
-    key: jobs.yaml
-  # ... Other settings ...
-```
-
-The Secret **MUST** be in the same namespace as the MetricsInstance.
diff --git a/docs/sources/operator/api.md b/docs/sources/operator/api.md
deleted file mode 100644
index 04df805f73..0000000000
--- a/docs/sources/operator/api.md
+++ /dev/null
@@ -1,566 +0,0 @@
----
-aliases:
-- /docs/agent/latest/operator/crd/
-- /docs/grafana-cloud/agent/operator/api/
-- /docs/grafana-cloud/monitor-infrastructure/agent/operator/api/
-- /docs/grafana-cloud/monitor-infrastructure/integrations/agent/operator/api/
-- /docs/grafana-cloud/send-data/agent/operator/api/
-canonical: https://grafana.com/docs/agent/latest/operator/api/
-title: Custom Resource Definition Reference
-description: Learn about the Grafana Agent API
-weight: 500
----
-# Custom Resource Definition Reference
-## Resource Types:
-* [Deployment](#monitoring.grafana.com/v1alpha1.Deployment)
-* [GrafanaAgent](#monitoring.grafana.com/v1alpha1.GrafanaAgent)
-* [IntegrationsDeployment](#monitoring.grafana.com/v1alpha1.IntegrationsDeployment)
-* [LogsDeployment](#monitoring.grafana.com/v1alpha1.LogsDeployment)
-* [MetricsDeployment](#monitoring.grafana.com/v1alpha1.MetricsDeployment)
-### Deployment
-Deployment is a set of discovered resources relative to a GrafanaAgent. The tree of resources contained in a Deployment forms the resource hierarchy used for reconciling a GrafanaAgent.
-#### Fields
-|Field|Description|
-|-|-|
-|apiVersion|string&#13;
`monitoring.grafana.com/v1alpha1`| -|kind|string
`Deployment`| -|`Agent`
_[GrafanaAgent](#monitoring.grafana.com/v1alpha1.GrafanaAgent)_| Root resource in the deployment. | -|`Metrics`
_[[]MetricsDeployment](#monitoring.grafana.com/v1alpha1.MetricsDeployment)_| Metrics resources discovered by Agent. | -|`Logs`
_[[]LogsDeployment](#monitoring.grafana.com/v1alpha1.LogsDeployment)_| Logs resources discovered by Agent. | -|`Integrations`
_[[]IntegrationsDeployment](#monitoring.grafana.com/v1alpha1.IntegrationsDeployment)_| Integrations resources discovered by Agent. | -|`Secrets`
_[github.com/grafana/agent/internal/static/operator/assets.SecretStore](https://pkg.go.dev/github.com/grafana/agent/internal/static/operator/assets#SecretStore)_| The full list of Secrets referenced by resources in the Deployment. | -### GrafanaAgent -(Appears on:[Deployment](#monitoring.grafana.com/v1alpha1.Deployment)) -GrafanaAgent defines a Grafana Agent deployment. -#### Fields -|Field|Description| -|-|-| -|apiVersion|string
`monitoring.grafana.com/v1alpha1`| -|kind|string
`GrafanaAgent`| -|`metadata`
_[Kubernetes meta/v1.ObjectMeta](https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.24/#objectmeta-v1-meta)_| Refer to the Kubernetes API documentation for the fields of the `metadata` field. | -|`spec`
_[GrafanaAgentSpec](#monitoring.grafana.com/v1alpha1.GrafanaAgentSpec)_| Spec holds the specification of the desired behavior for the Grafana Agent cluster. | -|`logLevel`
_string_| LogLevel controls the log level of the generated pods. Defaults to "info" if not set. | -|`logFormat`
_string_| LogFormat controls the logging format of the generated pods. Defaults to "logfmt" if not set. | -|`apiServer`
_[github.com/prometheus-operator/prometheus-operator/pkg/apis/monitoring/v1.APIServerConfig](https://prometheus-operator.dev/docs/operator/api/#monitoring.coreos.com/v1.APIServerConfig)_| APIServerConfig lets you specify a host and auth methods to access the Kubernetes API server. If left empty, the Agent assumes that it is running inside of the cluster and will discover API servers automatically and use the pod's CA certificate and bearer token file at /var/run/secrets/kubernetes.io/serviceaccount. | -|`podMetadata`
_[github.com/prometheus-operator/prometheus-operator/pkg/apis/monitoring/v1.EmbeddedObjectMetadata](https://prometheus-operator.dev/docs/operator/api/#monitoring.coreos.com/v1.EmbeddedObjectMetadata)_| PodMetadata configures Labels and Annotations which are propagated to created Grafana Agent pods. | -|`version`
_string_| Version of Grafana Agent to be deployed. | -|`paused`
_bool_| Paused prevents actions except for deletion to be performed on the underlying managed objects. | -|`image`
_string_| Image, when specified, overrides the image used to run Agent. Specify the image along with a tag. You still need to set the version to ensure Grafana Agent Operator knows which version of Grafana Agent is being configured. | -|`configReloaderVersion`
_string_| Version of Config Reloader to be deployed. | -|`configReloaderImage`
_string_| Image, when specified, overrides the image used to run Config Reloader. Specify the image along with a tag. You still need to set the version to ensure Grafana Agent Operator knows which version of Grafana Agent is being configured. | -|`imagePullSecrets`
_[[]Kubernetes core/v1.LocalObjectReference](https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.24/#localobjectreference-v1-core)_| ImagePullSecrets holds an optional list of references to Secrets within the same namespace used for pulling the Grafana Agent image from registries. More info: https://kubernetes.io/docs/concepts/containers/images/#specifying-imagepullsecrets-on-a-pod | -|`storage`
_[github.com/prometheus-operator/prometheus-operator/pkg/apis/monitoring/v1.StorageSpec](https://prometheus-operator.dev/docs/operator/api/#monitoring.coreos.com/v1.StorageSpec)_| Storage spec to specify how storage will be used. | -|`volumes`
_[[]Kubernetes core/v1.Volume](https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.24/#volume-v1-core)_| Volumes allows configuration of additional volumes on the output StatefulSet definition. The volumes specified are appended to other volumes that are generated as a result of StorageSpec objects. | -|`volumeMounts`
_[[]Kubernetes core/v1.VolumeMount](https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.24/#volumemount-v1-core)_| VolumeMounts lets you configure additional VolumeMounts on the output StatefulSet definition. Specified VolumeMounts are appended to other VolumeMounts generated as a result of StorageSpec objects in the Grafana Agent container. | -|`resources`
_[Kubernetes core/v1.ResourceRequirements](https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.24/#resourcerequirements-v1-core)_| Resources holds requests and limits for individual pods. | -|`nodeSelector`
_map[string]string_| NodeSelector defines which nodes pods should be scheduled on. | -|`serviceAccountName`&#13;
_string_| ServiceAccountName is the name of the ServiceAccount to use for running Grafana Agent pods. | -|`secrets`
_[]string_| Secrets is a list of secrets in the same namespace as the GrafanaAgent object which will be mounted into each running Grafana Agent pod. The secrets are mounted into /var/lib/grafana-agent/extra-secrets/<secret-name>. | -|`configMaps`
_[]string_| ConfigMaps is a list of config maps in the same namespace as the GrafanaAgent object which will be mounted into each running Grafana Agent pod. The ConfigMaps are mounted into /var/lib/grafana-agent/extra-configmaps/<configmap-name>. | -|`affinity`
_[Kubernetes core/v1.Affinity](https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.24/#affinity-v1-core)_| Affinity, if specified, controls pod scheduling constraints. | -|`tolerations`
_[[]Kubernetes core/v1.Toleration](https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.24/#toleration-v1-core)_| Tolerations, if specified, controls the pod's tolerations. | -|`topologySpreadConstraints`
_[[]Kubernetes core/v1.TopologySpreadConstraint](https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.24/#topologyspreadconstraint-v1-core)_| TopologySpreadConstraints, if specified, controls the pod's topology spread constraints. | -|`securityContext`
_[Kubernetes core/v1.PodSecurityContext](https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.24/#podsecuritycontext-v1-core)_| SecurityContext holds pod-level security attributes and common container settings. When unspecified, defaults to the default PodSecurityContext. | -|`containers`
_[[]Kubernetes core/v1.Container](https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.24/#container-v1-core)_| Containers lets you inject additional containers or modify operator-generated containers. This can be used to add an authentication proxy to a Grafana Agent pod or to change the behavior of an operator-generated container. Containers described here modify an operator-generated container if they share the same name and if modifications are done via a strategic merge patch. The current container names are: `grafana-agent` and `config-reloader`. Overriding containers is entirely outside the scope of what the Grafana Agent team supports and by doing so, you accept that this behavior may break at any time without notice. | -|`initContainers`
_[[]Kubernetes core/v1.Container](https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.24/#container-v1-core)_| InitContainers let you add initContainers to the pod definition. These can be used to, for example, fetch secrets for injection into the Grafana Agent configuration from external sources. Errors during the execution of an initContainer cause the pod to restart. More info: https://kubernetes.io/docs/concepts/workloads/pods/init-containers/ Using initContainers for any use case other than secret fetching is entirely outside the scope of what the Grafana Agent maintainers support and by doing so, you accept that this behavior may break at any time without notice. | -|`priorityClassName`
_string_| PriorityClassName is the priority class assigned to pods. | -|`runtimeClassName`
_string_| RuntimeClassName is the runtime class assigned to pods. | -|`portName`
_string_| Port name used for the pods and governing service. This defaults to agent-metrics. | -|`metrics`
_[MetricsSubsystemSpec](#monitoring.grafana.com/v1alpha1.MetricsSubsystemSpec)_| Metrics controls the metrics subsystem of the Agent and settings unique to metrics-specific pods that are deployed. | -|`logs`
_[LogsSubsystemSpec](#monitoring.grafana.com/v1alpha1.LogsSubsystemSpec)_| Logs controls the logging subsystem of the Agent and settings unique to logging-specific pods that are deployed. | -|`integrations`
_[IntegrationsSubsystemSpec](#monitoring.grafana.com/v1alpha1.IntegrationsSubsystemSpec)_| Integrations controls the integration subsystem of the Agent and settings unique to deployed integration-specific pods. | -|`enableConfigReadAPI`
_bool_| enableConfigReadAPI enables the read API for viewing the currently running config on port 8080 of the agent. +kubebuilder:default=false | -|`disableReporting`&#13;
_bool_| disableReporting disables reporting of enabled feature flags to Grafana. +kubebuilder:default=false | -|`disableSupportBundle`
_bool_| disableSupportBundle disables the generation of support bundles. +kubebuilder:default=false | -### IntegrationsDeployment -(Appears on:[Deployment](#monitoring.grafana.com/v1alpha1.Deployment)) -IntegrationsDeployment is a set of discovered resources relative to an IntegrationsDeployment. -#### Fields -|Field|Description| -|-|-| -|apiVersion|string
`monitoring.grafana.com/v1alpha1`| -|kind|string
`IntegrationsDeployment`| -|`Instance`
_[Integration](#monitoring.grafana.com/v1alpha1.Integration)_| | -### LogsDeployment -(Appears on:[Deployment](#monitoring.grafana.com/v1alpha1.Deployment)) -LogsDeployment is a set of discovered resources relative to a LogsInstance. -#### Fields -|Field|Description| -|-|-| -|apiVersion|string
`monitoring.grafana.com/v1alpha1`| -|kind|string
`LogsDeployment`| -|`Instance`
_[LogsInstance](#monitoring.grafana.com/v1alpha1.LogsInstance)_| | -|`PodLogs`
_[[]PodLogs](#monitoring.grafana.com/v1alpha1.PodLogs)_| | -### MetricsDeployment -(Appears on:[Deployment](#monitoring.grafana.com/v1alpha1.Deployment)) -MetricsDeployment is a set of discovered resources relative to a MetricsInstance. -#### Fields -|Field|Description| -|-|-| -|apiVersion|string
`monitoring.grafana.com/v1alpha1`| -|kind|string
`MetricsDeployment`| -|`Instance`
_[MetricsInstance](#monitoring.grafana.com/v1alpha1.MetricsInstance)_| | -|`ServiceMonitors`
_[[]github.com/prometheus-operator/prometheus-operator/pkg/apis/monitoring/v1.ServiceMonitor](https://prometheus-operator.dev/docs/operator/api/#monitoring.coreos.com/v1.ServiceMonitor)_| | -|`PodMonitors`
_[[]github.com/prometheus-operator/prometheus-operator/pkg/apis/monitoring/v1.PodMonitor](https://prometheus-operator.dev/docs/operator/api/#monitoring.coreos.com/v1.PodMonitor)_| | -|`Probes`
_[[]github.com/prometheus-operator/prometheus-operator/pkg/apis/monitoring/v1.Probe](https://prometheus-operator.dev/docs/operator/api/#monitoring.coreos.com/v1.Probe)_| | -### CRIStageSpec -(Appears on:[PipelineStageSpec](#monitoring.grafana.com/v1alpha1.PipelineStageSpec)) -CRIStageSpec is a parsing stage that reads log lines using the standard CRI logging format. It needs no defined fields. -### DockerStageSpec -(Appears on:[PipelineStageSpec](#monitoring.grafana.com/v1alpha1.PipelineStageSpec)) -DockerStageSpec is a parsing stage that reads log lines using the standard Docker logging format. It needs no defined fields. -### DropStageSpec -(Appears on:[PipelineStageSpec](#monitoring.grafana.com/v1alpha1.PipelineStageSpec)) -DropStageSpec is a filtering stage that lets you drop certain logs. -#### Fields -|Field|Description| -|-|-| -|`source`
_string_| Name from the extract data to parse. If empty, uses the log message. | -|`expression`
_string_| RE2 regular expression. If source is provided, the regex attempts to match the source. If no source is provided, then the regex attempts to match the log line. If the provided regex matches the log line or a provided source, the line is dropped. | -|`value`&#13;
_string_| Value can only be specified when source is specified. If the value provided is an exact match for the given source then the line will be dropped. Mutually exclusive with expression. | -|`olderThan`
_string_| OlderThan will be parsed as a Go duration. If the log line's timestamp is older than the current time minus the provided duration, it will be dropped. | -|`longerThan`
_string_| LongerThan will drop a log line if its content is longer than this value (in bytes). Can be expressed as an integer (8192) or a number with a suffix (8kb). | -|`dropCounterReason`&#13;
_string_| Every time a log line is dropped, the metric logentry_dropped_lines_total is incremented. A "reason" label is added, and can be customized by providing a custom value here. Defaults to "drop_stage". | -### GrafanaAgentSpec -(Appears on:[GrafanaAgent](#monitoring.grafana.com/v1alpha1.GrafanaAgent)) -GrafanaAgentSpec is a specification of the desired behavior of the Grafana Agent cluster. -#### Fields -|Field|Description| -|-|-| -|`logLevel`
_string_| LogLevel controls the log level of the generated pods. Defaults to "info" if not set. | -|`logFormat`
_string_| LogFormat controls the logging format of the generated pods. Defaults to "logfmt" if not set. | -|`apiServer`
_[github.com/prometheus-operator/prometheus-operator/pkg/apis/monitoring/v1.APIServerConfig](https://prometheus-operator.dev/docs/operator/api/#monitoring.coreos.com/v1.APIServerConfig)_| APIServerConfig lets you specify a host and auth methods to access the Kubernetes API server. If left empty, the Agent assumes that it is running inside of the cluster and will discover API servers automatically and use the pod's CA certificate and bearer token file at /var/run/secrets/kubernetes.io/serviceaccount. | -|`podMetadata`
_[github.com/prometheus-operator/prometheus-operator/pkg/apis/monitoring/v1.EmbeddedObjectMetadata](https://prometheus-operator.dev/docs/operator/api/#monitoring.coreos.com/v1.EmbeddedObjectMetadata)_| PodMetadata configures Labels and Annotations which are propagated to created Grafana Agent pods. | -|`version`
_string_| Version of Grafana Agent to be deployed. | -|`paused`
_bool_| Paused prevents actions except for deletion to be performed on the underlying managed objects. | -|`image`
_string_| Image, when specified, overrides the image used to run Agent. Specify the image along with a tag. You still need to set the version to ensure Grafana Agent Operator knows which version of Grafana Agent is being configured. | -|`configReloaderVersion`
_string_| Version of Config Reloader to be deployed. | -|`configReloaderImage`
_string_| Image, when specified, overrides the image used to run Config Reloader. Specify the image along with a tag. You still need to set the version to ensure Grafana Agent Operator knows which version of Grafana Agent is being configured. | -|`imagePullSecrets`
_[[]Kubernetes core/v1.LocalObjectReference](https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.24/#localobjectreference-v1-core)_| ImagePullSecrets holds an optional list of references to Secrets within the same namespace used for pulling the Grafana Agent image from registries. More info: https://kubernetes.io/docs/concepts/containers/images/#specifying-imagepullsecrets-on-a-pod | -|`storage`
_[github.com/prometheus-operator/prometheus-operator/pkg/apis/monitoring/v1.StorageSpec](https://prometheus-operator.dev/docs/operator/api/#monitoring.coreos.com/v1.StorageSpec)_| Storage spec to specify how storage will be used. | -|`volumes`
_[[]Kubernetes core/v1.Volume](https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.24/#volume-v1-core)_| Volumes allows configuration of additional volumes on the output StatefulSet definition. The volumes specified are appended to other volumes that are generated as a result of StorageSpec objects. | -|`volumeMounts`
_[[]Kubernetes core/v1.VolumeMount](https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.24/#volumemount-v1-core)_| VolumeMounts lets you configure additional VolumeMounts on the output StatefulSet definition. Specified VolumeMounts are appended to other VolumeMounts generated as a result of StorageSpec objects in the Grafana Agent container. | -|`resources`
_[Kubernetes core/v1.ResourceRequirements](https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.24/#resourcerequirements-v1-core)_| Resources holds requests and limits for individual pods. | -|`nodeSelector`
_map[string]string_| NodeSelector defines which nodes pods should be scheduled on. | -|`serviceAccountName`&#13;
_string_| ServiceAccountName is the name of the ServiceAccount to use for running Grafana Agent pods. | -|`secrets`
_[]string_| Secrets is a list of secrets in the same namespace as the GrafanaAgent object which will be mounted into each running Grafana Agent pod. The secrets are mounted into /var/lib/grafana-agent/extra-secrets/<secret-name>. | -|`configMaps`
_[]string_| ConfigMaps is a list of config maps in the same namespace as the GrafanaAgent object which will be mounted into each running Grafana Agent pod. The ConfigMaps are mounted into /var/lib/grafana-agent/extra-configmaps/<configmap-name>. | -|`affinity`
_[Kubernetes core/v1.Affinity](https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.24/#affinity-v1-core)_| Affinity, if specified, controls pod scheduling constraints. | -|`tolerations`
_[[]Kubernetes core/v1.Toleration](https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.24/#toleration-v1-core)_| Tolerations, if specified, controls the pod's tolerations. | -|`topologySpreadConstraints`
_[[]Kubernetes core/v1.TopologySpreadConstraint](https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.24/#topologyspreadconstraint-v1-core)_| TopologySpreadConstraints, if specified, controls the pod's topology spread constraints. | -|`securityContext`
_[Kubernetes core/v1.PodSecurityContext](https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.24/#podsecuritycontext-v1-core)_| SecurityContext holds pod-level security attributes and common container settings. When unspecified, defaults to the default PodSecurityContext. | -|`containers`
_[[]Kubernetes core/v1.Container](https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.24/#container-v1-core)_| Containers lets you inject additional containers or modify operator-generated containers. This can be used to add an authentication proxy to a Grafana Agent pod or to change the behavior of an operator-generated container. Containers described here modify an operator-generated container if they share the same name and if modifications are done via a strategic merge patch. The current container names are: `grafana-agent` and `config-reloader`. Overriding containers is entirely outside the scope of what the Grafana Agent team supports and by doing so, you accept that this behavior may break at any time without notice. | -|`initContainers`
_[[]Kubernetes core/v1.Container](https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.24/#container-v1-core)_| InitContainers let you add initContainers to the pod definition. These can be used to, for example, fetch secrets for injection into the Grafana Agent configuration from external sources. Errors during the execution of an initContainer cause the pod to restart. More info: https://kubernetes.io/docs/concepts/workloads/pods/init-containers/ Using initContainers for any use case other than secret fetching is entirely outside the scope of what the Grafana Agent maintainers support and by doing so, you accept that this behavior may break at any time without notice. | -|`priorityClassName`
_string_| PriorityClassName is the priority class assigned to pods. | -|`runtimeClassName`
_string_| RuntimeClassName is the runtime class assigned to pods. | -|`portName`
_string_| Port name used for the pods and governing service. This defaults to agent-metrics. | -|`metrics`
_[MetricsSubsystemSpec](#monitoring.grafana.com/v1alpha1.MetricsSubsystemSpec)_| Metrics controls the metrics subsystem of the Agent and settings unique to metrics-specific pods that are deployed. | -|`logs`
_[LogsSubsystemSpec](#monitoring.grafana.com/v1alpha1.LogsSubsystemSpec)_| Logs controls the logging subsystem of the Agent and settings unique to logging-specific pods that are deployed. | -|`integrations`
_[IntegrationsSubsystemSpec](#monitoring.grafana.com/v1alpha1.IntegrationsSubsystemSpec)_| Integrations controls the integration subsystem of the Agent and settings unique to deployed integration-specific pods. | -|`enableConfigReadAPI`
_bool_| enableConfigReadAPI enables the read API for viewing the currently running config on port 8080 of the agent. +kubebuilder:default=false | -|`disableReporting`&#13;
_bool_| disableReporting disables reporting of enabled feature flags to Grafana. +kubebuilder:default=false | -|`disableSupportBundle`
_bool_| disableSupportBundle disables the generation of support bundles. +kubebuilder:default=false | -### Integration -(Appears on:[IntegrationsDeployment](#monitoring.grafana.com/v1alpha1.IntegrationsDeployment)) -Integration runs a single Grafana Agent integration. Integrations that generate telemetry must be configured to send that telemetry somewhere, such as autoscrape for exporter-based integrations. Integrations have access to the LogsInstances and MetricsInstances in the same GrafanaAgent resource set, referenced by the <namespace>/<name> of the Instance resource. For example, if there is a default/production MetricsInstance, you can configure a supported integration's autoscrape block with: autoscrape: enable: true metrics_instance: default/production There is currently no way for telemetry created by an Operator-managed integration to be collected from outside of the integration itself. -#### Fields -|Field|Description| -|-|-| -|`metadata`
_[Kubernetes meta/v1.ObjectMeta](https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.24/#objectmeta-v1-meta)_| Refer to the Kubernetes API documentation for the fields of the `metadata` field. | -|`spec`
_[IntegrationSpec](#monitoring.grafana.com/v1alpha1.IntegrationSpec)_| Specifies the desired behavior of the Integration. | -|`name`
_string_| Name of the integration to run (e.g., "node_exporter", "mysqld_exporter"). | -|`type`
_[IntegrationType](#monitoring.grafana.com/v1alpha1.IntegrationType)_| Type informs Grafana Agent Operator about how to manage the integration being configured. | -|`config`
_[k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1.JSON](https://pkg.go.dev/k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1#JSON)_| The configuration for the named integration. Note that Integrations are deployed with the integrations-next feature flag, which has different common settings: https://grafana.com/docs/agent/latest/configuration/integrations/integrations-next/ | -|`volumes`
_[[]Kubernetes core/v1.Volume](https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.24/#volume-v1-core)_| An extra list of Volumes to be associated with the Grafana Agent pods running this integration. Volume names are mutated to be unique across all Integrations. Note that the specified volumes should be able to tolerate existing on multiple pods at once when type is daemonset. Don't use volumes for loading Secrets or ConfigMaps from the same namespace as the Integration; use the Secrets and ConfigMaps fields instead. | -|`volumeMounts`
_[[]Kubernetes core/v1.VolumeMount](https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.24/#volumemount-v1-core)_| An extra list of VolumeMounts to be associated with the Grafana Agent pods running this integration. VolumeMount names are mutated to be unique across all used IntegrationSpecs. Mount paths should include the namespace/name of the Integration CR to avoid potentially colliding with other resources. | -|`secrets`
_[[]Kubernetes core/v1.SecretKeySelector](https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.24/#secretkeyselector-v1-core)_| An extra list of keys from Secrets in the same namespace as the Integration which will be mounted into the Grafana Agent pod running this Integration. Secrets will be mounted at /etc/grafana-agent/integrations/secrets/<secret_namespace>/<secret_name>/<key>. | -|`configMaps`
_[[]Kubernetes core/v1.ConfigMapKeySelector](https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.24/#configmapkeyselector-v1-core)_| An extra list of keys from ConfigMaps in the same namespace as the Integration which will be mounted into the Grafana Agent pod running this Integration. ConfigMaps are mounted at /etc/grafana-agent/integrations/configMaps/<configmap_namespace>/<configmap_name>/<key>. | -### IntegrationSpec -(Appears on:[Integration](#monitoring.grafana.com/v1alpha1.Integration)) -IntegrationSpec specifies the desired behavior of a metrics integration. -#### Fields -|Field|Description| -|-|-| -|`name`
_string_| Name of the integration to run (e.g., "node_exporter", "mysqld_exporter"). | -|`type`
_[IntegrationType](#monitoring.grafana.com/v1alpha1.IntegrationType)_| Type informs Grafana Agent Operator about how to manage the integration being configured. | -|`config`
_[k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1.JSON](https://pkg.go.dev/k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1#JSON)_| The configuration for the named integration. Note that Integrations are deployed with the integrations-next feature flag, which has different common settings: https://grafana.com/docs/agent/latest/configuration/integrations/integrations-next/ | -|`volumes`
_[[]Kubernetes core/v1.Volume](https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.24/#volume-v1-core)_| An extra list of Volumes to be associated with the Grafana Agent pods running this integration. Volume names are mutated to be unique across all Integrations. Note that the specified volumes should be able to tolerate existing on multiple pods at once when type is daemonset. Don't use volumes for loading Secrets or ConfigMaps from the same namespace as the Integration; use the Secrets and ConfigMaps fields instead. | -|`volumeMounts`
_[[]Kubernetes core/v1.VolumeMount](https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.24/#volumemount-v1-core)_| An extra list of VolumeMounts to be associated with the Grafana Agent pods running this integration. VolumeMount names are mutated to be unique across all used IntegrationSpecs. Mount paths should include the namespace/name of the Integration CR to avoid potentially colliding with other resources. | -|`secrets`
_[[]Kubernetes core/v1.SecretKeySelector](https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.24/#secretkeyselector-v1-core)_| An extra list of keys from Secrets in the same namespace as the Integration which will be mounted into the Grafana Agent pod running this Integration. Secrets will be mounted at /etc/grafana-agent/integrations/secrets/<secret_namespace>/<secret_name>/<key>. | -|`configMaps`
_[[]Kubernetes core/v1.ConfigMapKeySelector](https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.24/#configmapkeyselector-v1-core)_| An extra list of keys from ConfigMaps in the same namespace as the Integration which will be mounted into the Grafana Agent pod running this Integration. ConfigMaps are mounted at /etc/grafana-agent/integrations/configMaps/<configmap_namespace>/<configmap_name>/<key>. | -### IntegrationType -(Appears on:[IntegrationSpec](#monitoring.grafana.com/v1alpha1.IntegrationSpec)) -IntegrationType determines specific behaviors of a configured integration. -#### Fields -|Field|Description| -|-|-| -|`allNodes`
_bool_| When true, the configured integration should be run on every Node in the cluster. This is required for Integrations that generate Node-specific metrics like node_exporter; otherwise, it must be false to avoid generating duplicate metrics. | -|`unique`&#13;
_bool_| Whether this integration can only be defined once for a Grafana Agent process, such as statsd_exporter. It is invalid for a GrafanaAgent to discover multiple unique Integrations with the same Integration name (i.e., a single GrafanaAgent cannot deploy two statsd_exporters). | -### IntegrationsSubsystemSpec -(Appears on:[GrafanaAgentSpec](#monitoring.grafana.com/v1alpha1.GrafanaAgentSpec)) -IntegrationsSubsystemSpec defines global settings to apply across the integrations subsystem. -#### Fields -|Field|Description| -|-|-| -|`selector`
_[Kubernetes meta/v1.LabelSelector](https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.24/#labelselector-v1-meta)_| Label selector to find Integration resources to run. When nil, no integration resources will be defined. | -|`namespaceSelector`
_[Kubernetes meta/v1.LabelSelector](https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.24/#labelselector-v1-meta)_| Label selector for namespaces to search when discovering integration resources. If nil, integration resources are only discovered in the namespace of the GrafanaAgent resource. Set to `{}` to search all namespaces. | -### JSONStageSpec -(Appears on:[PipelineStageSpec](#monitoring.grafana.com/v1alpha1.PipelineStageSpec)) -JSONStageSpec is a parsing stage that reads the log line as JSON and accepts JMESPath expressions to extract data. -#### Fields -|Field|Description| -|-|-| -|`source`
_string_| Name from the extracted data to parse as JSON. If empty, uses entire log message. | -|`expressions`
_map[string]string_| Set of the key/value pairs of JMESPath expressions. The key will be the key in the extracted data while the expression will be the value, evaluated as a JMESPath from the source data. Literal JMESPath expressions can be used by wrapping a key in double quotes, which then must be wrapped again in single quotes in YAML so they get passed to the JMESPath parser. | -### LimitStageSpec -(Appears on:[PipelineStageSpec](#monitoring.grafana.com/v1alpha1.PipelineStageSpec)) -The limit stage is a rate-limiting stage that throttles logs based on several options. -#### Fields -|Field|Description| -|-|-| -|`rate`
_int_| The rate limit in lines per second that Promtail will push to Loki. | -|`burst`
_int_| The cap in the quantity of burst lines that Promtail will push to Loki. | -|`drop`
_bool_| When drop is true, log lines that exceed the current rate limit are discarded. When drop is false, log lines that exceed the current rate limit wait to enter the back pressure mode. Defaults to false. | -### LogsBackoffConfigSpec -(Appears on:[LogsClientSpec](#monitoring.grafana.com/v1alpha1.LogsClientSpec)) -LogsBackoffConfigSpec configures timing for retrying failed requests. -#### Fields -|Field|Description| -|-|-| -|`minPeriod`
_string_| Initial backoff time between retries. Time between retries is increased exponentially. | -|`maxPeriod`
_string_| Maximum backoff time between retries. | -|`maxRetries`
_int_| Maximum number of retries to perform before giving up a request. | -### LogsClientSpec -(Appears on:[LogsInstanceSpec](#monitoring.grafana.com/v1alpha1.LogsInstanceSpec), [LogsSubsystemSpec](#monitoring.grafana.com/v1alpha1.LogsSubsystemSpec)) -LogsClientSpec defines the client integration for logs, indicating which Loki server to send logs to. -#### Fields -|Field|Description| -|-|-| -|`url`
_string_| URL is the URL where Loki is listening. Must be a full HTTP URL, including protocol. Required. Example: https://logs-prod-us-central1.grafana.net/loki/api/v1/push. | -|`tenantId`
_string_| Tenant ID used by default to push logs to Loki. If omitted assumes remote Loki is running in single-tenant mode or an authentication layer is used to inject an X-Scope-OrgID header. | -|`batchWait`
_string_| Maximum amount of time to wait before sending a batch, even if that batch isn't full. | -|`batchSize`
_int_| Maximum batch size (in bytes) of logs to accumulate before sending the batch to Loki. | -|`basicAuth`
_[github.com/prometheus-operator/prometheus-operator/pkg/apis/monitoring/v1.BasicAuth](https://prometheus-operator.dev/docs/operator/api/#monitoring.coreos.com/v1.BasicAuth)_| BasicAuth for the Loki server. | -|`oauth2`
_[github.com/prometheus-operator/prometheus-operator/pkg/apis/monitoring/v1.OAuth2](https://prometheus-operator.dev/docs/operator/api/#monitoring.coreos.com/v1.OAuth2)_| OAuth2 configuration for the URL. | -|`bearerToken`&#13;
_string_| BearerToken used for remote_write. | -|`bearerTokenFile`
_string_| BearerTokenFile used to read bearer token. | -|`proxyUrl`
_string_| ProxyURL to proxy requests through. Optional. | -|`tlsConfig`
_[github.com/prometheus-operator/prometheus-operator/pkg/apis/monitoring/v1.TLSConfig](https://prometheus-operator.dev/docs/operator/api/#monitoring.coreos.com/v1.TLSConfig)_| TLSConfig to use for the client. Only used when the protocol of the URL is https. | -|`backoffConfig`
_[LogsBackoffConfigSpec](#monitoring.grafana.com/v1alpha1.LogsBackoffConfigSpec)_| Configures how to retry requests to Loki when a request fails. Defaults to a minPeriod of 500ms, maxPeriod of 5m, and maxRetries of 10. | -|`externalLabels`
_map[string]string_| ExternalLabels are labels to add to any time series when sending data to Loki. | -|`timeout`
_string_| Maximum time to wait for a server to respond to a request. | -### LogsInstance -(Appears on:[LogsDeployment](#monitoring.grafana.com/v1alpha1.LogsDeployment)) -LogsInstance controls an individual logs instance within a Grafana Agent deployment. -#### Fields -|Field|Description| -|-|-| -|`metadata`
_[Kubernetes meta/v1.ObjectMeta](https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.24/#objectmeta-v1-meta)_| Refer to the Kubernetes API documentation for the fields of the `metadata` field. | -|`spec`
_[LogsInstanceSpec](#monitoring.grafana.com/v1alpha1.LogsInstanceSpec)_| Spec holds the specification of the desired behavior for the logs instance. | -|`clients`
_[[]LogsClientSpec](#monitoring.grafana.com/v1alpha1.LogsClientSpec)_| Clients controls where logs are written to for this instance. | -|`podLogsSelector`
_[Kubernetes meta/v1.LabelSelector](https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.24/#labelselector-v1-meta)_| Determines which PodLogs should be selected for including in this instance. | -|`podLogsNamespaceSelector`
_[Kubernetes meta/v1.LabelSelector](https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.24/#labelselector-v1-meta)_| Set of labels to determine which namespaces should be watched for PodLogs. If not provided, checks only namespace of the instance. | -|`additionalScrapeConfigs`
_[Kubernetes core/v1.SecretKeySelector](https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.24/#secretkeyselector-v1-core)_| AdditionalScrapeConfigs allows specifying a key of a Secret containing additional Grafana Agent logging scrape configurations. Scrape configurations specified are appended to the configurations generated by the Grafana Agent Operator. Job configurations specified must have the form specified in the official Promtail documentation: https://grafana.com/docs/loki/latest/clients/promtail/configuration/#scrape_configs As scrape configs are appended, you are responsible for making sure they are valid. Note that using this feature may break upgrades of Grafana Agent. Review both Grafana Agent and Promtail release notes to ensure that no incompatible scrape configs will break Grafana Agent after the upgrade. | -|`targetConfig`&#13;
_[LogsTargetConfigSpec](#monitoring.grafana.com/v1alpha1.LogsTargetConfigSpec)_| Configures how tailed targets are watched. | -### LogsInstanceSpec -(Appears on:[LogsInstance](#monitoring.grafana.com/v1alpha1.LogsInstance)) -LogsInstanceSpec controls how an individual instance will be used to discover LogMonitors. -#### Fields -|Field|Description| -|-|-| -|`clients`
_[[]LogsClientSpec](#monitoring.grafana.com/v1alpha1.LogsClientSpec)_| Clients controls where logs are written to for this instance. | -|`podLogsSelector`
_[Kubernetes meta/v1.LabelSelector](https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.24/#labelselector-v1-meta)_| Determines which PodLogs should be selected for including in this instance. | -|`podLogsNamespaceSelector`
_[Kubernetes meta/v1.LabelSelector](https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.24/#labelselector-v1-meta)_| Set of labels to determine which namespaces should be watched for PodLogs. If not provided, checks only namespace of the instance. | -|`additionalScrapeConfigs`
_[Kubernetes core/v1.SecretKeySelector](https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.24/#secretkeyselector-v1-core)_| AdditionalScrapeConfigs allows specifying a key of a Secret containing additional Grafana Agent logging scrape configurations. Scrape configurations specified are appended to the configurations generated by the Grafana Agent Operator. Job configurations specified must have the form specified in the official Promtail documentation: https://grafana.com/docs/loki/latest/clients/promtail/configuration/#scrape_configs As scrape configs are appended, you are responsible for making sure they are valid. Note that using this feature may break upgrades of Grafana Agent. Review both Grafana Agent and Promtail release notes to ensure that no incompatible scrape configs will break Grafana Agent after the upgrade. | -|`targetConfig`&#13;
_[LogsTargetConfigSpec](#monitoring.grafana.com/v1alpha1.LogsTargetConfigSpec)_| Configures how tailed targets are watched. | -### LogsSubsystemSpec -(Appears on:[GrafanaAgentSpec](#monitoring.grafana.com/v1alpha1.GrafanaAgentSpec)) -LogsSubsystemSpec defines global settings to apply across the logging subsystem. -#### Fields -|Field|Description| -|-|-| -|`clients`
_[[]LogsClientSpec](#monitoring.grafana.com/v1alpha1.LogsClientSpec)_| A global set of clients to use when a discovered LogsInstance does not have any clients defined. | -|`logsExternalLabelName`
_string_| LogsExternalLabelName is the name of the external label used to denote the Grafana Agent cluster. Defaults to "cluster". The external label is _not_ added when the value is set to the empty string. | -|`instanceSelector`&#13;
_[Kubernetes meta/v1.LabelSelector](https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.24/#labelselector-v1-meta)_| InstanceSelector determines which LogInstances should be selected for running. Each instance runs its own set of Prometheus components, including service discovery, scraping, and remote_write. | -|`instanceNamespaceSelector`
_[Kubernetes meta/v1.LabelSelector](https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.24/#labelselector-v1-meta)_| InstanceNamespaceSelector are the set of labels to determine which namespaces to watch for LogInstances. If not provided, only checks own namespace. | -|`ignoreNamespaceSelectors`
_bool_| IgnoreNamespaceSelectors, if true, will ignore NamespaceSelector settings from the PodLogs configs, and they will only discover endpoints within their current namespace. | -|`enforcedNamespaceLabel`
_string_| EnforcedNamespaceLabel enforces adding a namespace label of origin for each metric that is user-created. The label value will always be the namespace of the object that is being created. | -### LogsTargetConfigSpec -(Appears on:[LogsInstanceSpec](#monitoring.grafana.com/v1alpha1.LogsInstanceSpec)) -LogsTargetConfigSpec configures how tailed targets are watched. -#### Fields -|Field|Description| -|-|-| -|`syncPeriod`
_string_| Period to resync directories being watched and files being tailed to discover new ones or stop watching removed ones. | -### MatchStageSpec -(Appears on:[PipelineStageSpec](#monitoring.grafana.com/v1alpha1.PipelineStageSpec)) -MatchStageSpec is a filtering stage that conditionally applies a set of stages or drop entries when a log entry matches a configurable LogQL stream selector and filter expressions. -#### Fields -|Field|Description| -|-|-| -|`selector`
_string_| LogQL stream selector and filter expressions. Required. | -|`pipelineName`
_string_| Names the pipeline. When defined, creates an additional label in the pipeline_duration_seconds histogram, where the value is concatenated with job_name using an underscore. | -|`action`
_string_| Determines what action is taken when the selector matches the log line. Can be keep or drop. Defaults to keep. When set to drop, entries are dropped and no later metrics are recorded. Stages must be empty when dropping metrics. | -|`dropCounterReason`
_string_| Every time a log line is dropped, the metric logentry_dropped_lines_total is incremented. A "reason" label is added, and can be customized by providing a custom value here. Defaults to "match_stage". | -|`stages`&#13;
_string_| Nested set of pipeline stages to execute when action is keep and the log line matches selector. An example value for stages may be: stages: | - json: {} - labelAllow: [foo, bar] Note that stages is a string because SIG API Machinery does not support recursive types, and so it cannot be validated for correctness. Be careful not to mistype anything. | -### MetadataConfig -(Appears on:[RemoteWriteSpec](#monitoring.grafana.com/v1alpha1.RemoteWriteSpec)) -MetadataConfig configures the sending of series metadata to remote storage. -#### Fields -|Field|Description| -|-|-| -|`send`
_bool_| Send enables metric metadata to be sent to remote storage. | -|`sendInterval`
_string_| SendInterval controls how frequently metric metadata is sent to remote storage. | -### MetricsInstance -(Appears on:[MetricsDeployment](#monitoring.grafana.com/v1alpha1.MetricsDeployment)) -MetricsInstance controls an individual Metrics instance within a Grafana Agent deployment. -#### Fields -|Field|Description| -|-|-| -|`metadata`
_[Kubernetes meta/v1.ObjectMeta](https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.24/#objectmeta-v1-meta)_| Refer to the Kubernetes API documentation for the fields of the `metadata` field. | -|`spec`
_[MetricsInstanceSpec](#monitoring.grafana.com/v1alpha1.MetricsInstanceSpec)_| Spec holds the specification of the desired behavior for the Metrics instance. | -|`walTruncateFrequency`
_string_| WALTruncateFrequency specifies how frequently to run the WAL truncation process. Higher values cause the WAL to grow and old series to stay in the WAL longer, but reduce the chance of data loss when remote_write fails for longer than the given frequency. | -|`minWALTime`&#13;
_string_| MinWALTime is the minimum amount of time that series and samples can exist in the WAL before being considered for deletion. | -|`maxWALTime`
_string_| MaxWALTime is the maximum amount of time that series and samples can exist in the WAL before being forcibly deleted. | -|`remoteFlushDeadline`
_string_| RemoteFlushDeadline is the deadline for flushing data when an instance shuts down. | -|`writeStaleOnShutdown`
_bool_| WriteStaleOnShutdown writes staleness markers on shutdown for all series. | -|`serviceMonitorSelector`
_[Kubernetes meta/v1.LabelSelector](https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.24/#labelselector-v1-meta)_| ServiceMonitorSelector determines which ServiceMonitors to select for target discovery. | -|`serviceMonitorNamespaceSelector`
_[Kubernetes meta/v1.LabelSelector](https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.24/#labelselector-v1-meta)_| ServiceMonitorNamespaceSelector is the set of labels that determine which namespaces to watch for ServiceMonitor discovery. If nil, it only checks its own namespace. | -|`podMonitorSelector`
_[Kubernetes meta/v1.LabelSelector](https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.24/#labelselector-v1-meta)_| PodMonitorSelector determines which PodMonitors to select for target discovery. Experimental. | -|`podMonitorNamespaceSelector`&#13;
_[Kubernetes meta/v1.LabelSelector](https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.24/#labelselector-v1-meta)_| PodMonitorNamespaceSelector are the set of labels to determine which namespaces to watch for PodMonitor discovery. If nil, it only checks its own namespace. | -|`probeSelector`
_[Kubernetes meta/v1.LabelSelector](https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.24/#labelselector-v1-meta)_| ProbeSelector determines which Probes to select for target discovery. | -|`probeNamespaceSelector`
_[Kubernetes meta/v1.LabelSelector](https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.24/#labelselector-v1-meta)_| ProbeNamespaceSelector is the set of labels that determines which namespaces to watch for Probe discovery. If nil, it only checks its own namespace. | -|`remoteWrite`&#13;
_[[]RemoteWriteSpec](#monitoring.grafana.com/v1alpha1.RemoteWriteSpec)_| RemoteWrite controls remote_write settings for this instance. | -|`additionalScrapeConfigs`
_[Kubernetes core/v1.SecretKeySelector](https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.24/#secretkeyselector-v1-core)_| AdditionalScrapeConfigs lets you specify a key of a Secret containing additional Grafana Agent Prometheus scrape configurations. The specified scrape configurations are appended to the configurations generated by Grafana Agent Operator. Specified job configurations must have the form specified in the official Prometheus documentation: https://prometheus.io/docs/prometheus/latest/configuration/configuration/#scrape_config. As scrape configs are appended, you must make sure the configuration is still valid. Note that it's possible that this feature will break future upgrades of Grafana Agent. Review both Grafana Agent and Prometheus release notes to ensure that no incompatible scrape configs will break Grafana Agent after the upgrade. | -### MetricsInstanceSpec -(Appears on:[MetricsInstance](#monitoring.grafana.com/v1alpha1.MetricsInstance)) -MetricsInstanceSpec controls how an individual instance is used to discover PodMonitors. -#### Fields -|Field|Description| -|-|-| -|`walTruncateFrequency`
_string_| WALTruncateFrequency specifies how frequently to run the WAL truncation process. Higher values cause the WAL to grow and old series to stay in the WAL longer, but reduce the chance of data loss when remote_write fails for longer than the given frequency. | -|`minWALTime`&#13;
_string_| MinWALTime is the minimum amount of time that series and samples can exist in the WAL before being considered for deletion. | -|`maxWALTime`
_string_| MaxWALTime is the maximum amount of time that series and samples can exist in the WAL before being forcibly deleted. | -|`remoteFlushDeadline`
_string_| RemoteFlushDeadline is the deadline for flushing data when an instance shuts down. | -|`writeStaleOnShutdown`
_bool_| WriteStaleOnShutdown writes staleness markers on shutdown for all series. | -|`serviceMonitorSelector`
_[Kubernetes meta/v1.LabelSelector](https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.24/#labelselector-v1-meta)_| ServiceMonitorSelector determines which ServiceMonitors to select for target discovery. | -|`serviceMonitorNamespaceSelector`
_[Kubernetes meta/v1.LabelSelector](https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.24/#labelselector-v1-meta)_| ServiceMonitorNamespaceSelector is the set of labels that determine which namespaces to watch for ServiceMonitor discovery. If nil, it only checks its own namespace. | -|`podMonitorSelector`
_[Kubernetes meta/v1.LabelSelector](https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.24/#labelselector-v1-meta)_| PodMonitorSelector determines which PodMonitors to select for target discovery. Experimental. | -|`podMonitorNamespaceSelector`&#13;
_[Kubernetes meta/v1.LabelSelector](https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.24/#labelselector-v1-meta)_| PodMonitorNamespaceSelector are the set of labels to determine which namespaces to watch for PodMonitor discovery. If nil, it only checks its own namespace. | -|`probeSelector`
_[Kubernetes meta/v1.LabelSelector](https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.24/#labelselector-v1-meta)_| ProbeSelector determines which Probes to select for target discovery. | -|`probeNamespaceSelector`
_[Kubernetes meta/v1.LabelSelector](https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.24/#labelselector-v1-meta)_| ProbeNamespaceSelector is the set of labels that determines which namespaces to watch for Probe discovery. If nil, it only checks its own namespace. | -|`remoteWrite`&#13;
_[[]RemoteWriteSpec](#monitoring.grafana.com/v1alpha1.RemoteWriteSpec)_| RemoteWrite controls remote_write settings for this instance. | -|`additionalScrapeConfigs`
_[Kubernetes core/v1.SecretKeySelector](https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.24/#secretkeyselector-v1-core)_| AdditionalScrapeConfigs lets you specify a key of a Secret containing additional Grafana Agent Prometheus scrape configurations. The specified scrape configurations are appended to the configurations generated by Grafana Agent Operator. Specified job configurations must have the form specified in the official Prometheus documentation: https://prometheus.io/docs/prometheus/latest/configuration/configuration/#scrape_config. As scrape configs are appended, you must make sure the configuration is still valid. Note that it's possible that this feature will break future upgrades of Grafana Agent. Review both Grafana Agent and Prometheus release notes to ensure that no incompatible scrape configs will break Grafana Agent after the upgrade. | -### MetricsStageSpec -(Appears on:[PipelineStageSpec](#monitoring.grafana.com/v1alpha1.PipelineStageSpec)) -MetricsStageSpec is an action stage that allows for defining and updating metrics based on data from the extracted map. Created metrics are not pushed to Loki or Prometheus and are instead exposed via the /metrics endpoint of the Grafana Agent pod. The Grafana Agent Operator should be configured with a MetricsInstance that discovers the logging DaemonSet to collect metrics created by this stage. -#### Fields -|Field|Description| -|-|-| -|`type`
_string_| The metric type to create. Must be one of counter, gauge, histogram. Required. | -|`description`
_string_| Sets the description for the created metric. | -|`prefix`
_string_| Sets the custom prefix name for the metric. Defaults to "promtail_custom_". | -|`source`
_string_| Key from the extracted data map to use for the metric. Defaults to the metric name if not present. | -|`maxIdleDuration`&#13;
_string_| Label values on metrics are dynamic, which can cause exported metrics to go stale. To prevent unbounded cardinality, any metrics not updated within MaxIdleDuration are removed. Must be greater than or equal to 1s. Defaults to 5m. | -|`matchAll`&#13;
_bool_| If true, all log lines are counted without attempting to match the source to the extracted map. Mutually exclusive with value. Only valid for type: counter. | -|`countEntryBytes`
_bool_| If true, all log line bytes are counted. Can only be set with matchAll: true and action: add. Only valid for type: counter. | -|`value`&#13;
_string_| Filters down source data and only changes the metric if the targeted value matches the provided string exactly. If not present, all data matches. | -|`action`
_string_| The action to take against the metric. Required. Must be either "inc" or "add" for type: counter or type: histogram. When type: gauge, must be one of "set", "inc", "dec", "add", or "sub". "add", "set", or "sub" requires the extracted value to be convertible to a positive float. | -|`buckets`
_[]string_| Buckets to create. Bucket values must be convertible to float64s. Extremely large or small numbers are subject to some loss of precision. Only valid for type: histogram. | -### MetricsSubsystemSpec -(Appears on:[GrafanaAgentSpec](#monitoring.grafana.com/v1alpha1.GrafanaAgentSpec)) -MetricsSubsystemSpec defines global settings to apply across the Metrics subsystem. -#### Fields -|Field|Description| -|-|-| -|`remoteWrite`
_[[]RemoteWriteSpec](#monitoring.grafana.com/v1alpha1.RemoteWriteSpec)_| RemoteWrite controls default remote_write settings for all instances. If an instance does not provide its own RemoteWrite settings, these will be used instead. | -|`replicas`
_int32_| Replicas of each shard to deploy for metrics pods. Number of replicas multiplied by the number of shards is the total number of pods created. | -|`shards`
_int32_| Shards to distribute targets onto. Number of replicas multiplied by the number of shards is the total number of pods created. Note that scaling down shards does not reshard data onto remaining instances; it must be manually moved. Increasing shards does not reshard data either, but it will continue to be available from the same instances. Sharding is performed on the content of the __address__ target meta-label. | -|`replicaExternalLabelName`
_string_| ReplicaExternalLabelName is the name of the metrics external label used to denote the replica name. Defaults to __replica__. The external label is _not_ added when the value is set to the empty string. | -|`metricsExternalLabelName`
_string_| MetricsExternalLabelName is the name of the external label used to denote Grafana Agent cluster. Defaults to "cluster." The external label is _not_ added when the value is set to the empty string. | -|`scrapeInterval`
_string_| ScrapeInterval is the time between consecutive scrapes. | -|`scrapeTimeout`
_string_| ScrapeTimeout is the time to wait for a target to respond before marking a scrape as failed. | -|`externalLabels`
_map[string]string_| ExternalLabels are labels to add to any time series when sending data over remote_write. | -|`arbitraryFSAccessThroughSMs`
_[github.com/prometheus-operator/prometheus-operator/pkg/apis/monitoring/v1.ArbitraryFSAccessThroughSMsConfig](https://prometheus-operator.dev/docs/operator/api/#monitoring.coreos.com/v1.ArbitraryFSAccessThroughSMsConfig)_| ArbitraryFSAccessThroughSMs configures whether configuration based on a ServiceMonitor can access arbitrary files on the file system of the Grafana Agent container, e.g., bearer token files. | -|`overrideHonorLabels`
_bool_| OverrideHonorLabels, if true, overrides all configured honor_labels read from ServiceMonitor or PodMonitor and sets them to false. | -|`overrideHonorTimestamps`
_bool_| OverrideHonorTimestamps allows global enforcement for honoring timestamps in all scrape configs. | -|`ignoreNamespaceSelectors`
_bool_| IgnoreNamespaceSelectors, if true, ignores NamespaceSelector settings from the PodMonitor and ServiceMonitor configs, so that they only discover endpoints within their current namespace. | -|`enforcedNamespaceLabel`
_string_| EnforcedNamespaceLabel enforces adding a namespace label of origin for each metric that is user-created. The label value is always the namespace of the object that is being created. | -|`enforcedSampleLimit`
_uint64_| EnforcedSampleLimit defines a global limit on the number of scraped samples that are accepted. This overrides any SampleLimit set per ServiceMonitor and/or PodMonitor. It is meant to be used by admins to enforce the SampleLimit to keep the overall number of samples and series under the desired limit. Note that if a SampleLimit from a ServiceMonitor or PodMonitor is lower, that value is used instead. | -|`enforcedTargetLimit`
_uint64_| EnforcedTargetLimit defines a global limit on the number of scraped targets. This overrides any TargetLimit set per ServiceMonitor and/or PodMonitor. It is meant to be used by admins to enforce the TargetLimit to keep the overall number of targets under the desired limit. Note that if a TargetLimit from a ServiceMonitor or PodMonitor is higher, that value is used instead. | -|`instanceSelector`
_[Kubernetes meta/v1.LabelSelector](https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.24/#labelselector-v1-meta)_| InstanceSelector determines which MetricsInstances should be selected for running. Each instance runs its own set of Metrics components, including service discovery, scraping, and remote_write. | -|`instanceNamespaceSelector`
_[Kubernetes meta/v1.LabelSelector](https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.24/#labelselector-v1-meta)_| InstanceNamespaceSelector is the set of labels that determines which namespaces to watch for MetricsInstances. If not provided, it only checks its own namespace. | -### MultilineStageSpec -(Appears on:[PipelineStageSpec](#monitoring.grafana.com/v1alpha1.PipelineStageSpec)) -MultilineStageSpec merges multiple lines into a multiline block before passing it on to the next stage in the pipeline. -#### Fields -|Field|Description| -|-|-| -|`firstLine`
_string_| RE2 regular expression. Creates a new multiline block when matched. Required. | -|`maxWaitTime`
_string_| Maximum time to wait before passing on the multiline block to the next stage if no new lines are received. Defaults to 3s. | -|`maxLines`
_int_| Maximum number of lines a block can have. A new block is started if the number of lines surpasses this value. Defaults to 128. | -### ObjectSelector -ObjectSelector is a set of selectors to use for finding an object in the resource hierarchy. When NamespaceSelector is nil, search for objects directly in the ParentNamespace. -#### Fields -|Field|Description| -|-|-| -|`ObjectType`
_[sigs.k8s.io/controller-runtime/pkg/client.Object](https://pkg.go.dev/sigs.k8s.io/controller-runtime/pkg/client#Object)_| | -|`ParentNamespace`
_string_| | -|`NamespaceSelector`
_[Kubernetes meta/v1.LabelSelector](https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.24/#labelselector-v1-meta)_| | -|`Labels`
_[Kubernetes meta/v1.LabelSelector](https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.24/#labelselector-v1-meta)_| | -### OutputStageSpec -(Appears on:[PipelineStageSpec](#monitoring.grafana.com/v1alpha1.PipelineStageSpec)) -OutputStageSpec is an action stage that takes data from the extracted map and changes the log line that will be sent to Loki. -#### Fields -|Field|Description| -|-|-| -|`source`
_string_| Name from extract data to use for the log entry. Required. | -### PackStageSpec -(Appears on:[PipelineStageSpec](#monitoring.grafana.com/v1alpha1.PipelineStageSpec)) -PackStageSpec is a transform stage that lets you embed extracted values and labels into the log line by packing the log line and labels inside of a JSON object. -#### Fields -|Field|Description| -|-|-| -|`labels`
_[]string_| Name from extracted data or line labels. Required. Labels provided here are automatically removed from output labels. | -|`ingestTimestamp`
_bool_| Whether the resulting log line should use any existing timestamp or use time.Now() for when the line was created. Set to true when combining several log streams from different containers to avoid out-of-order errors. | -### PipelineStageSpec -(Appears on:[PodLogsSpec](#monitoring.grafana.com/v1alpha1.PodLogsSpec)) -PipelineStageSpec defines an individual pipeline stage. Each stage type is mutually exclusive and no more than one may be set per stage. More information on pipelines can be found in the Promtail documentation: https://grafana.com/docs/loki/latest/clients/promtail/pipelines/ -#### Fields -|Field|Description| -|-|-| -|`cri`&#13;
_[CRIStageSpec](#monitoring.grafana.com/v1alpha1.CRIStageSpec)_| CRI is a parsing stage that reads log lines using the standard CRI logging format. Supply cri: {} to enable. | -|`docker`
_[DockerStageSpec](#monitoring.grafana.com/v1alpha1.DockerStageSpec)_| Docker is a parsing stage that reads log lines using the standard Docker logging format. Supply docker: {} to enable. | -|`drop`
_[DropStageSpec](#monitoring.grafana.com/v1alpha1.DropStageSpec)_| Drop is a filtering stage that lets you drop certain logs. | -|`json`
_[JSONStageSpec](#monitoring.grafana.com/v1alpha1.JSONStageSpec)_| JSON is a parsing stage that reads the log line as JSON and accepts JMESPath expressions to extract data. Information on JMESPath: http://jmespath.org/ | -|`labelAllow`
_[]string_| LabelAllow is an action stage that only allows the provided labels to be included in the label set that is sent to Loki with the log entry. | -|`labelDrop`
_[]string_| LabelDrop is an action stage that drops labels from the label set that is sent to Loki with the log entry. | -|`labels`
_map[string]string_| Labels is an action stage that takes data from the extracted map and modifies the label set that is sent to Loki with the log entry. The key is REQUIRED and represents the name for the label that will be created. Value is optional and will be the name from extracted data to use for the value of the label. If the value is not provided, it defaults to match the key. | -|`limit`
_[LimitStageSpec](#monitoring.grafana.com/v1alpha1.LimitStageSpec)_| Limit is a rate-limiting stage that throttles logs based on several options. | -|`match`
_[MatchStageSpec](#monitoring.grafana.com/v1alpha1.MatchStageSpec)_| Match is a filtering stage that conditionally applies a set of stages or drop entries when a log entry matches a configurable LogQL stream selector and filter expressions. | -|`metrics`
_[map[string]github.com/grafana/agent/internal/static/operator/apis/monitoring/v1alpha1.MetricsStageSpec](#monitoring.grafana.com/v1alpha1.MetricsStageSpec)_| Metrics is an action stage that supports defining and updating metrics based on data from the extracted map. Created metrics are not pushed to Loki or Prometheus and are instead exposed via the /metrics endpoint of the Grafana Agent pod. The Grafana Agent Operator should be configured with a MetricsInstance that discovers the logging DaemonSet to collect metrics created by this stage. | -|`multiline`
_[MultilineStageSpec](#monitoring.grafana.com/v1alpha1.MultilineStageSpec)_| Multiline stage merges multiple lines into a multiline block before passing it on to the next stage in the pipeline. | -|`output`
_[OutputStageSpec](#monitoring.grafana.com/v1alpha1.OutputStageSpec)_| Output stage is an action stage that takes data from the extracted map and changes the log line that will be sent to Loki. | -|`pack`
_[PackStageSpec](#monitoring.grafana.com/v1alpha1.PackStageSpec)_| Pack is a transform stage that lets you embed extracted values and labels into the log line by packing the log line and labels inside of a JSON object. | -|`regex`
_[RegexStageSpec](#monitoring.grafana.com/v1alpha1.RegexStageSpec)_| Regex is a parsing stage that parses a log line using a regular expression. Named capture groups in the regex allow for adding data into the extracted map. | -|`replace`&#13;
_[ReplaceStageSpec](#monitoring.grafana.com/v1alpha1.ReplaceStageSpec)_| Replace is a parsing stage that parses a log line using a regular expression and replaces the log line. Named capture groups in the regex allow for adding data into the extracted map. | -|`template`&#13;
_[TemplateStageSpec](#monitoring.grafana.com/v1alpha1.TemplateStageSpec)_| Template is a transform stage that manipulates the values in the extracted map using Go's template syntax. | -|`tenant`
_[TenantStageSpec](#monitoring.grafana.com/v1alpha1.TenantStageSpec)_| Tenant is an action stage that sets the tenant ID for the log entry, picking it from a field in the extracted data map. If the field is missing, the default LogsClientSpec.tenantId will be used. | -|`timestamp`&#13;
_[TimestampStageSpec](#monitoring.grafana.com/v1alpha1.TimestampStageSpec)_| Timestamp is an action stage that can change the timestamp of a log line before it is sent to Loki. If not present, the timestamp of a log line defaults to the time when the log line was read. | -### PodLogs -(Appears on:[LogsDeployment](#monitoring.grafana.com/v1alpha1.LogsDeployment)) -PodLogs defines how to collect logs for a pod. -#### Fields -|Field|Description| -|-|-| -|`metadata`
_[Kubernetes meta/v1.ObjectMeta](https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.24/#objectmeta-v1-meta)_| Refer to the Kubernetes API documentation for the fields of the `metadata` field. | -|`spec`
_[PodLogsSpec](#monitoring.grafana.com/v1alpha1.PodLogsSpec)_| Spec holds the specification of the desired behavior for the PodLogs. | -|`jobLabel`
_string_| The label to use to retrieve the job name from. | -|`podTargetLabels`
_[]string_| PodTargetLabels transfers labels on the Kubernetes Pod onto the target. | -|`selector`
_[Kubernetes meta/v1.LabelSelector](https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.24/#labelselector-v1-meta)_| Selector to select Pod objects. Required. | -|`namespaceSelector`
_[github.com/prometheus-operator/prometheus-operator/pkg/apis/monitoring/v1.NamespaceSelector](https://prometheus-operator.dev/docs/operator/api/#monitoring.coreos.com/v1.NamespaceSelector)_| Selector to select which namespaces the Pod objects are discovered from. | -|`pipelineStages`
_[[]PipelineStageSpec](#monitoring.grafana.com/v1alpha1.PipelineStageSpec)_| Pipeline stages for this pod. Pipeline stages support transforming and filtering log lines. | -|`relabelings`
_[[]github.com/prometheus-operator/prometheus-operator/pkg/apis/monitoring/v1.RelabelConfig](https://prometheus-operator.dev/docs/operator/api/#monitoring.coreos.com/v1.RelabelConfig)_| RelabelConfigs to apply to logs before delivering. Grafana Agent Operator automatically adds relabelings for a few standard Kubernetes fields and replaces original scrape job name with __tmp_logs_job_name. More info: https://grafana.com/docs/loki/latest/clients/promtail/configuration/#relabel_configs | -### PodLogsSpec -(Appears on:[PodLogs](#monitoring.grafana.com/v1alpha1.PodLogs)) -PodLogsSpec defines how to collect logs for a pod. -#### Fields -|Field|Description| -|-|-| -|`jobLabel`
_string_| The label to use to retrieve the job name from. | -|`podTargetLabels`
_[]string_| PodTargetLabels transfers labels on the Kubernetes Pod onto the target. | -|`selector`
_[Kubernetes meta/v1.LabelSelector](https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.24/#labelselector-v1-meta)_| Selector to select Pod objects. Required. | -|`namespaceSelector`
_[github.com/prometheus-operator/prometheus-operator/pkg/apis/monitoring/v1.NamespaceSelector](https://prometheus-operator.dev/docs/operator/api/#monitoring.coreos.com/v1.NamespaceSelector)_| Selector to select which namespaces the Pod objects are discovered from. | -|`pipelineStages`
_[[]PipelineStageSpec](#monitoring.grafana.com/v1alpha1.PipelineStageSpec)_| Pipeline stages for this pod. Pipeline stages support transforming and filtering log lines. | -|`relabelings`
_[[]github.com/prometheus-operator/prometheus-operator/pkg/apis/monitoring/v1.RelabelConfig](https://prometheus-operator.dev/docs/operator/api/#monitoring.coreos.com/v1.RelabelConfig)_| RelabelConfigs to apply to logs before delivering. Grafana Agent Operator automatically adds relabelings for a few standard Kubernetes fields and replaces original scrape job name with __tmp_logs_job_name. More info: https://grafana.com/docs/loki/latest/clients/promtail/configuration/#relabel_configs | -### QueueConfig -(Appears on:[RemoteWriteSpec](#monitoring.grafana.com/v1alpha1.RemoteWriteSpec)) -QueueConfig allows the tuning of remote_write queue_config parameters. -#### Fields -|Field|Description| -|-|-| -|`capacity`
_int_| Capacity is the number of samples to buffer per shard before samples start being dropped. | -|`minShards`
_int_| MinShards is the minimum number of shards, i.e., the amount of concurrency. | -|`maxShards`
_int_| MaxShards is the maximum number of shards, i.e., the amount of concurrency. | -|`maxSamplesPerSend`
_int_| MaxSamplesPerSend is the maximum number of samples per send. | -|`batchSendDeadline`
_string_| BatchSendDeadline is the maximum time a sample will wait in the buffer. | -|`maxRetries`
_int_| MaxRetries is the maximum number of times to retry a batch on recoverable errors. | -|`minBackoff`
_string_| MinBackoff is the initial retry delay. MinBackoff is doubled for every retry. | -|`maxBackoff`
_string_| MaxBackoff is the maximum retry delay. | -|`retryOnRateLimit`
_bool_| RetryOnRateLimit retries requests when encountering rate limits. | -### RegexStageSpec -(Appears on:[PipelineStageSpec](#monitoring.grafana.com/v1alpha1.PipelineStageSpec)) -RegexStageSpec is a parsing stage that parses a log line using a regular expression. Named capture groups in the regex allow for adding data into the extracted map. -#### Fields -|Field|Description| -|-|-| -|`source`&#13;
_string_| Name from extracted data to parse. If empty, defaults to using the log message. | -|`expression`
_string_| RE2 regular expression. Each capture group MUST be named. Required. | -### RemoteWriteSpec -(Appears on:[MetricsInstanceSpec](#monitoring.grafana.com/v1alpha1.MetricsInstanceSpec), [MetricsSubsystemSpec](#monitoring.grafana.com/v1alpha1.MetricsSubsystemSpec)) -RemoteWriteSpec defines the remote_write configuration for Prometheus. -#### Fields -|Field|Description| -|-|-| -|`name`
_string_| Name of the remote_write queue. Must be unique if specified. The name is used in metrics and logging in order to differentiate queues. | -|`url`
_string_| URL of the endpoint to send samples to. | -|`remoteTimeout`
_string_| RemoteTimeout is the timeout for requests to the remote_write endpoint. | -|`headers`
_map[string]string_| Headers is a set of custom HTTP headers to be sent along with each remote_write request. Be aware that any headers set by Grafana Agent itself can't be overwritten. | -|`writeRelabelConfigs`
_[[]github.com/prometheus-operator/prometheus-operator/pkg/apis/monitoring/v1.RelabelConfig](https://prometheus-operator.dev/docs/operator/api/#monitoring.coreos.com/v1.RelabelConfig)_| WriteRelabelConfigs holds relabel_configs to relabel samples before they are sent to the remote_write endpoint. | -|`basicAuth`
_[github.com/prometheus-operator/prometheus-operator/pkg/apis/monitoring/v1.BasicAuth](https://prometheus-operator.dev/docs/operator/api/#monitoring.coreos.com/v1.BasicAuth)_| BasicAuth for the URL. | -|`oauth2`
_[github.com/prometheus-operator/prometheus-operator/pkg/apis/monitoring/v1.OAuth2](https://prometheus-operator.dev/docs/operator/api/#monitoring.coreos.com/v1.OAuth2)_| OAuth2 for the URL. | -|`bearerToken`&#13;
_string_| BearerToken used for remote_write. | -|`bearerTokenFile`
_string_| BearerTokenFile used to read bearer token. | -|`sigv4`
_[SigV4Config](#monitoring.grafana.com/v1alpha1.SigV4Config)_| SigV4 configures SigV4-based authentication to the remote_write endpoint. SigV4-based authentication is used if SigV4 is defined, even with an empty object. | -|`tlsConfig`
_[github.com/prometheus-operator/prometheus-operator/pkg/apis/monitoring/v1.TLSConfig](https://prometheus-operator.dev/docs/operator/api/#monitoring.coreos.com/v1.TLSConfig)_| TLSConfig to use for remote_write. | -|`proxyUrl`
_string_| ProxyURL to proxy requests through. Optional. | -|`queueConfig`
_[QueueConfig](#monitoring.grafana.com/v1alpha1.QueueConfig)_| QueueConfig allows tuning of the remote_write queue parameters. | -|`metadataConfig`
_[MetadataConfig](#monitoring.grafana.com/v1alpha1.MetadataConfig)_| MetadataConfig configures the sending of series metadata to remote storage. | -### ReplaceStageSpec -(Appears on:[PipelineStageSpec](#monitoring.grafana.com/v1alpha1.PipelineStageSpec)) -ReplaceStageSpec is a parsing stage that parses a log line using a regular expression and replaces the log line. Named capture groups in the regex allow for adding data into the extracted map. -#### Fields -|Field|Description| -|-|-| -|`source`&#13;
_string_| Name from extracted data to parse. If empty, defaults to using the log message. | -|`expression`
_string_| RE2 regular expression. Each capture group MUST be named. Required. | -|`replace`
_string_| Value to replace the captured group with. | -### SigV4Config -(Appears on:[RemoteWriteSpec](#monitoring.grafana.com/v1alpha1.RemoteWriteSpec)) -SigV4Config specifies configuration to perform SigV4 authentication. -#### Fields -|Field|Description| -|-|-| -|`region`
_string_| Region of the AWS endpoint. If blank, the region from the default credentials chain is used. | -|`accessKey`
_[Kubernetes core/v1.SecretKeySelector](https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.24/#secretkeyselector-v1-core)_| AccessKey holds the secret of the AWS API access key to use for signing. If not provided, the environment variable AWS_ACCESS_KEY_ID is used. | -|`secretKey`
_[Kubernetes core/v1.SecretKeySelector](https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.24/#secretkeyselector-v1-core)_| SecretKey of the AWS API to use for signing. If blank, the environment variable AWS_SECRET_ACCESS_KEY is used. | -|`profile`
_string_| Profile is the named AWS profile to use for authentication. | -|`roleARN`
_string_| RoleARN is the AWS Role ARN to use for authentication, as an alternative for using the AWS API keys. | -### TemplateStageSpec -(Appears on:[PipelineStageSpec](#monitoring.grafana.com/v1alpha1.PipelineStageSpec)) -TemplateStageSpec is a transform stage that manipulates the values in the extracted map using Go's template syntax. -#### Fields -|Field|Description| -|-|-| -|`source`
_string_| Name from extracted data to parse. Required. If empty, defaults to using the log message. | -|`template`
_string_| Go template string to use. Required. In addition to normal template functions, ToLower, ToUpper, Replace, Trim, TrimLeft, TrimRight, TrimPrefix, and TrimSpace are also available. | -### TenantStageSpec -(Appears on:[PipelineStageSpec](#monitoring.grafana.com/v1alpha1.PipelineStageSpec)) -TenantStageSpec is an action stage that sets the tenant ID for the log entry, picking it from a field in the extracted data map. -#### Fields -|Field|Description| -|-|-| -|`label`&#13;
_string_| Name from labels whose value should be set as tenant ID. Mutually exclusive with source and value. | -|`source`
_string_| Name from extracted data to use as the tenant ID. Mutually exclusive with label and value. | -|`value`
_string_| Value to use for the tenant ID. Useful when this stage is used within a conditional pipeline such as match. Mutually exclusive with label and source. | -### TimestampStageSpec -(Appears on:[PipelineStageSpec](#monitoring.grafana.com/v1alpha1.PipelineStageSpec)) -TimestampStageSpec is an action stage that can change the timestamp of a log line before it is sent to Loki. -#### Fields -|Field|Description| -|-|-| -|`source`&#13;
_string_| Name from extracted data to use as the timestamp. Required. | -|`format`
_string_| Determines format of the time string. Required. Can be one of: ANSIC, UnixDate, RubyDate, RFC822, RFC822Z, RFC850, RFC1123, RFC1123Z, RFC3339, RFC3339Nano, Unix, UnixMs, UnixUs, UnixNs. | -|`fallbackFormats`
_[]string_| Fallback formats to try if format fails. | -|`location`
_string_| IANA Timezone Database string. | -|`actionOnFailure`
_string_| Action to take when the timestamp can't be extracted or parsed. Can be skip or fudge. Defaults to fudge. | diff --git a/docs/sources/operator/architecture.md b/docs/sources/operator/architecture.md deleted file mode 100644 index ba0b5c97fd..0000000000 --- a/docs/sources/operator/architecture.md +++ /dev/null @@ -1,206 +0,0 @@ ---- -aliases: -- /docs/grafana-cloud/agent/operator/architecture/ -- /docs/grafana-cloud/monitor-infrastructure/agent/operator/architecture/ -- /docs/grafana-cloud/monitor-infrastructure/integrations/agent/operator/architecture/ -- /docs/grafana-cloud/send-data/agent/operator/architecture/ -canonical: https://grafana.com/docs/agent/latest/operator/architecture/ -description: Learn about Grafana Agent architecture -title: Architecture -weight: 300 ---- - -# Architecture - -Grafana Agent Operator works by watching for Kubernetes [custom resources](https://kubernetes.io/docs/concepts/extend-kubernetes/api-extension/custom-resources/) that specify how to collect telemetry data from your Kubernetes cluster and where to send it. Agent Operator manages corresponding Grafana Agent deployments in your cluster by watching for changes against the custom resources. - -Grafana Agent Operator works in two phases—it discovers a hierarchy of custom resources and it reconciles that hierarchy into a Grafana Agent deployment. - -## Custom resource hierarchy - -The root of the custom resource hierarchy is the `GrafanaAgent` resource—the primary resource Agent Operator looks for. `GrafanaAgent` is called the _root_ because it -discovers other sub-resources, `MetricsInstance` and `LogsInstance`. The `GrafanaAgent` resource endows them with Pod attributes defined in the GrafanaAgent specification, for example, Pod requests, limits, affinities, and tolerations, and defines the Grafana Agent image. You can only define Pod attributes at the `GrafanaAgent` level. They are propagated to MetricsInstance and LogsInstance Pods. - -The full hierarchy of custom resources is as follows: - -- `GrafanaAgent` - - `MetricsInstance` - - `PodMonitor` - - `Probe` - - `ServiceMonitor` - - `LogsInstance` - - `PodLogs` - -The following table describes these custom resources: - -| Custom resource | description | -|---|---| -| `GrafanaAgent` | Discovers one or more `MetricsInstance` and `LogsInstance` resources. | -| `MetricsInstance` | Defines where to ship collected metrics. This rolls out a Grafana Agent StatefulSet that will scrape and ship metrics to a `remote_write` endpoint. | -| `ServiceMonitor` | Collects [cAdvisor](https://github.com/google/cadvisor) and [kubelet metrics](https://github.com/kubernetes/kube-state-metrics). This configures the `MetricsInstance` / Agent StatefulSet | -| `LogsInstance` | Defines where to ship collected logs. This rolls out a Grafana Agent [DaemonSet](https://kubernetes.io/docs/concepts/workloads/controllers/daemonset/) that will tail log files on your cluster nodes. | -| `PodLogs` | Collects container logs from Kubernetes Pods. This configures the `LogsInstance` / Agent DaemonSet. | - -Most of the Grafana Agent Operator resources have the ability to reference a [ConfigMap](https://kubernetes.io/docs/concepts/configuration/configmap/) or a -[Secret](https://kubernetes.io/docs/concepts/configuration/secret/). All referenced ConfigMaps or Secrets are added into the resource -hierarchy. - -When a hierarchy is established, each item is watched for changes. 
Any changed -item causes a reconcile of the root `GrafanaAgent` resource, either -creating, modifying, or deleting the corresponding Grafana Agent deployment. - -A single resource can belong to multiple hierarchies. For example, if two -`GrafanaAgents` use the same Probe, modifying that Probe causes both -`GrafanaAgents` to be reconciled. - -To set up monitoring, Grafana Agent Operator works in the following two phases: - -- Builds (discovers) a hierarchy of custom resources. -- Reconciles that hierarchy into a Grafana Agent deployment. - -Agent Operator also performs [sharding and replication](#sharding-and-replication) and adds [labels](#added-labels) to every metric. - -## How Agent Operator builds the custom resource hierarchy - -Grafana Agent Operator builds the hierarchy using label matching on the custom resources. The following figure illustrates the matching. The `GrafanaAgent` picks up the `MetricsInstance` -and `LogsInstance` that match the label `instance: primary`. The instances pick up the resources the same way. - -[Figure: a `GrafanaAgent` selects the `MetricsInstance` and `LogsInstance` labeled `instance: primary`; each instance selects its `*Monitor`, `Probe`, and `PodLogs` resources by label in the same way.]&#13;
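To make the matching concrete, the following is a minimal sketch of the label wiring (the `instance: primary` label comes from the description above; resource names and everything else are illustrative):

```yaml
apiVersion: monitoring.grafana.com/v1alpha1
kind: GrafanaAgent
metadata:
  name: grafana-agent
spec:
  metrics:
    # Picks up every MetricsInstance labeled instance: primary.
    instanceSelector:
      matchLabels:
        instance: primary
---
apiVersion: monitoring.grafana.com/v1alpha1
kind: MetricsInstance
metadata:
  name: primary
  labels:
    instance: primary   # Matched by the GrafanaAgent above.
spec:
  # The instance selects ServiceMonitors (and other *Monitors) the same way.
  serviceMonitorSelector:
    matchLabels:
      instance: primary
```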
- -### To validate the Secrets - -The generated configurations are saved in Secrets. To download and -validate them manually, use the following commands: - -``` -$ kubectl get secrets <name>-logs-config -o json | jq -r '.data."agent.yml"' | base64 --decode -$ kubectl get secrets <name>-config -o json | jq -r '.data."agent.yml"' | base64 --decode -``` - -## How Agent Operator reconciles the custom resource hierarchy - -When a resource hierarchy is created, updated, or deleted, a reconcile occurs. -When a `GrafanaAgent` resource is deleted, the corresponding Grafana Agent -deployment will also be deleted. - -Reconciling creates the following cluster resources: - -1. A Secret that holds the Grafana Agent - [configuration]({{< relref "../static/configuration/_index.md" >}}) is generated. -2. A Secret that holds all referenced Secrets or ConfigMaps from - the resource hierarchy is generated. This ensures that Secrets referenced from a custom - resource in another namespace can still be read. -3. A Service is created to govern the StatefulSets that are generated. -4. One StatefulSet per Prometheus shard is created. - -PodMonitors, Probes, and ServiceMonitors are turned into individual scrape jobs -which all use Kubernetes Service Discovery (SD). - -## Sharding and replication - -The GrafanaAgent resource can specify a number of shards. Each shard results in -the creation of a StatefulSet with a hashmod + keep relabel_config per job: - -```yaml -- source_labels: [__address__] - target_label: __tmp_hash - modulus: NUM_SHARDS - action: hashmod -- source_labels: [__tmp_hash] - regex: CURRENT_STATEFULSET_SHARD - action: keep -``` - -This allows for horizontal scaling capabilities, where each shard -will handle roughly 1/N of the total scrape load. Note that this does not use -consistent hashing, which means changing the number of shards will cause -anywhere between 1/N and N targets to reshuffle. - -The sharding mechanism is borrowed from the Prometheus Operator. - -The number of replicas can be defined, similarly to the number of shards. This -creates duplicate shards. This must be paired with a `remote_write` system that -can perform HA deduplication. [Grafana Cloud](/docs/grafana-cloud/) and [Mimir](/docs/mimir/latest/) provide this out of the -box, and the Grafana Agent Operator defaults support these two systems. - -The total number of created metrics pods will be the product of `numShards * -numReplicas`. - -## Added labels - -Two labels are added by default to every metric: - -- `cluster`, representing the `GrafanaAgent` deployment. Holds the value of - `<namespace>/<name>`. -- `__replica__`, representing the replica number of the Agent. This label works - out of the box with Grafana Cloud and Cortex's [HA - deduplication](https://cortexmetrics.io/docs/guides/ha-pair-handling/). - -The shard number is not added as a label, as sharding is designed to be -transparent on the receiver end. - -## Enable sharding and replication - -To enable sharding and replication, you must set the `shards` and `replicas` properties in the Grafana Agent configuration file. For example, the following configuration file would shard the data into three shards and replicate each shard to two other Grafana Agent instances: - -``` -shards: 3 -replicas: 2 -``` - -You can also enable sharding and replication by setting the `shards` and `replicas` arguments when you start the Grafana Agent. - -### Examples - -The following examples show you how to enable sharding and replication in a Kubernetes environment. &#13;
- -* To shard the data into three shards and replicate each shard to two other Grafana Agent instances, you would use the following deployment manifest: - - ``` - apiVersion: apps/v1 - kind: Deployment - metadata: - name: grafana-agent - spec: - replicas: 3 - selector: - matchLabels: - app: grafana-agent - template: - metadata: - labels: - app: grafana-agent - spec: - containers: - - name: grafana-agent - image: grafana/agent:latest - args: - - "--shards=3" - - "--replicas=2" - ``` - -* To shard the data into 10 shards and replicate each shard to three other Grafana Agent instances, you would use the following deployment manifest: - - ``` - apiVersion: apps/v1 - kind: Deployment - metadata: - name: grafana-agent - spec: - replicas: 10 - selector: - matchLabels: - app: grafana-agent - template: - metadata: - labels: - app: grafana-agent - spec: - containers: - - name: grafana-agent - image: grafana/agent:latest - args: - - "--shards=10" - - "--replicas=3" - ``` - diff --git a/docs/sources/operator/deploy-agent-operator-resources.md b/docs/sources/operator/deploy-agent-operator-resources.md deleted file mode 100644 index 6b6f6564c8..0000000000 --- a/docs/sources/operator/deploy-agent-operator-resources.md +++ /dev/null @@ -1,435 +0,0 @@ ---- -aliases: -- /docs/grafana-cloud/agent/operator/deploy-agent-operator-resources/ -- /docs/grafana-cloud/monitor-infrastructure/agent/operator/deploy-agent-operator-resources/ -- /docs/grafana-cloud/monitor-infrastructure/integrations/agent/operator/deploy-agent-operator-resources/ -- /docs/grafana-cloud/send-data/agent/operator/deploy-agent-operator-resources/ -- custom-resource-quickstart/ -canonical: https://grafana.com/docs/agent/latest/operator/deploy-agent-operator-resources/ -description: Learn how to deploy Operator resources -title: Deploy Operator resources -weight: 120 ---- -# Deploy Operator resources - -To start collecting telemetry data, you need to roll out Grafana Agent Operator custom resources into your Kubernetes cluster. Before you can create the custom resources, you must first apply the Agent Custom Resource Definitions (CRDs) and install Agent Operator, with or without Helm. If you haven't yet taken these steps, follow the instructions in one of the following topics: - -- [Install Agent Operator]({{< relref "./getting-started" >}}) -- [Install Agent Operator with Helm]({{< relref "./helm-getting-started" >}}) - -Follow the steps in this guide to roll out the Grafana Agent Operator custom resources to: - -- Scrape and ship cAdvisor and kubelet metrics to a Prometheus-compatible metrics endpoint. -- Collect and ship your Pods’ container logs to a Loki-compatible logs endpoint. - -The hierarchy of custom resources is as follows: - -- `GrafanaAgent` - - `MetricsInstance` - - `PodMonitor` - - `Probe` - - `ServiceMonitor` - - `LogsInstance` - - `PodLogs` - -To learn more about the custom resources Agent Operator provides and their hierarchy, see [Grafana Agent Operator architecture]({{< relref "./architecture" >}}). - -{{< admonition type="note" >}} -Agent Operator is currently in [beta]({{< relref "../stability.md#beta" >}}) and its custom resources are subject to change. -{{< /admonition >}} - -## Before you begin - -Before you begin, make sure that you have deployed the Grafana Agent Operator CRDs and installed Agent Operator into your cluster. See [Install Grafana Agent Operator with Helm]({{< relref "./helm-getting-started" >}}) or [Install Grafana Agent Operator]({{< relref "./getting-started" >}}) for instructions. 
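Before moving on, a quick sanity check can confirm that the prerequisites are in place (a suggested step, not part of the original instructions; the Operator Pod label is assumed from the install guide's deployment manifest):

```bash
# The Agent Operator CRDs should be present; expect grafanaagents,
# metricsinstances, logsinstances, podlogs, and integrations.
kubectl get crds | grep monitoring.grafana.com

# The Operator itself should be running.
kubectl get pods -l app=grafana-agent-operator
```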
- -## Deploy the GrafanaAgent resource - -In this section, you'll roll out a `GrafanaAgent` resource. See [Grafana Agent Operator architecture]({{< relref "./architecture" >}}) for a discussion of the resources in the `GrafanaAgent` resource hierarchy. - -{{< admonition type="note" >}} -Due to the variety of possible deployment architectures, the official Agent Operator Helm chart does not provide built-in templates for the custom resources described in this guide. You must configure and deploy these manually as described in this section. We recommend templating and adding the following manifests to your own in-house Helm charts and GitOps flows. -{{< /admonition >}} - -To deploy the `GrafanaAgent` resource: - -1. Copy the following manifests to a file: - - ```yaml - apiVersion: monitoring.grafana.com/v1alpha1 - kind: GrafanaAgent - metadata: - name: grafana-agent - namespace: default - labels: - app: grafana-agent - spec: - image: grafana/agent:{{< param "AGENT_RELEASE" >}} - integrations: - selector: - matchLabels: - agent: grafana-agent-integrations - logLevel: info - serviceAccountName: grafana-agent - metrics: - instanceSelector: - matchLabels: - agent: grafana-agent-metrics - externalLabels: - cluster: cloud - - logs: - instanceSelector: - matchLabels: - agent: grafana-agent-logs - - --- - - apiVersion: v1 - kind: ServiceAccount - metadata: - name: grafana-agent - namespace: default - - --- - - apiVersion: rbac.authorization.k8s.io/v1 - kind: ClusterRole - metadata: - name: grafana-agent - rules: - - apiGroups: - - "" - resources: - - nodes - - nodes/proxy - - nodes/metrics - - services - - endpoints - - pods - - events - verbs: - - get - - list - - watch - - apiGroups: - - networking.k8s.io - resources: - - ingresses - verbs: - - get - - list - - watch - - nonResourceURLs: - - /metrics - - /metrics/cadvisor - verbs: - - get - - --- - - apiVersion: rbac.authorization.k8s.io/v1 - kind: ClusterRoleBinding - metadata: - name: grafana-agent - roleRef: - apiGroup: rbac.authorization.k8s.io - kind: ClusterRole - name: grafana-agent - subjects: - - kind: ServiceAccount - name: grafana-agent - namespace: default - ``` - - In the first manifest, the `GrafanaAgent` resource: - - - Specifies an Agent image version. - - Specifies `MetricsInstance` and `LogsInstance` selectors. These search for `MetricsInstances` and `LogsInstances` in the same namespace with labels matching `agent: grafana-agent-metrics` and `agent: grafana-agent-logs`, respectively. - - Sets a `cluster: cloud` label for all metrics shipped to your Prometheus-compatible endpoint. Change this label to your cluster name. To search for `MetricsInstances` or `LogsInstances` in a *different* namespace, use the `instanceNamespaceSelector` field. To learn more about this field, see the `GrafanaAgent` [CRD specification](https://github.com/grafana/agent/tree/main/operations/agent-static-operator/crds/monitoring.grafana.com_grafanaagents.yaml). - -1. Customize the manifests as needed and roll them out to your cluster using `kubectl apply -f` followed by the filename. - - This step creates a `ServiceAccount`, `ClusterRole`, and `ClusterRoleBinding` for the `GrafanaAgent` resource. - - Deploying a `GrafanaAgent` resource on its own does not spin up Agent Pods. Agent Operator creates Agent Pods once `MetricsInstance` and `LogsIntance` resources have been created. 
Follow the instructions in the [Deploy a MetricsInstance resource](#deploy-a-metricsinstance-resource) and [Deploy LogsInstance and PodLogs resources](#deploy-logsinstance-and-podlogs-resources) sections to create these resources. - -### Disable feature flags reporting - -To disable the [reporting]({{< relref "../static/configuration/flags.md#report-information-usage" >}}) usage of feature flags to Grafana, set `disableReporting` field to `true`. - -### Disable support bundle generation - -To disable the [support bundles functionality]({{< relref "../static/configuration/flags.md#support-bundles" >}}), set the `disableSupportBundle` field to `true`. - -## Deploy a MetricsInstance resource - -Next, you'll roll out a `MetricsInstance` resource. `MetricsInstance` resources define a `remote_write` sink for metrics and configure one or more selectors to watch for creation and updates to `*Monitor` objects. These objects allow you to define Agent scrape targets via Kubernetes manifests: - -- [ServiceMonitors](https://github.com/prometheus-operator/prometheus-operator/blob/master/Documentation/api.md#servicemonitor) -- [PodMonitors](https://github.com/prometheus-operator/prometheus-operator/blob/master/Documentation/api.md#podmonitor) -- [Probes](https://github.com/prometheus-operator/prometheus-operator/blob/master/Documentation/api.md#probe) - -To deploy a `MetricsInstance` resource: - -1. Copy the following manifest to a file: - - ```yaml - apiVersion: monitoring.grafana.com/v1alpha1 - kind: MetricsInstance - metadata: - name: primary - namespace: default - labels: - agent: grafana-agent-metrics - spec: - remoteWrite: - - url: your_remote_write_URL - basicAuth: - username: - name: primary-credentials-metrics - key: username - password: - name: primary-credentials-metrics - key: password - - # Supply an empty namespace selector to look in all namespaces. Remove - # this to only look in the same namespace as the MetricsInstance CR - serviceMonitorNamespaceSelector: {} - serviceMonitorSelector: - matchLabels: - instance: primary - - # Supply an empty namespace selector to look in all namespaces. Remove - # this to only look in the same namespace as the MetricsInstance CR. - podMonitorNamespaceSelector: {} - podMonitorSelector: - matchLabels: - instance: primary - - # Supply an empty namespace selector to look in all namespaces. Remove - # this to only look in the same namespace as the MetricsInstance CR. - probeNamespaceSelector: {} - probeSelector: - matchLabels: - instance: primary - ``` - -1. Replace the `remote_write` URL and customize the namespace and label configuration as necessary. - - This step associates the `MetricsInstance` resource with the `agent: grafana-agent` `GrafanaAgent` resource deployed in the previous step. The `MetricsInstance` resource watches for creation and updates to `*Monitors` with the `instance: primary` label. - -1. Once you've rolled out the manifest, create the `basicAuth` credentials [using a Kubernetes Secret](https://kubernetes.io/docs/tasks/configmap-secret/managing-secret-using-config-file/): - - ```yaml - apiVersion: v1 - kind: Secret - metadata: - name: primary-credentials-metrics - namespace: default - stringData: - username: 'your_cloud_prometheus_username' - password: 'your_cloud_prometheus_API_key' - ``` - -If you're using Grafana Cloud, you can find your hosted Loki endpoint username and password by clicking **Details** on the Loki tile on the [Grafana Cloud Portal](/profile/org). 
If you want to base64-encode these values yourself, use `data` instead of `stringData`. - -Once you've rolled out the `MetricsInstance` and its Secret, you can confirm that the `MetricsInstance` Agent is up and running using `kubectl get pod`. Since you haven't defined any monitors yet, this Agent doesn't have any scrape targets defined. In the next section, you'll create scrape targets for the cAdvisor and kubelet endpoints exposed by the `kubelet` service in the cluster. - -## Create ServiceMonitors for kubelet and cAdvisor endpoints - -Next, you'll create ServiceMonitors for kubelet and cAdvisor metrics exposed by the `kubelet` service. Every Node in your cluster exposes kubelet and cAdvisor metrics at `/metrics` and `/metrics/cadvisor`, respectively. Agent Operator creates a `kubelet` service that exposes these Node endpoints so that they can be scraped using ServiceMonitors. - -To scrape the kubelet and cAdvisor endpoints: - -1. Copy the following kubelet ServiceMonitor manifest to a file, then roll it out in your cluster using `kubectl apply -f` followed by the filename. - - ```yaml - apiVersion: monitoring.coreos.com/v1 - kind: ServiceMonitor - metadata: - labels: - instance: primary - name: kubelet-monitor - namespace: default - spec: - endpoints: - - bearerTokenFile: /var/run/secrets/kubernetes.io/serviceaccount/token - honorLabels: true - interval: 60s - metricRelabelings: - - action: keep - regex: kubelet_cgroup_manager_duration_seconds_count|go_goroutines|kubelet_pod_start_duration_seconds_count|kubelet_runtime_operations_total|kubelet_pleg_relist_duration_seconds_bucket|volume_manager_total_volumes|kubelet_volume_stats_capacity_bytes|container_cpu_usage_seconds_total|container_network_transmit_bytes_total|kubelet_runtime_operations_errors_total|container_network_receive_bytes_total|container_memory_swap|container_network_receive_packets_total|container_cpu_cfs_periods_total|container_cpu_cfs_throttled_periods_total|kubelet_running_pod_count|node_namespace_pod_container:container_cpu_usage_seconds_total:sum_rate|container_memory_working_set_bytes|storage_operation_errors_total|kubelet_pleg_relist_duration_seconds_count|kubelet_running_pods|rest_client_request_duration_seconds_bucket|process_resident_memory_bytes|storage_operation_duration_seconds_count|kubelet_running_containers|kubelet_runtime_operations_duration_seconds_bucket|kubelet_node_config_error|kubelet_cgroup_manager_duration_seconds_bucket|kubelet_running_container_count|kubelet_volume_stats_available_bytes|kubelet_volume_stats_inodes|container_memory_rss|kubelet_pod_worker_duration_seconds_count|kubelet_node_name|kubelet_pleg_relist_interval_seconds_bucket|container_network_receive_packets_dropped_total|kubelet_pod_worker_duration_seconds_bucket|container_start_time_seconds|container_network_transmit_packets_dropped_total|process_cpu_seconds_total|storage_operation_duration_seconds_bucket|container_memory_cache|container_network_transmit_packets_total|kubelet_volume_stats_inodes_used|up|rest_client_requests_total - sourceLabels: - - __name__ - port: https-metrics - relabelings: - - sourceLabels: - - __metrics_path__ - targetLabel: metrics_path - - action: replace - targetLabel: job - replacement: integrations/kubernetes/kubelet - scheme: https - tlsConfig: - insecureSkipVerify: true - namespaceSelector: - matchNames: - - default - selector: - matchLabels: - app.kubernetes.io/name: kubelet - ``` - -1. 
Copy the following cAdvisor ServiceMonitor manifest to a file, then roll it out in your cluster using `kubectl apply -f` followed by the filename. - - ```yaml - apiVersion: monitoring.coreos.com/v1 - kind: ServiceMonitor - metadata: - labels: - instance: primary - name: cadvisor-monitor - namespace: default - spec: - endpoints: - - bearerTokenFile: /var/run/secrets/kubernetes.io/serviceaccount/token - honorLabels: true - honorTimestamps: false - interval: 60s - metricRelabelings: - - action: keep - regex: kubelet_cgroup_manager_duration_seconds_count|go_goroutines|kubelet_pod_start_duration_seconds_count|kubelet_runtime_operations_total|kubelet_pleg_relist_duration_seconds_bucket|volume_manager_total_volumes|kubelet_volume_stats_capacity_bytes|container_cpu_usage_seconds_total|container_network_transmit_bytes_total|kubelet_runtime_operations_errors_total|container_network_receive_bytes_total|container_memory_swap|container_network_receive_packets_total|container_cpu_cfs_periods_total|container_cpu_cfs_throttled_periods_total|kubelet_running_pod_count|node_namespace_pod_container:container_cpu_usage_seconds_total:sum_rate|container_memory_working_set_bytes|storage_operation_errors_total|kubelet_pleg_relist_duration_seconds_count|kubelet_running_pods|rest_client_request_duration_seconds_bucket|process_resident_memory_bytes|storage_operation_duration_seconds_count|kubelet_running_containers|kubelet_runtime_operations_duration_seconds_bucket|kubelet_node_config_error|kubelet_cgroup_manager_duration_seconds_bucket|kubelet_running_container_count|kubelet_volume_stats_available_bytes|kubelet_volume_stats_inodes|container_memory_rss|kubelet_pod_worker_duration_seconds_count|kubelet_node_name|kubelet_pleg_relist_interval_seconds_bucket|container_network_receive_packets_dropped_total|kubelet_pod_worker_duration_seconds_bucket|container_start_time_seconds|container_network_transmit_packets_dropped_total|process_cpu_seconds_total|storage_operation_duration_seconds_bucket|container_memory_cache|container_network_transmit_packets_total|kubelet_volume_stats_inodes_used|up|rest_client_requests_total - sourceLabels: - - __name__ - path: /metrics/cadvisor - port: https-metrics - relabelings: - - sourceLabels: - - __metrics_path__ - targetLabel: metrics_path - - action: replace - targetLabel: job - replacement: integrations/kubernetes/cadvisor - scheme: https - tlsConfig: - insecureSkipVerify: true - namespaceSelector: - matchNames: - - default - selector: - matchLabels: - app.kubernetes.io/name: kubelet - ``` - -These two ServiceMonitors configure Agent to scrape all the kubelet and cAdvisor endpoints in your Kubernetes cluster (one of each per Node). In addition, it defines a `job` label which you can update (it is preset here for compatibility with Grafana Cloud's Kubernetes integration). It also provides an allowlist containing a core set of Kubernetes metrics to reduce remote metrics usage. If you don't need this allowlist, you can omit it, however, your metrics usage will increase significantly. - - When you're done, Agent should now be shipping kubelet and cAdvisor metrics to your remote Prometheus endpoint. To check this in Grafana Cloud, go to your dashboards, select **Integration - Kubernetes**, then select **Kubernetes / Kubelet**. - -## Deploy LogsInstance and PodLogs resources - -Next, you'll deploy a `LogsInstance` resource to collect logs from your cluster Nodes and ship these to your remote Loki endpoint. 
Agent Operator deploys a DaemonSet of Agents in your cluster that will tail log files defined in `PodLogs` resources. - -To deploy the `LogsInstance` resource into your cluster: - -1. Copy the following manifest to a file, then roll it out in your cluster using `kubectl apply -f` followed by the filename. - - ```yaml - apiVersion: monitoring.grafana.com/v1alpha1 - kind: LogsInstance - metadata: - name: primary - namespace: default - labels: - agent: grafana-agent-logs - spec: - clients: - - url: your_remote_logs_URL - basicAuth: - username: - name: primary-credentials-logs - key: username - password: - name: primary-credentials-logs - key: password - - # Supply an empty namespace selector to look in all namespaces. Remove - # this to only look in the same namespace as the LogsInstance CR - podLogsNamespaceSelector: {} - podLogsSelector: - matchLabels: - instance: primary - ``` - - This `LogsInstance` picks up `PodLogs` resources with the `instance: primary` label. Be sure to set the Loki URL to the correct push endpoint. For Grafana Cloud, this will look similar to `logs-prod-us-central1.grafana.net/loki/api/v1/push`, however check the [Grafana Cloud Portal](/profile/org) to confirm by clicking **Details** on the Loki tile. - - Also note that this example uses the `agent: grafana-agent-logs` label, which associates this `LogsInstance` with the `GrafanaAgent` resource defined earlier. This means that it will inherit requests, limits, affinities and other properties defined in the `GrafanaAgent` custom resource. - -1. To create the Secret for the `LogsInstance` resource, copy the following Secret manifest to a file, then roll it out in your cluster using `kubectl apply -f` followed by the filename. - - ```yaml - apiVersion: v1 - kind: Secret - metadata: - name: primary-credentials-logs - namespace: default - stringData: - username: 'your_username_here' - password: 'your_password_here' - ``` - - If you're using Grafana Cloud, you can find your hosted Loki endpoint username and password by clicking **Details** on the Loki tile on the [Grafana Cloud Portal](/profile/org). If you want to base64-encode these values yourself, use `data` instead of `stringData`. - -1. Copy the following `PodLogs` manifest to a file, then roll it to your cluster using `kubectl apply -f` followed by the filename. The manifest defines your logging targets. Agent Operator turns this into Agent configuration for the logs subsystem, and rolls it out to the DaemonSet of logging Agents. - - {{< admonition type="note" >}} - The following is a minimal working example which you should adapt to your production needs. - {{< /admonition >}} - - ```yaml - apiVersion: monitoring.grafana.com/v1alpha1 - kind: PodLogs - metadata: - labels: - instance: primary - name: kubernetes-pods - namespace: default - spec: - pipelineStages: - - docker: {} - namespaceSelector: - matchNames: - - default - selector: - matchLabels: {} - ``` - - This example tails container logs for all Pods in the `default` namespace. You can restrict the set of matched Pods by using the `matchLabels` selector. You can also set additional `pipelineStages` and create `relabelings` to add or modify log line labels. To learn more about the `PodLogs` specification and available resource fields, see the [PodLogs CRD](https://github.com/grafana/agent/tree/main/operations/agent-static-operator/crds/monitoring.grafana.com_podlogs.yaml). 
- - The above `PodLogs` resource adds the following labels to log lines: - - - `namespace` - - `service` - - `pod` - - `container` - - `job` (set to `PodLogs_namespace/PodLogs_name`) - - `__path__` (the path to log files, set to `/var/log/pods/*$1/*.log` where `$1` is `__meta_kubernetes_pod_uid/__meta_kubernetes_pod_container_name`) - - To learn more about this configuration format and other available labels, see the [Promtail Scraping](/docs/loki/latest/clients/promtail/scraping/#promtail-scraping-service-discovery) documentation. Agent Operator loads this configuration into the `LogsInstance` agents automatically. - -The DaemonSet of logging agents should be tailing your container logs, applying default labels to the log lines, and shipping them to your remote Loki endpoint. - -## Summary - -You've now rolled out the following into your cluster: - -- A `GrafanaAgent` resource that discovers one or more `MetricsInstance` and `LogsInstances` resources. -- A `MetricsInstance` resource that defines where to ship collected metrics. -- A `ServiceMonitor` resource to collect cAdvisor and kubelet metrics. -- A `LogsInstance` resource that defines where to ship collected logs. -- A `PodLogs` resource to collect container logs from Kubernetes Pods. - -## What's next - -You can verify that everything is working correctly by navigating to your Grafana instance and querying your Loki and Prometheus data sources. - -> Tip: You can deploy multiple GrafanaAgent resources to isolate allocated resources to the agent pods. By default, the GrafanaAgent resource determines the resources of all deployed agent containers. However, you might want different memory limits for metrics versus logs. diff --git a/docs/sources/operator/getting-started.md b/docs/sources/operator/getting-started.md deleted file mode 100644 index e739388087..0000000000 --- a/docs/sources/operator/getting-started.md +++ /dev/null @@ -1,156 +0,0 @@ ---- -aliases: -- /docs/grafana-cloud/agent/operator/getting-started/ -- /docs/grafana-cloud/monitor-infrastructure/agent/operator/getting-started/ -- /docs/grafana-cloud/monitor-infrastructure/integrations/agent/operator/getting-started/ -- /docs/grafana-cloud/send-data/agent/operator/getting-started/ -canonical: https://grafana.com/docs/agent/latest/operator/getting-started/ -description: Learn how to install the Operator -title: Install the Operator -weight: 110 ---- - -# Install the Operator - -In this guide, you'll learn how to deploy [Grafana Agent Operator]({{< relref "./_index.md" >}}) into your Kubernetes cluster. This guide does not use Helm. To learn how to deploy Agent Operator using the [grafana-agent-operator Helm chart](https://github.com/grafana/helm-charts/tree/main/charts/agent-operator), see [Install Grafana Agent Operator with Helm]({{< relref "./helm-getting-started.md" >}}). - -> **Note**: If you are shipping your data to Grafana Cloud, use [Kubernetes Monitoring](/docs/grafana-cloud/kubernetes-monitoring/) to set up Agent Operator. Kubernetes Monitoring provides a simplified approach and preconfigured dashboards and alerts. -## Before you begin - -To deploy Agent Operator, make sure that you have the following: - -- A Kubernetes cluster -- The `kubectl` command-line client installed and configured on your machine - -> **Note:** Agent Operator is currently in beta and its custom resources are subject to change. 
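As a suggested preliminary check (not from the original guide), verify that `kubectl` is pointed at the cluster you intend to deploy to, since everything below is applied with your current context:

```bash
# Show the active context and confirm the cluster responds.
kubectl config current-context
kubectl cluster-info
```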
-
-## Deploy the Agent Operator Custom Resource Definitions (CRDs)
-
-Before you can create the custom resources for a Grafana Agent deployment, you need to deploy the [Custom Resource Definitions](https://kubernetes.io/docs/tasks/extend-kubernetes/custom-resources/custom-resource-definitions/) to the cluster. These definitions describe the schema that the custom resources will conform to. They are also required for Grafana Agent Operator to run; it fails if it can't find the Custom Resource Definitions of objects it is looking to use. To learn more about the custom resources Agent Operator provides and their hierarchy, see [Grafana Agent Operator architecture]({{< relref "./architecture" >}}).
-
-You can find the set of Custom Resource Definitions for Grafana Agent Operator in the Grafana Agent repository under [`operations/agent-static-operator/crds`](https://github.com/grafana/agent/tree/main/operations/agent-static-operator/crds).
-
-To deploy the CRDs:
-
-1. Clone the agent repo and then apply the CRDs from the root of the agent repository:
-   ```
-   kubectl apply -f operations/agent-static-operator/crds
-   ```
-
-   This step _must_ be completed before installing Agent Operator; it fails to start if the CRDs do not exist.
-
-2. To check that the CRDs are deployed to your Kubernetes cluster and to access documentation for each resource, use `kubectl explain <resource>`.
-
-   For example, `kubectl explain GrafanaAgent` describes the GrafanaAgent CRD, and `kubectl explain GrafanaAgent.spec` gives you information on its spec field.
-
-## Install Grafana Agent Operator
-
-Next, install Agent Operator by applying the Agent Operator deployment schema.
-
-To install Agent Operator:
-
-1. Copy the following deployment schema to a file, updating the namespace if needed:
-
-   ```yaml
-   apiVersion: apps/v1
-   kind: Deployment
-   metadata:
-     name: grafana-agent-operator
-     namespace: default
-     labels:
-       app: grafana-agent-operator
-   spec:
-     replicas: 1
-     selector:
-       matchLabels:
-         app: grafana-agent-operator
-     template:
-       metadata:
-         labels:
-           app: grafana-agent-operator
-       spec:
-         serviceAccountName: grafana-agent-operator
-         containers:
-         - name: operator
-           image: grafana/agent-operator:{{< param "AGENT_RELEASE" >}}
-           args:
-           - --kubelet-service=default/kubelet
-
-   ---
-
-   apiVersion: v1
-   kind: ServiceAccount
-   metadata:
-     name: grafana-agent-operator
-     namespace: default
-
-   ---
-
-   apiVersion: rbac.authorization.k8s.io/v1
-   kind: ClusterRole
-   metadata:
-     name: grafana-agent-operator
-   rules:
-   - apiGroups: [monitoring.grafana.com]
-     resources:
-     - grafanaagents
-     - metricsinstances
-     - logsinstances
-     - podlogs
-     - integrations
-     verbs: [get, list, watch]
-   - apiGroups: [monitoring.coreos.com]
-     resources:
-     - podmonitors
-     - probes
-     - servicemonitors
-     verbs: [get, list, watch]
-   - apiGroups: [""]
-     resources:
-     - namespaces
-     - nodes
-     verbs: [get, list, watch]
-   - apiGroups: [""]
-     resources:
-     - secrets
-     - services
-     - configmaps
-     - endpoints
-     verbs: [get, list, watch, create, update, patch, delete]
-   - apiGroups: ["apps"]
-     resources:
-     - statefulsets
-     - daemonsets
-     - deployments
-     verbs: [get, list, watch, create, update, patch, delete]
-
-   ---
-
-   apiVersion: rbac.authorization.k8s.io/v1
-   kind: ClusterRoleBinding
-   metadata:
-     name: grafana-agent-operator
-   roleRef:
-     apiGroup: rbac.authorization.k8s.io
-     kind: ClusterRole
-     name: grafana-agent-operator
-   subjects:
-   - kind: ServiceAccount
-     name: grafana-agent-operator
-     namespace: default
-   ```
-
-2. Roll out the deployment in your cluster using `kubectl apply -f` followed by your deployment filename.
-
-> **Note**: If you want to run Agent Operator locally, make sure your kubectl context is correct; running locally uses your current kubectl context, and if it is set to your production environment, you could accidentally deploy a new Grafana Agent to production. Install CRDs on the cluster prior to running locally. Afterwards, you can run Agent Operator using `go run ./cmd/grafana-agent-operator`.
-
-## Deploy the Grafana Agent Operator resources
-
-Agent Operator is now up and running. Next, you need to install a Grafana Agent for Agent Operator to run for you. To do so, follow the instructions in the [Deploy the Grafana Agent Operator resources]({{< relref "./deploy-agent-operator-resources" >}}) topic.
diff --git a/docs/sources/operator/helm-getting-started.md b/docs/sources/operator/helm-getting-started.md
deleted file mode 100644
index bb63f01190..0000000000
--- a/docs/sources/operator/helm-getting-started.md
+++ /dev/null
@@ -1,71 +0,0 @@
----
-aliases:
-- /docs/grafana-cloud/agent/operator/helm-getting-started/
-- /docs/grafana-cloud/monitor-infrastructure/agent/operator/helm-getting-started/
-- /docs/grafana-cloud/monitor-infrastructure/integrations/agent/operator/helm-getting-started/
-- /docs/grafana-cloud/send-data/agent/operator/helm-getting-started/
-canonical: https://grafana.com/docs/agent/latest/operator/helm-getting-started/
-description: Learn how to install the Operator with Helm charts
-title: Install the Operator with Helm
-weight: 100
----
-
-# Install the Operator with Helm
-
-In this guide, you'll learn how to deploy [Grafana Agent Operator]({{< relref "./_index.md" >}}) into your Kubernetes cluster using the [grafana-agent-operator Helm chart](https://github.com/grafana/helm-charts/tree/main/charts/agent-operator). To learn how to deploy Agent Operator without using Helm, see [Install Grafana Agent Operator]({{< relref "./getting-started.md" >}}).
-
-> **Note**: If you are shipping your data to Grafana Cloud, use [Kubernetes Monitoring](/docs/grafana-cloud/kubernetes-monitoring/) to set up Agent Operator. Kubernetes Monitoring provides a simplified approach and preconfigured dashboards and alerts.
-
-## Before you begin
-
-To deploy Agent Operator with Helm, make sure that you have the following:
-
-- A Kubernetes cluster
-- The [`kubectl`](https://kubernetes.io/docs/tasks/tools/#kubectl) command-line client installed and configured on your machine
-- The [`helm`](https://helm.sh/docs/intro/install/) command-line client installed and configured on your machine
-
-> **Note:** Agent Operator is currently in beta and its custom resources are subject to change.
-
-## Install the Agent Operator Helm Chart
-
-In this section, you'll install the [grafana-agent-operator Helm chart](https://github.com/grafana/helm-charts/tree/main/charts/agent-operator) into your Kubernetes cluster. This installs the latest version of Agent Operator and its [Custom Resource Definitions](https://github.com/grafana/agent/tree/main/operations/agent-static-operator/crds) (CRDs). The chart configures Operator to maintain a Service that lets you scrape kubelets using a `ServiceMonitor`.
-
-To install the Agent Operator Helm chart:
-
-1. Add and update the `grafana` Helm chart repo:
-
-   ```bash
-   helm repo add grafana https://grafana.github.io/helm-charts
-   helm repo update
-   ```
-
-1. Install the chart, replacing `my-release` with your release name:
-
-   ```bash
-   helm install my-release grafana/grafana-agent-operator
-   ```
-
-   If you want to modify the default parameters, you can create a `values.yaml` file and pass it to `helm install`:
-
-   ```bash
-   helm install my-release grafana/grafana-agent-operator -f values.yaml
-   ```
-
-   If you want to deploy Agent Operator into a namespace other than `default`, use the `-n` flag:
-
-   ```bash
-   helm install my-release grafana/grafana-agent-operator -f values.yaml -n my-namespace
-   ```
-
-   You can find a list of configurable template parameters in the [Helm chart repository](https://github.com/grafana/helm-charts/blob/main/charts/agent-operator/values.yaml).
-
-1. Once you've successfully deployed the Helm release, confirm that Agent Operator is up and running:
-
-   ```bash
-   kubectl get pod
-   kubectl get svc
-   ```
-
-   You should see an Agent Operator Pod in `RUNNING` state, and a `kubelet` service. Depending on your setup, this could take a moment.
-
-## Deploy the Grafana Agent Operator resources
-
-Agent Operator is now up and running. Next, you need to install a Grafana Agent for Agent Operator to run for you. To do so, follow the instructions in the [Deploy the Grafana Agent Operator resources]({{< relref "./deploy-agent-operator-resources.md" >}}) topic. To learn more about the custom resources Agent Operator provides and their hierarchy, see [Grafana Agent Operator architecture]({{< relref "./architecture" >}}).
diff --git a/docs/sources/operator/hierarchy.dot b/docs/sources/operator/hierarchy.dot
deleted file mode 100644
index d17522aa17..0000000000
--- a/docs/sources/operator/hierarchy.dot
+++ /dev/null
@@ -1,71 +0,0 @@
-digraph G {
-  fontname="Courier New"
-  edge [fontname="Courier New"]
-  rankdir="TB"
-  edge [fontsize=10]
-
-  "GrafanaAgent" [
-    style = "filled, bold"
-    penwidth = 2
-    fillcolor = "white"
-    fontname = "Courier New"
-    shape = "Mrecord"
-    label =<<table border="0" cellborder="0">
-      <tr><td>GrafanaAgent</td></tr>
-      <tr><td>app.kubernetes.io/name: grafana-agent-operator</td></tr>
-    </table>>
-  ];
-  "MetricsInstance" [
-    style = "filled, bold"
-    penwidth = 2
-    fillcolor = "white"
-    fontname = "Courier New"
-    shape = "Mrecord"
-    label =<<table border="0" cellborder="0">
-      <tr><td>MetricsInstance</td></tr>
-      <tr><td>helm.sh/chart: loki-3.2.2</td></tr>
-      <tr><td>instance: primary</td></tr>
-    </table>>
-  ];
-  "LogsInstance" [
-    style = "filled, bold"
-    penwidth = 2
-    fillcolor = "white"
-    fontname = "Courier New"
-    shape = "Mrecord"
-    label =<<table border="0" cellborder="0">
-      <tr><td>LogsInstance</td></tr>
-      <tr><td>helm.sh/chart: loki-3.2.2</td></tr>
-      <tr><td>instance: primary</td></tr>
-    </table>>
-  ];
-  "ServiceMonitor" [
-    style = "filled, bold"
-    penwidth = 2
-    fillcolor = "white"
-    fontname = "Courier New"
-    shape = "Mrecord"
-    label =<<table border="0" cellborder="0">
-      <tr><td>ServiceMonitor</td></tr>
-      <tr><td>helm.sh/chart: loki-3.2.2</td></tr>
-      <tr><td>instance: primary</td></tr>
-    </table>>
-  ];
-  "Probe" [
-    style = "filled, bold"
-    penwidth = 2
-    fillcolor = "white"
-    fontname = "Courier New"
-    shape = "Mrecord"
-    label =<<table border="0" cellborder="0">
-      <tr><td>Probe</td></tr>
-      <tr><td>helm.sh/chart: loki-3.2.2</td></tr>
-      <tr><td>instance: primary</td></tr>
-    </table>>
-  ];
-  "PodLogs" [
-    style = "filled, bold"
-    penwidth = 2
-    fillcolor = "white"
-    fontname = "Courier New"
-    shape = "Mrecord"
-    label =<<table border="0" cellborder="0">
-      <tr><td>PodLogs</td></tr>
-      <tr><td>helm.sh/chart: loki-3.2.2</td></tr>
-      <tr><td>instance: primary</td></tr>
-    </table>>
-  ];
-
-  GrafanaAgent -> MetricsInstance [ label="matchLabels:\l instance: primary" ];
-  MetricsInstance -> Probe [ label="matchLabels:\l instance: primary" ];
-  MetricsInstance -> ServiceMonitor [ label="matchLabels:\l instance: primary" ];
-
-  GrafanaAgent -> LogsInstance [ label="matchLabels:\l instance: primary" ];
-  LogsInstance -> PodLogs [ label="matchLabels:\l instance: primary" ];
-
-}
diff --git a/docs/sources/operator/operator-integrations.md b/docs/sources/operator/operator-integrations.md
deleted file mode 100644
index fc49836f81..0000000000
--- a/docs/sources/operator/operator-integrations.md
+++ /dev/null
@@ -1,107 +0,0 @@
----
-aliases:
-- /docs/grafana-cloud/agent/operator/operator-integrations/
-- /docs/grafana-cloud/monitor-infrastructure/agent/operator/operator-integrations/
-- /docs/grafana-cloud/monitor-infrastructure/integrations/agent/operator/operator-integrations/
-- /docs/grafana-cloud/send-data/agent/operator/operator-integrations/
-canonical: https://grafana.com/docs/agent/latest/operator/operator-integrations/
-description: Learn how to set up integrations
-title: Set up integrations
-weight: 350
----
-
-# Set up integrations
-
-This topic provides examples of setting up Grafana Agent Operator integrations, including [node_exporter](#set-up-an-agent-operator-node_exporter-integration) and [mysqld_exporter](#set-up-an-agent-operator-mysqld_exporter-integration).
-
-## Before you begin
-
-Before you begin, make sure that you have deployed the Grafana Agent Operator CRDs and installed Agent Operator into your cluster. See [Install Grafana Agent Operator with Helm]({{< relref "./helm-getting-started.md" >}}) or [Install Grafana Agent Operator]({{< relref "./getting-started.md" >}}) for instructions.
-
-Also, make sure that you [deploy the GrafanaAgent resource]({{< relref "./deploy-agent-operator-resources.md" >}}) and that the `yaml` you use has the `integrations` definition under `spec`.
-
-**Important:** The field `name` under the `spec` section of the manifest must contain the name of the integration to be installed, according to the [list of supported integrations]({{< relref "../static/configuration/integrations/integrations-next/_index.md#config-changes" >}}).
-
-**Important:** The value of the `metrics_instance` field needs to be in the format `<namespace>/<name>`, with namespace and name matching the values defined in the `metadata` section of the `MetricsInstance` resource, as explained in [deploy a MetricsInstance resource]({{< relref "./deploy-agent-operator-resources.md#deploy-a-metricsinstance-resource" >}}).
-
-## Set up an Agent Operator node_exporter integration
-
-The Agent Operator node_exporter integration lets you monitor your hardware and OS metrics from Unix-based machines, including Linux machines.
-
-To set up a node_exporter integration:
-
-1.
Copy the following manifest to a file: - - ```yaml - apiVersion: monitoring.grafana.com/v1alpha1 - kind: Integration - metadata: - name: node-exporter - namespace: default - labels: - agent: grafana-agent-integrations - spec: - name: node_exporter - type: - allNodes: true - unique: true - config: - autoscrape: - enable: true - metrics_instance: default/primary - rootfs_path: /default/node_exporter/rootfs - sysfs_path: /default/node_exporter/sys - procfs_path: /default/node_exporter/proc - volumeMounts: - - mountPath: /default/node_exporter/proc - name: proc - - mountPath: /default/node_exporter/sys - name: sys - - mountPath: /default/node_exporter/rootfs - name: root - volumes: - - name: proc - hostPath: - path: /proc - - name: sys - hostPath: - path: /sys - - name: root - hostPath: - path: /root - ``` - -2. Customize the manifest as needed and roll it out to your cluster using `kubectl apply -f` followed by the filename. - - The manifest causes Agent Operator to create an instance of a grafana-agent-integrations-deploy resource that exports Node metrics. - -## Set up an Agent Operator mysqld_exporter integration - -The Agent Operator mysqld_exporter integration is an embedded version of mysqld_exporter that lets you collect metrics from MySQL servers. - -To set up a mysqld_exporter integration: - -1. Copy the following manifest to a file: - - ```yaml - apiVersion: monitoring.grafana.com/v1alpha1 - kind: Integration - metadata: - name: mysqld-exporter - namespace: default - labels: - agent: grafana-agent-integrations - spec: - name: mysql - type: - allNodes: true - unique: true - config: - autoscrape: - enable: true - metrics_instance: default/primary - data_source_name: root@(server-a:3306)/ - ``` - -2. Customize the manifest as needed and roll it out to your cluster using `kubectl apply -f` followed by the filename. - - The manifest causes Agent Operator to create an instance of a grafana-agent-integrations-deploy resource that exports MySQL metrics. diff --git a/docs/sources/operator/release-notes.md b/docs/sources/operator/release-notes.md deleted file mode 100644 index ec96084cd9..0000000000 --- a/docs/sources/operator/release-notes.md +++ /dev/null @@ -1,148 +0,0 @@ ---- -aliases: -- ./upgrade-guide/ -- /docs/grafana-cloud/agent/operator/release-notes/ -- /docs/grafana-cloud/monitor-infrastructure/agent/operator/release-notes/ -- /docs/grafana-cloud/monitor-infrastructure/integrations/agent/operator/release-notes/ -- /docs/grafana-cloud/send-data/agent/operator/release-notes/ -canonical: https://grafana.com/docs/agent/latest/operator/release-notes/ -description: Release notes for Grafana Agent Operator -menuTitle: Release notes -title: Release notes for Grafana Agent Operator -weight: 999 ---- - -# Release notes for Grafana Agent Operator - -The release notes provide information about deprecations and breaking changes in Grafana Agent static mode Kubernetes operator. - -For a complete list of changes to Grafana Agent, with links to pull requests and related issues when available, refer to the [Changelog](https://github.com/grafana/agent/blob/main/CHANGELOG.md). - -> **Note:** These release notes are specific to the Static mode Kubernetes Operator. 
-> Other release notes for the different Grafana Agent variants are contained on separate pages:
->
-> - [Static mode release notes][release-notes-static]
-> - [Flow mode release notes][release-notes-flow]
-
-{{% docs/reference %}}
-[release-notes-static]: "/docs/agent/ -> /docs/agent/<AGENT_VERSION>/static/release-notes"
-[release-notes-static]: "/docs/grafana-cloud/ -> /docs/grafana-cloud/send-data/agent/static/release-notes"
-
-[release-notes-flow]: "/docs/agent/ -> /docs/agent/<AGENT_VERSION>/flow/release-notes"
-[release-notes-flow]: "/docs/grafana-cloud/ -> /docs/agent/<AGENT_VERSION>/flow/release-notes"
-{{% /docs/reference %}}
-
-## v0.33
-
-### Symbolic links in Docker containers removed
-
-We've removed the deprecated symbolic links to `/bin/agent*` in Docker containers, as planned in v0.31. If you're setting a custom entrypoint, use the new binaries that are prefixed with `/bin/grafana*`.
-
-## v0.31
-
-### Breaking change: binary names are now prefixed with `grafana-`
-
-As first announced in v0.29, the `agent-operator` release binary name is now prefixed with `grafana-`:
-
-- `agent-operator` is now `grafana-agent-operator`.
-
-For the `grafana/agent-operator` Docker container, the entrypoint is now `/bin/grafana-agent-operator`. A symbolic link from `/bin/agent-operator` to the new binary has been added.
-
-Symbolic links will be removed in v0.33. Custom entrypoints must be updated to use the new binaries before v0.33, when the symbolic links get removed.
-
-## v0.29
-
-### Deprecation: binary names will be prefixed with `grafana-` in v0.31.0
-
-The `agent-operator` binary name has been deprecated and will be renamed to `grafana-agent-operator` in the v0.31.0 release.
-
-As part of this change, the Docker container for the v0.31.0 release will include symbolic links from the old binary names to the new binary names.
-
-There is no action to take at this time.
-
-## v0.24
-
-### Breaking change: Grafana Agent Operator supported Agent versions
-
-The v0.24.0 release of Grafana Agent Operator can no longer deploy versions of Grafana Agent prior to v0.24.0.
-
-## v0.19
-
-### Rename of Prometheus to Metrics (Breaking change)
-
-As a part of the deprecation of "Prometheus," all Operator CRDs and fields with "Prometheus" in the name have changed to "Metrics."
-
-This includes:
-
-- The `PrometheusInstance` CRD is now `MetricsInstance` (referenced by `metricsinstances` and not `metrics-instances` within ClusterRoles).
-- The `Prometheus` field of the `GrafanaAgent` resource is now `Metrics`.
-- `PrometheusExternalLabelName` is now `MetricsExternalLabelName`.
-
-This is a hard breaking change, and all fields must change accordingly for the operator to continue working.
-
-Note that old CRDs with the old hyphenated names must be deleted (`kubectl delete crds/{grafana-agents,prometheus-instances}`) for ClusterRoles to work correctly.
-
-To do a zero-downtime upgrade of the Operator when there is a breaking change, refer to the new `agentctl operator-detach` command: it iterates through all of your objects and removes any OwnerReferences to a CRD, allowing you to delete your Operator CRDs or CRs.
-
-### Rename of CRD paths (Breaking change)
-
-`prometheus-instances` and `grafana-agents` have been renamed to `metricsinstances` and `grafanaagents` respectively. This is to remain consistent with how Kubernetes names multi-word objects.
-
-As a result, you will need to update your ClusterRoles to change the path of resources.
-
-To do a zero-downtime upgrade of the Operator when there is a breaking change, refer to the new `agentctl operator-detach` command: it iterates through all of your objects and removes any OwnerReferences to a CRD, allowing you to delete your Operator CRDs or CRs.
-
-Example old ClusterRole:
-
-```yaml
-apiVersion: rbac.authorization.k8s.io/v1
-kind: ClusterRole
-metadata:
-  name: grafana-agent-operator
-rules:
-- apiGroups: [monitoring.grafana.com]
-  resources:
-  - grafana-agents
-  - prometheus-instances
-  verbs: [get, list, watch]
-```
-
-Example new ClusterRole:
-
-```yaml
-apiVersion: rbac.authorization.k8s.io/v1
-kind: ClusterRole
-metadata:
-  name: grafana-agent-operator
-rules:
-- apiGroups: [monitoring.grafana.com]
-  resources:
-  - grafanaagents
-  - metricsinstances
-  verbs: [get, list, watch]
-```
diff --git a/docs/sources/reference/_index.md b/docs/sources/reference/_index.md
new file mode 100644
index 0000000000..6845cd1220
--- /dev/null
+++ b/docs/sources/reference/_index.md
@@ -0,0 +1,15 @@
+---
+aliases:
+- ./reference/
+canonical: https://grafana.com/docs/alloy/latest/reference/
+description: The reference-level documentation for Grafana Alloy
+menuTitle: Reference
+title: Grafana Alloy Reference
+weight: 600
+---
+
+# {{% param "PRODUCT_NAME" %}} Reference
+
+This section provides reference-level documentation for the various parts of {{< param "PRODUCT_NAME" >}}:
+
+{{< section >}}
diff --git a/docs/sources/reference/cli/_index.md b/docs/sources/reference/cli/_index.md
new file mode 100644
index 0000000000..66e9c82b1d
--- /dev/null
+++ b/docs/sources/reference/cli/_index.md
@@ -0,0 +1,29 @@
+---
+aliases:
+- ./reference/cli/
+canonical: https://grafana.com/docs/alloy/latest/reference/cli/
+description: Learn about the Grafana Alloy command line interface
+menuTitle: Command-line interface
+title: The Grafana Alloy command-line interface
+weight: 100
+---
+
+# The {{% param "PRODUCT_ROOT_NAME" %}} command-line interface
+
+The `grafana-alloy` binary exposes a command-line interface with subcommands to perform various operations.
+
+The most common subcommand is [`run`][run], which accepts a configuration file and starts {{< param "PRODUCT_NAME" >}}.
+
+Available commands:
+
+* [`convert`][convert]: Convert a {{< param "PRODUCT_ROOT_NAME" >}} configuration file.
+* [`fmt`][fmt]: Format a {{< param "PRODUCT_NAME" >}} configuration file.
+* [`run`][run]: Start {{< param "PRODUCT_NAME" >}}, given a configuration file.
+* [`tools`][tools]: Read the WAL and provide statistical information.
+* `completion`: Generate shell completion for the `grafana-alloy` CLI.
+* `help`: Print help for supported commands.
+
+[run]: ./run/
+[fmt]: ./fmt/
+[convert]: ./convert/
+[tools]: ./tools/
diff --git a/docs/sources/reference/cli/convert.md b/docs/sources/reference/cli/convert.md
new file mode 100644
index 0000000000..1a8ccfc7b2
--- /dev/null
+++ b/docs/sources/reference/cli/convert.md
@@ -0,0 +1,107 @@
+---
+aliases:
+- ./reference/cli/convert/
+canonical: https://grafana.com/docs/alloy/latest/reference/cli/convert/
+description: Learn about the convert command
+labels:
+  stage: beta
+menuTitle: convert
+title: The convert command
+weight: 100
+---
+
+# The convert command
+
+The `convert` command converts a supported configuration format to {{< param "PRODUCT_NAME" >}} River format.
+
+## Usage
+
+Usage:
+
+* `AGENT_MODE=flow grafana-agent convert [<FLAG> ...] <FILE_NAME>`
+* `grafana-agent-flow convert [<FLAG> ...] <FILE_NAME>`
+
+  Replace the following:
+
+  * _`<FLAG>`_: One or more flags that define the input and output of the command.
+  * _`<FILE_NAME>`_: The {{< param "PRODUCT_ROOT_NAME" >}} configuration file.
+
+If the `FILE_NAME` argument isn't provided or if the `FILE_NAME` argument is equal to `-`, `convert` converts the contents of standard input.
+Otherwise, `convert` reads and converts the file from disk specified by the argument.
+
+There are several different flags available for the `convert` command. You can use the `--output` flag to write the contents of the converted configuration to a specified path.
+You can use the `--report` flag to generate a diagnostic report.
+The `--bypass-errors` flag allows you to bypass any [errors][] generated during the file conversion.
+
+The command fails if the source configuration is syntactically incorrect or can't be converted to {{< param "PRODUCT_NAME" >}} River format.
+
+The following flags are supported:
+
+* `--output`, `-o`: The filepath and filename where the output is written.
+* `--report`, `-r`: The filepath and filename where the report is written.
+* `--source-format`, `-f`: Required. The format of the source file. Supported formats: [prometheus][], [promtail][], [static][].
+* `--bypass-errors`, `-b`: Enable bypassing errors when converting.
+* `--extra-args`, `-e`: Extra arguments from the original format used by the converter.
+
+### Defaults
+
+{{< param "PRODUCT_NAME" >}} defaults are managed as follows:
+
+* If a provided source configuration value matches a {{< param "PRODUCT_NAME" >}} default value, the property is left off the output.
+* If a non-provided source configuration value default matches a {{< param "PRODUCT_NAME" >}} default value, the property is left off the output.
+* If a non-provided source configuration value default doesn't match a {{< param "PRODUCT_NAME" >}} default value, the default value is included in the output.
+
+### Errors
+
+Errors are defined as non-critical issues identified during the conversion where an output can still be generated.
+These can be bypassed using the `--bypass-errors` flag.
+
+### Prometheus
+
+Using `--source-format=prometheus` converts the source configuration from [Prometheus v2.45][] to a {{< param "PRODUCT_NAME" >}} configuration.
+
+This includes Prometheus features such as [scrape_config][], [relabel_config][], [metric_relabel_configs][], [remote_write][], and many supported `*_sd_configs`.
+Unsupported features in a source configuration result in [errors][].
+
+Refer to [Migrate from Prometheus to {{< param "PRODUCT_NAME" >}}][migrate prometheus] for a detailed migration guide.
+
+### Promtail
+
+Using `--source-format=promtail` converts the source configuration from [Promtail v2.8.x][] to a {{< param "PRODUCT_NAME" >}} configuration.
+
+Nearly all [Promtail features][] are supported and can be converted to {{< param "PRODUCT_NAME" >}} configuration.
+
+If you have unsupported features in a source configuration, you will receive [errors][] when you convert to a {{< param "PRODUCT_NAME" >}} configuration.
+The converter will also raise warnings for configuration options that may require your attention.
+
+Refer to [Migrate from Promtail to {{< param "PRODUCT_NAME" >}}][migrate promtail] for a detailed migration guide.
+
+### Static
+
+Using `--source-format=static` converts the source configuration from a [Grafana Agent Static][] configuration to a {{< param "PRODUCT_NAME" >}} configuration.
+
+Include `--extra-args` for passing additional command line flags from the original format.
+For example, `--extra-args="-enable-features=integrations-next"` will convert a Grafana Agent Static [integrations-next][] configuration to a {{< param "PRODUCT_NAME" >}} configuration. +You can also expand environment variables with `--extra-args="-config.expand-env"`. +You can combine multiple command line flags with a space between each flag, for example `--extra-args="-enable-features=integrations-next -config.expand-env"`. + +If you have unsupported features in a Grafana Agent Static mode source configuration, you will receive [errors][] when you convert to a {{< param "PRODUCT_NAME" >}} configuration. +The converter will also raise warnings for configuration options that may require your attention. + +Refer to [Migrate from Grafana Agent Static to {{< param "PRODUCT_NAME" >}}][migrate static] for a detailed migration guide. + +[prometheus]: #prometheus +[promtail]: #promtail +[static]: #static +[errors]: #errors +[scrape_config]: https://prometheus.io/docs/prometheus/2.45/configuration/configuration/#scrape_config +[relabel_config]: https://prometheus.io/docs/prometheus/2.45/configuration/configuration/#relabel_config +[metric_relabel_configs]: https://prometheus.io/docs/prometheus/2.45/configuration/configuration/#metric_relabel_configs +[remote_write]: https://prometheus.io/docs/prometheus/2.45/configuration/configuration/#remote_write +[migrate prometheus]: ../../../tasks/migrate/from-prometheus/ +[Promtail v2.8.x]: https://grafana.com/docs/loki/v2.8.x/clients/promtail/ +[Prometheus v2.45]: https://prometheus.io/docs/prometheus/2.45/configuration/configuration/ +[Promtail features]: https://grafana.com/docs/loki/v2.8.x/clients/promtail/configuration/ +[migrate promtail]: ../../../tasks/migrate/from-promtail/ +[Grafana Agent Static]: https://grafana.com/docs/agent/latest/static/ +[integrations-next]: https://grafana.com/docs/agent/latest/static/configuration/integrations/integrations-next/ +[migrate static]: ../../../tasks/migrate/from-static/ diff --git a/docs/sources/reference/cli/fmt.md b/docs/sources/reference/cli/fmt.md new file mode 100644 index 0000000000..2163deb38c --- /dev/null +++ b/docs/sources/reference/cli/fmt.md @@ -0,0 +1,37 @@ +--- +aliases: +- ./reference/cli/fmt/ +canonical: https://grafana.com/docs/alloy/latest/reference/cli/fmt/ +description: Learn about the fmt command +menuTitle: fmt +title: The fmt command +weight: 200 +--- + +# The fmt command + +The `fmt` command formats a given {{< param "PRODUCT_NAME" >}} configuration file. + +## Usage + +Usage: + +* `AGENT_MODE=flow grafana-agent fmt [FLAG ...] FILE_NAME` +* `grafana-agent-flow fmt [FLAG ...] FILE_NAME` + + Replace the following: + + * `FLAG`: One or more flags that define the input and output of the command. + * `FILE_NAME`: The {{< param "PRODUCT_NAME" >}} configuration file. + +If the `FILE_NAME` argument isn't provided or if the `FILE_NAME` argument is equal to `-`, `fmt` formats the contents of standard input. +Otherwise, `fmt` reads and formats the file from disk specified by the argument. + +The `--write` flag can be specified to replace the contents of the original file on disk with the formatted results. +`--write` can only be provided when `fmt` isn't reading from standard input. + +The command fails if the file being formatted has syntactically incorrect River configuration, but doesn't validate whether {{< param "PRODUCT_NAME" >}} components are configured properly. 
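+As a quick sketch of the two input modes described above (the `config.river` file name is illustrative):
+
+```bash
+# Format a file in place on disk.
+grafana-agent-flow fmt --write config.river
+
+# Format standard input and print the result to standard output.
+cat config.river | grafana-agent-flow fmt -
+```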
+
+The following flags are supported:
+
+* `--write`, `-w`: Write the formatted file back to disk when not reading from standard input.
diff --git a/docs/sources/flow/reference/cli/run.md b/docs/sources/reference/cli/run.md
similarity index 56%
rename from docs/sources/flow/reference/cli/run.md
rename to docs/sources/reference/cli/run.md
index 4da0df47a4..9cb201d2b2 100644
--- a/docs/sources/flow/reference/cli/run.md
+++ b/docs/sources/reference/cli/run.md
@@ -1,10 +1,7 @@
 ---
 aliases:
-- /docs/grafana-cloud/agent/flow/reference/cli/run/
-- /docs/grafana-cloud/monitor-infrastructure/agent/flow/reference/cli/run/
-- /docs/grafana-cloud/monitor-infrastructure/integrations/agent/flow/reference/cli/run/
-- /docs/grafana-cloud/send-data/agent/flow/reference/cli/run/
-canonical: https://grafana.com/docs/agent/latest/flow/reference/cli/run/
+- ./reference/cli/run/
+canonical: https://grafana.com/docs/alloy/latest/reference/cli/run/
 description: Learn about the run command
 menuTitle: run
 title: The run command
@@ -27,27 +24,21 @@ Usage:
 * `FLAG`: One or more flags that define the input and output of the command.
 * `PATH_NAME`: Required. The {{< param "PRODUCT_NAME" >}} configuration file/directory path.
 
-If the `PATH_NAME` argument is not provided, or if the configuration path can't be loaded or
-contains errors during the initial load, the `run` command will immediately exit and show an error message.
+If the `PATH_NAME` argument is not provided, or if the configuration path can't be loaded or contains errors during the initial load, the `run` command will immediately exit and show an error message.
 
-If you give the `PATH_NAME` argument a directory path, {{< param "PRODUCT_NAME" >}} will find `*.river` files
-(ignoring nested directories) and load them as a single configuration source. However, component names must
-be **unique** across all River files, and configuration blocks must not be repeated.
+If you give the `PATH_NAME` argument a directory path, {{< param "PRODUCT_NAME" >}} will find `*.river` files (ignoring nested directories) and load them as a single configuration source.
+However, component names must be **unique** across all River files, and configuration blocks must not be repeated.
 
-{{< param "PRODUCT_NAME" >}} will continue to run if subsequent reloads of the configuration
-file fail, potentially marking components as unhealthy depending on the nature
-of the failure. When this happens, {{< param "PRODUCT_NAME" >}} will continue functioning
-in the last valid state.
+{{< param "PRODUCT_NAME" >}} will continue to run if subsequent reloads of the configuration file fail, potentially marking components as unhealthy depending on the nature of the failure.
+When this happens, {{< param "PRODUCT_NAME" >}} will continue functioning in the last valid state.
 
-`run` launches an HTTP server that exposes metrics about itself and its
-components. The HTTP server is also exposes a UI at `/` for debugging
-running components.
+`run` launches an HTTP server that exposes metrics about itself and its components.
+The HTTP server also exposes a UI at `/` for debugging running components.
 
 The following flags are supported:
 
-* `--server.http.enable-pprof`: Enable /debug/pprof profiling endpoints. (default `true`)
-* `--server.http.memory-addr`: Address to listen for [in-memory HTTP traffic][] on
-  (default `agent.internal:12345`).
+* `--server.http.enable-pprof`: Enable `/debug/pprof` profiling endpoints (default `true`).
+* `--server.http.memory-addr`: Address to listen for [in-memory HTTP traffic][] on (default `agent.internal:12345`). * `--server.http.listen-addr`: Address to listen for HTTP traffic on (default `127.0.0.1:12345`). * `--server.http.ui-path-prefix`: Base path where the UI is exposed (default `/`). * `--storage.path`: Base directory where components can store data (default `data-agent/`). @@ -65,10 +56,6 @@ The following flags are supported: * `--config.bypass-conversion-errors`: Enable bypassing errors when converting (default `false`). * `--config.extra-args`: Extra arguments from the original format used by the converter. -[in-memory HTTP traffic]: {{< relref "../../concepts/component_controller.md#in-memory-traffic" >}} -[data collection]: {{< relref "../../../data-collection" >}} -[components]: {{< relref "../../concepts/components.md" >}} - ## Update the configuration file The configuration file can be reloaded from disk by either: @@ -76,74 +63,46 @@ The configuration file can be reloaded from disk by either: * Sending an HTTP POST request to the `/-/reload` endpoint. * Sending a `SIGHUP` signal to the {{< param "PRODUCT_NAME" >}} process. -When this happens, the [component controller][] synchronizes the set of running -components with the latest set of components specified in the configuration file. -Components that are no longer defined in the configuration file after reloading are -shut down, and components that have been added to the configuration file since the -previous reload are created. - -All components managed by the component controller are reevaluated after -reloading. +When this happens, the [component controller][] synchronizes the set of running components with the latest set of components specified in the configuration file. +Components that are no longer defined in the configuration file after reloading are shut down, and components that have been added to the configuration file since the previous reload are created. -[component controller]: {{< relref "../../concepts/component_controller.md" >}} +All components managed by the component controller are reevaluated after reloading. ## Clustering (beta) -The `--cluster.enabled` command-line argument starts {{< param "PRODUCT_ROOT_NAME" >}} in -[clustering][] mode. The rest of the `--cluster.*` command-line flags can be -used to configure how nodes discover and connect to one another. +The `--cluster.enabled` command-line argument starts {{< param "PRODUCT_ROOT_NAME" >}} in [clustering][] mode. +The rest of the `--cluster.*` command-line flags can be used to configure how nodes discover and connect to one another. -Each cluster member’s name must be unique within the cluster. Nodes which try -to join with a conflicting name are rejected and will fall back to -bootstrapping a new cluster of their own. +Each cluster member’s name must be unique within the cluster. +Nodes which try to join with a conflicting name are rejected and will fall back to bootstrapping a new cluster of their own. -Peers communicate over HTTP/2 on the built-in HTTP server. Each node -must be configured to accept connections on `--server.http.listen-addr` and the -address defined or inferred in `--cluster.advertise-address`. +Peers communicate over HTTP/2 on the built-in HTTP server. +Each node must be configured to accept connections on `--server.http.listen-addr` and the address defined or inferred in `--cluster.advertise-address`. 
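+For example, a minimal sketch of a two-node cluster on a single host, using the flags described in this section; the addresses, ports, and `config.river` file name are illustrative:
+
+```bash
+# First node: serves HTTP on :12345 and advertises that address to peers.
+grafana-agent-flow run --cluster.enabled \
+  --server.http.listen-addr=127.0.0.1:12345 \
+  --cluster.advertise-address=127.0.0.1:12345 \
+  config.river
+
+# Second node: joins the cluster through the first node's HTTP address.
+grafana-agent-flow run --cluster.enabled \
+  --server.http.listen-addr=127.0.0.1:12346 \
+  --cluster.advertise-address=127.0.0.1:12346 \
+  --cluster.join-addresses=127.0.0.1:12345 \
+  config.river
+```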
-If the `--cluster.advertise-address` flag isn't explicitly set, {{< param "PRODUCT_NAME" >}} -tries to infer a suitable one from `--cluster.advertise-interfaces`. -If `--cluster.advertise-interfaces` isn't explicitly set, {{< param "PRODUCT_NAME" >}} will -infer one from the `eth0` and `en0` local network interfaces. +If the `--cluster.advertise-address` flag isn't explicitly set, {{< param "PRODUCT_NAME" >}} tries to infer a suitable one from `--cluster.advertise-interfaces`. +If `--cluster.advertise-interfaces` isn't explicitly set, {{< param "PRODUCT_NAME" >}} will infer one from the `eth0` and `en0` local network interfaces. {{< param "PRODUCT_NAME" >}} will fail to start if it can't determine the advertised address. -Since Windows doesn't use the interface names `eth0` or `en0`, Windows users must explicitly pass -at least one valid network interface for `--cluster.advertise-interfaces` or a value for `--cluster.advertise-address`. - -The comma-separated list of addresses provided in `--cluster.join-addresses` -can either be IP addresses with an optional port, or DNS records to lookup. -The ports on the list of addresses default to the port used for the HTTP -listener if not explicitly provided. We recommend that you -align the port numbers on as many nodes as possible to simplify the deployment -process. - -The `--cluster.discover-peers` command-line flag expects a list of tuples in -the form of `provider=XXX key=val key=val ...`. Clustering uses the -[go-discover] package to discover peers and fetch their IP addresses, based -on the chosen provider and the filtering key-values it supports. Clustering -supports the default set of providers available in go-discover and registers -the `k8s` provider on top. - -If either the key or the value in a tuple pair contains a space, a backslash, or -double quotes, then it must be quoted with double quotes. Within this quoted -string, the backslash can be used to escape double quotes or the backslash -itself. - -The `--cluster.rejoin-interval` flag defines how often each node should -rediscover peers based on the contents of the `--cluster.join-addresses` and -`--cluster.discover-peers` flags and try to rejoin them. This operation -is useful for addressing split-brain issues if the initial bootstrap is -unsuccessful and for making clustering easier to manage in dynamic -environments. To disable this behavior, set the `--cluster.rejoin-interval` -flag to `"0s"`. - -Discovering peers using the `--cluster.join-addresses` and -`--cluster.discover-peers` flags only happens on startup; after that, cluster -nodes depend on gossiping messages with each other to converge on the cluster's -state. - -The first node that is used to bootstrap a new cluster (also known as -the "seed node") can either omit the flags that specify peers to join or can -try to connect to itself. +Since Windows doesn't use the interface names `eth0` or `en0`, Windows users must explicitly pass at least one valid network interface for `--cluster.advertise-interfaces` or a value for `--cluster.advertise-address`. + +The comma-separated list of addresses provided in `--cluster.join-addresses` can either be IP addresses with an optional port, or DNS records to lookup. +The ports on the list of addresses default to the port used for the HTTP listener if not explicitly provided. +We recommend that you align the port numbers on as many nodes as possible to simplify the deployment process. 
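+As a sketch of the accepted address forms (the hosts are illustrative): an IP with an explicit port, an IP that falls back to the HTTP listener port, and a DNS record to look up:
+
+```bash
+grafana-agent-flow run --cluster.enabled \
+  --cluster.join-addresses=10.0.0.1:12345,10.0.0.2,agents.cluster.local \
+  config.river
+```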
+ +The `--cluster.discover-peers` command-line flag expects a list of tuples in the form of `provider=XXX key=val key=val ...`. +Clustering uses the [go-discover] package to discover peers and fetch their IP addresses, based on the chosen provider and the filtering key-values it supports. +Clustering supports the default set of providers available in go-discover and registers the `k8s` provider on top. + +If either the key or the value in a tuple pair contains a space, a backslash, or double quotes, then it must be quoted with double quotes. +Within this quoted string, the backslash can be used to escape double quotes or the backslash itself. + +The `--cluster.rejoin-interval` flag defines how often each node should rediscover peers based on the contents of the `--cluster.join-addresses` and `--cluster.discover-peers` flags and try to rejoin them. +This operation is useful for addressing split-brain issues if the initial bootstrap is unsuccessful and for making clustering easier to manage in dynamic environments. +To disable this behavior, set the `--cluster.rejoin-interval` flag to `"0s"`. + +Discovering peers using the `--cluster.join-addresses` and `--cluster.discover-peers` flags only happens on startup. +After that, cluster nodes depend on gossiping messages with each other to converge on the cluster's state. + +The first node that is used to bootstrap a new cluster (also known as the "seed node") can either omit the flags that specify peers to join or can try to connect to itself. To join or rejoin a cluster, {{< param "PRODUCT_NAME" >}} will try to connect to a certain number of peers limited by the `--cluster.max-join-peers` flag. This flag can be useful for clusters of significant sizes because connecting to a high number of peers can be an expensive operation. @@ -160,26 +119,18 @@ Attempting to join a cluster with a wrong `--cluster.name` will result in a "fai Clustered {{< param "PRODUCT_ROOT_NAME" >}}s are in one of three states: * **Viewer**: {{< param "PRODUCT_NAME" >}} has a read-only view of the cluster and isn't participating in workload distribution. - * **Participant**: {{< param "PRODUCT_NAME" >}} is participating in workload distribution for components that have clustering enabled. - * **Terminating**: {{< param "PRODUCT_NAME" >}} is shutting down and will no longer assign new work to itself. -Each {{< param "PRODUCT_ROOT_NAME" >}} initially joins the cluster in the viewer state and then transitions to -the participant state after the process startup completes. Each {{< param "PRODUCT_ROOT_NAME" >}} then -transitions to the terminating state when shutting down. +Each {{< param "PRODUCT_ROOT_NAME" >}} initially joins the cluster in the viewer state and then transitions to the participant state after the process startup completes. +Each {{< param "PRODUCT_ROOT_NAME" >}} then transitions to the terminating state when shutting down. The current state of a clustered {{< param "PRODUCT_ROOT_NAME" >}} is shown on the clustering page in the [UI][]. -[UI]: {{< relref "../../tasks/debug.md#clustering-page" >}} - ## Configuration conversion (beta) -When you use the `--config.format` command-line argument with a value -other than `flow`, {{< param "PRODUCT_ROOT_NAME" >}} converts the configuration file from -the source format to River and immediately starts running with the new -configuration. This conversion uses the converter API described in the -[grafana-agent-flow convert][] docs. 
+When you use the `--config.format` command-line argument with a value other than `flow`, {{< param "PRODUCT_ROOT_NAME" >}} converts the configuration file from the source format to River and immediately starts running with the new configuration.
+This conversion uses the converter API described in the [grafana-alloy convert][] docs.
 
 If you include the `--config.bypass-conversion-errors` command-line argument,
 {{< param "PRODUCT_NAME" >}} will ignore any errors from the converter. Use this argument
@@ -189,6 +140,11 @@ original configuration.
 
 Include `--config.extra-args` to pass additional command line flags from the original
 format to the converter. Refer to [grafana-alloy convert][] for more details on how `extra-args` work.
 
-[grafana-agent-flow convert]: {{< relref "./convert.md" >}}
-[clustering]: {{< relref "../../concepts/clustering.md" >}}
+[grafana-alloy convert]: ../convert/
+[clustering]: ../../../concepts/clustering/
 [go-discover]: https://github.com/hashicorp/go-discover
+[in-memory HTTP traffic]: ../../../concepts/component_controller/#in-memory-traffic
+[data collection]: ../../../data-collection/
+[components]: ../../../concepts/components/
+[component controller]: ../../../concepts/component_controller/
+[UI]: ../../../tasks/debug/#clustering-page
diff --git a/docs/sources/flow/reference/cli/tools.md b/docs/sources/reference/cli/tools.md
similarity index 69%
rename from docs/sources/flow/reference/cli/tools.md
rename to docs/sources/reference/cli/tools.md
index b9fb73a761..2eb29895bb 100644
--- a/docs/sources/flow/reference/cli/tools.md
+++ b/docs/sources/reference/cli/tools.md
@@ -1,10 +1,7 @@
 ---
 aliases:
-- /docs/grafana-cloud/agent/flow/reference/cli/tools/
-- /docs/grafana-cloud/monitor-infrastructure/agent/flow/reference/cli/tools/
-- /docs/grafana-cloud/monitor-infrastructure/integrations/agent/flow/reference/cli/tools/
-- /docs/grafana-cloud/send-data/agent/flow/reference/cli/tools/
-canonical: https://grafana.com/docs/agent/latest/flow/reference/cli/tools/
+- ./reference/cli/tools/
+canonical: https://grafana.com/docs/alloy/latest/reference/cli/tools/
 description: Learn about the tools command
 menuTitle: tools
 title: The tools command
@@ -13,11 +10,10 @@ weight: 400
 
 # The tools command
 
-The `tools` command contains command line tooling grouped by Flow component.
+The `tools` command contains command line tooling grouped by {{< param "PRODUCT_NAME" >}} component.
 
 {{< admonition type="caution" >}}
-Utilities in this command have no backward compatibility
-guarantees and may change or be removed between releases.
+Utilities in this command have no backward compatibility guarantees and may change or be removed between releases.
 {{< /admonition >}}
 
 ## Subcommands
 
 ### prometheus.remote_write sample-stats
 
 Usage:
 
 * `AGENT_MODE=flow grafana-agent tools prometheus.remote_write sample-stats [FLAG ...] WAL_DIRECTORY`
 * `grafana-agent-flow tools prometheus.remote_write sample-stats [FLAG ...] WAL_DIRECTORY`
 
-The `sample-stats` command reads the Write-Ahead Log (WAL) specified by
-`WAL_DIRECTORY` and collects information on metric samples within it.
For each metric discovered, `sample-stats` emits: @@ -52,13 +47,9 @@ Usage: * `AGENT_MODE=flow grafana-agent tools prometheus.remote_write target-stats --job JOB --instance INSTANCE WAL_DIRECTORY` * `grafana-agent-flow tools prometheus.remote_write target-stats --job JOB --instance INSTANCE WAL_DIRECTORY` -The `target-stats` command reads the Write-Ahead Log (WAL) specified by -`WAL_DIRECTORY` and collects metric cardinality information for a specific -target. +The `target-stats` command reads the Write-Ahead Log (WAL) specified by `WAL_DIRECTORY` and collects metric cardinality information for a specific target. -For the target specified by the `--job` and `--instance` flags, unique metric -names for that target are printed along with the number of series with that -metric name. +For the target specified by the `--job` and `--instance` flags, unique metric names for that target are printed along with the number of series with that metric name. The following flags are supported: @@ -69,13 +60,12 @@ The `--job` and `--instance` labels are required. ### prometheus.remote_write wal-stats -Usage: +Usage: * `AGENT_MODE=flow grafana-agent tools prometheus.remote_write wal-stats WAL_DIRECTORY` * `grafana-agent-flow tools prometheus.remote_write wal-stats WAL_DIRECTORY` -The `wal-stats` command reads the Write-Ahead Log (WAL) specified by -`WAL_DIRECTORY` and collects general information about it. +The `wal-stats` command reads the Write-Ahead Log (WAL) specified by `WAL_DIRECTORY` and collects general information about it. The following information is reported: @@ -89,9 +79,7 @@ The following information is reported: * The oldest segment number in the WAL. * The newest segment number in the WAL. -Additionally, `wal-stats` reports per-target information, where a target is -defined as a unique combination of the `job` and `instance` label values. For -each target, `wal-stats` reports the number of series and the number of -metric samples associated with that target. +Additionally, `wal-stats` reports per-target information, where a target is defined as a unique combination of the `job` and `instance` label values. +For each target, `wal-stats` reports the number of series and the number of metric samples associated with that target. The `wal-stats` command does not support any flags. 
diff --git a/docs/sources/flow/reference/compatibility/_index.md b/docs/sources/reference/compatibility/_index.md similarity index 97% rename from docs/sources/flow/reference/compatibility/_index.md rename to docs/sources/reference/compatibility/_index.md index 61775bcf26..d3d3154f49 100644 --- a/docs/sources/flow/reference/compatibility/_index.md +++ b/docs/sources/reference/compatibility/_index.md @@ -1,11 +1,8 @@ --- aliases: -- /docs/grafana-cloud/agent/flow/reference/compatible-components/ -- /docs/grafana-cloud/monitor-infrastructure/agent/flow/reference/compatible-components/ -- /docs/grafana-cloud/monitor-infrastructure/integrations/agent/flow/reference/compatible-components/ -- /docs/grafana-cloud/send-data/agent/flow/reference/compatible-components/ -canonical: https://grafana.com/docs/agent/latest/flow/reference/compatibility/ -description: Learn about which components are compatible with each other in Grafana Agent Flow +- ./reference/compatibility/ +canonical: https://grafana.com/docs/alloy/latest/reference/compatibility/ +description: Learn about which components are compatible with each other in Grafana Alloy title: Compatible components weight: 400 --- diff --git a/docs/sources/reference/components/_index.md b/docs/sources/reference/components/_index.md new file mode 100644 index 0000000000..53ba25bff4 --- /dev/null +++ b/docs/sources/reference/components/_index.md @@ -0,0 +1,14 @@ +--- +canonical: https://grafana.com/docs/alloy/latest/reference/components/ +description: Learn about the components in Grafana Agent Flow +title: Components reference +weight: 300 +--- + +# Components reference + +This section contains reference documentation for all recognized [components][]. + +{{< section >}} + +[components]: ../../concepts/components/ diff --git a/docs/sources/flow/reference/components/discovery.azure.md b/docs/sources/reference/components/discovery.azure.md similarity index 67% rename from docs/sources/flow/reference/components/discovery.azure.md rename to docs/sources/reference/components/discovery.azure.md index 9970dc4fde..942932d59a 100644 --- a/docs/sources/flow/reference/components/discovery.azure.md +++ b/docs/sources/reference/components/discovery.azure.md @@ -1,10 +1,5 @@ --- -aliases: -- /docs/grafana-cloud/agent/flow/reference/components/discovery.azure/ -- /docs/grafana-cloud/monitor-infrastructure/agent/flow/reference/components/discovery.azure/ -- /docs/grafana-cloud/monitor-infrastructure/integrations/agent/flow/reference/components/discovery.azure/ -- /docs/grafana-cloud/send-data/agent/flow/reference/components/discovery.azure/ -canonical: https://grafana.com/docs/agent/latest/flow/reference/components/discovery.azure/ +canonical: https://grafana.com/docs/alloy/latest/reference/components/discovery.azure/ description: Learn about discovery.azure title: discovery.azure --- @@ -26,30 +21,30 @@ discovery.azure "LABEL" { The following arguments are supported: -Name | Type | Description | Default | Required ------------------------- | ---------- | ---------------------------------------------------------------------- | -------------------- | -------- -`environment` | `string` | Azure environment. | `"AzurePublicCloud"` | no -`port` | `number` | Port to be appended to the `__address__` label for each target. | `80` | no -`subscription_id` | `string` | Azure subscription ID. | | no -`refresh_interval` | `duration` | Interval at which to refresh the list of targets. | `5m` | no -`proxy_url` | `string` | HTTP proxy to send requests through. 
| | no -`no_proxy` | `string` | Comma-separated list of IP addresses, CIDR notations, and domain names to exclude from proxying. | | no -`proxy_from_environment` | `bool` | Use the proxy URL indicated by environment variables. | `false` | no -`proxy_connect_header` | `map(list(secret))` | Specifies headers to send to proxies during CONNECT requests. | | no -`follow_redirects` | `bool` | Whether redirects returned by the server should be followed. | `true` | no -`enable_http2` | `bool` | Whether HTTP2 is supported for requests. | `true` | no - -{{< docs/shared lookup="flow/reference/components/http-client-proxy-config-description.md" source="agent" version="" >}} +Name | Type | Description | Default | Required +-------------------------|---------------------|--------------------------------------------------------------------------------------------------|----------------------|--------- +`environment` | `string` | Azure environment. | `"AzurePublicCloud"` | no +`port` | `number` | Port to be appended to the `__address__` label for each target. | `80` | no +`subscription_id` | `string` | Azure subscription ID. | | no +`refresh_interval` | `duration` | Interval at which to refresh the list of targets. | `5m` | no +`proxy_url` | `string` | HTTP proxy to send requests through. | | no +`no_proxy` | `string` | Comma-separated list of IP addresses, CIDR notations, and domain names to exclude from proxying. | | no +`proxy_from_environment` | `bool` | Use the proxy URL indicated by environment variables. | `false` | no +`proxy_connect_header` | `map(list(secret))` | Specifies headers to send to proxies during CONNECT requests. | | no +`follow_redirects` | `bool` | Whether redirects returned by the server should be followed. | `true` | no +`enable_http2` | `bool` | Whether HTTP2 is supported for requests. | `true` | no + +{{< docs/shared lookup="reference/components/http-client-proxy-config-description.md" source="alloy" version="" >}} ## Blocks The following blocks are supported inside the definition of `discovery.azure`: -Hierarchy | Block | Description | Required ---------- | ----- | ----------- | -------- -oauth | [oauth][] | OAuth configuration for Azure API. | no -managed_identity | [managed_identity][] | Managed Identity configuration for Azure API. | no -tls_config | [tls_config][] | TLS configuration for requests to the Azure API. | no +Hierarchy | Block | Description | Required +-----------------|----------------------|--------------------------------------------------|--------- +oauth | [oauth][] | OAuth configuration for Azure API. | no +managed_identity | [managed_identity][] | Managed Identity configuration for Azure API. | no +tls_config | [tls_config][] | TLS configuration for requests to the Azure API. | no Exactly one of the `oauth` or `managed_identity` blocks must be specified. @@ -60,11 +55,11 @@ Exactly one of the `oauth` or `managed_identity` blocks must be specified. ### oauth block The `oauth` block configures OAuth authentication for the Azure API. -Name | Type | Description | Default | Required ----- | ---- | ----------- | ------- | -------- -`client_id` | `string` | OAuth client ID. | | yes -`client_secret` | `string` | OAuth client secret. | | yes -`tenant_id` | `string` | OAuth tenant ID. | | yes +Name | Type | Description | Default | Required +----------------|----------|----------------------|---------|--------- +`client_id` | `string` | OAuth client ID. | | yes +`client_secret` | `string` | OAuth client secret. | | yes +`tenant_id` | `string` | OAuth tenant ID. 
| | yes ### managed_identity block The `managed_identity` block configures Managed Identity authentication for the Azure API. @@ -75,7 +70,7 @@ Name | Type | Description | Default | Required ### tls_config block -{{< docs/shared lookup="flow/reference/components/tls-config-block.md" source="agent" version="" >}} +{{< docs/shared lookup="reference/components/tls-config-block.md" source="alloy" version="" >}} ## Exported fields diff --git a/docs/sources/flow/reference/components/discovery.consul.md b/docs/sources/reference/components/discovery.consul.md similarity index 51% rename from docs/sources/flow/reference/components/discovery.consul.md rename to docs/sources/reference/components/discovery.consul.md index cf96dba94b..564a789556 100644 --- a/docs/sources/flow/reference/components/discovery.consul.md +++ b/docs/sources/reference/components/discovery.consul.md @@ -1,10 +1,5 @@ --- -aliases: -- /docs/grafana-cloud/agent/flow/reference/components/discovery.consul/ -- /docs/grafana-cloud/monitor-infrastructure/agent/flow/reference/components/discovery.consul/ -- /docs/grafana-cloud/monitor-infrastructure/integrations/agent/flow/reference/components/discovery.consul/ -- /docs/grafana-cloud/send-data/agent/flow/reference/components/discovery.consul/ -canonical: https://grafana.com/docs/agent/latest/flow/reference/components/discovery.consul/ +canonical: https://grafana.com/docs/alloy/latest/reference/components/discovery.consul/ description: Learn about discovery.consul title: discovery.consul --- @@ -27,30 +22,30 @@ discovery.consul "LABEL" { The following arguments are supported: -Name | Type | Description | Default | Required ------------------------- | ------------------- | ------------------------------------------------------------- | ------- | -------- -`server` | `string` | Host and port of the Consul API. | `localhost:8500` | no -`token` | `secret` | Secret token used to access the Consul API. | | no -`datacenter` | `string` | Datacenter to query. If not provided, the default is used. | | no -`namespace` | `string` | Namespace to use (only supported in Consul Enterprise). | | no -`partition` | `string` | Admin partition to use (only supported in Consul Enterprise). | | no -`tag_separator` | `string` | The string by which Consul tags are joined into the tag label. | `,` | no -`scheme` | `string` | The scheme to use when talking to Consul. | `http` | no -`username` | `string` | The username to use (deprecated in favor of the basic_auth configuration). | | no -`password` | `secret` | The password to use (deprecated in favor of the basic_auth configuration). | | no -`allow_stale` | `bool` | Allow stale Consul results (see [official documentation][consistency documentation]). Will reduce load on Consul. | `true` | no -`services` | `list(string)` | A list of services for which targets are retrieved. If omitted, all services are scraped. | | no -`tags` | `list(string)` | An optional list of tags used to filter nodes for a given service. Services must contain all tags in the list. | | no -`node_meta` | `map(string)` | Node metadata key/value pairs to filter nodes for a given service. | | no -`refresh_interval` | `duration` | Frequency to refresh list of containers. | `"30s"` | no -`bearer_token_file` | `string` | File containing a bearer token to authenticate with. | | no -`bearer_token` | `secret` | Bearer token to authenticate with. | | no -`enable_http2` | `bool` | Whether HTTP2 is supported for requests. 
| `true` | no -`follow_redirects` | `bool` | Whether redirects returned by the server should be followed. | `true` | no -`proxy_url` | `string` | HTTP proxy to send requests through. | | no -`no_proxy` | `string` | Comma-separated list of IP addresses, CIDR notations, and domain names to exclude from proxying. | | no -`proxy_from_environment` | `bool` | Use the proxy URL indicated by environment variables. | `false` | no -`proxy_connect_header` | `map(list(secret))` | Specifies headers to send to proxies during CONNECT requests. | | no +Name | Type | Description | Default | Required +-------------------------|---------------------|-------------------------------------------------------------------------------------------------------------------|------------------|--------- +`server` | `string` | Host and port of the Consul API. | `localhost:8500` | no +`token` | `secret` | Secret token used to access the Consul API. | | no +`datacenter` | `string` | Datacenter to query. If not provided, the default is used. | | no +`namespace` | `string` | Namespace to use (only supported in Consul Enterprise). | | no +`partition` | `string` | Admin partition to use (only supported in Consul Enterprise). | | no +`tag_separator` | `string` | The string by which Consul tags are joined into the tag label. | `,` | no +`scheme` | `string` | The scheme to use when talking to Consul. | `http` | no +`username` | `string` | The username to use (deprecated in favor of the basic_auth configuration). | | no +`password` | `secret` | The password to use (deprecated in favor of the basic_auth configuration). | | no +`allow_stale` | `bool` | Allow stale Consul results (see [official documentation][consistency documentation]). Will reduce load on Consul. | `true` | no +`services` | `list(string)` | A list of services for which targets are retrieved. If omitted, all services are scraped. | | no +`tags` | `list(string)` | An optional list of tags used to filter nodes for a given service. Services must contain all tags in the list. | | no +`node_meta` | `map(string)` | Node metadata key/value pairs to filter nodes for a given service. | | no +`refresh_interval` | `duration` | Frequency to refresh list of containers. | `"30s"` | no +`bearer_token_file` | `string` | File containing a bearer token to authenticate with. | | no +`bearer_token` | `secret` | Bearer token to authenticate with. | | no +`enable_http2` | `bool` | Whether HTTP2 is supported for requests. | `true` | no +`follow_redirects` | `bool` | Whether redirects returned by the server should be followed. | `true` | no +`proxy_url` | `string` | HTTP proxy to send requests through. | | no +`no_proxy` | `string` | Comma-separated list of IP addresses, CIDR notations, and domain names to exclude from proxying. | | no +`proxy_from_environment` | `bool` | Use the proxy URL indicated by environment variables. | `false` | no +`proxy_connect_header` | `map(list(secret))` | Specifies headers to send to proxies during CONNECT requests. | | no At most, one of the following can be provided: - [`bearer_token` argument](#arguments). @@ -59,7 +54,7 @@ Name | Type | Description - [`authorization` block][authorization]. - [`oauth2` block][oauth2]. 
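+For example, a minimal sketch that picks one of the mutually exclusive options above; the server address and credentials are placeholders:
+
+```river
+discovery.consul "example" {
+  server = "localhost:8500"
+
+  // At most one of the authentication options above can be set;
+  // this sketch uses basic_auth.
+  basic_auth {
+    username = "consul-user"
+    password = "consul-password"
+  }
+}
+```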
-{{< docs/shared lookup="flow/reference/components/http-client-proxy-config-description.md" source="agent" version="" >}} +{{< docs/shared lookup="reference/components/http-client-proxy-config-description.md" source="alloy" version="" >}} [consistency documentation]: https://www.consul.io/api/features/consistency.html [arguments]: #arguments @@ -77,9 +72,8 @@ oauth2 | [oauth2][] | Configure OAuth2 for authenticating to oauth2 > tls_config | [tls_config][] | Configure TLS settings for connecting to the endpoint. | no tls_config | [tls_config][] | Configure TLS settings for connecting to the endpoint. | no -The `>` symbol indicates deeper levels of nesting. For example, -`oauth2 > tls_config` refers to a `tls_config` block defined inside -an `oauth2` block. +The `>` symbol indicates deeper levels of nesting. +For example, `oauth2 > tls_config` refers to a `tls_config` block defined inside an `oauth2` block. [basic_auth]: #basic_auth-block [authorization]: #authorization-block @@ -88,26 +82,26 @@ an `oauth2` block. ### basic_auth block -{{< docs/shared lookup="flow/reference/components/basic-auth-block.md" source="agent" version="" >}} +{{< docs/shared lookup="reference/components/basic-auth-block.md" source="alloy" version="" >}} ### authorization block -{{< docs/shared lookup="flow/reference/components/authorization-block.md" source="agent" version="" >}} +{{< docs/shared lookup="reference/components/authorization-block.md" source="alloy" version="" >}} ### oauth2 block -{{< docs/shared lookup="flow/reference/components/oauth2-block.md" source="agent" version="" >}} +{{< docs/shared lookup="reference/components/oauth2-block.md" source="alloy" version="" >}} ### tls_config block -{{< docs/shared lookup="flow/reference/components/tls-config-block.md" source="agent" version="" >}} +{{< docs/shared lookup="reference/components/tls-config-block.md" source="alloy" version="" >}} ## Exported fields The following fields are exported and can be referenced by other components: -Name | Type | Description ----- | ---- | ----------- +Name | Type | Description +----------|---------------------|----------------------------------------------------------- `targets` | `list(map(string))` | The set of targets discovered from the Consul catalog API. Each target includes the following labels: @@ -128,9 +122,8 @@ Each target includes the following labels: ## Component health -`discovery.consul` is only reported as unhealthy when given an invalid -configuration. In those cases, exported fields retain their last healthy -values. +`discovery.consul` is only reported as unhealthy when given an invalid configuration. +In those cases, exported fields retain their last healthy values. 
## Debug information diff --git a/docs/sources/flow/reference/components/discovery.consulagent.md b/docs/sources/reference/components/discovery.consulagent.md similarity index 94% rename from docs/sources/flow/reference/components/discovery.consulagent.md rename to docs/sources/reference/components/discovery.consulagent.md index 340d1f6b5d..9cc23de10b 100644 --- a/docs/sources/flow/reference/components/discovery.consulagent.md +++ b/docs/sources/reference/components/discovery.consulagent.md @@ -1,8 +1,5 @@ --- -aliases: -- /docs/grafana-cloud/monitor-infrastructure/agent/flow/reference/components/discovery.consulagent/ -- /docs/grafana-cloud/send-data/agent/flow/reference/components/discovery.consulagent/ -canonical: https://grafana.com/docs/agent/latest/flow/reference/components/discovery.consulagent/ +canonical: https://grafana.com/docs/alloy/latest/reference/components/discovery.consulagent/ description: Learn about discovery.consulagent title: discovery.consulagent --- @@ -53,7 +50,7 @@ The following blocks are supported inside the definition of ### tls_config block -{{< docs/shared lookup="flow/reference/components/tls-config-block.md" source="agent" version="" >}} +{{< docs/shared lookup="reference/components/tls-config-block.md" source="alloy" version="" >}} ## Exported fields diff --git a/docs/sources/flow/reference/components/discovery.digitalocean.md b/docs/sources/reference/components/discovery.digitalocean.md similarity index 76% rename from docs/sources/flow/reference/components/discovery.digitalocean.md rename to docs/sources/reference/components/discovery.digitalocean.md index faaa8e1ea8..c2bc5b9639 100644 --- a/docs/sources/flow/reference/components/discovery.digitalocean.md +++ b/docs/sources/reference/components/discovery.digitalocean.md @@ -1,10 +1,5 @@ --- -aliases: -- /docs/grafana-cloud/agent/flow/reference/components/discovery.digitalocean/ -- /docs/grafana-cloud/monitor-infrastructure/agent/flow/reference/components/discovery.digitalocean/ -- /docs/grafana-cloud/monitor-infrastructure/integrations/agent/flow/reference/components/discovery.digitalocean/ -- /docs/grafana-cloud/send-data/agent/flow/reference/components/discovery.digitalocean/ -canonical: https://grafana.com/docs/agent/latest/flow/reference/components/discovery.digitalocean/ +canonical: https://grafana.com/docs/alloy/latest/reference/components/discovery.digitalocean/ description: Learn about discovery.digitalocean title: discovery.digitalocean --- @@ -29,18 +24,18 @@ discovery.digitalocean "LABEL" { The following arguments are supported: -Name | Type | Description | Default | Required ------------------------- | ------------------- | ------------------------------------------------------------- | ------- | -------- -`port` | `number` | Port to be appended to the `__address__` label for each Droplet. | `80` | no -`refresh_interval` | `duration` | Frequency to refresh list of Droplets. | `"1m"` | no -`bearer_token` | `secret` | Bearer token to authenticate with. | | no -`bearer_token_file` | `string` | File containing a bearer token to authenticate with. | | no -`proxy_url` | `string` | HTTP proxy to send requests through. | | no -`no_proxy` | `string` | Comma-separated list of IP addresses, CIDR notations, and domain names to exclude from proxying. | | no -`proxy_from_environment` | `bool` | Use the proxy URL indicated by environment variables. | `false` | no -`proxy_connect_header` | `map(list(secret))` | Specifies headers to send to proxies during CONNECT requests. 
| | no
-`follow_redirects` | `bool` | Whether redirects returned by the server should be followed. | `true` | no
-`enable_http2` | `bool` | Whether HTTP2 is supported for requests. | `true` | no
+Name | Type | Description | Default | Required
+-------------------------|---------------------|--------------------------------------------------------------------------------------------------|---------|---------
+`port` | `number` | Port to be appended to the `__address__` label for each Droplet. | `80` | no
+`refresh_interval` | `duration` | Frequency to refresh list of Droplets. | `"1m"` | no
+`bearer_token` | `secret` | Bearer token to authenticate with. | | no
+`bearer_token_file` | `string` | File containing a bearer token to authenticate with. | | no
+`proxy_url` | `string` | HTTP proxy to send requests through. | | no
+`no_proxy` | `string` | Comma-separated list of IP addresses, CIDR notations, and domain names to exclude from proxying. | | no
+`proxy_from_environment` | `bool` | Use the proxy URL indicated by environment variables. | `false` | no
+`proxy_connect_header` | `map(list(secret))` | Specifies headers to send to proxies during CONNECT requests. | | no
+`follow_redirects` | `bool` | Whether redirects returned by the server should be followed. | `true` | no
+`enable_http2` | `bool` | Whether HTTP2 is supported for requests. | `true` | no

The DigitalOcean API uses bearer tokens for authentication. Refer to the [DigitalOcean API documentation](https://docs.digitalocean.com/reference/api/api-reference/#section/Authentication) for more information.
@@ -48,7 +43,7 @@ Exactly one of the [`bearer_token`](#arguments) and [`bearer_token_file`](#argum
[arguments]: #arguments

-{{< docs/shared lookup="flow/reference/components/http-client-proxy-config-description.md" source="agent" version="" >}}
+{{< docs/shared lookup="reference/components/http-client-proxy-config-description.md" source="alloy" version="" >}}
## Blocks

The `discovery.digitalocean` component does not support any blocks, and is configured fully through arguments.
@@ -59,7 +54,7 @@ The following fields are exported and can be referenced by other components:
Name | Type | Description
---------- | ------------------- | -----------
+----------|---------------------|---------------------------------------------------------
`targets` | `list(map(string))` | The set of targets discovered from the DigitalOcean API.
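To make the bearer-token rule above concrete, a minimal sketch might look like this; the port, interval, and environment variable name are illustrative assumptions:

```river
discovery.digitalocean "example" {
  port             = 8080
  refresh_interval = "5m"

  // Exactly one of bearer_token or bearer_token_file may be set;
  // the variable name here is hypothetical.
  bearer_token = env("DIGITALOCEAN_TOKEN")
}
```

The exported `targets` list then carries the labels listed below.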
Each target includes the following labels: diff --git a/docs/sources/flow/reference/components/discovery.dns.md b/docs/sources/reference/components/discovery.dns.md similarity index 70% rename from docs/sources/flow/reference/components/discovery.dns.md rename to docs/sources/reference/components/discovery.dns.md index a54890c240..73f1d8c8a8 100644 --- a/docs/sources/flow/reference/components/discovery.dns.md +++ b/docs/sources/reference/components/discovery.dns.md @@ -1,11 +1,5 @@ --- -aliases: -- /docs/agent/latest/flow/reference/components/discovery.dns/ -- /docs/grafana-cloud/agent/flow/reference/components/discovery.dns/ -- /docs/grafana-cloud/monitor-infrastructure/agent/flow/reference/components/discovery.dns/ -- /docs/grafana-cloud/monitor-infrastructure/integrations/agent/flow/reference/components/discovery.dns/ -- /docs/grafana-cloud/send-data/agent/flow/reference/components/discovery.dns/ -canonical: https://grafana.com/docs/agent/latest/flow/reference/components/discovery.dns/ +canonical: https://grafana.com/docs/alloy/latest/reference/components/discovery.dns/ description: Learn about discovery.dns title: discovery.dns --- @@ -26,12 +20,12 @@ discovery.dns "LABEL" { The following arguments are supported: -Name | Type | Description | Default | Required ----- | ---- | ----------- | ------- | -------- -`names` | `list(string)` | DNS names to look up. | | yes -`port` | `number` | Port to use for collecting metrics. Not used for SRV records. | `0` | no -`refresh_interval` | `duration` | How often to query DNS for updates. | `"30s"` | no -`type` | `string` | Type of DNS record to query. Must be one of SRV, A, AAAA, or MX. | `"SRV"` | no +Name | Type | Description | Default | Required +-------------------|----------------|------------------------------------------------------------------|---------|--------- +`names` | `list(string)` | DNS names to look up. | | yes +`port` | `number` | Port to use for collecting metrics. Not used for SRV records. | `0` | no +`refresh_interval` | `duration` | How often to query DNS for updates. | `"30s"` | no +`type` | `string` | Type of DNS record to query. Must be one of SRV, A, AAAA, or MX. | `"SRV"` | no ## Exported fields @@ -51,9 +45,8 @@ Each target includes the following labels: ## Component health -`discovery.dns` is only reported as unhealthy when given an invalid -configuration. In those cases, exported fields retain their last healthy -values. +`discovery.dns` is only reported as unhealthy when given an invalid configuration. +In those cases, exported fields retain their last healthy values. 
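Based on the argument table above, a minimal `discovery.dns` sketch might look like the following; the SRV record name is a placeholder:

```river
discovery.dns "example" {
  names = ["_prometheus._tcp.example.com"]
  type  = "SRV"

  // For A, AAAA, or MX lookups, `port` must also be set because those
  // record types carry no port information.
  // port = 9100
}
```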
## Debug information diff --git a/docs/sources/flow/reference/components/discovery.docker.md b/docs/sources/reference/components/discovery.docker.md similarity index 59% rename from docs/sources/flow/reference/components/discovery.docker.md rename to docs/sources/reference/components/discovery.docker.md index d9b5a02713..0ab823d22d 100644 --- a/docs/sources/flow/reference/components/discovery.docker.md +++ b/docs/sources/reference/components/discovery.docker.md @@ -1,10 +1,5 @@ --- -aliases: -- /docs/grafana-cloud/agent/flow/reference/components/discovery.docker/ -- /docs/grafana-cloud/monitor-infrastructure/agent/flow/reference/components/discovery.docker/ -- /docs/grafana-cloud/monitor-infrastructure/integrations/agent/flow/reference/components/discovery.docker/ -- /docs/grafana-cloud/send-data/agent/flow/reference/components/discovery.docker/ -canonical: https://grafana.com/docs/agent/latest/flow/reference/components/discovery.docker/ +canonical: https://grafana.com/docs/alloy/latest/reference/components/discovery.docker/ description: Learn about discovery.docker title: discovery.docker --- @@ -27,20 +22,20 @@ discovery.docker "LABEL" { The following arguments are supported: -Name | Type | Description | Default | Required ------------------------- | ------------------- | ------------------------------------------------------------- | ------- | -------- -`host` | `string` | Address of the Docker Daemon to connect to. | | yes -`port` | `number` | Port to use for collecting metrics when containers don't have any port mappings. | `80` | no -`host_networking_host` | `string` | Host to use if the container is in host networking mode. | `"localhost"` | no -`refresh_interval` | `duration` | Frequency to refresh list of containers. | `"1m"` | no -`bearer_token_file` | `string` | File containing a bearer token to authenticate with. | | no -`bearer_token` | `secret` | Bearer token to authenticate with. | | no -`enable_http2` | `bool` | Whether HTTP2 is supported for requests. | `true` | no -`follow_redirects` | `bool` | Whether redirects returned by the server should be followed. | `true` | no -`proxy_url` | `string` | HTTP proxy to send requests through. | | no -`no_proxy` | `string` | Comma-separated list of IP addresses, CIDR notations, and domain names to exclude from proxying. | | no -`proxy_from_environment` | `bool` | Use the proxy URL indicated by environment variables. | `false` | no -`proxy_connect_header` | `map(list(secret))` | Specifies headers to send to proxies during CONNECT requests. | | no +Name | Type | Description | Default | Required +-------------------------|---------------------|--------------------------------------------------------------------------------------------------|---------------|--------- +`host` | `string` | Address of the Docker Daemon to connect to. | | yes +`port` | `number` | Port to use for collecting metrics when containers don't have any port mappings. | `80` | no +`host_networking_host` | `string` | Host to use if the container is in host networking mode. | `"localhost"` | no +`refresh_interval` | `duration` | Frequency to refresh list of containers. | `"1m"` | no +`bearer_token_file` | `string` | File containing a bearer token to authenticate with. | | no +`bearer_token` | `secret` | Bearer token to authenticate with. | | no +`enable_http2` | `bool` | Whether HTTP2 is supported for requests. | `true` | no +`follow_redirects` | `bool` | Whether redirects returned by the server should be followed. 
| `true` | no
+`proxy_url` | `string` | HTTP proxy to send requests through. | | no
+`no_proxy` | `string` | Comma-separated list of IP addresses, CIDR notations, and domain names to exclude from proxying. | | no
+`proxy_from_environment` | `bool` | Use the proxy URL indicated by environment variables. | `false` | no
+`proxy_connect_header` | `map(list(secret))` | Specifies headers to send to proxies during CONNECT requests. | | no

At most, one of the following can be provided:
- [`bearer_token` argument](#arguments).
@@ -51,25 +46,24 @@ Name | Type | Description
[arguments]: #arguments

-{{< docs/shared lookup="flow/reference/components/http-client-proxy-config-description.md" source="agent" version="" >}}
+{{< docs/shared lookup="reference/components/http-client-proxy-config-description.md" source="alloy" version="" >}}
## Blocks

The following blocks are supported inside the definition of
`discovery.docker`:

-Hierarchy | Block | Description | Required
---------- | ----- | ----------- | --------
-filter | [filter][] | Filters discoverable resources. | no
-basic_auth | [basic_auth][] | Configure basic_auth for authenticating to the endpoint. | no
-authorization | [authorization][] | Configure generic authorization to the endpoint. | no
-oauth2 | [oauth2][] | Configure OAuth2 for authenticating to the endpoint. | no
-oauth2 > tls_config | [tls_config][] | Configure TLS settings for connecting to the endpoint. | no
-tls_config | [tls_config][] | Configure TLS settings for connecting to the endpoint. | no
+Hierarchy | Block | Description | Required
+--------------------|-------------------|----------------------------------------------------------|---------
+filter | [filter][] | Filters discoverable resources. | no
+basic_auth | [basic_auth][] | Configure basic_auth for authenticating to the endpoint. | no
+authorization | [authorization][] | Configure generic authorization to the endpoint. | no
+oauth2 | [oauth2][] | Configure OAuth2 for authenticating to the endpoint. | no
+oauth2 > tls_config | [tls_config][] | Configure TLS settings for connecting to the endpoint. | no
+tls_config | [tls_config][] | Configure TLS settings for connecting to the endpoint. | no

-The `>` symbol indicates deeper levels of nesting. For example,
-`oauth2 > tls_config` refers to a `tls_config` block defined inside
-an `oauth2` block.
+The `>` symbol indicates deeper levels of nesting.
+For example, `oauth2 > tls_config` refers to a `tls_config` block defined inside an `oauth2` block.

[filter]: #filter-block
[basic_auth]: #basic_auth-block
@@ -79,42 +73,40 @@ an `oauth2` block.
### filter block

-The `filter` block configures a filter to pass to the Docker Engine to limit
-the amount of containers returned. The `filter` block can be specified multiple
-times to provide more than one filter.
+The `filter` block configures a filter to pass to the Docker Engine to limit the number of containers returned.
+The `filter` block can be specified multiple times to provide more than one filter.

-Name | Type | Description | Default | Required
----- | ---- | ----------- | ------- | --------
-`name` | `string` | Filter name to use. | | yes
-`values` | `list(string)` | Values to pass to the filter. | | yes
+Name | Type | Description | Default | Required
+---------|----------------|-------------------------------|---------|---------
+`name` | `string` | Filter name to use. | | yes
+`values` | `list(string)` | Values to pass to the filter.
| | yes

-Refer to [List containers][List containers] from the Docker Engine API
-documentation for the list of supported filters and their meaning.
+Refer to [List containers][List containers] from the Docker Engine API documentation for the list of supported filters and their meaning.

[List containers]: https://docs.docker.com/engine/api/v1.41/#tag/Container/operation/ContainerList

### basic_auth block

-{{< docs/shared lookup="flow/reference/components/basic-auth-block.md" source="agent" version="" >}}
+{{< docs/shared lookup="reference/components/basic-auth-block.md" source="alloy" version="" >}}

### authorization block

-{{< docs/shared lookup="flow/reference/components/authorization-block.md" source="agent" version="" >}}
+{{< docs/shared lookup="reference/components/authorization-block.md" source="alloy" version="" >}}

### oauth2 block

-{{< docs/shared lookup="flow/reference/components/oauth2-block.md" source="agent" version="" >}}
+{{< docs/shared lookup="reference/components/oauth2-block.md" source="alloy" version="" >}}

### tls_config block

-{{< docs/shared lookup="flow/reference/components/tls-config-block.md" source="agent" version="" >}}
+{{< docs/shared lookup="reference/components/tls-config-block.md" source="alloy" version="" >}}

## Exported fields

The following fields are exported and can be referenced by other components:

-Name | Type | Description
----- | ---- | -----------
+Name | Type | Description
+----------|---------------------|---------------------------------------------------
`targets` | `list(map(string))` | The set of targets discovered from the docker API.

Each target includes the following labels:
@@ -125,28 +117,21 @@ Each target includes the following labels:
* `__meta_docker_container_label_<labelname>`: Each label from the container.
* `__meta_docker_network_id`: ID of the Docker network the container is in.
* `__meta_docker_network_name`: Name of the Docker network the container is in.
-* `__meta_docker_network_ingress`: Set to `true` if the Docker network is an
- ingress network.
-* `__meta_docker_network_internal`: Set to `true` if the Docker network is an
- internal network.
-* `__meta_docker_network_label_<labelname>`: Each label from the network the
- container is in.
+* `__meta_docker_network_ingress`: Set to `true` if the Docker network is an ingress network.
+* `__meta_docker_network_internal`: Set to `true` if the Docker network is an internal network.
+* `__meta_docker_network_label_<labelname>`: Each label from the network the container is in.
* `__meta_docker_network_scope`: The scope of the network the container is in.
* `__meta_docker_network_ip`: The IP of the container in the network.
* `__meta_docker_port_private`: The private port on the container.
-* `__meta_docker_port_public`: The publicly exposed port from the container,
- if a port mapping exists.
-* `__meta_docker_port_public_ip`: The public IP of the container, if a port
- mapping exists.
+* `__meta_docker_port_public`: The publicly exposed port from the container, if a port mapping exists.
+* `__meta_docker_port_public_ip`: The public IP of the container, if a port mapping exists.

-Each discovered container maps to one target per unique combination of networks
-and port mappings used by the container.
+Each discovered container maps to one target per unique combination of networks and port mappings used by the container.

## Component health

-`discovery.docker` is only reported as unhealthy when given an invalid
-configuration. In those cases, exported fields retain their last healthy
-values.
+`discovery.docker` is only reported as unhealthy when given an invalid configuration. +In those cases, exported fields retain their last healthy values. ## Debug information @@ -160,8 +145,7 @@ values. ### Linux or macOS hosts -This example discovers Docker containers when the host machine is macOS or -Linux: +This example discovers Docker containers when the host machine is macOS or Linux: ```river discovery.docker "containers" { diff --git a/docs/sources/flow/reference/components/discovery.dockerswarm.md b/docs/sources/reference/components/discovery.dockerswarm.md similarity index 79% rename from docs/sources/flow/reference/components/discovery.dockerswarm.md rename to docs/sources/reference/components/discovery.dockerswarm.md index d02a044f5c..e2bc5d9c3e 100644 --- a/docs/sources/flow/reference/components/discovery.dockerswarm.md +++ b/docs/sources/reference/components/discovery.dockerswarm.md @@ -1,10 +1,5 @@ --- -aliases: -- /docs/grafana-cloud/agent/flow/reference/components/discovery.dockerswarm/ -- /docs/grafana-cloud/monitor-infrastructure/agent/flow/reference/components/discovery.dockerswarm/ -- /docs/grafana-cloud/monitor-infrastructure/integrations/agent/flow/reference/components/discovery.dockerswarm/ -- /docs/grafana-cloud/send-data/agent/flow/reference/components/discovery.dockerswarm/ -canonical: https://grafana.com/docs/agent/latest/flow/reference/components/discovery.dockerswarm/ +canonical: https://grafana.com/docs/alloy/latest/reference/components/discovery.dockerswarm/ description: Learn about discovery.dockerswarm title: discovery.dockerswarm --- @@ -26,20 +21,20 @@ discovery.dockerswarm "LABEL" { The following arguments are supported: -Name | Type | Description | Default | Required ------------------------- | ------------------- | ------------------------------------------------------------- | ------- | -------- -`host` | `string` | Address of the Docker daemon. | | yes -`role` | `string` | Role of the targets to retrieve. Must be `services`, `tasks`, or `nodes`. | | yes +Name | Type | Description | Default | Required +-------------------------|---------------------|-------------------------------------------------------------------------------------------------------------------------------|---------|--------- +`host` | `string` | Address of the Docker daemon. | | yes +`role` | `string` | Role of the targets to retrieve. Must be `services`, `tasks`, or `nodes`. | | yes `port` | `number` | The port to scrape metrics from, when `role` is nodes, and for discovered tasks and services that don't have published ports. | `80` | no -`refresh_interval` | `duration` | Interval at which to refresh the list of targets. | `"60s"` | no -`bearer_token_file` | `string` | File containing a bearer token to authenticate with. | | no -`bearer_token` | `secret` | Bearer token to authenticate with. | | no -`enable_http2` | `bool` | Whether HTTP2 is supported for requests. | `true` | no -`follow_redirects` | `bool` | Whether redirects returned by the server should be followed. | `true` | no -`proxy_url` | `string` | HTTP proxy to send requests through. | | no -`no_proxy` | `string` | Comma-separated list of IP addresses, CIDR notations, and domain names to exclude from proxying. | | no -`proxy_from_environment` | `bool` | Use the proxy URL indicated by environment variables. | `false` | no -`proxy_connect_header` | `map(list(secret))` | Specifies headers to send to proxies during CONNECT requests. | | no +`refresh_interval` | `duration` | Interval at which to refresh the list of targets. 
| `"60s"` | no +`bearer_token_file` | `string` | File containing a bearer token to authenticate with. | | no +`bearer_token` | `secret` | Bearer token to authenticate with. | | no +`enable_http2` | `bool` | Whether HTTP2 is supported for requests. | `true` | no +`follow_redirects` | `bool` | Whether redirects returned by the server should be followed. | `true` | no +`proxy_url` | `string` | HTTP proxy to send requests through. | | no +`no_proxy` | `string` | Comma-separated list of IP addresses, CIDR notations, and domain names to exclude from proxying. | | no +`proxy_from_environment` | `bool` | Use the proxy URL indicated by environment variables. | `false` | no +`proxy_connect_header` | `map(list(secret))` | Specifies headers to send to proxies during CONNECT requests. | | no At most, one of the following can be provided: - [`bearer_token` argument](#arguments). @@ -48,7 +43,7 @@ Name | Type | Description - [`authorization` block][authorization]. - [`oauth2` block][oauth2]. -{{< docs/shared lookup="flow/reference/components/http-client-proxy-config-description.md" source="agent" version="" >}} +{{< docs/shared lookup="reference/components/http-client-proxy-config-description.md" source="alloy" version="" >}} [arguments]: #arguments @@ -95,19 +90,19 @@ The following arguments can be used to configure a filter. ### basic_auth block -{{< docs/shared lookup="flow/reference/components/basic-auth-block.md" source="agent" version="" >}} +{{< docs/shared lookup="reference/components/basic-auth-block.md" source="alloy" version="" >}} ### authorization block -{{< docs/shared lookup="flow/reference/components/authorization-block.md" source="agent" version="" >}} +{{< docs/shared lookup="reference/components/authorization-block.md" source="alloy" version="" >}} ### oauth2 block -{{< docs/shared lookup="flow/reference/components/oauth2-block.md" source="agent" version="" >}} +{{< docs/shared lookup="reference/components/oauth2-block.md" source="alloy" version="" >}} ### tls_config block -{{< docs/shared lookup="flow/reference/components/tls-config-block.md" source="agent" version="" >}} +{{< docs/shared lookup="reference/components/tls-config-block.md" source="alloy" version="" >}} ## Exported fields @@ -123,7 +118,9 @@ The `role` attribute decides the role of the targets to retrieve. ### services -The `services` role discovers all [Swarm services](https://docs.docker.com/engine/swarm/key-concepts/#services-and-tasks) and exposes their ports as targets. For each published port of a service, a single target is generated. If a service has no published ports, a target per service is created using the `port` attribute defined in the arguments. +The `services` role discovers all [Swarm services](https://docs.docker.com/engine/swarm/key-concepts/#services-and-tasks) and exposes their ports as targets. +For each published port of a service, a single target is generated. +If a service has no published ports, a target per service is created using the `port` attribute defined in the arguments. Available meta labels: @@ -145,7 +142,9 @@ Available meta labels: ### tasks -The `tasks` role discovers all [Swarm tasks](https://docs.docker.com/engine/swarm/key-concepts/#services-and-tasks) and exposes their ports as targets. For each published port of a task, a single target is generated. If a task has no published ports, a target per task is created using the `port` attribute defined in the arguments. 
+The `tasks` role discovers all [Swarm tasks](https://docs.docker.com/engine/swarm/key-concepts/#services-and-tasks) and exposes their ports as targets. +For each published port of a task, a single target is generated. +If a task has no published ports, a target per task is created using the `port` attribute defined in the arguments. Available meta labels: @@ -201,9 +200,8 @@ Available meta labels: ## Component health -`discovery.dockerswarm` is only reported as unhealthy when given an invalid -configuration. In those cases, exported fields retain their last healthy -values. +`discovery.dockerswarm` is only reported as unhealthy when given an invalid configuration. +In those cases, exported fields retain their last healthy values. ## Debug information diff --git a/docs/sources/flow/reference/components/discovery.ec2.md b/docs/sources/reference/components/discovery.ec2.md similarity index 62% rename from docs/sources/flow/reference/components/discovery.ec2.md rename to docs/sources/reference/components/discovery.ec2.md index 6345018f11..90ebb0109b 100644 --- a/docs/sources/flow/reference/components/discovery.ec2.md +++ b/docs/sources/reference/components/discovery.ec2.md @@ -1,10 +1,5 @@ --- -aliases: -- /docs/grafana-cloud/agent/flow/reference/components/discovery.ec2/ -- /docs/grafana-cloud/monitor-infrastructure/agent/flow/reference/components/discovery.ec2/ -- /docs/grafana-cloud/monitor-infrastructure/integrations/agent/flow/reference/components/discovery.ec2/ -- /docs/grafana-cloud/send-data/agent/flow/reference/components/discovery.ec2/ -canonical: https://grafana.com/docs/agent/latest/flow/reference/components/discovery.ec2/ +canonical: https://grafana.com/docs/alloy/latest/reference/components/discovery.ec2/ description: Learn about discovery.ec2 title: discovery.ec2 --- @@ -26,24 +21,24 @@ discovery.ec2 "LABEL" { The following arguments are supported: -Name | Type | Description | Default | Required ------------------------- | ------------------- | ------------------------------------------------------------- | ------- | -------- -`endpoint` | `string` | Custom endpoint to be used. | | no -`region` | `string` | The AWS region. If blank, the region from the instance metadata is used. | | no -`access_key` | `string` | The AWS API key ID. If blank, the environment variable `AWS_ACCESS_KEY_ID` is used. | | no -`secret_key` | `string` | The AWS API key secret. If blank, the environment variable `AWS_SECRET_ACCESS_KEY` is used. | | no -`profile` | `string` | Named AWS profile used to connect to the API. | | no -`role_arn` | `string` | AWS Role Amazon Resource Name (ARN), an alternative to using AWS API keys. | | no -`refresh_interval` | `string` | Refresh interval to re-read the instance list. | 60s | no -`port` | `int` | The port to scrape metrics from. If using the public IP address, this must instead be specified in the relabeling rule. | 80 | no -`bearer_token_file` | `string` | File containing a bearer token to authenticate with. | | no -`bearer_token` | `secret` | Bearer token to authenticate with. | | no -`enable_http2` | `bool` | Whether HTTP2 is supported for requests. | `true` | no -`follow_redirects` | `bool` | Whether redirects returned by the server should be followed. | `true` | no -`proxy_url` | `string` | HTTP proxy to send requests through. | | no -`no_proxy` | `string` | Comma-separated list of IP addresses, CIDR notations, and domain names to exclude from proxying. | | no -`proxy_from_environment` | `bool` | Use the proxy URL indicated by environment variables. 
| `false` | no -`proxy_connect_header` | `map(list(secret))` | Specifies headers to send to proxies during CONNECT requests. | | no +Name | Type | Description | Default | Required +-------------------------|---------------------|-------------------------------------------------------------------------------------------------------------------------|---------|--------- +`endpoint` | `string` | Custom endpoint to be used. | | no +`region` | `string` | The AWS region. If blank, the region from the instance metadata is used. | | no +`access_key` | `string` | The AWS API key ID. If blank, the environment variable `AWS_ACCESS_KEY_ID` is used. | | no +`secret_key` | `string` | The AWS API key secret. If blank, the environment variable `AWS_SECRET_ACCESS_KEY` is used. | | no +`profile` | `string` | Named AWS profile used to connect to the API. | | no +`role_arn` | `string` | AWS Role Amazon Resource Name (ARN), an alternative to using AWS API keys. | | no +`refresh_interval` | `string` | Refresh interval to re-read the instance list. | 60s | no +`port` | `int` | The port to scrape metrics from. If using the public IP address, this must instead be specified in the relabeling rule. | 80 | no +`bearer_token_file` | `string` | File containing a bearer token to authenticate with. | | no +`bearer_token` | `secret` | Bearer token to authenticate with. | | no +`enable_http2` | `bool` | Whether HTTP2 is supported for requests. | `true` | no +`follow_redirects` | `bool` | Whether redirects returned by the server should be followed. | `true` | no +`proxy_url` | `string` | HTTP proxy to send requests through. | | no +`no_proxy` | `string` | Comma-separated list of IP addresses, CIDR notations, and domain names to exclude from proxying. | | no +`proxy_from_environment` | `bool` | Use the proxy URL indicated by environment variables. | `false` | no +`proxy_connect_header` | `map(list(secret))` | Specifies headers to send to proxies during CONNECT requests. | | no At most, one of the following can be provided: - [`bearer_token` argument](#arguments). @@ -52,21 +47,21 @@ Name | Type | Description - [`authorization` block][authorization]. - [`oauth2` block][oauth2]. - {{< docs/shared lookup="flow/reference/components/http-client-proxy-config-description.md" source="agent" version="" >}} + {{< docs/shared lookup="reference/components/http-client-proxy-config-description.md" source="alloy" version="" >}} ## Blocks The following blocks are supported inside the definition of `discovery.ec2`: -Hierarchy | Block | Description | Required ---------- | ----- | ----------- | -------- -basic_auth | [basic_auth][] | Configure basic_auth for authenticating to the endpoint. | no -authorization | [authorization][] | Configure generic authorization to the endpoint. | no -filter | [filter][] | Filters discoverable resources. | no -oauth2 | [oauth2][] | Configure OAuth2 for authenticating to the endpoint. | no -oauth2 > tls_config | [tls_config][] | Configure TLS settings for connecting to the endpoint. | no -tls_config | [tls_config][] | Configure TLS settings for connecting to the endpoint. | no +Hierarchy | Block | Description | Required +--------------------|-------------------|----------------------------------------------------------|--------- +basic_auth | [basic_auth][] | Configure basic_auth for authenticating to the endpoint. | no +authorization | [authorization][] | Configure generic authorization to the endpoint. | no +filter | [filter][] | Filters discoverable resources. 
| no +oauth2 | [oauth2][] | Configure OAuth2 for authenticating to the endpoint. | no +oauth2 > tls_config | [tls_config][] | Configure TLS settings for connecting to the endpoint. | no +tls_config | [tls_config][] | Configure TLS settings for connecting to the endpoint. | no [filter]: #filter-block [authorization]: #authorization-block @@ -75,17 +70,17 @@ tls_config | [tls_config][] | Configure TLS settings for connecting to the endpo ### authorization block -{{< docs/shared lookup="flow/reference/components/authorization-block.md" source="agent" version="" >}} +{{< docs/shared lookup="reference/components/authorization-block.md" source="alloy" version="" >}} ### filter block Filters can be used optionally to filter the instance list by other criteria. Available filter criteria can be found in the [Amazon EC2 documentation](https://docs.aws.amazon.com/AWSEC2/latest/APIReference/API_DescribeInstances.html). -Name | Type | Description | Default | Required ----- | ---- | ----------- | ------- | -------- -`name` | `string` | Filter name to use. | | yes -`values` | `list(string)` | Values to pass to the filter. | | yes +Name | Type | Description | Default | Required +---------|----------------|-------------------------------|---------|--------- +`name` | `string` | Filter name to use. | | yes +`values` | `list(string)` | Values to pass to the filter. | | yes Refer to the [Filter API AWS EC2 documentation][filter api] for the list of supported filters and their descriptions. @@ -93,11 +88,11 @@ Refer to the [Filter API AWS EC2 documentation][filter api] for the list of supp ### oauth2 block -{{< docs/shared lookup="flow/reference/components/oauth2-block.md" source="agent" version="" >}} +{{< docs/shared lookup="reference/components/oauth2-block.md" source="alloy" version="" >}} ### tls_config block -{{< docs/shared lookup="flow/reference/components/tls-config-block.md" source="agent" version="" >}} +{{< docs/shared lookup="reference/components/tls-config-block.md" source="alloy" version="" >}} ## Exported fields @@ -132,9 +127,8 @@ Each target includes the following labels: ## Component health -`discovery.ec2` is only reported as unhealthy when given an invalid -configuration. In those cases, exported fields retain their last healthy -values. +`discovery.ec2` is only reported as unhealthy when given an invalid configuration. +In those cases, exported fields retain their last healthy values. 
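As a sketch of the `filter` block described above, a hypothetical configuration that limits discovery to running instances might look like this; the region and filter values are illustrative:

```river
discovery.ec2 "example" {
  region = "us-east-1"

  // `instance-state-name` is a standard EC2 filter; the values are examples.
  filter {
    name   = "instance-state-name"
    values = ["running"]
  }
}
```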
## Debug information diff --git a/docs/sources/flow/reference/components/discovery.eureka.md b/docs/sources/reference/components/discovery.eureka.md similarity index 61% rename from docs/sources/flow/reference/components/discovery.eureka.md rename to docs/sources/reference/components/discovery.eureka.md index 1cb3dd50da..dfcc6fea56 100644 --- a/docs/sources/flow/reference/components/discovery.eureka.md +++ b/docs/sources/reference/components/discovery.eureka.md @@ -1,10 +1,5 @@ --- -aliases: -- /docs/grafana-cloud/agent/flow/reference/components/discovery.eureka/ -- /docs/grafana-cloud/monitor-infrastructure/agent/flow/reference/components/discovery.eureka/ -- /docs/grafana-cloud/monitor-infrastructure/integrations/agent/flow/reference/components/discovery.eureka/ -- /docs/grafana-cloud/send-data/agent/flow/reference/components/discovery.eureka/ -canonical: https://grafana.com/docs/agent/latest/flow/reference/components/discovery.eureka/ +canonical: https://grafana.com/docs/alloy/latest/reference/components/discovery.eureka/ description: Learn about discovery.eureka title: discovery.eureka --- @@ -27,18 +22,18 @@ discovery.eureka "LABEL" { The following arguments are supported: -Name | Type | Description | Default | Required ------------------------- | ------------------- | ------------------------------------------------------------- | ------- | -------- -`server` | `string` | Eureka server URL. | | yes -`refresh_interval` | `duration` | Interval at which to refresh the list of targets. | `30s` | no -`bearer_token_file` | `string` | File containing a bearer token to authenticate with. | | no -`bearer_token` | `secret` | Bearer token to authenticate with. | | no -`enable_http2` | `bool` | Whether HTTP2 is supported for requests. | `true` | no -`follow_redirects` | `bool` | Whether redirects returned by the server should be followed. | `true` | no -`proxy_url` | `string` | HTTP proxy to send requests through. | | no -`no_proxy` | `string` | Comma-separated list of IP addresses, CIDR notations, and domain names to exclude from proxying. | | no -`proxy_from_environment` | `bool` | Use the proxy URL indicated by environment variables. | `false` | no -`proxy_connect_header` | `map(list(secret))` | Specifies headers to send to proxies during CONNECT requests. | | no +Name | Type | Description | Default | Required +-------------------------|---------------------|--------------------------------------------------------------------------------------------------|---------|--------- +`server` | `string` | Eureka server URL. | | yes +`refresh_interval` | `duration` | Interval at which to refresh the list of targets. | `30s` | no +`bearer_token_file` | `string` | File containing a bearer token to authenticate with. | | no +`bearer_token` | `secret` | Bearer token to authenticate with. | | no +`enable_http2` | `bool` | Whether HTTP2 is supported for requests. | `true` | no +`follow_redirects` | `bool` | Whether redirects returned by the server should be followed. | `true` | no +`proxy_url` | `string` | HTTP proxy to send requests through. | | no +`no_proxy` | `string` | Comma-separated list of IP addresses, CIDR notations, and domain names to exclude from proxying. | | no +`proxy_from_environment` | `bool` | Use the proxy URL indicated by environment variables. | `false` | no +`proxy_connect_header` | `map(list(secret))` | Specifies headers to send to proxies during CONNECT requests. | | no At most, one of the following can be provided: - [`bearer_token` argument](#arguments). 
@@ -49,19 +44,19 @@ Name | Type | Description [arguments]: #arguments -{{< docs/shared lookup="flow/reference/components/http-client-proxy-config-description.md" source="agent" version="" >}} +{{< docs/shared lookup="reference/components/http-client-proxy-config-description.md" source="alloy" version="" >}} ## Blocks The following blocks are supported inside the definition of `discovery.eureka`: -Hierarchy | Block | Description | Required ---------- | ----- | ----------- | -------- -basic_auth | [basic_auth][] | Configure basic_auth for authenticating to the endpoint. | no -authorization | [authorization][] | Configure generic authorization to the endpoint. | no -oauth2 | [oauth2][] | Configure OAuth2 for authenticating to the endpoint. | no -oauth2 > tls_config | [tls_config][] | Configure TLS settings for connecting to the endpoint. | no -tls_config | [tls_config][] | Configure TLS settings for connecting to the endpoint. | no +Hierarchy | Block | Description | Required +--------------------|-------------------|----------------------------------------------------------|--------- +basic_auth | [basic_auth][] | Configure basic_auth for authenticating to the endpoint. | no +authorization | [authorization][] | Configure generic authorization to the endpoint. | no +oauth2 | [oauth2][] | Configure OAuth2 for authenticating to the endpoint. | no +oauth2 > tls_config | [tls_config][] | Configure TLS settings for connecting to the endpoint. | no +tls_config | [tls_config][] | Configure TLS settings for connecting to the endpoint. | no The `>` symbol indicates deeper levels of nesting. For example, `oauth2 > tls_config` refers to a `tls_config` block defined inside @@ -74,26 +69,26 @@ an `oauth2` block. ### basic_auth block -{{< docs/shared lookup="flow/reference/components/basic-auth-block.md" source="agent" version="" >}} +{{< docs/shared lookup="reference/components/basic-auth-block.md" source="alloy" version="" >}} ### authorization block -{{< docs/shared lookup="flow/reference/components/authorization-block.md" source="agent" version="" >}} +{{< docs/shared lookup="reference/components/authorization-block.md" source="alloy" version="" >}} ### oauth2 block -{{< docs/shared lookup="flow/reference/components/oauth2-block.md" source="agent" version="" >}} +{{< docs/shared lookup="reference/components/oauth2-block.md" source="alloy" version="" >}} ### tls_config block -{{< docs/shared lookup="flow/reference/components/tls-config-block.md" source="agent" version="" >}} +{{< docs/shared lookup="reference/components/tls-config-block.md" source="alloy" version="" >}} ## Exported fields The following fields are exported and can be referenced by other components: Name | Type | Description ---------- | ------------------- | ----------- +----------|---------------------|--------------------------------------------------- `targets` | `list(map(string))` | The set of targets discovered from the Eureka API. Each target includes the following labels: @@ -119,9 +114,8 @@ Each target includes the following labels: ## Component health -`discovery.eureka` is only reported as unhealthy when given an invalid -configuration. In those cases, exported fields retain their last healthy -values. +`discovery.eureka` is only reported as unhealthy when given an invalid configuration. +In those cases, exported fields retain their last healthy values. 
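Given the required `server` argument above, a minimal `discovery.eureka` sketch could look like the following; the server URL is a placeholder and should be adjusted to the actual Eureka deployment:

```river
discovery.eureka "example" {
  // Hypothetical Eureka REST endpoint.
  server           = "http://eureka.example.local:8761/eureka/v2"
  refresh_interval = "1m"
}
```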
## Debug information diff --git a/docs/sources/flow/reference/components/discovery.file.md b/docs/sources/reference/components/discovery.file.md similarity index 71% rename from docs/sources/flow/reference/components/discovery.file.md rename to docs/sources/reference/components/discovery.file.md index 67335bf5e1..58308e683f 100644 --- a/docs/sources/flow/reference/components/discovery.file.md +++ b/docs/sources/reference/components/discovery.file.md @@ -1,25 +1,18 @@ --- -aliases: -- /docs/grafana-cloud/agent/flow/reference/components/discovery.file/ -- /docs/grafana-cloud/monitor-infrastructure/agent/flow/reference/components/discovery.file/ -- /docs/grafana-cloud/monitor-infrastructure/integrations/agent/flow/reference/components/discovery.file/ -- /docs/grafana-cloud/send-data/agent/flow/reference/components/discovery.file/ -canonical: https://grafana.com/docs/agent/latest/flow/reference/components/discovery.file/ +canonical: https://grafana.com/docs/alloy/latest/reference/components/discovery.file/ description: Learn about discovery.file title: discovery.file --- # discovery.file -> **NOTE:** In {{< param "PRODUCT_ROOT_NAME" >}} `v0.35.0`, the `discovery.file` component was renamed to [local.file_match][], -> and `discovery.file` was repurposed to discover scrape targets from one or more files. -> ->
-> -> If you are trying to discover files on the local filesystem rather than scrape -> targets within a set of files, you should use [local.file_match][] instead. +{{< admonition type="note" >}} +In {{< param "PRODUCT_ROOT_NAME" >}} `v0.35.0`, the `discovery.file` component was renamed to [local.file_match][], and `discovery.file` was repurposed to discover scrape targets from one or more files. + +If you are trying to discover files on the local filesystem rather than scrape targets within a set of files, you should use [local.file_match][] instead. -[local.file_match]: {{< relref "./local.file_match.md" >}} +[local.file_match]: ../local.file_match/ +{{< /admonition >}} `discovery.file` discovers targets from a set of files, similar to the [Prometheus file_sd_config](https://prometheus.io/docs/prometheus/latest/configuration/configuration/#file_sd_config). @@ -35,10 +28,10 @@ discovery.file "LABEL" { The following arguments are supported: -Name | Type | Description | Default | Required ------------------- | ------------------- | ------------------------------------------ |---------| -------- -`files` | `list(string)` | Files to read and discover targets from. | | yes -`refresh_interval` | `duration` | How often to sync targets. | "5m" | no +Name | Type | Description | Default | Required +-------------------|----------------|------------------------------------------|---------|--------- +`files` | `list(string)` | Files to read and discover targets from. | | yes +`refresh_interval` | `duration` | How often to sync targets. | "5m" | no The last path segment of each element in `files` may contain a single * that matches any character sequence, e.g. `my/path/tg_*.json`. @@ -47,7 +40,7 @@ The last path segment of each element in `files` may contain a single * that mat The following fields are exported and can be referenced by other components: Name | Type | Description ---------- | ------------------- | ----------- +----------|---------------------|--------------------------------------------------- `targets` | `list(map(string))` | The set of targets discovered from the filesystem. Each target includes the following labels: @@ -56,9 +49,8 @@ Each target includes the following labels: ## Component health -`discovery.file` is only reported as unhealthy when given an invalid -configuration. In those cases, exported fields retain their last healthy -values. +`discovery.file` is only reported as unhealthy when given an invalid configuration. +In those cases, exported fields retain their last healthy values. ## Debug information @@ -102,8 +94,7 @@ values. ### Basic file discovery -This example discovers targets from a single file, scrapes them, and writes metrics -to a Prometheus remote write endpoint. +This example discovers targets from a single file, scrapes them, and writes metrics to a Prometheus remote write endpoint. ```river discovery.file "example" { @@ -134,8 +125,7 @@ Replace the following: ### File discovery with retained file path label -This example discovers targets from a wildcard file path, scrapes them, and writes metrics -to a Prometheus remote write endpoint. +This example discovers targets from a wildcard file path, scrapes them, and writes metrics to a Prometheus remote write endpoint. It also uses a relabeling rule to retain the file path as a label on each target. 
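The hunk above cuts off before the wildcard configuration itself. A sketch of what such a pipeline might look like, with hypothetical paths and a relabel rule that keeps the file path, is:

```river
discovery.file "example" {
  // Placeholder wildcard path; the last path segment may contain a single *.
  files = ["/etc/targets/tg_*.json"]
}

discovery.relabel "example" {
  targets = discovery.file.example.targets

  // Copy the discovery metadata label onto a permanent label so it
  // survives relabeling and is visible on scraped series.
  rule {
    source_labels = ["__meta_filepath"]
    target_label  = "filepath"
  }
}
```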
diff --git a/docs/sources/flow/reference/components/discovery.gce.md b/docs/sources/reference/components/discovery.gce.md similarity index 57% rename from docs/sources/flow/reference/components/discovery.gce.md rename to docs/sources/reference/components/discovery.gce.md index 182a19dfac..d47300d1f9 100644 --- a/docs/sources/flow/reference/components/discovery.gce.md +++ b/docs/sources/reference/components/discovery.gce.md @@ -1,26 +1,24 @@ --- -aliases: -- /docs/grafana-cloud/agent/flow/reference/components/discovery.gce/ -- /docs/grafana-cloud/monitor-infrastructure/agent/flow/reference/components/discovery.gce/ -- /docs/grafana-cloud/monitor-infrastructure/integrations/agent/flow/reference/components/discovery.gce/ -- /docs/grafana-cloud/send-data/agent/flow/reference/components/discovery.gce/ -canonical: https://grafana.com/docs/agent/latest/flow/reference/components/discovery.gce/ +canonical: https://grafana.com/docs/alloy/latest/reference/components/discovery.gce/ description: Learn about discovery.gce title: discovery.gce --- # discovery.gce -`discovery.gce` allows retrieving scrape targets from [Google Compute Engine](https://cloud.google.com/compute) (GCE) instances. The private IP address is used by default, but may be changed to the public IP address with relabeling. +`discovery.gce` allows retrieving scrape targets from [Google Compute Engine][] (GCE) instances. +The private IP address is used by default, but may be changed to the public IP address with relabeling. Credentials are discovered by the Google Cloud SDK default client by looking in the following places, preferring the first location found: -1. a JSON file specified by the `GOOGLE_APPLICATION_CREDENTIALS` environment variable. -2. a JSON file in the well-known path `$HOME/.config/gcloud/application_default_credentials.json`. -3. fetched from the GCE metadata server. +1. A JSON file specified by the `GOOGLE_APPLICATION_CREDENTIALS` environment variable. +2. A JSON file in the well-known path `$HOME/.config/gcloud/application_default_credentials.json`. +3. Fetched from the GCE metadata server. -If the Agent is running within GCE, the service account associated with the instance it is running on should have at least read-only permissions to the compute resources. If running outside of GCE make sure to create an appropriate service account and place the credential file in one of the expected locations. +If {{< param "PRODUCT_NAME" >}} is running within GCE, the service account associated with the instance it's running on should have at least read-only permissions to the compute resources. +If running outside of GCE make sure to create an appropriate service account and place the credential file in one of the expected locations. +[Google Compute Engine]: https://cloud.google.com/compute ## Usage @@ -35,23 +33,25 @@ discovery.gce "LABEL" { The following arguments are supported: -Name | Type | Description | Default | Required ----- | ---- | ----------- | ------- | -------- -`project` | `string` | The GCP Project.| | yes -`zone` | `string` | The zone of the scrape targets. | | yes -`filter` | `string` | Filter can be used optionally to filter the instance list by other criteria. | | no -`refresh_interval` | `duration` | Refresh interval to re-read the instance list. | `"60s"`| no -`port` | `int` | The port to scrape metrics from. If using the public IP address, this must instead be specified in the relabeling rule. | `80`| no -`tag_separator` | `string` | The tag separator is used to separate the tags on concatenation. 
| `","`| no +Name | Type | Description | Default | Required +-------------------|------------|-------------------------------------------------------------------------------------------------------------------------|---------|--------- +`project` | `string` | The GCP Project. | | yes +`zone` | `string` | The zone of the scrape targets. | | yes +`filter` | `string` | Filter can be used optionally to filter the instance list by other criteria. | | no +`refresh_interval` | `duration` | Refresh interval to re-read the instance list. | `"60s"` | no +`port` | `int` | The port to scrape metrics from. If using the public IP address, this must instead be specified in the relabeling rule. | `80` | no +`tag_separator` | `string` | The tag separator is used to separate the tags on concatenation. | `","` | no -For more information on the syntax of the `filter` argument, refer to Google's `filter` documentation for [Method: instances.list](https://cloud.google.com/compute/docs/reference/latest/instances/list). +For more information on the syntax of the `filter` argument, refer to Google's `filter` documentation for [Method: instances.list][]. + +[Method: instances.list]: https://cloud.google.com/compute/docs/reference/latest/instances/list ## Exported fields The following fields are exported and can be referenced by other components: -Name | Type | Description ----- | ---- | ----------- +Name | Type | Description +----------|---------------------|----------------------------------- `targets` | `list(map(string))` | The set of discovered GCE targets. Each target includes the following labels: @@ -73,9 +73,8 @@ Each target includes the following labels: ## Component health -`discovery.gce` is only reported as unhealthy when given an invalid -configuration. In those cases, exported fields retain their last healthy -values. +`discovery.gce` is only reported as unhealthy when given an invalid configuration. +In those cases, exported fields retain their last healthy values. ## Debug information @@ -113,7 +112,7 @@ Replace the following: - `PROMETHEUS_REMOTE_WRITE_URL`: The URL of the Prometheus remote_write-compatible server to send metrics to. - `USERNAME`: The username to use for authentication to the remote_write API. - `PASSWORD`: The password to use for authentication to the remote_write API. 
- + ## Compatible components diff --git a/docs/sources/flow/reference/components/discovery.hetzner.md b/docs/sources/reference/components/discovery.hetzner.md similarity index 65% rename from docs/sources/flow/reference/components/discovery.hetzner.md rename to docs/sources/reference/components/discovery.hetzner.md index a18984696d..27637a983c 100644 --- a/docs/sources/flow/reference/components/discovery.hetzner.md +++ b/docs/sources/reference/components/discovery.hetzner.md @@ -1,10 +1,5 @@ --- -aliases: -- /docs/grafana-cloud/agent/flow/reference/components/discovery.hetzner/ -- /docs/grafana-cloud/monitor-infrastructure/agent/flow/reference/components/discovery.hetzner/ -- /docs/grafana-cloud/monitor-infrastructure/integrations/agent/flow/reference/components/discovery.hetzner/ -- /docs/grafana-cloud/send-data/agent/flow/reference/components/discovery.hetzner/ -canonical: https://grafana.com/docs/agent/latest/flow/reference/components/discovery.hetzner/ +canonical: https://grafana.com/docs/alloy/latest/reference/components/discovery.hetzner/ description: Learn about discovery.hetzner title: discovery.hetzner --- @@ -29,49 +24,48 @@ discovery.hetzner "LABEL" { The following arguments are supported: -Name | Type | Description | Default | Required ------------------------- | ------------------- | ------------------------------------------------------------- | ------- | -------- -`role` | `string` | Hetzner role of entities that should be discovered. | | yes -`port` | `int` | The port to scrape metrics from. | `80` | no -`refresh_interval` | `duration` | The time after which the servers are refreshed. | `"60s"` | no -`bearer_token_file` | `string` | File containing a bearer token to authenticate with. | | no -`bearer_token` | `secret` | Bearer token to authenticate with. | | no -`enable_http2` | `bool` | Whether HTTP2 is supported for requests. | `true` | no -`follow_redirects` | `bool` | Whether redirects returned by the server should be followed. | `true` | no -`proxy_url` | `string` | HTTP proxy to send requests through. | | no -`no_proxy` | `string` | Comma-separated list of IP addresses, CIDR notations, and domain names to exclude from proxying. | | no -`proxy_from_environment` | `bool` | Use the proxy URL indicated by environment variables. | `false` | no -`proxy_connect_header` | `map(list(secret))` | Specifies headers to send to proxies during CONNECT requests. | | no +Name | Type | Description | Default | Required +-------------------------|---------------------|--------------------------------------------------------------------------------------------------|---------|--------- +`role` | `string` | Hetzner role of entities that should be discovered. | | yes +`port` | `int` | The port to scrape metrics from. | `80` | no +`refresh_interval` | `duration` | The time after which the servers are refreshed. | `"60s"` | no +`bearer_token_file` | `string` | File containing a bearer token to authenticate with. | | no +`bearer_token` | `secret` | Bearer token to authenticate with. | | no +`enable_http2` | `bool` | Whether HTTP2 is supported for requests. | `true` | no +`follow_redirects` | `bool` | Whether redirects returned by the server should be followed. | `true` | no +`proxy_url` | `string` | HTTP proxy to send requests through. | | no +`no_proxy` | `string` | Comma-separated list of IP addresses, CIDR notations, and domain names to exclude from proxying. | | no +`proxy_from_environment` | `bool` | Use the proxy URL indicated by environment variables. 
| `false` | no
+`proxy_connect_header` | `map(list(secret))` | Specifies headers to send to proxies during CONNECT requests. | | no

`role` must be one of `robot` or `hcloud`.

At most, one of the following can be provided:
- [`bearer_token` argument](#arguments).
- - [`bearer_token_file` argument](#arguments).
+- [`bearer_token_file` argument](#arguments).
- [`basic_auth` block][basic_auth].
- [`authorization` block][authorization].
- [`oauth2` block][oauth2].

[arguments]: #arguments

-{{< docs/shared lookup="flow/reference/components/http-client-proxy-config-description.md" source="agent" version="" >}}
+{{< docs/shared lookup="reference/components/http-client-proxy-config-description.md" source="alloy" version="" >}}

## Blocks

The following blocks are supported inside the definition of
`discovery.hetzner`:

-Hierarchy | Block | Description | Required
---------- | ----- | ----------- | --------
-basic_auth | [basic_auth][] | Configure basic_auth for authenticating to the endpoint. | no
-authorization | [authorization][] | Configure generic authorization to the endpoint. | no
-oauth2 | [oauth2][] | Configure OAuth2 for authenticating to the endpoint. | no
-oauth2 > tls_config | [tls_config][] | Configure TLS settings for connecting to the endpoint. | no
-tls_config | [tls_config][] | Configure TLS settings for connecting to the endpoint. | no
+Hierarchy | Block | Description | Required
+--------------------|-------------------|----------------------------------------------------------|---------
+basic_auth | [basic_auth][] | Configure basic_auth for authenticating to the endpoint. | no
+authorization | [authorization][] | Configure generic authorization to the endpoint. | no
+oauth2 | [oauth2][] | Configure OAuth2 for authenticating to the endpoint. | no
+oauth2 > tls_config | [tls_config][] | Configure TLS settings for connecting to the endpoint. | no
+tls_config | [tls_config][] | Configure TLS settings for connecting to the endpoint. | no

-The `>` symbol indicates deeper levels of nesting. For example,
-`oauth2 > tls_config` refers to a `tls_config` block defined inside
-an `oauth2` block.
+The `>` symbol indicates deeper levels of nesting.
+For example, `oauth2 > tls_config` refers to a `tls_config` block defined inside an `oauth2` block.

[basic_auth]: #basic_auth-block
[authorization]: #authorization-block
@@ -80,26 +74,26 @@ an `oauth2` block.
### basic_auth block

-{{< docs/shared lookup="flow/reference/components/basic-auth-block.md" source="agent" version="" >}}
+{{< docs/shared lookup="reference/components/basic-auth-block.md" source="alloy" version="" >}}

### authorization block

-{{< docs/shared lookup="flow/reference/components/authorization-block.md" source="agent" version="" >}}
+{{< docs/shared lookup="reference/components/authorization-block.md" source="alloy" version="" >}}

### oauth2 block

-{{< docs/shared lookup="flow/reference/components/oauth2-block.md" source="agent" version="" >}}
+{{< docs/shared lookup="reference/components/oauth2-block.md" source="alloy" version="" >}}

### tls_config block

-{{< docs/shared lookup="flow/reference/components/tls-config-block.md" source="agent" version="" >}}
+{{< docs/shared lookup="reference/components/tls-config-block.md" source="alloy" version="" >}}

## Exported fields

The following fields are exported and can be referenced by other components:

-Name | Type | Description
---- | ---- | -----------
+Name | Type | Description
+----------|---------------------|------------------------------------------------------------
`targets` | `list(map(string))` | The set of targets discovered from the Hetzner catalog API.

Each target includes the following labels:

diff --git a/docs/sources/flow/reference/components/discovery.http.md b/docs/sources/reference/components/discovery.http.md
similarity index 54%
rename from docs/sources/flow/reference/components/discovery.http.md
rename to docs/sources/reference/components/discovery.http.md
index 1ad2734eaf..11e723f6e4 100644
--- a/docs/sources/flow/reference/components/discovery.http.md
+++ b/docs/sources/reference/components/discovery.http.md
@@ -1,10 +1,5 @@
---
-aliases:
-- /docs/grafana-cloud/agent/flow/reference/components/discovery.http/
-- /docs/grafana-cloud/monitor-infrastructure/agent/flow/reference/components/discovery.http/
-- /docs/grafana-cloud/monitor-infrastructure/integrations/agent/flow/reference/components/discovery.http/
-- /docs/grafana-cloud/send-data/agent/flow/reference/components/discovery.http/
-canonical: https://grafana.com/docs/agent/latest/flow/reference/components/discovery.http/
+canonical: https://grafana.com/docs/alloy/latest/reference/components/discovery.http/
description: Learn about discovery.http
title: discovery.http
---

# discovery.http

-`discovery.http` provides a flexible way to define targets by querying an external http endpoint.
+`discovery.http` provides a flexible way to define targets by querying an external HTTP endpoint.

-It fetches targets from an HTTP endpoint containing a list of zero or more target definitions. The target must reply with an HTTP 200 response. The HTTP header Content-Type must be application/json, and the body must be valid JSON.
+It fetches targets from an HTTP endpoint containing a list of zero or more target definitions.
+The target must reply with an HTTP 200 response.
+The HTTP header Content-Type must be `application/json`, and the body must be valid JSON.

Example response body:

@@ -31,7 +28,7 @@ Example response body:
It is possible to use additional fields in the JSON to pass parameters to
[prometheus.scrape][] such as the `metricsPath` and `scrape_interval`.
-[prometheus.scrape]: {{< relref "./prometheus.scrape.md#technical-details" >}}
+[prometheus.scrape]: ../prometheus.scrape/#technical-details

As an example, the following will provide a target with a custom `metricsPath`,
scrape interval, and timeout value:

@@ -53,7 +50,7 @@ As an example, the following will provide a target with a custom `metricsPath`,

```

-It is also possible to append query parameters to the metrics path with the `__param_<name>` syntax.
+It's also possible to append query parameters to the metrics path with the `__param_<name>` syntax.

For example, the following will call a metrics path of `/health?target_data=prometheus`:

@@ -76,7 +73,7 @@ For example, the following will call a metrics path of `/health?target_data=prom

```

-For more information on the potential labels you can use, see the [prometheus.scrape technical details][prometheus.scrape] section, or the [Prometheus Configuration](https://prometheus.io/docs/prometheus/latest/configuration/configuration/#relabel_config) documentation.
+For more information on the potential labels you can use, see the [prometheus.scrape technical details][prometheus.scrape] section, or the [Prometheus Configuration][] documentation.
+
+[Prometheus Configuration]: https://prometheus.io/docs/prometheus/latest/configuration/configuration/#relabel_config

## Usage

```river
discovery.http "LABEL" {
  url = URL
}
```

## Arguments

The following arguments are supported:

-Name | Type | Description | Default | Required
------------------------- | ------------------- | ------------------------------------------------------------- | ------- | --------
-`url` | `string` | URL to scrape. | | yes
-`refresh_interval` | `duration` | How often to refresh targets. | `"60s"` | no
-`bearer_token_file` | `string` | File containing a bearer token to authenticate with. | | no
-`bearer_token` | `secret` | Bearer token to authenticate with. | | no
-`enable_http2` | `bool` | Whether HTTP2 is supported for requests. | `true` | no
-`follow_redirects` | `bool` | Whether redirects returned by the server should be followed. | `true` | no
-`proxy_url` | `string` | HTTP proxy to send requests through. | | no
-`no_proxy` | `string` | Comma-separated list of IP addresses, CIDR notations, and domain names to exclude from proxying. | | no
-`proxy_from_environment` | `bool` | Use the proxy URL indicated by environment variables. | `false` | no
-`proxy_connect_header` | `map(list(secret))` | Specifies headers to send to proxies during CONNECT requests. | | no
+Name | Type | Description | Default | Required
+-------------------------|---------------------|----------------------------------------------------------------------------------------------------|---------|---------
+`url` | `string` | URL to scrape. | | yes
+`refresh_interval` | `duration` | How often to refresh targets. | `"60s"` | no
+`bearer_token_file` | `string` | File containing a bearer token to authenticate with. | | no
+`bearer_token` | `secret` | Bearer token to authenticate with. | | no
+`enable_http2` | `bool` | Whether HTTP2 is supported for requests. | `true` | no
+`follow_redirects` | `bool` | Whether redirects returned by the server should be followed. | `true` | no
+`proxy_url` | `string` | HTTP proxy to send requests through. | | no
+`no_proxy` | `string` | Comma-separated list of IP addresses, CIDR notations, and domain names to exclude from proxying. | | no
+`proxy_from_environment` | `bool` | Use the proxy URL indicated by environment variables. | `false` | no
+`proxy_connect_header` | `map(list(secret))` | Specifies headers to send to proxies during CONNECT requests. | | no

At most, one of the following can be provided:
- [`bearer_token` argument](#arguments).
@@ -112,24 +111,23 @@ Name | Type | Description

[arguments]: #arguments

-{{< docs/shared lookup="flow/reference/components/http-client-proxy-config-description.md" source="agent" version="" >}}
+{{< docs/shared lookup="reference/components/http-client-proxy-config-description.md" source="alloy" version="" >}}

## Blocks

The following blocks are supported inside the definition of
`discovery.http`:

-Hierarchy | Block | Description | Required
---------- | ----- | ----------- | --------
-basic_auth | [basic_auth][] | Configure basic_auth for authenticating to the endpoint. | no
-authorization | [authorization][] | Configure generic authorization to the endpoint. | no
-oauth2 | [oauth2][] | Configure OAuth2 for authenticating to the endpoint. | no
-oauth2 > tls_config | [tls_config][] | Configure TLS settings for connecting to the endpoint. | no
-tls_config | [tls_config][] | Configure TLS settings for connecting to the endpoint. | no
+Hierarchy | Block | Description | Required
+--------------------|-------------------|----------------------------------------------------------|---------
+basic_auth | [basic_auth][] | Configure basic_auth for authenticating to the endpoint. | no
+authorization | [authorization][] | Configure generic authorization to the endpoint. | no
+oauth2 | [oauth2][] | Configure OAuth2 for authenticating to the endpoint. | no
+oauth2 > tls_config | [tls_config][] | Configure TLS settings for connecting to the endpoint. | no
+tls_config | [tls_config][] | Configure TLS settings for connecting to the endpoint. | no

-The `>` symbol indicates deeper levels of nesting. For example,
-`oauth2 > tls_config` refers to a `tls_config` block defined inside
-an `oauth2` block.
+The `>` symbol indicates deeper levels of nesting.
+For example, `oauth2 > tls_config` refers to a `tls_config` block defined inside an `oauth2` block.

[basic_auth]: #basic_auth-block
[authorization]: #authorization-block
@@ -138,26 +136,26 @@ an `oauth2` block.

### basic_auth block

-{{< docs/shared lookup="flow/reference/components/basic-auth-block.md" source="agent" version="" >}}
+{{< docs/shared lookup="reference/components/basic-auth-block.md" source="alloy" version="" >}}

### authorization block

-{{< docs/shared lookup="flow/reference/components/authorization-block.md" source="agent" version="" >}}
+{{< docs/shared lookup="reference/components/authorization-block.md" source="alloy" version="" >}}

### oauth2 block

-{{< docs/shared lookup="flow/reference/components/oauth2-block.md" source="agent" version="" >}}
+{{< docs/shared lookup="reference/components/oauth2-block.md" source="alloy" version="" >}}

### tls_config block

-{{< docs/shared lookup="flow/reference/components/tls-config-block.md" source="agent" version="" >}}
+{{< docs/shared lookup="reference/components/tls-config-block.md" source="alloy" version="" >}}

## Exported fields

The following fields are exported and can be referenced by other components:

-Name | Type | Description
---- | ---- | -----------
+Name | Type | Description
+----------|---------------------|---------------------------------------------------
-`targets` | `list(map(string))` | The set of targets discovered from the filesystem.
+`targets` | `list(map(string))` | The set of targets discovered from the URL.
Each target includes the following labels: @@ -166,9 +164,8 @@ Each target includes the following labels: ## Component health -`discovery.http` is only reported as unhealthy when given an invalid -configuration. In those cases, exported fields retain their last healthy -values. +`discovery.http` is only reported as unhealthy when given an invalid configuration. +In those cases, exported fields retain their last healthy values. ## Debug information @@ -180,7 +177,7 @@ values. ## Examples -This example will query a url every 15 seconds and expose targets that it finds: +This example will query a URL every 15 seconds and expose targets that it finds: ```river discovery.http "dynamic_targets" { diff --git a/docs/sources/flow/reference/components/discovery.ionos.md b/docs/sources/reference/components/discovery.ionos.md similarity index 73% rename from docs/sources/flow/reference/components/discovery.ionos.md rename to docs/sources/reference/components/discovery.ionos.md index 9bdaa6bc4d..4e4ee5e555 100644 --- a/docs/sources/flow/reference/components/discovery.ionos.md +++ b/docs/sources/reference/components/discovery.ionos.md @@ -1,10 +1,5 @@ --- -aliases: -- /docs/grafana-cloud/agent/flow/reference/components/discovery.ionos/ -- /docs/grafana-cloud/monitor-infrastructure/agent/flow/reference/components/discovery.ionos/ -- /docs/grafana-cloud/monitor-infrastructure/integrations/agent/flow/reference/components/discovery.ionos/ -- /docs/grafana-cloud/send-data/agent/flow/reference/components/discovery.ionos/ -canonical: https://grafana.com/docs/agent/latest/flow/reference/components/discovery.ionos/ +canonical: https://grafana.com/docs/alloy/latest/reference/components/discovery.ionos/ description: Learn about discovery.ionos title: discovery.ionos --- @@ -27,19 +22,19 @@ discovery.ionos "LABEL" { The following arguments are supported: -Name | Type | Description | Default | Required ------------------------- | ------------------- | ------------------------------------------------------------- | ------- | -------- -`datacenter_id` | `string` | The unique ID of the data center. | | yes -`refresh_interval` | `duration` | The time after which the servers are refreshed. | `60s` | no -`port` | `int` | The port to scrape metrics from. | 80 | no -`bearer_token_file` | `string` | File containing a bearer token to authenticate with. | | no -`bearer_token` | `secret` | Bearer token to authenticate with. | | no -`enable_http2` | `bool` | Whether HTTP2 is supported for requests. | `true` | no -`follow_redirects` | `bool` | Whether redirects returned by the server should be followed. | `true` | no -`proxy_url` | `string` | HTTP proxy to send requests through. | | no -`no_proxy` | `string` | Comma-separated list of IP addresses, CIDR notations, and domain names to exclude from proxying. | | no -`proxy_from_environment` | `bool` | Use the proxy URL indicated by environment variables. | `false` | no -`proxy_connect_header` | `map(list(secret))` | Specifies headers to send to proxies during CONNECT requests. | | no +Name | Type | Description | Default | Required +-------------------------|---------------------|--------------------------------------------------------------------------------------------------|---------|--------- +`datacenter_id` | `string` | The unique ID of the data center. | | yes +`refresh_interval` | `duration` | The time after which the servers are refreshed. | `60s` | no +`port` | `int` | The port to scrape metrics from. 
| 80 | no +`bearer_token_file` | `string` | File containing a bearer token to authenticate with. | | no +`bearer_token` | `secret` | Bearer token to authenticate with. | | no +`enable_http2` | `bool` | Whether HTTP2 is supported for requests. | `true` | no +`follow_redirects` | `bool` | Whether redirects returned by the server should be followed. | `true` | no +`proxy_url` | `string` | HTTP proxy to send requests through. | | no +`no_proxy` | `string` | Comma-separated list of IP addresses, CIDR notations, and domain names to exclude from proxying. | | no +`proxy_from_environment` | `bool` | Use the proxy URL indicated by environment variables. | `false` | no +`proxy_connect_header` | `map(list(secret))` | Specifies headers to send to proxies during CONNECT requests. | | no At most, one of the following can be provided: - [`bearer_token` argument](#arguments). @@ -50,7 +45,7 @@ Name | Type | Description [arguments]: #arguments -{{< docs/shared lookup="flow/reference/components/http-client-proxy-config-description.md" source="agent" version="" >}} +{{< docs/shared lookup="reference/components/http-client-proxy-config-description.md" source="alloy" version="" >}} ## Blocks @@ -76,19 +71,19 @@ an `oauth2` block. ### basic_auth block -{{< docs/shared lookup="flow/reference/components/basic-auth-block.md" source="agent" version="" >}} +{{< docs/shared lookup="reference/components/basic-auth-block.md" source="alloy" version="" >}} ### authorization block -{{< docs/shared lookup="flow/reference/components/authorization-block.md" source="agent" version="" >}} +{{< docs/shared lookup="reference/components/authorization-block.md" source="alloy" version="" >}} ### oauth2 block -{{< docs/shared lookup="flow/reference/components/oauth2-block.md" source="agent" version="" >}} +{{< docs/shared lookup="reference/components/oauth2-block.md" source="alloy" version="" >}} ### tls_config block -{{< docs/shared lookup="flow/reference/components/tls-config-block.md" source="agent" version="" >}} +{{< docs/shared lookup="reference/components/tls-config-block.md" source="alloy" version="" >}} ## Exported fields @@ -116,9 +111,8 @@ Each target includes the following labels: ## Component health -`discovery.ionos` is only reported as unhealthy when given an invalid -configuration. In those cases, exported fields retain their last healthy -values. +`discovery.ionos` is only reported as unhealthy when given an invalid configuration. +In those cases, exported fields retain their last healthy values. 
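+
+As a minimal sketch (the datacenter ID and credential values below are placeholders, not taken from this document), a configuration only needs the data center ID plus one form of authentication:
+
+```river
+discovery.ionos "example" {
+  datacenter_id = "DATACENTER_ID"
+
+  // basic_auth is one of the supported authentication blocks.
+  basic_auth {
+    username      = "IONOS_USERNAME"
+    password_file = "/run/secrets/ionos_password"
+  }
+}
+```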
## Debug information diff --git a/docs/sources/flow/reference/components/discovery.kubelet.md b/docs/sources/reference/components/discovery.kubelet.md similarity index 60% rename from docs/sources/flow/reference/components/discovery.kubelet.md rename to docs/sources/reference/components/discovery.kubelet.md index f9fef4a856..0ed182e6d7 100644 --- a/docs/sources/flow/reference/components/discovery.kubelet.md +++ b/docs/sources/reference/components/discovery.kubelet.md @@ -1,10 +1,5 @@ --- -aliases: -- /docs/grafana-cloud/agent/flow/reference/components/discovery.kubelet/ -- /docs/grafana-cloud/monitor-infrastructure/agent/flow/reference/components/discovery.kubelet/ -- /docs/grafana-cloud/monitor-infrastructure/integrations/agent/flow/reference/components/discovery.kubelet/ -- /docs/grafana-cloud/send-data/agent/flow/reference/components/discovery.kubelet/ -canonical: https://grafana.com/docs/agent/latest/flow/reference/components/discovery.kubelet/ +canonical: https://grafana.com/docs/alloy/latest/reference/components/discovery.kubelet/ description: Learn about discovery.kubelet labels: stage: beta @@ -13,8 +8,7 @@ title: discovery.kubelet # discovery.kubelet -`discovery.kubelet` discovers Kubernetes pods running on the specified Kubelet -and exposes them as scrape targets. +`discovery.kubelet` discovers Kubernetes pods running on the specified Kubelet and exposes them as scrape targets. ## Usage @@ -26,29 +20,30 @@ discovery.kubelet "LABEL" { ## Requirements * The Kubelet must be reachable from the `grafana-agent` pod network. -* Follow the [Kubelet authorization](https://kubernetes.io/docs/reference/access-authn-authz/kubelet-authn-authz/#kubelet-authorization) - documentation to configure authentication to the Kubelet API. +* Follow the [Kubelet authorization][] documentation to configure authentication to the Kubelet API. + +[Kubelet authorization]: https://kubernetes.io/docs/reference/access-authn-authz/kubelet-authn-authz/#kubelet-authorization ## Arguments The following arguments are supported: -Name | Type | Description | Default | Required ------------------------- | ------------------- | ------------------------------------------------------------- | ------- | -------- -`url` | `string` | URL of the Kubelet server. | "https://localhost:10250" | no -`refresh_interval` | `duration` | How often the Kubelet should be polled for scrape targets | `5s` | no -`namespaces` | `list(string)` | A list of namespaces to extract target pods from | | no -`bearer_token_file` | `string` | File containing a bearer token to authenticate with. | | no -`bearer_token` | `secret` | Bearer token to authenticate with. | | no -`enable_http2` | `bool` | Whether HTTP2 is supported for requests. | `true` | no -`follow_redirects` | `bool` | Whether redirects returned by the server should be followed. | `true` | no -`proxy_url` | `string` | HTTP proxy to send requests through. | | no -`no_proxy` | `string` | Comma-separated list of IP addresses, CIDR notations, and domain names to exclude from proxying. | | no -`proxy_from_environment` | `bool` | Use the proxy URL indicated by environment variables. | `false` | no -`proxy_connect_header` | `map(list(secret))` | Specifies headers to send to proxies during CONNECT requests. | | no - -The `namespaces` list limits the namespaces to discover resources in. If -omitted, all namespaces are searched. 
+Name | Type | Description | Default | Required
+-------------------------|---------------------|--------------------------------------------------------------------------------------------------|---------------------------|---------
+`url` | `string` | URL of the Kubelet server. | `"https://localhost:10250"` | no
+`refresh_interval` | `duration` | How often the Kubelet should be polled for scrape targets. | `"5s"` | no
+`namespaces` | `list(string)` | A list of namespaces to extract target pods from. | | no
+`bearer_token_file` | `string` | File containing a bearer token to authenticate with. | | no
+`bearer_token` | `secret` | Bearer token to authenticate with. | | no
+`enable_http2` | `bool` | Whether HTTP2 is supported for requests. | `true` | no
+`follow_redirects` | `bool` | Whether redirects returned by the server should be followed. | `true` | no
+`proxy_url` | `string` | HTTP proxy to send requests through. | | no
+`no_proxy` | `string` | Comma-separated list of IP addresses, CIDR notations, and domain names to exclude from proxying. | | no
+`proxy_from_environment` | `bool` | Use the proxy URL indicated by environment variables. | `false` | no
+`proxy_connect_header` | `map(list(secret))` | Specifies headers to send to proxies during CONNECT requests. | | no
+
+The `namespaces` list limits the namespaces to discover resources in.
+If omitted, all namespaces are searched.

`discovery.kubelet` appends a `/pods` path to `url` to request the available pods.
You can have additional paths in the `url`.
@@ -63,24 +58,23 @@ For example, if `url` is `https://kubernetes.default.svc.cluster.local:443/api/v

[arguments]: #arguments

-{{< docs/shared lookup="flow/reference/components/http-client-proxy-config-description.md" source="agent" version="" >}}
+{{< docs/shared lookup="reference/components/http-client-proxy-config-description.md" source="alloy" version="" >}}

## Blocks

The following blocks are supported inside the definition of
`discovery.kubelet`:

-Hierarchy | Block | Description | Required
---------- | ----- | ----------- | --------
-basic_auth | [basic_auth][] | Configure basic_auth for authenticating to the endpoint. | no
-authorization | [authorization][] | Configure generic authorization to the endpoint. | no
-oauth2 | [oauth2][] | Configure OAuth2 for authenticating to the endpoint. | no
-oauth2 > tls_config | [tls_config][] | Configure TLS settings for connecting to the endpoint. | no
-tls_config | [tls_config][] | Configure TLS settings for connecting to the endpoint. | no
+Hierarchy | Block | Description | Required
+--------------------|-------------------|----------------------------------------------------------|---------
+basic_auth | [basic_auth][] | Configure basic_auth for authenticating to the endpoint. | no
+authorization | [authorization][] | Configure generic authorization to the endpoint. | no
+oauth2 | [oauth2][] | Configure OAuth2 for authenticating to the endpoint. | no
+oauth2 > tls_config | [tls_config][] | Configure TLS settings for connecting to the endpoint. | no
+tls_config | [tls_config][] | Configure TLS settings for connecting to the endpoint. | no

-The `>` symbol indicates deeper levels of nesting. For example,
-`oauth2 > tls_config` refers to a `tls_config` block defined inside
-an `oauth2` block.
+The `>` symbol indicates deeper levels of nesting.
+For example, `oauth2 > tls_config` refers to a `tls_config` block defined inside an `oauth2` block.

[basic_auth]: #basic_auth-block
[authorization]: #authorization-block
@@ -89,26 +83,26 @@

### basic_auth block

-{{< docs/shared lookup="flow/reference/components/basic-auth-block.md" source="agent" version="" >}}
+{{< docs/shared lookup="reference/components/basic-auth-block.md" source="alloy" version="" >}}

### authorization block

-{{< docs/shared lookup="flow/reference/components/authorization-block.md" source="agent" version="" >}}
+{{< docs/shared lookup="reference/components/authorization-block.md" source="alloy" version="" >}}

### oauth2 block

-{{< docs/shared lookup="flow/reference/components/oauth2-block.md" source="agent" version="" >}}
+{{< docs/shared lookup="reference/components/oauth2-block.md" source="alloy" version="" >}}

### tls_config block

-{{< docs/shared lookup="flow/reference/components/tls-config-block.md" source="agent" version="" >}}
+{{< docs/shared lookup="reference/components/tls-config-block.md" source="alloy" version="" >}}

## Exported fields

The following fields are exported and can be referenced by other components:

-Name | Type | Description
---- | ---- | -----------
+Name | Type | Description
+----------|---------------------|----------------------------------------------------
`targets` | `list(map(string))` | The set of targets discovered from the Kubelet API.

Each target includes the following labels:

@@ -118,43 +112,32 @@ Each target includes the following labels:
* `__meta_kubernetes_pod_name`: The name of the pod object.
* `__meta_kubernetes_pod_ip`: The pod IP of the pod object.
* `__meta_kubernetes_pod_label_<labelname>`: Each label from the pod object.
-* `__meta_kubernetes_pod_labelpresent_<labelname>`: `true` for each label from
-  the pod object.
-* `__meta_kubernetes_pod_annotation_<annotationname>`: Each annotation from the
-  pod object.
-* `__meta_kubernetes_pod_annotationpresent_<annotationname>`: `true` for each
-  annotation from the pod object.
-* `__meta_kubernetes_pod_container_init`: `true` if the container is an
-  `InitContainer`.
-* `__meta_kubernetes_pod_container_name`: Name of the container the target
-  address points to.
-* `__meta_kubernetes_pod_container_id`: ID of the container the target address
-  points to. The ID is in the form `<type>://<container_id>`.
+* `__meta_kubernetes_pod_labelpresent_<labelname>`: `true` for each label from the pod object.
+* `__meta_kubernetes_pod_annotation_<annotationname>`: Each annotation from the pod object.
+* `__meta_kubernetes_pod_annotationpresent_<annotationname>`: `true` for each annotation from the pod object.
+* `__meta_kubernetes_pod_container_init`: `true` if the container is an `InitContainer`.
+* `__meta_kubernetes_pod_container_name`: Name of the container the target address points to.
+* `__meta_kubernetes_pod_container_id`: ID of the container the target address points to. The ID is in the form `<type>://<container_id>`.
* `__meta_kubernetes_pod_container_image`: The image the container is using.
* `__meta_kubernetes_pod_container_port_name`: Name of the container port.
* `__meta_kubernetes_pod_container_port_number`: Number of the container port.
-* `__meta_kubernetes_pod_container_port_protocol`: Protocol of the container
-  port.
-* `__meta_kubernetes_pod_ready`: Set to `true` or `false` for the pod's ready
-  state.
-* `__meta_kubernetes_pod_phase`: Set to `Pending`, `Running`, `Succeeded`, `Failed` or
-  `Unknown` in the lifecycle.
+* `__meta_kubernetes_pod_container_port_protocol`: Protocol of the container port.
+* `__meta_kubernetes_pod_ready`: Set to `true` or `false` for the pod's ready state.
+* `__meta_kubernetes_pod_phase`: Set to `Pending`, `Running`, `Succeeded`, `Failed` or `Unknown` in the lifecycle.
+* `__meta_kubernetes_pod_node_name`: The name of the node the pod is scheduled onto. * `__meta_kubernetes_pod_host_ip`: The current host IP of the pod object. * `__meta_kubernetes_pod_uid`: The UID of the pod object. * `__meta_kubernetes_pod_controller_kind`: Object kind of the pod controller. * `__meta_kubernetes_pod_controller_name`: Name of the pod controller. -> **Note**: The Kubelet API used by this component is an internal API and therefore the -> data in the response returned from the API cannot be guaranteed between different versions -> of the Kubelet. +{{< admonition type="note" >}} +The Kubelet API used by this component is an internal API and therefore the data in the response returned from the API can't be guaranteed between different versions of the Kubelet. +{{< /admonition >}} ## Component health -`discovery.kubelet` is reported as unhealthy when given an invalid -configuration. In those cases, exported fields retain their last healthy -values. +`discovery.kubelet` is reported as unhealthy when given an invalid configuration. +In those cases, exported fields retain their last healthy values. ## Debug information diff --git a/docs/sources/flow/reference/components/discovery.kubernetes.md b/docs/sources/reference/components/discovery.kubernetes.md similarity index 61% rename from docs/sources/flow/reference/components/discovery.kubernetes.md rename to docs/sources/reference/components/discovery.kubernetes.md index 95d1d69a97..f9136d2ee1 100644 --- a/docs/sources/flow/reference/components/discovery.kubernetes.md +++ b/docs/sources/reference/components/discovery.kubernetes.md @@ -1,23 +1,16 @@ --- -aliases: -- /docs/grafana-cloud/agent/flow/reference/components/discovery.kubernetes/ -- /docs/grafana-cloud/monitor-infrastructure/agent/flow/reference/components/discovery.kubernetes/ -- /docs/grafana-cloud/monitor-infrastructure/integrations/agent/flow/reference/components/discovery.kubernetes/ -- /docs/grafana-cloud/send-data/agent/flow/reference/components/discovery.kubernetes/ -canonical: https://grafana.com/docs/agent/latest/flow/reference/components/discovery.kubernetes/ +canonical: https://grafana.com/docs/alloy/latest/reference/components/discovery.kubernetes/ description: Learn about discovery.kubernetes title: discovery.kubernetes --- # discovery.kubernetes -`discovery.kubernetes` allows you to find scrape targets from Kubernetes -resources. It watches cluster state, and ensures targets are continually synced -with what is currently running in your cluster. +`discovery.kubernetes` allows you to find scrape targets from Kubernetes resources. +It watches cluster state, and ensures targets are continually synced with what is currently running in your cluster. -If you supply no connection information, this component defaults to an -in-cluster configuration. A kubeconfig file or manual connection settings can be used -to override the defaults. +If you supply no connection information, this component defaults to an in-cluster configuration. +A kubeconfig file or manual connection settings can be used to override the defaults. ## Usage @@ -31,19 +24,19 @@ discovery.kubernetes "LABEL" { The following arguments are supported: -Name | Type | Description | Default | Required ------------------------- | ------------------- | ------------------------------------------------------------- | ------- | -------- -`api_server` | `string` | URL of Kubernetes API server. | | no -`role` | `string` | Type of Kubernetes resource to query. 
| | yes -`kubeconfig_file` | `string` | Path of kubeconfig file to use for connecting to Kubernetes. | | no -`bearer_token_file` | `string` | File containing a bearer token to authenticate with. | | no -`bearer_token` | `secret` | Bearer token to authenticate with. | | no -`enable_http2` | `bool` | Whether HTTP2 is supported for requests. | `true` | no -`follow_redirects` | `bool` | Whether redirects returned by the server should be followed. | `true` | no -`proxy_url` | `string` | HTTP proxy to send requests through. | | no -`no_proxy` | `string` | Comma-separated list of IP addresses, CIDR notations, and domain names to exclude from proxying. | | no -`proxy_from_environment` | `bool` | Use the proxy URL indicated by environment variables. | `false` | no -`proxy_connect_header` | `map(list(secret))` | Specifies headers to send to proxies during CONNECT requests. | | no +Name | Type | Description | Default | Required +-------------------------|---------------------|--------------------------------------------------------------------------------------------------|---------|--------- +`api_server` | `string` | URL of Kubernetes API server. | | no +`role` | `string` | Type of Kubernetes resource to query. | | yes +`kubeconfig_file` | `string` | Path of kubeconfig file to use for connecting to Kubernetes. | | no +`bearer_token_file` | `string` | File containing a bearer token to authenticate with. | | no +`bearer_token` | `secret` | Bearer token to authenticate with. | | no +`enable_http2` | `bool` | Whether HTTP2 is supported for requests. | `true` | no +`follow_redirects` | `bool` | Whether redirects returned by the server should be followed. | `true` | no +`proxy_url` | `string` | HTTP proxy to send requests through. | | no +`no_proxy` | `string` | Comma-separated list of IP addresses, CIDR notations, and domain names to exclude from proxying. | | no +`proxy_from_environment` | `bool` | Use the proxy URL indicated by environment variables. | `false` | no +`proxy_connect_header` | `map(list(secret))` | Specifies headers to send to proxies during CONNECT requests. | | no At most, one of the following can be provided: - [`bearer_token` argument](#arguments). @@ -54,19 +47,14 @@ Name | Type | Description [arguments]: #arguments -{{< docs/shared lookup="flow/reference/components/http-client-proxy-config-description.md" source="agent" version="" >}} +{{< docs/shared lookup="reference/components/http-client-proxy-config-description.md" source="alloy" version="" >}} -The `role` argument is required to specify what type of targets to discover. -`role` must be one of `node`, `pod`, `service`, `endpoints`, `endpointslice`, -or `ingress`. +The `role` argument is required to specify what type of targets to discover. `role` must be one of `node`, `pod`, `service`, `endpoints`, `endpointslice`, or `ingress`. ### node role -The `node` role discovers one target per cluster node with the address -defaulting to the HTTP port of the Kubelet daemon. The target address defaults -to the first existing address of the Kubernetes node object in the address type -order of `NodeInternalIP`, `NodeExternalIP`, `NodeLegacyHostIP`, and -`NodeHostName`. +The `node` role discovers one target per cluster node with the address defaulting to the HTTP port of the Kubelet daemon. +The target address defaults to the first existing address of the Kubernetes node object in the address type order of `NodeInternalIP`, `NodeExternalIP`, `NodeLegacyHostIP`, and `NodeHostName`. 
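+
+For example, a minimal sketch that discovers every cluster node and scrapes the resulting targets; it assumes a `prometheus.remote_write.default` component is defined elsewhere:
+
+```river
+discovery.kubernetes "nodes" {
+  role = "node"
+}
+
+prometheus.scrape "nodes" {
+  targets    = discovery.kubernetes.nodes.targets
+  forward_to = [prometheus.remote_write.default.receiver]
+}
+```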
The following labels are included for discovered nodes:

@@ -75,53 +63,39 @@ The following labels are included for discovered nodes:
* `__meta_kubernetes_node_label_<labelname>`: Each label from the node object.
* `__meta_kubernetes_node_labelpresent_<labelname>`: Set to `true` for each label from the node object.
* `__meta_kubernetes_node_annotation_<annotationname>`: Each annotation from the node object.
-* `__meta_kubernetes_node_annotationpresent_<annotationname>`: Set to `true`
-  for each annotation from the node object.
-* `__meta_kubernetes_node_address_<address_type>`: The first address for each
-  node address type, if it exists.
+* `__meta_kubernetes_node_annotationpresent_<annotationname>`: Set to `true` for each annotation from the node object.
+* `__meta_kubernetes_node_address_<address_type>`: The first address for each node address type, if it exists.

-In addition, the `instance` label for the node will be set to the node name as
-retrieved from the API server.
+In addition, the `instance` label for the node will be set to the node name as retrieved from the API server.

### service role

The `service` role discovers a target for each service port for each service.
-This is generally useful for externally monitoring a service. The address will
-be set to the Kubernetes DNS name of the service and respective service port.
+This is generally useful for externally monitoring a service.
+The address will be set to the Kubernetes DNS name of the service and respective service port.

The following labels are included for discovered services:

* `__meta_kubernetes_namespace`: The namespace of the service object.
-* `__meta_kubernetes_service_annotation_<annotationname>`: Each annotation from
-  the service object.
-* `__meta_kubernetes_service_annotationpresent_<annotationname>`: `true` for
-  each annotation of the service object.
-* `__meta_kubernetes_service_cluster_ip`: The cluster IP address of the
-  service. This does not apply to services of type `ExternalName`.
-* `__meta_kubernetes_service_external_name`: The DNS name of the service.
-  This only applies to services of type `ExternalName`.
-* `__meta_kubernetes_service_label_<labelname>`: Each label from the service
-  object.
-* `__meta_kubernetes_service_labelpresent_<labelname>`: `true` for each label
-  of the service object.
+* `__meta_kubernetes_service_annotation_<annotationname>`: Each annotation from the service object.
+* `__meta_kubernetes_service_annotationpresent_<annotationname>`: `true` for each annotation of the service object.
+* `__meta_kubernetes_service_cluster_ip`: The cluster IP address of the service. This does not apply to services of type `ExternalName`.
+* `__meta_kubernetes_service_external_name`: The DNS name of the service. This only applies to services of type `ExternalName`.
+* `__meta_kubernetes_service_label_<labelname>`: Each label from the service object.
+* `__meta_kubernetes_service_labelpresent_<labelname>`: `true` for each label of the service object.
* `__meta_kubernetes_service_name`: The name of the service object.
-* `__meta_kubernetes_service_port_name`: Name of the service port for the
-  target.
-* `__meta_kubernetes_service_port_number`: Number of the service port for the
-  target.
-* `__meta_kubernetes_service_port_protocol`: Protocol of the service port for
-  the target.
+* `__meta_kubernetes_service_port_name`: Name of the service port for the target.
+* `__meta_kubernetes_service_port_number`: Number of the service port for the target.
+* `__meta_kubernetes_service_port_protocol`: Protocol of the service port for the target.
* `__meta_kubernetes_service_type`: The type of the service.

### pod role

-The `pod` role discovers all pods and exposes their containers as targets. For
-each declared port of a container, a single target is generated.
+The `pod` role discovers all pods and exposes their containers as targets.
+For each declared port of a container, a single target is generated.

-If a container has no specified ports, a port-free target per container is
-created. These targets must have a port manually injected using a
-[`discovery.relabel` component][discovery.relabel] before metrics can be
-collected from them.
+If a container has no specified ports, a port-free target per container is created.
+These targets must have a port manually injected using a [`discovery.relabel` component][discovery.relabel] before metrics can be collected from them.

The following labels are included for discovered pods:

* `__meta_kubernetes_namespace`: The namespace of the pod object.
* `__meta_kubernetes_pod_name`: The name of the pod object.
* `__meta_kubernetes_pod_ip`: The pod IP of the pod object.
* `__meta_kubernetes_pod_label_<labelname>`: Each label from the pod object.
-* `__meta_kubernetes_pod_labelpresent_<labelname>`: `true` for each label from
-  the pod object.
-* `__meta_kubernetes_pod_annotation_<annotationname>`: Each annotation from the
-  pod object.
-* `__meta_kubernetes_pod_annotationpresent_<annotationname>`: `true` for each
-  annotation from the pod object.
-* `__meta_kubernetes_pod_container_init`: `true` if the container is an
-  `InitContainer`.
-* `__meta_kubernetes_pod_container_name`: Name of the container the target
-  address points to.
-* `__meta_kubernetes_pod_container_id`: ID of the container the target address
-  points to. The ID is in the form `<type>://<container_id>`.
+* `__meta_kubernetes_pod_labelpresent_<labelname>`: `true` for each label from the pod object.
+* `__meta_kubernetes_pod_annotation_<annotationname>`: Each annotation from the pod object.
+* `__meta_kubernetes_pod_annotationpresent_<annotationname>`: `true` for each annotation from the pod object.
+* `__meta_kubernetes_pod_container_init`: `true` if the container is an `InitContainer`.
+* `__meta_kubernetes_pod_container_name`: Name of the container the target address points to.
+* `__meta_kubernetes_pod_container_id`: ID of the container the target address points to. The ID is in the form `<type>://<container_id>`.
* `__meta_kubernetes_pod_container_image`: The image the container is using.
* `__meta_kubernetes_pod_container_port_name`: Name of the container port.
* `__meta_kubernetes_pod_container_port_number`: Number of the container port.
-* `__meta_kubernetes_pod_container_port_protocol`: Protocol of the container
-  port.
-* `__meta_kubernetes_pod_ready`: Set to `true` or `false` for the pod's ready
-  state.
-* `__meta_kubernetes_pod_phase`: Set to `Pending`, `Running`, `Succeeded`, `Failed` or
-  `Unknown` in the lifecycle.
-* `__meta_kubernetes_pod_node_name`: The name of the node the pod is scheduled
-  onto.
+* `__meta_kubernetes_pod_container_port_protocol`: Protocol of the container port.
+* `__meta_kubernetes_pod_ready`: Set to `true` or `false` for the pod's ready state.
+* `__meta_kubernetes_pod_phase`: Set to `Pending`, `Running`, `Succeeded`, `Failed` or `Unknown` in the lifecycle.
+* `__meta_kubernetes_pod_node_name`: The name of the node the pod is scheduled onto.
* `__meta_kubernetes_pod_host_ip`: The current host IP of the pod object.
* `__meta_kubernetes_pod_uid`: The UID of the pod object.
* `__meta_kubernetes_pod_controller_kind`: Object kind of the pod controller.
* `__meta_kubernetes_pod_controller_name`: Name of the pod controller.
@@ -168,86 +132,58 @@ The following labels are included for discovered endpoints:

-* `__meta_kubernetes_namespace:` The namespace of the endpoints object.
+* `__meta_kubernetes_namespace`: The namespace of the endpoints object.
-* `__meta_kubernetes_endpoints_name:` The names of the endpoints object.
+* `__meta_kubernetes_endpoints_name`: The names of the endpoints object.
-* `__meta_kubernetes_endpoints_label_<labelname>`: Each label from the
-  endpoints object.
-* `__meta_kubernetes_endpoints_labelpresent_<labelname>`: `true` for each label
-  from the endpoints object.
-* The following labels are attached for all targets discovered directly from
-  the endpoints list:
+* `__meta_kubernetes_endpoints_label_<labelname>`: Each label from the endpoints object.
+* `__meta_kubernetes_endpoints_labelpresent_<labelname>`: `true` for each label from the endpoints object.
+* The following labels are attached for all targets discovered directly from the endpoints list:
  * `__meta_kubernetes_endpoint_hostname`: Hostname of the endpoint.
-  * `__meta_kubernetes_endpoint_node_name`: Name of the node hosting the
-    endpoint.
-  * `__meta_kubernetes_endpoint_ready`: Set to `true` or `false` for the
-    endpoint's ready state.
+  * `__meta_kubernetes_endpoint_node_name`: Name of the node hosting the endpoint.
+  * `__meta_kubernetes_endpoint_ready`: Set to `true` or `false` for the endpoint's ready state.
  * `__meta_kubernetes_endpoint_port_name`: Name of the endpoint port.
  * `__meta_kubernetes_endpoint_port_protocol`: Protocol of the endpoint port.
-  * `__meta_kubernetes_endpoint_address_target_kind`: Kind of the endpoint
-    address target.
-  * `__meta_kubernetes_endpoint_address_target_name`: Name of the endpoint
-    address target.
-* If the endpoints belong to a service, all labels of the `service` role
-  discovery are attached.
-* For all targets backed by a pod, all labels of the `pod` role discovery are
-  attached.
+  * `__meta_kubernetes_endpoint_address_target_kind`: Kind of the endpoint address target.
+  * `__meta_kubernetes_endpoint_address_target_name`: Name of the endpoint address target.
+* If the endpoints belong to a service, all labels of the `service` role discovery are attached.
+* For all targets backed by a pod, all labels of the `pod` role discovery are attached.

### endpointslice role

-The endpointslice role discovers targets from existing Kubernetes endpoint
-slices. For each endpoint address referenced in the `EndpointSlice` object, one
-target is discovered. If the endpoint is backed by a pod, all container ports
-of a pod are discovered as targets even if they are not bound to an endpoint
-port.
+The endpointslice role discovers targets from existing Kubernetes endpoint slices.
+For each endpoint address referenced in the `EndpointSlice` object, one target is discovered.
+If the endpoint is backed by a Pod, all container ports of the Pod are discovered as targets even if they are not bound to an endpoint port.

The following labels are included for discovered endpoint slices:

* `__meta_kubernetes_namespace`: The namespace of the endpoints object.
-* `__meta_kubernetes_endpointslice_name`: The name of endpoint slice object.
+* `__meta_kubernetes_endpointslice_name`: The name of the endpoint slice object.
-* The following labels are attached for all targets discovered directly from
-  the endpoint slice list:
-  * `__meta_kubernetes_endpointslice_address_target_kind`: Kind of the
-    referenced object.
-  * `__meta_kubernetes_endpointslice_address_target_name`: Name of referenced
-    object.
-  * `__meta_kubernetes_endpointslice_address_type`: The IP protocol family of
-    the address of the target.
-  * `__meta_kubernetes_endpointslice_endpoint_conditions_ready`: Set to `true`
-    or `false` for the referenced endpoint's ready state.
-  * `__meta_kubernetes_endpointslice_endpoint_topology_kubernetes_io_hostname`:
-    Name of the node hosting the referenced endpoint.
-  * `__meta_kubernetes_endpointslice_endpoint_topology_present_kubernetes_io_hostname`:
-    `true` if the referenced object has a `kubernetes.io/hostname` annotation.
+* The following labels are attached for all targets discovered directly from the endpoint slice list:
+  * `__meta_kubernetes_endpointslice_address_target_kind`: Kind of the referenced object.
+  * `__meta_kubernetes_endpointslice_address_target_name`: Name of referenced object.
+  * `__meta_kubernetes_endpointslice_address_type`: The IP protocol family of the address of the target.
+  * `__meta_kubernetes_endpointslice_endpoint_conditions_ready`: Set to `true` or `false` for the referenced endpoint's ready state.
+  * `__meta_kubernetes_endpointslice_endpoint_topology_kubernetes_io_hostname`: Name of the node hosting the referenced endpoint.
+  * `__meta_kubernetes_endpointslice_endpoint_topology_present_kubernetes_io_hostname`: `true` if the referenced object has a `kubernetes.io/hostname` annotation.
  * `__meta_kubernetes_endpointslice_port`: Port of the referenced endpoint.
-  * `__meta_kubernetes_endpointslice_port_name`: Named port of the referenced
-    endpoint.
-  * `__meta_kubernetes_endpointslice_port_protocol`: Protocol of the referenced
-    endpoint.
-* If the endpoints belong to a service, all labels of the `service` role
-  discovery are attached.
-* For all targets backed by a pod, all labels of the `pod` role discovery are
-  attached.
+  * `__meta_kubernetes_endpointslice_port_name`: Named port of the referenced endpoint.
+  * `__meta_kubernetes_endpointslice_port_protocol`: Protocol of the referenced endpoint.
+* If the endpoints belong to a service, all labels of the `service` role discovery are attached.
+* For all targets backed by a pod, all labels of the `pod` role discovery are attached.

### ingress role

-The `ingress` role discovers a target for each path of each ingress. This is
-generally useful for externally monitoring an ingress. The address will be set
-to the host specified in the Kubernetes `Ingress`'s `spec` block.
+The `ingress` role discovers a target for each path of each ingress.
+This is generally useful for externally monitoring an ingress.
+The address will be set to the host specified in the Kubernetes `Ingress`'s `spec` block.

The following labels are included for discovered ingress objects:

* `__meta_kubernetes_namespace`: The namespace of the ingress object.
* `__meta_kubernetes_ingress_name`: The name of the ingress object.
-* `__meta_kubernetes_ingress_label_<labelname>`: Each label from the ingress
-  object.
-* `__meta_kubernetes_ingress_labelpresent_<labelname>`: `true` for each label
-  from the ingress object.
-* `__meta_kubernetes_ingress_annotation_<annotationname>`: Each annotation from
-  the ingress object.
-* `__meta_kubernetes_ingress_annotationpresent_<annotationname>`: `true` for each
-  annotation from the ingress object.
-* `__meta_kubernetes_ingress_class_name`: Class name from ingress spec, if
-  present.
-* `__meta_kubernetes_ingress_scheme`: Protocol scheme of ingress, `https` if TLS
-  config is set. Defaults to `http`.
+* `__meta_kubernetes_ingress_label_<labelname>`: Each label from the ingress object.
+* `__meta_kubernetes_ingress_labelpresent_<labelname>`: `true` for each label from the ingress object.
+* `__meta_kubernetes_ingress_annotation_<annotationname>`: Each annotation from the ingress object.
+* `__meta_kubernetes_ingress_annotationpresent_<annotationname>`: `true` for each annotation from the ingress object.
+* `__meta_kubernetes_ingress_class_name`: Class name from ingress spec, if present.
+* `__meta_kubernetes_ingress_scheme`: Protocol scheme of ingress, `https` if TLS config is set. Defaults to `http`.
-* `__meta_kubernetes_ingress_path`: Path from ingress spec. Defaults to /.
+* `__meta_kubernetes_ingress_path`: Path from ingress spec. Defaults to `/`.

## Blocks

@@ -255,20 +191,19 @@ The following labels are included for discovered ingress objects:
The following blocks are supported inside the definition of
`discovery.kubernetes`:

-Hierarchy | Block | Description | Required
---------- | ----- | ----------- | --------
-namespaces | [namespaces][] | Information about which Kubernetes namespaces to search. | no
-selectors | [selectors][] | Information about which Kubernetes namespaces to search. | no
-attach_metadata | [attach_metadata][] | Optional metadata to attach to discovered targets. | no
-basic_auth | [basic_auth][] | Configure basic_auth for authenticating to the endpoint. | no
-authorization | [authorization][] | Configure generic authorization to the endpoint. | no
-oauth2 | [oauth2][] | Configure OAuth2 for authenticating to the endpoint. | no
-oauth2 > tls_config | [tls_config][] | Configure TLS settings for connecting to the endpoint. | no
-tls_config | [tls_config][] | Configure TLS settings for connecting to the endpoint. | no
-
-The `>` symbol indicates deeper levels of nesting. For example,
-`oauth2 > tls_config` refers to a `tls_config` block defined inside
-an `oauth2` block.
+Hierarchy | Block | Description | Required
+--------------------|---------------------|---------------------------------------------------------------------------|---------
+namespaces | [namespaces][] | Information about which Kubernetes namespaces to search. | no
+selectors | [selectors][] | Label and field selectors that limit discovery to a subset of resources. | no
+attach_metadata | [attach_metadata][] | Optional metadata to attach to discovered targets. | no
+basic_auth | [basic_auth][] | Configure basic_auth for authenticating to the endpoint. | no
+authorization | [authorization][] | Configure generic authorization to the endpoint. | no
+oauth2 | [oauth2][] | Configure OAuth2 for authenticating to the endpoint. | no
+oauth2 > tls_config | [tls_config][] | Configure TLS settings for connecting to the endpoint. | no
+tls_config | [tls_config][] | Configure TLS settings for connecting to the endpoint. | no
+
+The `>` symbol indicates deeper levels of nesting.
+For example, `oauth2 > tls_config` refers to a `tls_config` block defined inside an `oauth2` block.

[namespaces]: #namespaces-block
[selectors]: #selectors-block
@@ -280,81 +215,79 @@ an `oauth2` block.

### namespaces block

-The `namespaces` block limits the namespaces to discover resources in. If
-omitted, all namespaces are searched.
+The `namespaces` block limits the namespaces to discover resources in.
+If omitted, all namespaces are searched.

-Name | Type | Description | Default | Required
---- | ---- | ----------- | ------- | --------
-`own_namespace` | `bool` | Include the namespace {{< param "PRODUCT_NAME" >}} is running in. | | no
-`names` | `list(string)` | List of namespaces to search. | | no
+Name | Type | Description | Default | Required
+----------------|----------------|-------------------------------------------------------------------|---------|---------
+`own_namespace` | `bool` | Include the namespace {{< param "PRODUCT_NAME" >}} is running in. | | no
+`names` | `list(string)` | List of namespaces to search. | | no

### selectors block

-The `selectors` block contains optional label and field selectors to limit the
-discovery process to a subset of resources.
+The `selectors` block contains optional label and field selectors to limit the discovery process to a subset of resources.

-Name | Type | Description | Default | Required
---- | ---- | ----------- | ------- | --------
-`role` | `string` | Role of the selector. | | yes
-`label`| `string` | Label selector string. | | no
-`field` | `string` | Field selector string. | | no
+Name | Type | Description | Default | Required
+--------|----------|------------------------|---------|---------
+`role` | `string` | Role of the selector. | | yes
+`label` | `string` | Label selector string. | | no
+`field` | `string` | Field selector string. | | no

-See Kubernetes' documentation for [Field selectors][] and [Labels and
-selectors][] to learn more about the possible filters that can be used.
+See Kubernetes' documentation for [Field selectors][] and [Labels and selectors][] to learn more about the possible filters that can be used.

-The endpoints role supports pod, service, and endpoints selectors.
-The pod role supports node selectors when configured with `attach_metadata: {node: true}`.
+The endpoints role supports Pod, service, and endpoints selectors.
+The Pod role supports node selectors when configured with `attach_metadata: {node: true}`.
Other roles only support selectors matching the role itself (e.g. node role can only contain node selectors).

-> **Note**: Using multiple `discovery.kubernetes` components with different
-> selectors may result in a bigger load against the Kubernetes API.
->
-> Selectors are recommended for retrieving a small set of resources in a very
-> large cluster. Smaller clusters are recommended to avoid selectors in favor
-> of filtering with [a `discovery.relabel` component][discovery.relabel]
-> instead.
+{{< admonition type="note" >}}
+Using multiple `discovery.kubernetes` components with different selectors may result in a bigger load against the Kubernetes API.
+
+Selectors are recommended for retrieving a small set of resources in a very large cluster.
+In smaller clusters, avoid selectors in favor of filtering with [a `discovery.relabel` component][discovery.relabel].
+
+[discovery.relabel]: ../discovery.relabel/
+{{< /admonition >}}

[Field selectors]: https://kubernetes.io/docs/concepts/overview/working-with-objects/field-selectors/
[Labels and selectors]: https://kubernetes.io/docs/concepts/overview/working-with-objects/labels/
-[discovery.relabel]: {{< relref "./discovery.relabel.md" >}}

### attach_metadata block

-The `attach_metadata` block allows to attach node metadata to discovered
-targets. Valid for roles: pod, endpoints, endpointslice.
-Name | Type | Description | Default | Required
---- | ---- | ----------- | ------- | --------
-`node` | `bool` | Attach node metadata. | | no
+The `attach_metadata` block lets you attach node metadata to discovered targets.
+Valid for roles: pod, endpoints, endpointslice.
+
+Name | Type | Description | Default | Required
+-------|--------|-----------------------|---------|---------
+`node` | `bool` | Attach node metadata.
| | no ### basic_auth block -{{< docs/shared lookup="flow/reference/components/basic-auth-block.md" source="agent" version="" >}} +{{< docs/shared lookup="reference/components/basic-auth-block.md" source="alloy" version="" >}} ### authorization block -{{< docs/shared lookup="flow/reference/components/authorization-block.md" source="agent" version="" >}} +{{< docs/shared lookup="reference/components/authorization-block.md" source="alloy" version="" >}} ### oauth2 block -{{< docs/shared lookup="flow/reference/components/oauth2-block.md" source="agent" version="" >}} +{{< docs/shared lookup="reference/components/oauth2-block.md" source="alloy" version="" >}} ### tls_config block -{{< docs/shared lookup="flow/reference/components/tls-config-block.md" source="agent" version="" >}} +{{< docs/shared lookup="reference/components/tls-config-block.md" source="alloy" version="" >}} ## Exported fields The following fields are exported and can be referenced by other components: -Name | Type | Description ----- | ---- | ----------- +Name | Type | Description +----------|---------------------|------------------------------------------------------- `targets` | `list(map(string))` | The set of targets discovered from the Kubernetes API. ## Component health -`discovery.kubernetes` is reported as unhealthy when given an invalid -configuration. In those cases, exported fields retain their last healthy -values. +`discovery.kubernetes` is reported as unhealthy when given an invalid configuration. +In those cases, exported fields retain their last healthy values. ## Debug information @@ -398,7 +331,7 @@ Replace the following: ### Kubeconfig authentication -This example uses a kubeconfig file to authenticate to the Kubernetes API: +This example uses a Kubeconfig file to authenticate to the Kubernetes API: ```river discovery.kubernetes "k8s_pods" { @@ -473,7 +406,7 @@ This configuration could be useful if you are running {{< param "PRODUCT_ROOT_NA {{< admonition type="note" >}} This example assumes you have used Helm chart to deploy {{< param "PRODUCT_NAME" >}} in Kubernetes and sets `HOSTNAME` to the Kubernetes host name. -If you have a custom Kubernetes deployment, you must adapt this example to your configuration. +If you have a custom Kubernetes Deployment, you must adapt this example to your configuration. 
{{< /admonition >}} ```river diff --git a/docs/sources/flow/reference/components/discovery.kuma.md b/docs/sources/reference/components/discovery.kuma.md similarity index 54% rename from docs/sources/flow/reference/components/discovery.kuma.md rename to docs/sources/reference/components/discovery.kuma.md index e4eb17e69b..720a0ed19b 100644 --- a/docs/sources/flow/reference/components/discovery.kuma.md +++ b/docs/sources/reference/components/discovery.kuma.md @@ -1,10 +1,5 @@ --- -aliases: -- /docs/grafana-cloud/agent/flow/reference/components/discovery.kuma/ -- /docs/grafana-cloud/monitor-infrastructure/agent/flow/reference/components/discovery.kuma/ -- /docs/grafana-cloud/monitor-infrastructure/integrations/agent/flow/reference/components/discovery.kuma/ -- /docs/grafana-cloud/send-data/agent/flow/reference/components/discovery.kuma/ -canonical: https://grafana.com/docs/agent/latest/flow/reference/components/discovery.kuma/ +canonical: https://grafana.com/docs/alloy/latest/reference/components/discovery.kuma/ description: Learn about discovery.kuma title: discovery.kuma --- @@ -27,43 +22,42 @@ discovery.kuma "LABEL" { The following arguments are supported: -Name | Type | Description | Default | Required ------------------------- | ------------------- | -------------------------------------------------------------- | ------- | -------- -`server` | `string` | Address of the Kuma Control Plane's MADS xDS server. | | yes -`refresh_interval` | `duration` | The time to wait between polling update requests. | `"30s"` | no -`fetch_timeout` | `duration` | The time after which the monitoring assignments are refreshed. | `"2m"` | no -`bearer_token_file` | `string` | File containing a bearer token to authenticate with. | | no -`bearer_token` | `secret` | Bearer token to authenticate with. | | no -`enable_http2` | `bool` | Whether HTTP2 is supported for requests. | `true` | no -`follow_redirects` | `bool` | Whether redirects returned by the server should be followed. | `true` | no -`proxy_url` | `string` | HTTP proxy to send requests through. | | no -`no_proxy` | `string` | Comma-separated list of IP addresses, CIDR notations, and domain names to exclude from proxying. | | no -`proxy_from_environment` | `bool` | Use the proxy URL indicated by environment variables. | `false` | no -`proxy_connect_header` | `map(list(secret))` | Specifies headers to send to proxies during CONNECT requests. | | no +Name | Type | Description | Default | Required +-------------------------|---------------------|--------------------------------------------------------------------------------------------------|---------|--------- +`server` | `string` | Address of the Kuma Control Plane's MADS xDS server. | | yes +`refresh_interval` | `duration` | The time to wait between polling update requests. | `"30s"` | no +`fetch_timeout` | `duration` | The time after which the monitoring assignments are refreshed. | `"2m"` | no +`bearer_token_file` | `string` | File containing a bearer token to authenticate with. | | no +`bearer_token` | `secret` | Bearer token to authenticate with. | | no +`enable_http2` | `bool` | Whether HTTP2 is supported for requests. | `true` | no +`follow_redirects` | `bool` | Whether redirects returned by the server should be followed. | `true` | no +`proxy_url` | `string` | HTTP proxy to send requests through. | | no +`no_proxy` | `string` | Comma-separated list of IP addresses, CIDR notations, and domain names to exclude from proxying. 
| | no +`proxy_from_environment` | `bool` | Use the proxy URL indicated by environment variables. | `false` | no +`proxy_connect_header` | `map(list(secret))` | Specifies headers to send to proxies during CONNECT requests. | | no At most, one of the following can be provided: - [`bearer_token` argument](#arguments). - - [`bearer_token_file` argument](#arguments). + - [`bearer_token_file` argument](#arguments). - [`basic_auth` block][basic_auth]. - [`authorization` block][authorization]. - [`oauth2` block][oauth2]. -{{< docs/shared lookup="flow/reference/components/http-client-proxy-config-description.md" source="agent" version="" >}} +{{< docs/shared lookup="reference/components/http-client-proxy-config-description.md" source="alloy" version="" >}} The following blocks are supported inside the definition of `discovery.kuma`: -Hierarchy | Block | Description | Required ---------- | ----- | ----------- | -------- -basic_auth | [basic_auth][] | Configure basic_auth for authenticating to the endpoint. | no -authorization | [authorization][] | Configure generic authorization to the endpoint. | no -oauth2 | [oauth2][] | Configure OAuth2 for authenticating to the endpoint. | no -oauth2 > tls_config | [tls_config][] | Configure TLS settings for connecting to the endpoint. | no -tls_config | [tls_config][] | Configure TLS settings for connecting to the endpoint. | no +Hierarchy | Block | Description | Required +--------------------|-------------------|----------------------------------------------------------|--------- +basic_auth | [basic_auth][] | Configure basic_auth for authenticating to the endpoint. | no +authorization | [authorization][] | Configure generic authorization to the endpoint. | no +oauth2 | [oauth2][] | Configure OAuth2 for authenticating to the endpoint. | no +oauth2 > tls_config | [tls_config][] | Configure TLS settings for connecting to the endpoint. | no +tls_config | [tls_config][] | Configure TLS settings for connecting to the endpoint. | no -The `>` symbol indicates deeper levels of nesting. For example, -`oauth2 > tls_config` refers to a `tls_config` block defined inside -an `oauth2` block. +The `>` symbol indicates deeper levels of nesting. +For example, `oauth2 > tls_config` refers to a `tls_config` block defined inside an `oauth2` block. [basic_auth]: #basic_auth-block [authorization]: #authorization-block @@ -72,19 +66,19 @@ an `oauth2` block. ### basic_auth block -{{< docs/shared lookup="flow/reference/components/basic-auth-block.md" source="agent" version="" >}} +{{< docs/shared lookup="reference/components/basic-auth-block.md" source="alloy" version="" >}} ### authorization block -{{< docs/shared lookup="flow/reference/components/authorization-block.md" source="agent" version="" >}} +{{< docs/shared lookup="reference/components/authorization-block.md" source="alloy" version="" >}} ### oauth2 block -{{< docs/shared lookup="flow/reference/components/oauth2-block.md" source="agent" version="" >}} +{{< docs/shared lookup="reference/components/oauth2-block.md" source="alloy" version="" >}} ### tls_config block -{{< docs/shared lookup="flow/reference/components/tls-config-block.md" source="agent" version="" >}} +{{< docs/shared lookup="reference/components/tls-config-block.md" source="alloy" version="" >}} ## Exported fields @@ -92,11 +86,10 @@ an `oauth2` block. 
The following fields are exported and can be referenced by other components: Name | Type | Description ---------- | ------------------- | ----------- +----------|---------------------|------------------------------------------------- `targets` | `list(map(string))` | The set of targets discovered from the Kuma API. -The following meta labels are available on targets and can be used by the -discovery.relabel component: +The following meta labels are available on targets and can be used by the discovery.relabel component: * `__meta_kuma_mesh`: the name of the proxy's Mesh * `__meta_kuma_dataplane`: the name of the proxy * `__meta_kuma_service`: the name of the proxy's associated Service @@ -104,9 +97,8 @@ discovery.relabel component: ## Component health -`discovery.kuma` is only reported as unhealthy when given an invalid -configuration. In those cases, exported fields retain their last healthy -values. +`discovery.kuma` is only reported as unhealthy when given an invalid configuration. +In those cases, exported fields retain their last healthy values. ## Debug information diff --git a/docs/sources/flow/reference/components/discovery.lightsail.md b/docs/sources/reference/components/discovery.lightsail.md similarity index 50% rename from docs/sources/flow/reference/components/discovery.lightsail.md rename to docs/sources/reference/components/discovery.lightsail.md index 81688b35a5..c6f959ff54 100644 --- a/docs/sources/flow/reference/components/discovery.lightsail.md +++ b/docs/sources/reference/components/discovery.lightsail.md @@ -4,7 +4,7 @@ aliases: - /docs/grafana-cloud/monitor-infrastructure/agent/flow/reference/components/discovery.lightsail/ - /docs/grafana-cloud/monitor-infrastructure/integrations/agent/flow/reference/components/discovery.lightsail/ - /docs/grafana-cloud/send-data/agent/flow/reference/components/discovery.lightsail/ -canonical: https://grafana.com/docs/agent/latest/flow/reference/components/discovery.lightsail/ +canonical: https://grafana.com/docs/alloy/latest/reference/components/discovery.lightsail/ description: Learn about discovery.lightsail title: discovery.lightsail --- @@ -24,24 +24,24 @@ discovery.lightsail "LABEL" { The following arguments are supported: -Name | Type | Description | Default | Required ----- | ---- | ----------- | ------- | -------- -`endpoint` | `string` | Custom endpoint to be used.| | no -`region` | `string` | The AWS region. If blank, the region from the instance metadata is used. | | no -`access_key` | `string` | The AWS API key ID. If blank, the environment variable `AWS_ACCESS_KEY_ID` is used. | | no -`secret_key` | `string` | The AWS API key secret. If blank, the environment variable `AWS_SECRET_ACCESS_KEY` is used. | | no -`profile` | `string` | Named AWS profile used to connect to the API. | | no -`role_arn` | `string` | AWS Role ARN, an alternative to using AWS API keys. | | no -`refresh_interval` | `string` | Refresh interval to re-read the instance list. | 60s | no -`port` | `int` | The port to scrape metrics from. If using the public IP address, this must instead be specified in the relabeling rule. | 80 | no -`bearer_token_file` | `string` | File containing a bearer token to authenticate with. | | no -`bearer_token` | `secret` | Bearer token to authenticate with. | | no -`enable_http2` | `bool` | Whether HTTP2 is supported for requests. | `true` | no -`follow_redirects` | `bool` | Whether redirects returned by the server should be followed. | `true` | no -`proxy_url` | `string` | HTTP proxy to send requests through. 
| | no -`no_proxy` | `string` | Comma-separated list of IP addresses, CIDR notations, and domain names to exclude from proxying. | | no -`proxy_from_environment` | `bool` | Use the proxy URL indicated by environment variables. | `false` | no -`proxy_connect_header` | `map(list(secret))` | Specifies headers to send to proxies during CONNECT requests. | | no +Name | Type | Description | Default | Required +-------------------------|---------------------|-------------------------------------------------------------------------------------------------------------------------|---------|--------- +`endpoint` | `string` | Custom endpoint to be used. | | no +`region` | `string` | The AWS region. If blank, the region from the instance metadata is used. | | no +`access_key` | `string` | The AWS API key ID. If blank, the environment variable `AWS_ACCESS_KEY_ID` is used. | | no +`secret_key` | `string` | The AWS API key secret. If blank, the environment variable `AWS_SECRET_ACCESS_KEY` is used. | | no +`profile` | `string` | Named AWS profile used to connect to the API. | | no +`role_arn` | `string` | AWS Role ARN, an alternative to using AWS API keys. | | no +`refresh_interval` | `string` | Refresh interval to re-read the instance list. | 60s | no +`port` | `int` | The port to scrape metrics from. If using the public IP address, this must instead be specified in the relabeling rule. | 80 | no +`bearer_token_file` | `string` | File containing a bearer token to authenticate with. | | no +`bearer_token` | `secret` | Bearer token to authenticate with. | | no +`enable_http2` | `bool` | Whether HTTP2 is supported for requests. | `true` | no +`follow_redirects` | `bool` | Whether redirects returned by the server should be followed. | `true` | no +`proxy_url` | `string` | HTTP proxy to send requests through. | | no +`no_proxy` | `string` | Comma-separated list of IP addresses, CIDR notations, and domain names to exclude from proxying. | | no +`proxy_from_environment` | `bool` | Use the proxy URL indicated by environment variables. | `false` | no +`proxy_connect_header` | `map(list(secret))` | Specifies headers to send to proxies during CONNECT requests. | | no At most, one of the following can be provided: - [`bearer_token` argument](#arguments). @@ -52,24 +52,23 @@ At most, one of the following can be provided: [arguments]: #arguments -{{< docs/shared lookup="flow/reference/components/http-client-proxy-config-description.md" source="agent" version="" >}} +{{< docs/shared lookup="reference/components/http-client-proxy-config-description.md" source="alloy" version="" >}} ## Blocks The following blocks are supported inside the definition of `discovery.lightsail`: -Hierarchy | Block | Description | Required ---------- | ----- | ----------- | -------- -basic_auth | [basic_auth][] | Configure basic_auth for authenticating to the endpoint. | no -authorization | [authorization][] | Configure generic authorization to the endpoint. | no -oauth2 | [oauth2][] | Configure OAuth2 for authenticating to the endpoint. | no -oauth2 > tls_config | [tls_config][] | Configure TLS settings for connecting to the endpoint. | no -tls_config | [tls_config][] | Configure TLS settings for connecting to the endpoint. | no +Hierarchy | Block | Description | Required +--------------------|-------------------|----------------------------------------------------------|--------- +basic_auth | [basic_auth][] | Configure basic_auth for authenticating to the endpoint. 
| no +authorization | [authorization][] | Configure generic authorization to the endpoint. | no +oauth2 | [oauth2][] | Configure OAuth2 for authenticating to the endpoint. | no +oauth2 > tls_config | [tls_config][] | Configure TLS settings for connecting to the endpoint. | no +tls_config | [tls_config][] | Configure TLS settings for connecting to the endpoint. | no -The `>` symbol indicates deeper levels of nesting. For example, -`oauth2 > tls_config` refers to a `tls_config` block defined inside -an `oauth2` block. +The `>` symbol indicates deeper levels of nesting. +For example, `oauth2 > tls_config` refers to a `tls_config` block defined inside an `oauth2` block. [basic_auth]: #basic_auth-block [authorization]: #authorization-block @@ -78,26 +77,26 @@ an `oauth2` block. ### basic_auth block -{{< docs/shared lookup="flow/reference/components/basic-auth-block.md" source="agent" version="" >}} +{{< docs/shared lookup="reference/components/basic-auth-block.md" source="alloy" version="" >}} ### authorization block -{{< docs/shared lookup="flow/reference/components/authorization-block.md" source="agent" version="" >}} +{{< docs/shared lookup="reference/components/authorization-block.md" source="alloy" version="" >}} ### oauth2 block -{{< docs/shared lookup="flow/reference/components/oauth2-block.md" source="agent" version="" >}} +{{< docs/shared lookup="reference/components/oauth2-block.md" source="alloy" version="" >}} ### tls_config block -{{< docs/shared lookup="flow/reference/components/tls-config-block.md" source="agent" version="" >}} +{{< docs/shared lookup="reference/components/tls-config-block.md" source="alloy" version="" >}} ## Exported fields The following fields are exported and can be referenced by other components: -Name | Type | Description ----- | ---- | ----------- +Name | Type | Description +----------|---------------------|----------------------------------------- `targets` | `list(map(string))` | The set of discovered Lightsail targets. Each target includes the following labels: @@ -116,9 +115,8 @@ Each target includes the following labels: ## Component health -`discovery.lightsail` is only reported as unhealthy when given an invalid -configuration. In those cases, exported fields retain their last healthy -values. +`discovery.lightsail` is only reported as unhealthy when given an invalid configuration. +In those cases, exported fields retain their last healthy values. ## Debug information diff --git a/docs/sources/flow/reference/components/discovery.linode.md b/docs/sources/reference/components/discovery.linode.md similarity index 65% rename from docs/sources/flow/reference/components/discovery.linode.md rename to docs/sources/reference/components/discovery.linode.md index 9b0bffc553..175a241e6c 100644 --- a/docs/sources/flow/reference/components/discovery.linode.md +++ b/docs/sources/reference/components/discovery.linode.md @@ -1,17 +1,16 @@ --- -aliases: -- /docs/grafana-cloud/monitor-infrastructure/agent/flow/reference/components/discovery.linode/ -- /docs/grafana-cloud/send-data/agent/flow/reference/components/discovery.linode/ -canonical: https://grafana.com/docs/agent/latest/flow/reference/components/discovery.linode/ +canonical: https://grafana.com/docs/alloy/latest/reference/components/discovery.linode/ description: Learn about discovery.linode title: discovery.linode --- # discovery.linode -`discovery.linode` allows you to retrieve scrape targets from [Linode's](https://www.linode.com/) Linode APIv4. 
+`discovery.linode` allows you to retrieve scrape targets from [Linode's][] APIv4.
This service discovery uses the public IPv4 address by default, but that can be changed with relabeling.
+
+[Linode's]: https://www.linode.com/
+

## Usage

```river
@@ -28,19 +27,19 @@ The linode APIv4 Token must be created with the scopes: `linodes:read_only`, `ip

The following arguments are supported:

-Name | Type | Description | Default | Required
------------------------- | ------------------- | ------------------------------------------------------------- | ------- | --------
-`refresh_interval` | `duration` | The time to wait between polling update requests. | `"60s"` | no
-`port` | `int` | Port that metrics are scraped from. | `80` | no
-`tag_separator` | `string` | The string by which Linode Instance tags are joined into the tag label. | `,` | no
-`bearer_token_file` | `string` | File containing a bearer token to authenticate with. | | no
-`bearer_token` | `secret` | Bearer token to authenticate with. | | no
-`enable_http2` | `bool` | Whether HTTP2 is supported for requests. | `true` | no
-`follow_redirects` | `bool` | Whether redirects returned by the server should be followed. | `true` | no
-`proxy_url` | `string` | HTTP proxy to send requests through. | | no
-`no_proxy` | `string` | Comma-separated list of IP addresses, CIDR notations, and domain names to exclude from proxying. | | no
-`proxy_from_environment` | `bool` | Use the proxy URL indicated by environment variables. | `false` | no
-`proxy_connect_header` | `map(list(secret))` | Specifies headers to send to proxies during CONNECT requests. | | no
+Name | Type | Description | Default | Required
+-------------------------|---------------------|--------------------------------------------------------------------------------------------------|---------|---------
+`refresh_interval` | `duration` | The time to wait between polling update requests. | `"60s"` | no
+`port` | `int` | Port that metrics are scraped from. | `80` | no
+`tag_separator` | `string` | The string by which Linode Instance tags are joined into the tag label. | `,` | no
+`bearer_token_file` | `string` | File containing a bearer token to authenticate with. | | no
+`bearer_token` | `secret` | Bearer token to authenticate with. | | no
+`enable_http2` | `bool` | Whether HTTP2 is supported for requests. | `true` | no
+`follow_redirects` | `bool` | Whether redirects returned by the server should be followed. | `true` | no
+`proxy_url` | `string` | HTTP proxy to send requests through. | | no
+`no_proxy` | `string` | Comma-separated list of IP addresses, CIDR notations, and domain names to exclude from proxying. | | no
+`proxy_from_environment` | `bool` | Use the proxy URL indicated by environment variables. | `false` | no
+`proxy_connect_header` | `map(list(secret))` | Specifies headers to send to proxies during CONNECT requests. | | no

At most, one of the following can be provided:
- [`bearer_token` argument](#arguments).
@@ -49,24 +48,23 @@ Name | Type | Description
- [`authorization` block][authorization].
- [`oauth2` block][oauth2].
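+
+The following is a minimal sketch of how these arguments fit together. The token path and port are placeholder values:
+
+```river
+discovery.linode "example" {
+  // Placeholder path to a file holding a Linode APIv4 token with the required scopes.
+  bearer_token_file = "/run/secrets/linode_token"
+
+  // Port appended to each discovered address for scraping.
+  port = 9100
+}
+```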
-{{< docs/shared lookup="flow/reference/components/http-client-proxy-config-description.md" source="agent" version="" >}} +{{< docs/shared lookup="reference/components/http-client-proxy-config-description.md" source="alloy" version="" >}} ## Blocks The following blocks are supported inside the definition of `discovery.linode`: -Hierarchy | Block | Description | Required ---------- | ----- | ----------- | -------- -basic_auth | [basic_auth][] | Configure basic_auth for authenticating to the endpoint. | no -authorization | [authorization][] | Configure generic authorization to the endpoint. | no -oauth2 | [oauth2][] | Configure OAuth2 for authenticating to the endpoint. | no -oauth2 > tls_config | [tls_config][] | Configure TLS settings for connecting to the endpoint. | no -tls_config | [tls_config][] | Configure TLS settings for connecting to the endpoint. | no +Hierarchy | Block | Description | Required +--------------------|-------------------|----------------------------------------------------------|--------- +basic_auth | [basic_auth][] | Configure basic_auth for authenticating to the endpoint. | no +authorization | [authorization][] | Configure generic authorization to the endpoint. | no +oauth2 | [oauth2][] | Configure OAuth2 for authenticating to the endpoint. | no +oauth2 > tls_config | [tls_config][] | Configure TLS settings for connecting to the endpoint. | no +tls_config | [tls_config][] | Configure TLS settings for connecting to the endpoint. | no -The `>` symbol indicates deeper levels of nesting. For example, -`oauth2 > tls_config` refers to a `tls_config` block defined inside -an `oauth2` block. +The `>` symbol indicates deeper levels of nesting. +For example, `oauth2 > tls_config` refers to a `tls_config` block defined inside an `oauth2` block. [basic_auth]: #basic_auth-block [authorization]: #authorization-block @@ -75,19 +73,19 @@ an `oauth2` block. ### basic_auth block -{{< docs/shared lookup="flow/reference/components/basic-auth-block.md" source="agent" version="" >}} +{{< docs/shared lookup="reference/components/basic-auth-block.md" source="alloy" version="" >}} ### authorization block -{{< docs/shared lookup="flow/reference/components/authorization-block.md" source="agent" version="" >}} +{{< docs/shared lookup="reference/components/authorization-block.md" source="alloy" version="" >}} ### oauth2 block -{{< docs/shared lookup="flow/reference/components/oauth2-block.md" source="agent" version="" >}} +{{< docs/shared lookup="reference/components/oauth2-block.md" source="alloy" version="" >}} ### tls_config block -{{< docs/shared lookup="flow/reference/components/tls-config-block.md" source="agent" version="" >}} +{{< docs/shared lookup="reference/components/tls-config-block.md" source="alloy" version="" >}} ## Exported fields @@ -95,11 +93,10 @@ an `oauth2` block. The following fields are exported and can be referenced by other components: Name | Type | Description ---------- | ------------------- | ----------- +----------|---------------------|--------------------------------------------------- `targets` | `list(map(string))` | The set of targets discovered from the Linode API. 
-The following meta labels are available on targets and can be used by the -discovery.relabel component: +The following meta labels are available on targets and can be used by the discovery.relabel component: * `__meta_linode_instance_id`: the id of the Linode instance * `__meta_linode_instance_label`: the label of the Linode instance @@ -122,9 +119,8 @@ discovery.relabel component: ## Component health -`discovery.linode` is only reported as unhealthy when given an invalid -configuration. In those cases, exported fields retain their last healthy -values. +`discovery.linode` is only reported as unhealthy when given an invalid configuration. +In those cases, exported fields retain their last healthy values. ## Debug information diff --git a/docs/sources/flow/reference/components/discovery.marathon.md b/docs/sources/reference/components/discovery.marathon.md similarity index 69% rename from docs/sources/flow/reference/components/discovery.marathon.md rename to docs/sources/reference/components/discovery.marathon.md index 69e8630b04..b13f4d728c 100644 --- a/docs/sources/flow/reference/components/discovery.marathon.md +++ b/docs/sources/reference/components/discovery.marathon.md @@ -1,10 +1,5 @@ --- -aliases: -- /docs/grafana-cloud/agent/flow/reference/components/discovery.marathon/ -- /docs/grafana-cloud/monitor-infrastructure/agent/flow/reference/components/discovery.marathon/ -- /docs/grafana-cloud/monitor-infrastructure/integrations/agent/flow/reference/components/discovery.marathon/ -- /docs/grafana-cloud/send-data/agent/flow/reference/components/discovery.marathon/ -canonical: https://grafana.com/docs/agent/latest/flow/reference/components/discovery.marathon/ +canonical: https://grafana.com/docs/alloy/latest/reference/components/discovery.marathon/ description: Learn about discovery.marathon title: discovery.marathon --- @@ -25,20 +20,20 @@ discovery.marathon "LABEL" { The following arguments are supported: -Name | Type | Description | Default | Required ------------------------- | ------------------- | ------------------------------------------------------------- | ------- | -------- -`servers` | `list(string)` | List of Marathon servers. | | yes -`refresh_interval` | `duration` | Interval at which to refresh the list of targets. | `"30s"` | no -`auth_token` | `secret` | Auth token to authenticate with. | | no -`auth_token_file` | `string` | File containing an auth token to authenticate with. | | no -`bearer_token_file` | `string` | File containing a bearer token to authenticate with. | | no -`bearer_token` | `secret` | Bearer token to authenticate with. | | no -`enable_http2` | `bool` | Whether HTTP2 is supported for requests. | `true` | no -`follow_redirects` | `bool` | Whether redirects returned by the server should be followed. | `true` | no -`proxy_url` | `string` | HTTP proxy to send requests through. | | no -`no_proxy` | `string` | Comma-separated list of IP addresses, CIDR notations, and domain names to exclude from proxying. | | no -`proxy_from_environment` | `bool` | Use the proxy URL indicated by environment variables. | `false` | no -`proxy_connect_header` | `map(list(secret))` | Specifies headers to send to proxies during CONNECT requests. | | no +Name | Type | Description | Default | Required +-------------------------|---------------------|--------------------------------------------------------------------------------------------------|---------|--------- +`servers` | `list(string)` | List of Marathon servers. 
| | yes +`refresh_interval` | `duration` | Interval at which to refresh the list of targets. | `"30s"` | no +`auth_token` | `secret` | Auth token to authenticate with. | | no +`auth_token_file` | `string` | File containing an auth token to authenticate with. | | no +`bearer_token_file` | `string` | File containing a bearer token to authenticate with. | | no +`bearer_token` | `secret` | Bearer token to authenticate with. | | no +`enable_http2` | `bool` | Whether HTTP2 is supported for requests. | `true` | no +`follow_redirects` | `bool` | Whether redirects returned by the server should be followed. | `true` | no +`proxy_url` | `string` | HTTP proxy to send requests through. | | no +`no_proxy` | `string` | Comma-separated list of IP addresses, CIDR notations, and domain names to exclude from proxying. | | no +`proxy_from_environment` | `bool` | Use the proxy URL indicated by environment variables. | `false` | no +`proxy_connect_header` | `map(list(secret))` | Specifies headers to send to proxies during CONNECT requests. | | no At most, one of the following can be provided: - [`auth_token` argument](#arguments). @@ -51,7 +46,7 @@ Name | Type | Description [arguments]: #arguments -{{< docs/shared lookup="flow/reference/components/http-client-proxy-config-description.md" source="agent" version="" >}} +{{< docs/shared lookup="reference/components/http-client-proxy-config-description.md" source="alloy" version="" >}} ## Blocks @@ -66,9 +61,8 @@ The following blocks are supported inside the definition of | oauth2 > tls_config | [tls_config][] | Configure TLS settings for connecting to the endpoint. | no | | tls_config | [tls_config][] | Configure TLS settings for connecting to the endpoint. | no | -The `>` symbol indicates deeper levels of nesting. For example, -`oauth2 > tls_config` refers to a `tls_config` block defined inside -an `oauth2` block. +The `>` symbol indicates deeper levels of nesting. +For example, `oauth2 > tls_config` refers to a `tls_config` block defined inside an `oauth2` block. [basic_auth]: #basic_auth-block [authorization]: #authorization-block @@ -77,19 +71,19 @@ an `oauth2` block. ### basic_auth block -{{< docs/shared lookup="flow/reference/components/basic-auth-block.md" source="agent" version="" >}} +{{< docs/shared lookup="reference/components/basic-auth-block.md" source="alloy" version="" >}} ### authorization block -{{< docs/shared lookup="flow/reference/components/authorization-block.md" source="agent" version="" >}} +{{< docs/shared lookup="reference/components/authorization-block.md" source="alloy" version="" >}} ### oauth2 block -{{< docs/shared lookup="flow/reference/components/oauth2-block.md" source="agent" version="" >}} +{{< docs/shared lookup="reference/components/oauth2-block.md" source="alloy" version="" >}} ### tls_config block -{{< docs/shared lookup="flow/reference/components/tls-config-block.md" source="agent" version="" >}} +{{< docs/shared lookup="reference/components/tls-config-block.md" source="alloy" version="" >}} ## Exported fields @@ -111,9 +105,8 @@ Each target includes the following labels: ## Component health -`discovery.marathon` is only reported as unhealthy when given an invalid -configuration. In those cases, exported fields retain their last healthy -values. +`discovery.marathon` is only reported as unhealthy when given an invalid configuration. +In those cases, exported fields retain their last healthy values. 
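+
+As a brief illustrative sketch, a typical configuration points at one or more Marathon servers. The URL below is a placeholder:
+
+```river
+discovery.marathon "example" {
+  // Placeholder Marathon API endpoint; list every server in the cluster.
+  servers = ["http://marathon.example.com:8080"]
+
+  // Poll for new targets every minute instead of the default 30s.
+  refresh_interval = "60s"
+}
+```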
## Debug information diff --git a/docs/sources/flow/reference/components/discovery.nerve.md b/docs/sources/reference/components/discovery.nerve.md similarity index 86% rename from docs/sources/flow/reference/components/discovery.nerve.md rename to docs/sources/reference/components/discovery.nerve.md index 04812c356b..9b7ebca64b 100644 --- a/docs/sources/flow/reference/components/discovery.nerve.md +++ b/docs/sources/reference/components/discovery.nerve.md @@ -1,8 +1,5 @@ --- -aliases: -- /docs/grafana-cloud/monitor-infrastructure/agent/flow/reference/components/discovery.nerve/ -- /docs/grafana-cloud/send-data/agent/flow/reference/components/discovery.nerve/ -canonical: https://grafana.com/docs/agent/latest/flow/reference/components/discovery.nerve/ +canonical: https://grafana.com/docs/alloy/latest/reference/components/discovery.nerve/ description: Learn about discovery.nerve title: discovery.nerve --- @@ -33,13 +30,11 @@ Name | Type | Description | Def `timeout` | `duration` | The timeout to use. | `"10s"` | no -Each element in the `path` list can either point to a single service, or to the -root of a tree of services. +Each element in the `path` list can either point to a single service, or to the root of a tree of services. ## Blocks -The `discovery.nerve` component does not support any blocks, and is configured -fully through arguments. +The `discovery.nerve` component does not support any blocks, and is configured fully through arguments. ## Exported fields @@ -49,8 +44,7 @@ Name | Type | Description --------- | ------------------- | ----------- `targets` | `list(map(string))` | The set of targets discovered from Nerve's API. -The following meta labels are available on targets and can be used by the -discovery.relabel component +The following meta labels are available on targets and can be used by the discovery.relabel component * `__meta_nerve_path`: the full path to the endpoint node in Zookeeper * `__meta_nerve_endpoint_host`: the host of the endpoint * `__meta_nerve_endpoint_port`: the port of the endpoint @@ -58,9 +52,8 @@ discovery.relabel component ## Component health -`discovery.nerve` is only reported as unhealthy when given an invalid -configuration. In those cases, exported fields retain their last healthy -values. +`discovery.nerve` is only reported as unhealthy when given an invalid configuration. +In those cases, exported fields retain their last healthy values. 
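+
+For illustration, the following sketch watches a tree of services registered in Zookeeper. The server address and path are placeholders:
+
+```river
+discovery.nerve "example" {
+  // Placeholder Zookeeper ensemble member.
+  servers = ["zookeeper.example.com:2181"]
+
+  // Each element can point to a single service or to the root of a tree of services.
+  paths = ["/nerve/services"]
+
+  timeout = "10s"
+}
+```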
## Debug information

diff --git a/docs/sources/flow/reference/components/discovery.nomad.md b/docs/sources/reference/components/discovery.nomad.md
similarity index 53%
rename from docs/sources/flow/reference/components/discovery.nomad.md
rename to docs/sources/reference/components/discovery.nomad.md
index 372306a4e2..71eeb221ee 100644
--- a/docs/sources/flow/reference/components/discovery.nomad.md
+++ b/docs/sources/reference/components/discovery.nomad.md
@@ -1,10 +1,5 @@
---
-aliases:
-- /docs/grafana-cloud/agent/flow/reference/components/discovery.nomad/
-- /docs/grafana-cloud/monitor-infrastructure/agent/flow/reference/components/discovery.nomad/
-- /docs/grafana-cloud/monitor-infrastructure/integrations/agent/flow/reference/components/discovery.nomad/
-- /docs/grafana-cloud/send-data/agent/flow/reference/components/discovery.nomad/
-canonical: https://grafana.com/docs/agent/latest/flow/reference/components/discovery.nomad/
+canonical: https://grafana.com/docs/alloy/latest/reference/components/discovery.nomad/
description: Learn about discovery.nomad
title: discovery.nomad
---
@@ -24,50 +19,49 @@ discovery.nomad "LABEL" {

The following arguments are supported:

-Name | Type | Description | Default | Required
------------------------- | ------------------- | ------------------------------------------------------------- | ----------------------- | --------
-`server` | `string` | Address of nomad server. | `http://localhost:4646` | no
-`namespace` | `string` | Nomad namespace to use. | `default` | no
-`region` | `string` | Nomad region to use. | `global` | no
-`allow_stale` | `bool` | Allow reading from non-leader nomad instances. | `true` | no
-`tag_separator` | `string` | Seperator to join nomad tags into Prometheus labels. | `,` | no
-`refresh_interval` | `duration` | Frequency to refresh list of containers. | `"30s"` | no
-`bearer_token_file` | `string` | File containing a bearer token to authenticate with. | | no
-`bearer_token` | `secret` | Bearer token to authenticate with. | | no
-`enable_http2` | `bool` | Whether HTTP2 is supported for requests. | `true` | no
-`follow_redirects` | `bool` | Whether redirects returned by the server should be followed. | `true` | no
-`proxy_url` | `string` | HTTP proxy to send requests through. | | no
-`no_proxy` | `string` | Comma-separated list of IP addresses, CIDR notations, and domain names to exclude from proxying. | | no
-`proxy_from_environment` | `bool` | Use the proxy URL indicated by environment variables. | `false` | no
-`proxy_connect_header` | `map(list(secret))` | Specifies headers to send to proxies during CONNECT requests. | | no
+Name | Type | Description | Default | Required
+-------------------------|---------------------|--------------------------------------------------------------------------------------------------|-------------------------|---------
+`server` | `string` | Address of the Nomad server. | `http://localhost:4646` | no
+`namespace` | `string` | Nomad namespace to use. | `default` | no
+`region` | `string` | Nomad region to use. | `global` | no
+`allow_stale` | `bool` | Allow reading from non-leader Nomad instances. | `true` | no
+`tag_separator` | `string` | Separator to join Nomad tags into Prometheus labels. | `,` | no
+`refresh_interval` | `duration` | Frequency to refresh the list of targets. | `"30s"` | no
+`bearer_token_file` | `string` | File containing a bearer token to authenticate with. | | no
+`bearer_token` | `secret` | Bearer token to authenticate with.
| | no +`enable_http2` | `bool` | Whether HTTP2 is supported for requests. | `true` | no +`follow_redirects` | `bool` | Whether redirects returned by the server should be followed. | `true` | no +`proxy_url` | `string` | HTTP proxy to send requests through. | | no +`no_proxy` | `string` | Comma-separated list of IP addresses, CIDR notations, and domain names to exclude from proxying. | | no +`proxy_from_environment` | `bool` | Use the proxy URL indicated by environment variables. | `false` | no +`proxy_connect_header` | `map(list(secret))` | Specifies headers to send to proxies during CONNECT requests. | | no At most, one of the following can be provided: - [`bearer_token` argument](#arguments). - - [`bearer_token_file` argument](#arguments). + - [`bearer_token_file` argument](#arguments). - [`basic_auth` block][basic_auth]. - [`authorization` block][authorization]. - [`oauth2` block][oauth2]. [arguments]: #arguments -{{< docs/shared lookup="flow/reference/components/http-client-proxy-config-description.md" source="agent" version="" >}} +{{< docs/shared lookup="reference/components/http-client-proxy-config-description.md" source="alloy" version="" >}} ## Blocks The following blocks are supported inside the definition of `discovery.nomad`: -Hierarchy | Block | Description | Required ---------- | ----- | ----------- | -------- -basic_auth | [basic_auth][] | Configure basic_auth for authenticating to the endpoint. | no -authorization | [authorization][] | Configure generic authorization to the endpoint. | no -oauth2 | [oauth2][] | Configure OAuth2 for authenticating to the endpoint. | no -oauth2 > tls_config | [tls_config][] | Configure TLS settings for connecting to the endpoint. | no -tls_config | [tls_config][] | Configure TLS settings for connecting to the endpoint. | no +Hierarchy | Block | Description | Required +--------------------|-------------------|----------------------------------------------------------|--------- +basic_auth | [basic_auth][] | Configure basic_auth for authenticating to the endpoint. | no +authorization | [authorization][] | Configure generic authorization to the endpoint. | no +oauth2 | [oauth2][] | Configure OAuth2 for authenticating to the endpoint. | no +oauth2 > tls_config | [tls_config][] | Configure TLS settings for connecting to the endpoint. | no +tls_config | [tls_config][] | Configure TLS settings for connecting to the endpoint. | no -The `>` symbol indicates deeper levels of nesting. For example, -`oauth2 > tls_config` refers to a `tls_config` block defined inside -an `oauth2` block. +The `>` symbol indicates deeper levels of nesting. +For example, `oauth2 > tls_config` refers to a `tls_config` block defined inside an `oauth2` block. [basic_auth]: #basic_auth-block [authorization]: #authorization-block @@ -76,26 +70,26 @@ an `oauth2` block. 
### basic_auth block -{{< docs/shared lookup="flow/reference/components/basic-auth-block.md" source="agent" version="" >}} +{{< docs/shared lookup="reference/components/basic-auth-block.md" source="alloy" version="" >}} ### authorization block -{{< docs/shared lookup="flow/reference/components/authorization-block.md" source="agent" version="" >}} +{{< docs/shared lookup="reference/components/authorization-block.md" source="alloy" version="" >}} ### oauth2 block -{{< docs/shared lookup="flow/reference/components/oauth2-block.md" source="agent" version="" >}} +{{< docs/shared lookup="reference/components/oauth2-block.md" source="alloy" version="" >}} ### tls_config block -{{< docs/shared lookup="flow/reference/components/tls-config-block.md" source="agent" version="" >}} +{{< docs/shared lookup="reference/components/tls-config-block.md" source="alloy" version="" >}} ## Exported fields The following fields are exported and can be referenced by other components: -Name | Type | Description ----- | ---- | ----------- +Name | Type | Description +----------|---------------------|----------------------------------------------------- `targets` | `list(map(string))` | The set of targets discovered from the nomad server. Each target includes the following labels: @@ -112,9 +106,8 @@ Each target includes the following labels: ## Component health -`discovery.nomad` is only reported as unhealthy when given an invalid -configuration. In those cases, exported fields retain their last healthy -values. +`discovery.nomad` is only reported as unhealthy when given an invalid configuration. +In those cases, exported fields retain their last healthy values. ## Debug information diff --git a/docs/sources/flow/reference/components/discovery.openstack.md b/docs/sources/reference/components/discovery.openstack.md similarity index 54% rename from docs/sources/flow/reference/components/discovery.openstack.md rename to docs/sources/reference/components/discovery.openstack.md index 6d26908602..894c66101a 100644 --- a/docs/sources/flow/reference/components/discovery.openstack.md +++ b/docs/sources/reference/components/discovery.openstack.md @@ -1,10 +1,5 @@ --- -aliases: -- /docs/grafana-cloud/agent/flow/reference/components/discovery.openstack/ -- /docs/grafana-cloud/monitor-infrastructure/agent/flow/reference/components/discovery.openstack/ -- /docs/grafana-cloud/monitor-infrastructure/integrations/agent/flow/reference/components/discovery.openstack/ -- /docs/grafana-cloud/send-data/agent/flow/reference/components/discovery.openstack/ -canonical: https://grafana.com/docs/agent/latest/flow/reference/components/discovery.openstack/ +canonical: https://grafana.com/docs/alloy/latest/reference/components/discovery.openstack/ description: Learn about discovery.openstack title: discovery.openstack --- @@ -28,33 +23,35 @@ discovery.openstack "LABEL" { The following arguments are supported: -Name | Type | Description | Default | Required -------------------- | ---------- | ---------------------------------------------------------------------- | -------------------- | -------- -`role` | `string` | Role of the discovered targets. | | yes -`region` | `string` | OpenStack region. | | yes -`identity_endpoint` | `string` | Specifies the HTTP endpoint that is required to work with te Identity API of the appropriate version | | no -`username` | `string` | OpenStack username for the Identity V2 and V3 APIs. | | no -`userid` | `string` | OpenStack userid for the Identity V2 and V3 APIs. 
| | no
-`password` | `secret` | Password for the Identity V2 and V3 APIs. | | no
-`domain_name` | `string` | OpenStack domain name for the Identity V2 and V3 APIs. | | no
-`domain_id` | `string` | OpenStack domain ID for the Identity V2 and V3 APIs. | | no
-`project_name` | `string` | OpenStack project name for the Identity V2 and V3 APIs. | | no
-`project_id` | `string` | OpenStack project ID for the Identity V2 and V3 APIs. | | no
-`application_credential_name` | `string` | OpenStack application credential name for the Identity V2 and V3 APIs. | | no
-`application_credential_id` | `string` | OpenStack application credential ID for the Identity V2 and V3 APIs. | | no
-`application_credential_secret` | `secret` | OpenStack application credential secret for the Identity V2 and V3 APIs. | | no
-`all_tenants` | `bool` | Whether the service discovery should list all instances for all projects. | `false` | no
-`refresh_interval` | `duration`| Refresh interval to re-read the instance list. | `60s` | no
-`port` | `int` | The port to scrape metrics from. | `80` | no
-`availability` | `string` | The availability of the endpoint to connect to. | `public` | no
+Name | Type | Description | Default | Required
+--------------------------------|------------|------------------------------------------------------------------------------------------------------|----------|---------
+`role` | `string` | Role of the discovered targets. | | yes
+`region` | `string` | OpenStack region. | | yes
+`identity_endpoint` | `string` | Specifies the HTTP endpoint that is required to work with the Identity API of the appropriate version. | | no
+`username` | `string` | OpenStack username for the Identity V2 and V3 APIs. | | no
+`userid` | `string` | OpenStack userid for the Identity V2 and V3 APIs. | | no
+`password` | `secret` | Password for the Identity V2 and V3 APIs. | | no
+`domain_name` | `string` | OpenStack domain name for the Identity V2 and V3 APIs. | | no
+`domain_id` | `string` | OpenStack domain ID for the Identity V2 and V3 APIs. | | no
+`project_name` | `string` | OpenStack project name for the Identity V2 and V3 APIs. | | no
+`project_id` | `string` | OpenStack project ID for the Identity V2 and V3 APIs. | | no
+`application_credential_name` | `string` | OpenStack application credential name for the Identity V2 and V3 APIs. | | no
+`application_credential_id` | `string` | OpenStack application credential ID for the Identity V2 and V3 APIs. | | no
+`application_credential_secret` | `secret` | OpenStack application credential secret for the Identity V2 and V3 APIs. | | no
+`all_tenants` | `bool` | Whether the service discovery should list all instances for all projects. | `false` | no
+`refresh_interval` | `duration` | Refresh interval to re-read the instance list. | `60s` | no
+`port` | `int` | The port to scrape metrics from. | `80` | no
+`availability` | `string` | The availability of the endpoint to connect to. | `public` | no

`role` must be one of `hypervisor` or `instance`.

`username` is required if using the Identity V2 API. In Identity V3, either `userid` or a combination of `username` and `domain_id` or `domain_name` is needed.

-`project_id` and `project_name` fields are optional for the Identity V2 API. Some providers allow you to specify a `project_name` instead of the `project_id`. Some require both.
+The `project_id` and `project_name` fields are optional for the Identity V2 API.
+Some providers allow you to specify a `project_name` instead of the `project_id`. Some require both.
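+
+For example, a password-based Identity V3 configuration might look like the following sketch, where the endpoint, credentials, and region are placeholders:
+
+```river
+discovery.openstack "example" {
+  role   = "instance"
+  region = "RegionOne"
+
+  // Placeholder Keystone endpoint and Identity V3 credentials.
+  identity_endpoint = "https://keystone.example.com:5000/v3"
+  username          = "monitoring"
+  domain_name       = "Default"
+  password          = "PASSWORD"
+}
+```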
-`application_credential_id` or `application_credential_name` fields are required if using an application credential to authenticate. Some providers allow you to create an application credential to authenticate rather than a password.
+The `application_credential_id` or `application_credential_name` fields are required if using an application credential to authenticate.
+Some providers allow you to create an application credential to authenticate rather than a password.

The `application_credential_secret` field is required if using an application credential to authenticate.

@@ -65,22 +62,22 @@ Name | Type | Description

## Blocks

The following blocks are supported inside the definition of `discovery.openstack`:

-Hierarchy | Block | Description | Required
---------- | ----- | ----------- | --------
+Hierarchy | Block | Description | Required
+-----------|----------------|------------------------------------------------------|---------
tls_config | [tls_config][] | TLS configuration for requests to the OpenStack API. | no

[tls_config]: #tls_config-block

### tls_config block

-{{< docs/shared lookup="flow/reference/components/tls-config-block.md" source="agent" version="" >}}
+{{< docs/shared lookup="reference/components/tls-config-block.md" source="alloy" version="" >}}

## Exported fields

The following fields are exported and can be referenced by other components:

Name | Type | Description
---------- | ------------------- | -----------
+----------|---------------------|------------------------------------------------------
`targets` | `list(map(string))` | The set of targets discovered from the OpenStack API.

#### `hypervisor`

@@ -115,9 +112,8 @@ interface.

## Component health

-`discovery.openstack` is only reported as unhealthy when given an invalid
-configuration. In those cases, exported fields retain their last healthy
-values.
+`discovery.openstack` is only reported as unhealthy when given an invalid configuration.
+In those cases, exported fields retain their last healthy values.

## Debug information

diff --git a/docs/sources/flow/reference/components/discovery.ovhcloud.md b/docs/sources/reference/components/discovery.ovhcloud.md
similarity index 88%
rename from docs/sources/flow/reference/components/discovery.ovhcloud.md
rename to docs/sources/reference/components/discovery.ovhcloud.md
index 2733256ee1..5582f9d121 100644
--- a/docs/sources/flow/reference/components/discovery.ovhcloud.md
+++ b/docs/sources/reference/components/discovery.ovhcloud.md
@@ -1,20 +1,15 @@
---
-aliases:
-- /docs/grafana-cloud/agent/flow/reference/components/discovery.ovhcloud/
-- /docs/grafana-cloud/monitor-infrastructure/agent/flow/reference/components/discovery.ovhcloud/
-- /docs/grafana-cloud/monitor-infrastructure/integrations/agent/flow/reference/components/discovery.ovhcloud/
-- /docs/grafana-cloud/send-data/agent/flow/reference/components/discovery.ovhcloud/
-canonical: https://grafana.com/docs/agent/latest/flow/reference/components/discovery.ovhcloud/
+canonical: https://grafana.com/docs/alloy/latest/reference/components/discovery.ovhcloud/
description: Learn about discovery.ovhcloud
title: discovery.ovhcloud
---

# discovery.ovhcloud

-`discovery.ovhcloud` discovers scrape targets from OVHcloud's [dedicated servers][] and [VPS][] using their [API][].
-{{< param "PRODUCT_ROOT_NAME" >}} will periodically check the REST endpoint and create a target for every discovered server.
-The public IPv4 address will be used by default - if there's none, the IPv6 address will be used.
-This may be changed via relabeling with `discovery.relabel`.
+`discovery.ovhcloud` discovers scrape targets from OVHcloud's [dedicated servers][] and [VPS][] using their [API][].
+{{< param "PRODUCT_ROOT_NAME" >}} periodically checks the REST endpoint and creates a target for every discovered server.
+The public IPv4 address is used by default; if there's none, the IPv6 address is used.
+This may be changed via relabeling with `discovery.relabel`.
For OVHcloud's [public cloud][] instances you can use `discovery.openstack`.

[API]: https://api.ovh.com/
@@ -57,7 +52,7 @@ service | `string` | Service of the targets to retrieve.

The following fields are exported and can be referenced by other components:

Name | Type | Description
---------- | ------------------- | -----------
+----------|---------------------|-----------------------------------------------------
`targets` | `list(map(string))` | The set of targets discovered from the OVHcloud API.

Multiple meta labels are available on `targets` and can be used by the `discovery.relabel` component.
@@ -99,9 +94,8 @@ Multiple meta labels are available on `targets` and can be used by the `discover

## Component health

-`discovery.ovhcloud` is only reported as unhealthy when given an invalid
-configuration. In those cases, exported fields retain their last healthy
-values.
+`discovery.ovhcloud` is only reported as unhealthy when given an invalid configuration.
+In those cases, exported fields retain their last healthy values.

## Debug information

diff --git a/docs/sources/flow/reference/components/discovery.process.md b/docs/sources/reference/components/discovery.process.md
similarity index 83%
rename from docs/sources/flow/reference/components/discovery.process.md
rename to docs/sources/reference/components/discovery.process.md
index 6749abe65a..12df00f41f 100644
--- a/docs/sources/flow/reference/components/discovery.process.md
+++ b/docs/sources/reference/components/discovery.process.md
@@ -1,17 +1,12 @@
---
-aliases:
-  - /docs/grafana-cloud/agent/flow/reference/components/discovery.process/
-  - /docs/grafana-cloud/monitor-infrastructure/agent/flow/reference/components/discovery.process/
-  - /docs/grafana-cloud/monitor-infrastructure/integrations/agent/flow/reference/components/discovery.process/
-  - /docs/grafana-cloud/send-data/agent/flow/reference/components/discovery.process/
-canonical: https://grafana.com/docs/agent/latest/flow/reference/components/discovery.process/
+canonical: https://grafana.com/docs/alloy/latest/reference/components/discovery.process/
description: Learn about discovery.process
title: discovery.process
---

# discovery.process

-{{< docs/shared lookup="flow/stability/beta.md" source="agent" version="" >}}
+{{< docs/shared lookup="stability/beta.md" source="alloy" version="" >}}

`discovery.process` discovers processes running on the local Linux OS.

@@ -31,10 +26,10 @@ discovery.process "LABEL" {

The following arguments are supported:

-| Name | Type | Description | Default | Required |
-|--------------------|---------------------|-----------------------------------------------------------------------------------------|---------|----------|
+| Name | Type | Description | Default | Required |
+|--------------------|---------------------|------------------------------------------------------------------------------------------|---------|----------|
| `join` | `list(map(string))` | Join external targets to discovered process targets based on `__container_id__` label.
| | no |
-| `refresh_interval` | `duration` | How often to sync targets. | "60s" | no |
+| `refresh_interval` | `duration` | How often to sync targets. | `"60s"` | no |

### Targets joining

@@ -97,8 +92,8 @@ The resulting targets are:

The following blocks are supported inside the definition of `discovery.process`:

-| Hierarchy | Block | Description | Required |
-|-----------------|---------------------|-----------------------------------------------|----------|
+| Hierarchy | Block | Description | Required |
+|-----------------|---------------------|------------------------------------------------|----------|
| discover_config | [discover_config][] | Configures which process metadata to discover. | no |

[discover_config]: #discover_config-block

### discover_config block

The `discover_config` block describes which process metadata to discover.

The following arguments are supported:

-| Name | Type | Description | Default | Required |
-|----------------|--------|-----------------------------------------------------------------|---------|----------|
-| `exe` | `bool` | A flag to enable discovering `__meta_process_exe` label. | true | no |
+| Name | Type | Description | Default | Required |
+|----------------|--------|------------------------------------------------------------------|---------|----------|
+| `exe` | `bool` | A flag to enable discovering `__meta_process_exe` label. | true | no |
| `cwd` | `bool` | A flag to enable discovering `__meta_process_cwd` label. | true | no |
| `commandline` | `bool` | A flag to enable discovering `__meta_process_commandline` label. | true | no |
| `uid` | `bool` | A flag to enable discovering `__meta_process_uid` label. | true | no |
-| `username` | `bool` | A flag to enable discovering `__meta_process_username`: label. | true | no |
+| `username` | `bool` | A flag to enable discovering `__meta_process_username` label. | true | no |
| `container_id` | `bool` | A flag to enable discovering `__container_id__` label. | true | no |

## Exported fields

@@ -134,14 +129,12 @@ Each target includes the following labels:

* `__meta_process_commandline`: The process command line. Taken from `/proc/<pid>/cmdline`.
* `__meta_process_uid`: The process UID. Taken from `/proc/<pid>/status`.
* `__meta_process_username`: The process username. Taken from `__meta_process_uid` and `os/user.LookupId`.
-* `__container_id__`: The container ID. Taken from `/proc//cgroup`. If the process is not running in a container,
- this label is not set.
+* `__container_id__`: The container ID. Taken from `/proc/<pid>/cgroup`. If the process is not running in a container, this label is not set.

## Component health

-`discovery.process` is only reported as unhealthy when given an invalid
-configuration. In those cases, exported fields retain their last healthy
-values.
+`discovery.process` is only reported as unhealthy when given an invalid configuration.
+In those cases, exported fields retain their last healthy values.
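+
+As a brief sketch of how the arguments and the `discover_config` block fit together, the following configuration narrows discovery to a subset of process metadata:
+
+```river
+discovery.process "example" {
+  refresh_interval = "60s"
+
+  discover_config {
+    // Keep only the metadata needed downstream; every flag defaults to true.
+    exe          = true
+    commandline  = true
+    container_id = true
+    cwd          = false
+    uid          = false
+    username     = false
+  }
+}
+```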
## Debug information diff --git a/docs/sources/flow/reference/components/discovery.puppetdb.md b/docs/sources/reference/components/discovery.puppetdb.md similarity index 75% rename from docs/sources/flow/reference/components/discovery.puppetdb.md rename to docs/sources/reference/components/discovery.puppetdb.md index 01e0ac9269..3ef7fc40e3 100644 --- a/docs/sources/flow/reference/components/discovery.puppetdb.md +++ b/docs/sources/reference/components/discovery.puppetdb.md @@ -1,10 +1,5 @@ --- -aliases: -- /docs/grafana-cloud/agent/flow/reference/components/discovery.puppetdb/ -- /docs/grafana-cloud/monitor-infrastructure/agent/flow/reference/components/discovery.puppetdb/ -- /docs/grafana-cloud/monitor-infrastructure/integrations/agent/flow/reference/components/discovery.puppetdb/ -- /docs/grafana-cloud/send-data/agent/flow/reference/components/discovery.puppetdb/ -canonical: https://grafana.com/docs/agent/latest/flow/reference/components/discovery.puppetdb/ +canonical: https://grafana.com/docs/alloy/latest/reference/components/discovery.puppetdb/ description: Learn about discovery.puppetdb title: discovery.puppetdb --- @@ -49,31 +44,30 @@ Name | Type | Description At most, one of the following can be provided: - [`bearer_token` argument](#arguments). - - [`bearer_token_file` argument](#arguments). + - [`bearer_token_file` argument](#arguments). - [`basic_auth` block][basic_auth]. - [`authorization` block][authorization]. - [`oauth2` block][oauth2]. [arguments]: #arguments -{{< docs/shared lookup="flow/reference/components/http-client-proxy-config-description.md" source="agent" version="" >}} +{{< docs/shared lookup="reference/components/http-client-proxy-config-description.md" source="alloy" version="" >}} ## Blocks The following blocks are supported inside the definition of `discovery.puppetdb`: -Hierarchy | Block | Description | Required ---------- | ----- | ----------- | -------- -basic_auth | [basic_auth][] | Configure basic_auth for authenticating to the endpoint. | no -authorization | [authorization][] | Configure generic authorization to the endpoint. | no -oauth2 | [oauth2][] | Configure OAuth2 for authenticating to the endpoint. | no -oauth2 > tls_config | [tls_config][] | Configure TLS settings for connecting to the endpoint. | no -tls_config | [tls_config][] | Configure TLS settings for connecting to the endpoint. | no +Hierarchy | Block | Description | Required +--------------------|-------------------|----------------------------------------------------------|--------- +basic_auth | [basic_auth][] | Configure basic_auth for authenticating to the endpoint. | no +authorization | [authorization][] | Configure generic authorization to the endpoint. | no +oauth2 | [oauth2][] | Configure OAuth2 for authenticating to the endpoint. | no +oauth2 > tls_config | [tls_config][] | Configure TLS settings for connecting to the endpoint. | no +tls_config | [tls_config][] | Configure TLS settings for connecting to the endpoint. | no -The `>` symbol indicates deeper levels of nesting. For example, -`oauth2 > tls_config` refers to a `tls_config` block defined inside -an `oauth2` block. +The `>` symbol indicates deeper levels of nesting. +For example, `oauth2 > tls_config` refers to a `tls_config` block defined inside an `oauth2` block. [basic_auth]: #basic_auth-block [authorization]: #authorization-block @@ -82,26 +76,26 @@ an `oauth2` block. 
### basic_auth block -{{< docs/shared lookup="flow/reference/components/basic-auth-block.md" source="agent" version="" >}} +{{< docs/shared lookup="reference/components/basic-auth-block.md" source="alloy" version="" >}} ### authorization block -{{< docs/shared lookup="flow/reference/components/authorization-block.md" source="agent" version="" >}} +{{< docs/shared lookup="reference/components/authorization-block.md" source="alloy" version="" >}} ### oauth2 block -{{< docs/shared lookup="flow/reference/components/oauth2-block.md" source="agent" version="" >}} +{{< docs/shared lookup="reference/components/oauth2-block.md" source="alloy" version="" >}} ### tls_config block -{{< docs/shared lookup="flow/reference/components/tls-config-block.md" source="agent" version="" >}} +{{< docs/shared lookup="reference/components/tls-config-block.md" source="alloy" version="" >}} ## Exported fields The following fields are exported and can be referenced by other components: -Name | Type | Description ----- | ---- | ----------- +Name | Type | Description +----------|---------------------|--------------------------------------------- `targets` | `list(map(string))` | The set of targets discovered from puppetdb. Each target includes the following labels: @@ -119,9 +113,8 @@ Each target includes the following labels: ## Component health -`discovery.puppetdb` is only reported as unhealthy when given an invalid -configuration. In those cases, exported fields retain their last healthy -values. +`discovery.puppetdb` is only reported as unhealthy when given an invalid configuration. +In those cases, exported fields retain their last healthy values. ## Debug information diff --git a/docs/sources/flow/reference/components/discovery.relabel.md b/docs/sources/reference/components/discovery.relabel.md similarity index 53% rename from docs/sources/flow/reference/components/discovery.relabel.md rename to docs/sources/reference/components/discovery.relabel.md index cd928ffb5a..2cb341cfdb 100644 --- a/docs/sources/flow/reference/components/discovery.relabel.md +++ b/docs/sources/reference/components/discovery.relabel.md @@ -1,10 +1,5 @@ --- -aliases: -- /docs/grafana-cloud/agent/flow/reference/components/discovery.relabel/ -- /docs/grafana-cloud/monitor-infrastructure/agent/flow/reference/components/discovery.relabel/ -- /docs/grafana-cloud/monitor-infrastructure/integrations/agent/flow/reference/components/discovery.relabel/ -- /docs/grafana-cloud/send-data/agent/flow/reference/components/discovery.relabel/ -canonical: https://grafana.com/docs/agent/latest/flow/reference/components/discovery.relabel/ +canonical: https://grafana.com/docs/alloy/latest/reference/components/discovery.relabel/ description: Learn about discovery.relabel title: discovery.relabel --- @@ -13,29 +8,20 @@ title: discovery.relabel In Flow, targets are defined as sets of key-value pairs called _labels_. -`discovery.relabel` rewrites the label set of the input targets by applying one -or more relabeling rules. If no rules are defined, then the input targets are -exported as-is. - -The most common use of `discovery.relabel` is to filter targets or standardize -the target label set that is passed to a downstream component. The `rule` -blocks are applied to the label set of each target in order of their appearance -in the configuration file. The configured rules can be retrieved by calling the -function in the `rules` export field. 
- -Target labels which start with a double underscore `__` are considered -internal, and may be removed by other Flow components prior to telemetry -collection. To retain any of these labels, use a `labelmap` action to remove -the prefix, or remap them to a different name. Service discovery mechanisms -usually group their labels under `__meta_*`. For example, the -discovery.kubernetes component populates a set of `__meta_kubernetes_*` labels -to provide information about the discovered Kubernetes resources. If a -relabeling rule needs to store a label value temporarily, for example as the -input to a subsequent step, use the `__tmp` label name prefix, as it is -guaranteed to never be used. - -Multiple `discovery.relabel` components can be specified by giving them -different labels. +`discovery.relabel` rewrites the label set of the input targets by applying one or more relabeling rules. +If no rules are defined, then the input targets are exported as-is. + +The most common use of `discovery.relabel` is to filter targets or standardize the target label set that's passed to a downstream component. +The `rule` blocks are applied to the label set of each target in order of their appearance in the configuration file. +The configured rules can be retrieved by calling the function in the `rules` export field. + +Target labels which start with a double underscore `__` are considered internal, and may be removed by other components prior to telemetry collection. +To retain any of these labels, use a `labelmap` action to remove the prefix, or remap them to a different name. +Service discovery mechanisms usually group their labels under `__meta_*`. +For example, the discovery.kubernetes component populates a set of `__meta_kubernetes_*` labels to provide information about the discovered Kubernetes resources. +If a relabeling rule needs to store a label value temporarily, for example as the input to a subsequent step, use the `__tmp` label name prefix, as it's guaranteed to never be used. + +Multiple `discovery.relabel` components can be specified by giving them different labels. ## Usage @@ -55,39 +41,38 @@ discovery.relabel "LABEL" { The following arguments are supported: -Name | Type | Description | Default | Required ----- | ---- | ----------- | ------- | -------- -`targets` | `list(map(string))` | Targets to relabel | | yes +Name | Type | Description | Default | Required +----------|---------------------|--------------------|---------|--------- +`targets` | `list(map(string))` | Targets to relabel | | yes ## Blocks The following blocks are supported inside the definition of `discovery.relabel`: -Hierarchy | Block | Description | Required ---------- | ----- | ----------- | -------- -rule | [rule][] | Relabeling rules to apply to targets. | no +Hierarchy | Block | Description | Required +----------|----------|---------------------------------------|--------- +rule | [rule][] | Relabeling rules to apply to targets. | no [rule]: #rule-block ### rule block -{{< docs/shared lookup="flow/reference/components/rule-block.md" source="agent" version="" >}} +{{< docs/shared lookup="reference/components/rule-block.md" source="alloy" version="" >}} ## Exported fields The following fields are exported and can be referenced by other components: -Name | Type | Description ----- | ---- | ----------- +Name | Type | Description +---------|---------------------|---------------------------------------------- `output` | `list(map(string))` | The set of targets after applying relabeling. 
-`rules` | `RelabelRules` | The currently configured relabeling rules. +`rules` | `RelabelRules` | The currently configured relabeling rules. ## Component health -`discovery.relabel` is only reported as unhealthy when given an invalid -configuration. In those cases, exported fields retain their last healthy -values. +`discovery.relabel` is only reported as unhealthy when given an invalid configuration. +In those cases, exported fields retain their last healthy values. ## Debug information @@ -122,7 +107,6 @@ discovery.relabel "keep_backend_only" { } ``` - ## Compatible components diff --git a/docs/sources/flow/reference/components/discovery.scaleway.md b/docs/sources/reference/components/discovery.scaleway.md similarity index 55% rename from docs/sources/flow/reference/components/discovery.scaleway.md rename to docs/sources/reference/components/discovery.scaleway.md index 44c1810118..9f36c82e33 100644 --- a/docs/sources/flow/reference/components/discovery.scaleway.md +++ b/docs/sources/reference/components/discovery.scaleway.md @@ -1,16 +1,12 @@ --- -aliases: -- /docs/grafana-cloud/monitor-infrastructure/agent/flow/reference/components/discovery.scaleway/ -- /docs/grafana-cloud/send-data/agent/flow/reference/components/discovery.scaleway/ -canonical: https://grafana.com/docs/agent/latest/flow/reference/components/discovery.scaleway/ +canonical: https://grafana.com/docs/alloy/latest/reference/components/discovery.scaleway/ description: Learn about discovery.scaleway title: discovery.scaleway --- # discovery.scaleway -`discovery.scaleway` discovers targets from [Scaleway instances][instance] and -[baremetal services][baremetal]. +`discovery.scaleway` discovers targets from [Scaleway instances][instance] and [baremetal services][baremetal]. [instance]: https://www.scaleway.com/en/virtual-instances/ [baremetal]: https://www.scaleway.com/en/bare-metal-servers/ @@ -30,64 +26,61 @@ discovery.scaleway "LABEL" { The following arguments are supported: -Name | Type | Description | Default | Required ----- | ---- | ----------- | ------- | -------- -`project_id` | `string` | Scaleway project ID of targets. | | yes -`role` | `string` | Role of targets to retrieve. | | yes -`api_url` | `string` | Scaleway API URL. | `"https://api.scaleway.com"` | no -`zone` | `string` | Availability zone of targets. | `"fr-par-1"` | no -`access_key` | `string` | Access key for the Scaleway API. | | yes -`secret_key` | `secret` | Secret key for the Scaleway API. | | conditional -`secret_key_file` | `string` | Path to file containing secret key for the Scaleway API. | | conditional -`name_filter` | `string` | Name filter to apply against the listing request. | | no -`tags_filter` | `list(string)` | List of tags to search for. | | no -`refresh_interval` | `duration` | Frequency to rediscover targets. | `"60s"` | no -`port` | `number` | Default port on servers to associate with generated targets. | `80` | no -`proxy_url` | `string` | HTTP proxy to send requests through. | | no -`no_proxy` | `string` | Comma-separated list of IP addresses, CIDR notations, and domain names to exclude from proxying. | | no -`proxy_from_environment` | `bool` | Use the proxy URL indicated by environment variables. | `false` | no -`proxy_connect_header` | `map(list(secret))` | Specifies headers to send to proxies during CONNECT requests. | | no -`follow_redirects` | `bool` | Whether redirects returned by the server should be followed. | `true` | no -`enable_http2` | `bool` | Whether HTTP2 is supported for requests. 
| `true` | no - -The `role` argument determines what type of Scaleway machines to discover. It -must be set to one of the following: +Name | Type | Description | Default | Required +-------------------------|---------------------|--------------------------------------------------------------------------------------------------|------------------------------|------------ +`project_id` | `string` | Scaleway project ID of targets. | | yes +`role` | `string` | Role of targets to retrieve. | | yes +`api_url` | `string` | Scaleway API URL. | `"https://api.scaleway.com"` | no +`zone` | `string` | Availability zone of targets. | `"fr-par-1"` | no +`access_key` | `string` | Access key for the Scaleway API. | | yes +`secret_key` | `secret` | Secret key for the Scaleway API. | | conditional +`secret_key_file` | `string` | Path to file containing secret key for the Scaleway API. | | conditional +`name_filter` | `string` | Name filter to apply against the listing request. | | no +`tags_filter` | `list(string)` | List of tags to search for. | | no +`refresh_interval` | `duration` | Frequency to rediscover targets. | `"60s"` | no +`port` | `number` | Default port on servers to associate with generated targets. | `80` | no +`proxy_url` | `string` | HTTP proxy to send requests through. | | no +`no_proxy` | `string` | Comma-separated list of IP addresses, CIDR notations, and domain names to exclude from proxying. | | no +`proxy_from_environment` | `bool` | Use the proxy URL indicated by environment variables. | `false` | no +`proxy_connect_header` | `map(list(secret))` | Specifies headers to send to proxies during CONNECT requests. | | no +`follow_redirects` | `bool` | Whether redirects returned by the server should be followed. | `true` | no +`enable_http2` | `bool` | Whether HTTP2 is supported for requests. | `true` | no + +The `role` argument determines what type of Scaleway machines to discover. +It must be set to one of the following: * `"baremetal"`: Discover [baremetal][] Scaleway machines. * `"instance"`: Discover virtual Scaleway [instances][instance]. -The `name_filter` and `tags_filter` arguments can be used to filter the set of -discovered servers. `name_filter` returns machines matching a specific name, -while `tags_filter` returns machines who contain _all_ the tags listed in the -`tags_filter` argument. +The `name_filter` and `tags_filter` arguments can be used to filter the set of discovered servers. +`name_filter` returns machines matching a specific name, while `tags_filter` returns machines who contain _all_ the tags listed in the `tags_filter` argument. -{{< docs/shared lookup="flow/reference/components/http-client-proxy-config-description.md" source="agent" version="" >}} +{{< docs/shared lookup="reference/components/http-client-proxy-config-description.md" source="alloy" version="" >}} ## Blocks The following blocks are supported inside the definition of `discovery.scaleway`: -Hierarchy | Block | Description | Required ---------- | ----- | ----------- | -------- +Hierarchy | Block | Description | Required +-----------|----------------|--------------------------------------------------------|--------- tls_config | [tls_config][] | Configure TLS settings for connecting to the endpoint. | no -The `>` symbol indicates deeper levels of nesting. For example, -`oauth2 > tls_config` refers to a `tls_config` block defined inside -an `oauth2` block. +The `>` symbol indicates deeper levels of nesting. 
+For example, `oauth2 > tls_config` refers to a `tls_config` block defined inside an `oauth2` block.

[tls_config]: #tls_config-block

### tls_config block

-{{< docs/shared lookup="flow/reference/components/tls-config-block.md" source="agent" version="" >}}
+{{< docs/shared lookup="reference/components/tls-config-block.md" source="alloy" version="" >}}

## Exported fields

The following fields are exported and can be referenced by other components:

-Name | Type | Description
---- | ---- | -----------
+Name | Type | Description
+----------|---------------------|-----------------------------------------------------------
`targets` | `list(map(string))` | The set of targets discovered from the Scaleway API.

When `role` is `baremetal`, discovered targets include the following labels:

@@ -131,9 +124,8 @@ When `role` is `instance`, discovered targets include the following labels:

## Component health

-`discovery.scaleway` is only reported as unhealthy when given an invalid
-configuration. In those cases, exported fields retain their last healthy
-values.
+`discovery.scaleway` is only reported as unhealthy when given an invalid configuration.
+In those cases, exported fields retain their last healthy values.

## Debug information

diff --git a/docs/sources/flow/reference/components/discovery.serverset.md b/docs/sources/reference/components/discovery.serverset.md
similarity index 76%
rename from docs/sources/flow/reference/components/discovery.serverset.md
rename to docs/sources/reference/components/discovery.serverset.md
index bf45a1d79a..e693c9f3cf 100644
--- a/docs/sources/flow/reference/components/discovery.serverset.md
+++ b/docs/sources/reference/components/discovery.serverset.md
@@ -1,8 +1,5 @@
---
-aliases:
-- /docs/grafana-cloud/monitor-infrastructure/agent/flow/reference/components/discovery.serverset/
-- /docs/grafana-cloud/send-data/agent/flow/reference/components/discovery.serverset/
-canonical: https://grafana.com/docs/agent/latest/flow/reference/components/discovery.serverset/
+canonical: https://grafana.com/docs/alloy/latest/reference/components/discovery.serverset/
description: Learn about discovery.serverset
title: discovery.serverset
---
@@ -25,7 +22,8 @@ discovery.serverset "LABEL" {
}
```

-Serverset data stored in Zookeeper must be in JSON format. The Thrift format is not supported.
+Serverset data stored in Zookeeper must be in JSON format.
+The Thrift format isn't supported.

## Arguments

The following arguments are supported:

| Name | Type | Description | Default | Required |
|-----------|----------------|--------------------------------------------------|---------|----------|
-| `servers` | `list(string)` | The Zookeeper servers to connect to. | | yes |
+| `servers` | `list(string)` | The Zookeeper servers to connect to. | | yes |
| `paths` | `list(string)` | The Zookeeper paths to discover Serversets from. | | yes |
-| `timeout` | `duration` | The Zookeeper session timeout | `10s` | no |
+| `timeout` | `duration` | The Zookeeper session timeout. | `10s` | no |

## Exported fields

The following fields are exported and can be referenced by other components:

Name | Type | Description
--------- | ------------------- | -----------
+----------|---------------------|-------------------------------
`targets` | `list(map(string))` | The set of targets discovered.
The following metadata labels are available on targets during relabeling:

@@ -56,9 +54,8 @@ The following metadata labels are available on targets during relabeling:

## Component health

-`discovery.serverset` is only reported as unhealthy when given an invalid
-configuration. In those cases, exported fields retain their last healthy
-values.
+`discovery.serverset` is only reported as unhealthy when given an invalid configuration.
+In those cases, exported fields retain their last healthy values.

## Debug information

@@ -70,12 +67,8 @@ values.

## Example

-The configuration below will connect to one of the Zookeeper servers
-(either `zk1`, `zk2`, or `zk3`) and discover JSON Serversets at paths
-`/path/to/znode1` and `/path/to/znode2`. The discovered targets are scraped
-by the `prometheus.scrape.default` component and forwarded to
-the `prometheus.remote_write.default` component, which will send the samples to
-specified remote_write URL.
+The configuration below connects to one of the Zookeeper servers (either `zk1`, `zk2`, or `zk3`) and discovers JSON Serversets at paths `/path/to/znode1` and `/path/to/znode2`.
+The discovered targets are scraped by the `prometheus.scrape.default` component and forwarded to the `prometheus.remote_write.default` component, which sends the samples to the specified remote_write URL.

```river
discovery.serverset "zookeeper" {
diff --git a/docs/sources/flow/reference/components/discovery.triton.md b/docs/sources/reference/components/discovery.triton.md
similarity index 83%
rename from docs/sources/flow/reference/components/discovery.triton.md
rename to docs/sources/reference/components/discovery.triton.md
index d9e3ac6a23..82578eee6f 100644
--- a/docs/sources/flow/reference/components/discovery.triton.md
+++ b/docs/sources/reference/components/discovery.triton.md
@@ -1,10 +1,5 @@
---
-aliases:
-- /docs/grafana-cloud/agent/flow/reference/components/discovery.triton/
-- /docs/grafana-cloud/monitor-infrastructure/agent/flow/reference/components/discovery.triton/
-- /docs/grafana-cloud/monitor-infrastructure/integrations/agent/flow/reference/components/discovery.triton/
-- /docs/grafana-cloud/send-data/agent/flow/reference/components/discovery.triton/
-canonical: https://grafana.com/docs/agent/latest/flow/reference/components/discovery.triton/
+canonical: https://grafana.com/docs/alloy/latest/reference/components/discovery.triton/
description: Learn about discovery.triton
title: discovery.triton
---
@@ -44,29 +39,29 @@ Name | Type | Description

* `"container"` to discover virtual machines (SmartOS zones, lx/KVM/bhyve branded zones) running on Triton
* `"cn"` to discover compute nodes (servers/global zones) making up the Triton infrastructure

-`groups` is only supported when `role` is set to `"container"`. If omitted all
-containers owned by the requesting account are scraped.
+`groups` is only supported when `role` is set to `"container"`.
+If omitted, all containers owned by the requesting account are scraped.

## Blocks

The following blocks are supported inside the definition of `discovery.triton`:

-Hierarchy | Block | Description | Required
---------- | ----- | ----------- | --------
+Hierarchy | Block | Description | Required
+-----------|----------------|---------------------------------------------------|---------
tls_config | [tls_config][] | TLS configuration for requests to the Triton API. 
| no [tls_config]: #tls_config-block ### tls_config block -{{< docs/shared lookup="flow/reference/components/tls-config-block.md" source="agent" version="" >}} +{{< docs/shared lookup="reference/components/tls-config-block.md" source="alloy" version="" >}} ## Exported fields The following fields are exported and can be referenced by other components: Name | Type | Description ---------- | ------------------- | ----------- +----------|---------------------|--------------------------------------------------- `targets` | `list(map(string))` | The set of targets discovered from the Triton API. When `role` is set to `"container"`, each target includes the following labels: @@ -85,9 +80,8 @@ When `role` is set to `"cn"` each target includes the following labels: ## Component health -`discovery.triton` is only reported as unhealthy when given an invalid -configuration. In those cases, exported fields retain their last healthy -values. +`discovery.triton` is only reported as unhealthy when given an invalid configuration. +In those cases, exported fields retain their last healthy values. ## Debug information diff --git a/docs/sources/flow/reference/components/discovery.uyuni.md b/docs/sources/reference/components/discovery.uyuni.md similarity index 62% rename from docs/sources/flow/reference/components/discovery.uyuni.md rename to docs/sources/reference/components/discovery.uyuni.md index ab2a968bb5..8cbd7d6870 100644 --- a/docs/sources/flow/reference/components/discovery.uyuni.md +++ b/docs/sources/reference/components/discovery.uyuni.md @@ -1,10 +1,5 @@ --- -aliases: -- /docs/grafana-cloud/agent/flow/reference/components/discovery.uyuni/ -- /docs/grafana-cloud/monitor-infrastructure/agent/flow/reference/components/discovery.uyuni/ -- /docs/grafana-cloud/monitor-infrastructure/integrations/agent/flow/reference/components/discovery.uyuni/ -- /docs/grafana-cloud/send-data/agent/flow/reference/components/discovery.uyuni/ -canonical: https://grafana.com/docs/agent/latest/flow/reference/components/discovery.uyuni/ +canonical: https://grafana.com/docs/alloy/latest/reference/components/discovery.uyuni/ description: Learn about discovery.uyuni title: discovery.uyuni --- @@ -29,43 +24,43 @@ discovery.uyuni "LABEL" { The following arguments are supported: -Name | Type | Description | Default | Required ------------------------- | ------------------- | ------------------------------------------------------------------- | ----------------------- | -------- -`server` | `string` | The primary Uyuni Server. | | yes -`username` | `string` | The username to use for authentication to the Uyuni API. | | yes -`password` | `Secret` | The password to use for authentication to the Uyuni API. | | yes -`entitlement` | `string` | The entitlement to filter on when listing targets. | `"monitoring_entitled"` | no -`separator` | `string` | The separator to use when building the `__meta_uyuni_groups` label. | `","` | no -`refresh_interval` | `duration` | Interval at which to refresh the list of targets. | `1m` | no -`proxy_url` | `string` | HTTP proxy to send requests through. | | no -`no_proxy` | `string` | Comma-separated list of IP addresses, CIDR notations, and domain names to exclude from proxying. | | no -`proxy_from_environment` | `bool` | Use the proxy URL indicated by environment variables. | `false` | no -`proxy_connect_header` | `map(list(secret))` | Specifies headers to send to proxies during CONNECT requests. | | no -`follow_redirects` | `bool` | Whether redirects returned by the server should be followed. 
| `true` | no -`enable_http2` | `bool` | Whether HTTP2 is supported for requests. | `true` | no - -{{< docs/shared lookup="flow/reference/components/http-client-proxy-config-description.md" source="agent" version="" >}} +Name | Type | Description | Default | Required +-------------------------|---------------------|--------------------------------------------------------------------------------------------------|-------------------------|--------- +`server` | `string` | The primary Uyuni Server. | | yes +`username` | `string` | The username to use for authentication to the Uyuni API. | | yes +`password` | `Secret` | The password to use for authentication to the Uyuni API. | | yes +`entitlement` | `string` | The entitlement to filter on when listing targets. | `"monitoring_entitled"` | no +`separator` | `string` | The separator to use when building the `__meta_uyuni_groups` label. | `","` | no +`refresh_interval` | `duration` | Interval at which to refresh the list of targets. | `1m` | no +`proxy_url` | `string` | HTTP proxy to send requests through. | | no +`no_proxy` | `string` | Comma-separated list of IP addresses, CIDR notations, and domain names to exclude from proxying. | | no +`proxy_from_environment` | `bool` | Use the proxy URL indicated by environment variables. | `false` | no +`proxy_connect_header` | `map(list(secret))` | Specifies headers to send to proxies during CONNECT requests. | | no +`follow_redirects` | `bool` | Whether redirects returned by the server should be followed. | `true` | no +`enable_http2` | `bool` | Whether HTTP2 is supported for requests. | `true` | no + +{{< docs/shared lookup="reference/components/http-client-proxy-config-description.md" source="alloy" version="" >}} ## Blocks The following blocks are supported inside the definition of `discovery.uyuni`: -Hierarchy | Block | Description | Required ---------- | ----- | ----------- | -------- +Hierarchy | Block | Description | Required +-----------|----------------|--------------------------------------------------|--------- tls_config | [tls_config][] | TLS configuration for requests to the Uyuni API. | no [tls_config]: #tls_config-block ### tls_config block -{{< docs/shared lookup="flow/reference/components/tls-config-block.md" source="agent" version="" >}} +{{< docs/shared lookup="reference/components/tls-config-block.md" source="alloy" version="" >}} ## Exported fields The following fields are exported and can be referenced by other components: Name | Type | Description ---------- | ------------------- | ----------- +----------|---------------------|-------------------------------------------------- `targets` | `list(map(string))` | The set of targets discovered from the Uyuni API. Each target includes the following labels: @@ -80,14 +75,14 @@ Each target includes the following labels: * `__meta_uyuni_metrics_path`: The path to the metrics endpoint. * `__meta_uyuni_scheme`: `https` if TLS is enabled on the endpoint, `http` otherwise. -These labels are largely derived from a [listEndpoints](https://www.uyuni-project.org/uyuni-docs-api/uyuni/api/system.monitoring.html) -API call to the Uyuni Server. +These labels are largely derived from a [listEndpoints][] API call to the Uyuni Server. + +[listEndpoints]: https://www.uyuni-project.org/uyuni-docs-api/uyuni/api/system.monitoring.html ## Component health -`discovery.uyuni` is only reported as unhealthy when given an invalid -configuration. In those cases, exported fields retain their last healthy -values. 
+`discovery.uyuni` is only reported as unhealthy when given an invalid configuration. +In those cases, exported fields retain their last healthy values. ## Debug information @@ -128,6 +123,7 @@ Replace the following: - `PROMETHEUS_REMOTE_WRITE_URL`: The URL of the Prometheus remote_write-compatible server to send metrics to. - `USERNAME`: The username to use for authentication to the remote_write API. - `PASSWORD`: The password to use for authentication to the remote_write API. + ## Compatible components diff --git a/docs/sources/reference/components/faro.receiver.md b/docs/sources/reference/components/faro.receiver.md new file mode 100644 index 0000000000..a81744e1e5 --- /dev/null +++ b/docs/sources/reference/components/faro.receiver.md @@ -0,0 +1,258 @@ +--- +canonical: https://grafana.com/docs/alloy/latest/reference/components/faro.receiver/ +description: Learn about the faro.receiver +title: faro.receiver +--- + +# faro.receiver + +`faro.receiver` accepts web application telemetry data from the [Grafana Faro Web SDK][faro-sdk] and forwards it to other components for future processing. + +[faro-sdk]: https://github.com/grafana/faro-web-sdk + +## Usage + +```river +faro.receiver "LABEL" { + output { + logs = [LOKI_RECEIVERS] + traces = [OTELCOL_COMPONENTS] + } +} +``` + +## Arguments + +The following arguments are supported: + +Name | Type | Description | Default | Required +-------------------|---------------|----------------------------------------------|---------|--------- +`extra_log_labels` | `map(string)` | Extra labels to attach to emitted log lines. | `{}` | no + +## Blocks + +The following blocks are supported inside the definition of `faro.receiver`: + +Hierarchy | Block | Description | Required +-----------------------|-------------------|------------------------------------------------------|--------- +server | [server][] | Configures the HTTP server. | no +server > rate_limiting | [rate_limiting][] | Configures rate limiting for the HTTP server. | no +sourcemaps | [sourcemaps][] | Configures sourcemap retrieval. | no +sourcemaps > location | [location][] | Configures on-disk location for sourcemap retrieval. | no +output | [output][] | Configures where to send collected telemetry data. | yes + +[server]: #server-block +[rate_limiting]: #rate_limiting-block +[sourcemaps]: #sourcemaps-block +[location]: #location-block +[output]: #output-block + +### server block + +The `server` block configures the HTTP server managed by the `faro.receiver` component. +Clients using the [Grafana Faro Web SDK][faro-sdk] forward telemetry data to this HTTP server for processing. + +Name | Type | Description | Default | Required +---------------------------|----------------|--------------------------------------------------------|-------------|--------- +`listen_address` | `string` | Address to listen for HTTP traffic on. | `127.0.0.1` | no +`listen_port` | `number` | Port to listen for HTTP traffic on. | `12347` | no +`cors_allowed_origins` | `list(string)` | Origins for which cross-origin requests are permitted. | `[]` | no +`api_key` | `secret` | Optional API key to validate client requests with. | `""` | no +`max_allowed_payload_size` | `string` | Maximum size (in bytes) for client requests. | `"5MiB"` | no + +By default, telemetry data is only accepted from applications on the same local network as the browser. +To accept telemetry data from a wider set of clients, modify the `listen_address` attribute to the IP address of the appropriate network interface to use. 
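+
+For example, a sketch of a `server` block that accepts telemetry from other hosts; the address and component label below are illustrative:
+
+```river
+faro.receiver "example" {
+  server {
+    // Listen on every network interface so that browsers on other
+    // machines can reach the receiver.
+    listen_address = "0.0.0.0"
+    listen_port    = 12347
+  }
+
+  output {
+    logs = []
+  }
+}
+```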
+ +The `cors_allowed_origins` argument determines what origins browser requests may come from. +The default value, `[]`, disables CORS support. +To support requests from all origins, set `cors_allowed_origins` to `["*"]`. +The `*` character indicates a wildcard. + +When the `api_key` argument is non-empty, client requests must have an HTTP header called `X-API-Key` matching the value of the `api_key` argument. +Requests that are missing the header or have the wrong value are rejected with an `HTTP 401 Unauthorized` status code. +If the `api_key` argument is empty, no authentication checks are performed, and the `X-API-Key` HTTP header is ignored. + +### rate_limiting block + +The `rate_limiting` block configures rate limiting for client requests. + +Name | Type | Description | Default | Required +-------------|----------|--------------------------------------|---------|--------- +`enabled` | `bool` | Whether to enable rate limiting. | `true` | no +`rate` | `number` | Rate of allowed requests per second. | `50` | no +`burst_size` | `number` | Allowed burst size of requests. | `100` | no + +Rate limiting functions as a [token bucket algorithm][token-bucket], where a bucket has a maximum capacity for up to `burst_size` requests and refills at a rate of `rate` per second. + +Each HTTP request drains the capacity of the bucket by one. Once the bucket is empty, HTTP requests are rejected with an `HTTP 429 Too Many Requests` status code until the bucket has more available capacity. + +Configuring the `rate` argument determines how fast the bucket refills, and configuring the `burst_size` argument determines how many requests can be received in a burst before the bucket is empty and starts rejecting requests. + +[token-bucket]: https://en.wikipedia.org/wiki/Token_bucket + +### sourcemaps block + +The `sourcemaps` block configures how to retrieve sourcemaps. +Sourcemaps are then used to transform file and line information from minified code into the file and line information from the original source code. + +Name | Type | Description | Default | Required +------------------------|----------------|--------------------------------------------|---------|--------- +`download` | `bool` | Whether to download sourcemaps. | `true` | no +`download_from_origins` | `list(string)` | Which origins to download sourcemaps from. | `["*"]` | no +`download_timeout` | `duration` | Timeout when downloading sourcemaps. | `"1s"` | no + +When exceptions are sent to the `faro.receiver` component, it can download sourcemaps from the web application. +You can disable this behavior by setting the `download` argument to `false`. + +The `download_from_origins` argument determines which origins a sourcemap may be downloaded from. +The origin is attached to the URL that a browser is sending telemetry data from. +The default value, `["*"]`, enables downloading sourcemaps from all origins. +The `*` character indicates a wildcard. + +By default, sourcemap downloads are subject to a timeout of `"1s"`, specified by the `download_timeout` argument. +Setting `download_timeout` to `"0s"` disables timeouts. + +To retrieve sourcemaps from disk instead of the network, specify one or more [`location` blocks][location]. +When `location` blocks are provided, they are checked first for sourcemaps before falling back to downloading. + +### location block + +The `location` block declares a location where sourcemaps are stored on the filesystem. 
+The `location` block can be specified multiple times to declare multiple locations where sourcemaps are stored. + +Name | Type | Description | Default | Required +-----------------------|----------|-----------------------------------------------------|---------|--------- +`path` | `string` | The path on disk where sourcemaps are stored. | | yes +`minified_path_prefix` | `string` | The prefix of the minified path sent from browsers. | | yes + +The `minified_path_prefix` argument determines the prefix of paths to Javascript files, such as `http://example.com/`. +The `path` argument then determines where to find the sourcemap for the file. + +For example, given the following location block: + +``` +location { + path = "/var/my-app/build" + minified_path_prefix = "http://example.com/" +} +``` + +To look up the sourcemaps for a file hosted at `http://example.com/foo.js`, the `faro.receiver` component will: + +1. Remove the minified path prefix to extract the path to the file (`foo.js`). +2. Search for that file path with a `.map` extension (`foo.js.map`) in `path` (`/var/my-app/build/foo.js.map`). + +Optionally, the value for the `path` argument may contain `{{ .Release }}` as a template value, such as `/var/my-app/{{ .Release }}/build`. +The template value will be replaced with the release value provided by the [Faro Web App SDK][faro-sdk]. + +### output block + +The `output` block specifies where to forward collected logs and traces. + +Name | Type | Description | Default | Required +---------|--------------------------|------------------------------------------------------|---------|--------- +`logs` | `list(LogsReceiver)` | A list of `loki` components to forward logs to. | `[]` | no +`traces` | `list(otelcol.Consumer)` | A list of `otelcol` components to forward traces to. | `[]` | no + +## Exported fields + +`faro.receiver` does not export any fields. + +## Component health + +`faro.receiver` is reported as unhealthy when the integrated server fails to +start. + +## Debug information + +`faro.receiver` does not expose any component-specific debug information. + +## Debug metrics + +`faro.receiver` exposes the following metrics for monitoring the component: + +* `faro_receiver_logs_total` (counter): Total number of ingested logs. +* `faro_receiver_measurements_total` (counter): Total number of ingested measurements. +* `faro_receiver_exceptions_total` (counter): Total number of ingested exceptions. +* `faro_receiver_events_total` (counter): Total number of ingested events. +* `faro_receiver_exporter_errors_total` (counter): Total number of errors produced by an internal exporter. +* `faro_receiver_request_duration_seconds` (histogram): Time (in seconds) spent serving HTTP requests. +* `faro_receiver_request_message_bytes` (histogram): Size (in bytes) of HTTP requests received from clients. +* `faro_receiver_response_message_bytes` (histogram): Size (in bytes) of HTTP responses sent to clients. +* `faro_receiver_inflight_requests` (gauge): Current number of inflight requests. +* `faro_receiver_sourcemap_cache_size` (counter): Number of items in sourcemap cache per origin. +* `faro_receiver_sourcemap_downloads_total` (counter): Total number of sourcemap downloads performed per origin and status. +* `faro_receiver_sourcemap_file_reads_total` (counter): Total number of sourcemap retrievals using the filesystem per origin and status. 
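+
+As a sketch of the `{{ .Release }}` template described in the [location block](#location-block) section, assuming releases are deployed under versioned directories (the paths below are illustrative):
+
+```river
+faro.receiver "templated" {
+  sourcemaps {
+    // {{ .Release }} is replaced with the release value reported by the
+    // Faro Web SDK, so release v1.2.3 resolves to /var/my-app/v1.2.3/build.
+    location {
+      path                 = "/var/my-app/{{ .Release }}/build"
+      minified_path_prefix = "http://example.com/"
+    }
+  }
+
+  output {
+    logs = []
+  }
+}
+```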
+
+## Example
+
+```river
+faro.receiver "default" {
+  server {
+    listen_address = "NETWORK_ADDRESS"
+  }
+
+  sourcemaps {
+    location {
+      path                 = "PATH_TO_SOURCEMAPS"
+      minified_path_prefix = "WEB_APP_PREFIX"
+    }
+  }
+
+  output {
+    logs   = [loki.write.default.receiver]
+    traces = [otelcol.exporter.otlp.traces.input]
+  }
+}
+
+loki.write "default" {
+  endpoint {
+    url = "https://LOKI_ADDRESS/api/v1/push"
+  }
+}
+
+otelcol.exporter.otlp "traces" {
+  client {
+    endpoint = "OTLP_ADDRESS"
+  }
+}
+```
+
+Replace the following:
+
+* `NETWORK_ADDRESS`: IP address of the network interface to listen to traffic on.
+  This IP address must be reachable by browsers using the web application to instrument.
+
+* `PATH_TO_SOURCEMAPS`: Path on disk where sourcemaps are located.
+
+* `WEB_APP_PREFIX`: Prefix of the web application being instrumented.
+
+* `LOKI_ADDRESS`: Address of the Loki server to send logs to.
+
+  * If authentication is required to send logs to the Loki server, refer to the
+    documentation of [loki.write][] for more information.
+
+* `OTLP_ADDRESS`: The address of the OTLP-compatible server to send traces to.
+
+  * If authentication is required to send traces to the OTLP server, refer to the
+    documentation of [otelcol.exporter.otlp][] for more information.
+
+[loki.write]: ../loki.write/
+[otelcol.exporter.otlp]: ../otelcol.exporter.otlp/
+
+
+## Compatible components
+
+`faro.receiver` can accept arguments from the following components:
+
+- Components that export [Loki `LogsReceiver`](../../compatibility/#loki-logsreceiver-exporters)
+- Components that export [OpenTelemetry `otelcol.Consumer`](../../compatibility/#opentelemetry-otelcolconsumer-exporters)
+
+
+{{< admonition type="note" >}}
+Connecting some components may not be sensible or components may require further configuration to make the connection work correctly.
+Refer to the linked documentation for more details.
+{{< /admonition >}}
+
+
diff --git a/docs/sources/reference/components/local.file.md b/docs/sources/reference/components/local.file.md
new file mode 100644
index 0000000000..69628d9f01
--- /dev/null
+++ b/docs/sources/reference/components/local.file.md
@@ -0,0 +1,72 @@
+---
+canonical: https://grafana.com/docs/alloy/latest/reference/components/local.file/
+description: Learn about local.file
+title: local.file
+---
+
+# local.file
+
+`local.file` exposes the contents of a file on disk to other components.
+The file will be watched for changes so that its latest content is always exposed.
+
+The most common use of `local.file` is to load secrets (e.g., API keys) from files.
+
+Multiple `local.file` components can be specified by giving them different labels.
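+
+As a sketch of that pattern, the exported `content` of a secret-marked file can be passed to any argument that accepts a secret; the `loki.write` endpoint and credentials below are illustrative:
+
+```river
+local.file "password" {
+  filename  = "/var/secrets/password.txt"
+  is_secret = true
+}
+
+loki.write "example" {
+  endpoint {
+    url = "https://loki.example.com/loki/api/v1/push"
+
+    basic_auth {
+      username = "admin"
+      // The exported content has the secret type because is_secret is true.
+      password = local.file.password.content
+    }
+  }
+}
+```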
+
+## Usage
+
+```river
+local.file "LABEL" {
+  filename = FILE_NAME
+}
+```
+
+## Arguments
+
+The following arguments are supported:
+
+Name | Type | Description | Default | Required
+-----------------|------------|----------------------------------------------------|--------------|---------
+`filename` | `string` | Path of the file on disk to watch | | yes
+`detector` | `string` | Which file change detector to use (fsnotify, poll) | `"fsnotify"` | no
+`poll_frequency` | `duration` | How often to poll for file changes | `"1m"` | no
+`is_secret` | `bool` | Marks the file as containing a [secret][] | `false` | no
+
+[secret]: ../../../concepts/config-language/expressions/types_and_values/#secrets
+
+{{< docs/shared lookup="reference/components/local-file-arguments-text.md" source="alloy" version="" >}}
+
+## Exported fields
+
+The following fields are exported and can be referenced by other components:
+
+Name | Type | Description
+----------|----------------------|---------------------------------------------------
+`content` | `string` or `secret` | The contents of the file from the most recent read
+
+The `content` field will have the `secret` type only if the `is_secret` argument was true.
+
+## Component health
+
+`local.file` will be reported as healthy whenever the watched file was read successfully.
+
+Failing to read the file whenever an update is detected (or after the poll period elapses) will cause the component to be reported as unhealthy.
+When unhealthy, exported fields will be kept at the last healthy value.
+The read error will be exposed as a log message and in the debug information for the component.
+
+## Debug information
+
+`local.file` does not expose any component-specific debug information.
+
+## Debug metrics
+
+* `agent_local_file_timestamp_last_accessed_unix_seconds` (gauge): The timestamp, in Unix seconds, that the file was last successfully accessed.
+
+## Example
+
+```river
+local.file "secret_key" {
+  filename  = "/var/secrets/password.txt"
+  is_secret = true
+}
+```
diff --git a/docs/sources/flow/reference/components/local.file_match.md b/docs/sources/reference/components/local.file_match.md
similarity index 85%
rename from docs/sources/flow/reference/components/local.file_match.md
rename to docs/sources/reference/components/local.file_match.md
index 1413a1f8a2..5831311871 100644
--- a/docs/sources/flow/reference/components/local.file_match.md
+++ b/docs/sources/reference/components/local.file_match.md
@@ -1,10 +1,5 @@
---
-aliases:
-- /docs/grafana-cloud/agent/flow/reference/components/local.file_match/
-- /docs/grafana-cloud/monitor-infrastructure/agent/flow/reference/components/local.file_match/
-- /docs/grafana-cloud/monitor-infrastructure/integrations/agent/flow/reference/components/local.file_match/
-- /docs/grafana-cloud/send-data/agent/flow/reference/components/local.file_match/
-canonical: https://grafana.com/docs/agent/latest/flow/reference/components/local.file_match/
+canonical: https://grafana.com/docs/alloy/latest/reference/components/local.file_match/
description: Learn about local.file_match
title: local.file_match
---
@@ -42,8 +37,8 @@ Name | Type | Description

The following fields are exported and can be referenced by other components:

-Name | Type | Description
---- | ---- | -----------
+Name | Type | Description
+----------|---------------------|---------------------------------------------------
`targets` | `list(map(string))` | The set of targets discovered from the filesystem.
Each target includes the following labels: @@ -52,9 +47,8 @@ Each target includes the following labels: ## Component health -`local.file_match` is only reported as unhealthy when given an invalid -configuration. In those cases, exported fields retain their last healthy -values. +`local.file_match` is only reported as unhealthy when given an invalid configuration. +In those cases, exported fields retain their last healthy values. ## Debug information @@ -68,8 +62,8 @@ values. ### Send `/tmp/logs/*.log` files to Loki -This example discovers all files and folders under `/tmp/logs`. The absolute paths are -used by `loki.source.file.files` targets. +This example discovers all files and folders under `/tmp/logs`. +The absolute paths are used by `loki.source.file.files` targets. ```river local.file_match "tmp" { diff --git a/docs/sources/flow/reference/components/loki.echo.md b/docs/sources/reference/components/loki.echo.md similarity index 67% rename from docs/sources/flow/reference/components/loki.echo.md rename to docs/sources/reference/components/loki.echo.md index eb16448a86..675c2ef3ce 100644 --- a/docs/sources/flow/reference/components/loki.echo.md +++ b/docs/sources/reference/components/loki.echo.md @@ -1,10 +1,5 @@ --- -aliases: -- /docs/grafana-cloud/agent/flow/reference/components/loki.echo/ -- /docs/grafana-cloud/monitor-infrastructure/agent/flow/reference/components/loki.echo/ -- /docs/grafana-cloud/monitor-infrastructure/integrations/agent/flow/reference/components/loki.echo/ -- /docs/grafana-cloud/send-data/agent/flow/reference/components/loki.echo/ -canonical: https://grafana.com/docs/agent/latest/flow/reference/components/loki.echo/ +canonical: https://grafana.com/docs/alloy/latest/reference/components/loki.echo/ description: Learn about loki.echo labels: stage: beta @@ -13,13 +8,11 @@ title: loki.echo # loki.echo -{{< docs/shared lookup="flow/stability/beta.md" source="agent" version="" >}} +{{< docs/shared lookup="stability/beta.md" source="alloy" version="" >}} -`loki.echo` receives log entries from other `loki` components and prints them -to the process' standard output (stdout). +`loki.echo` receives log entries from other `loki` components and prints them to the process' standard output (stdout). -Multiple `loki.echo` components can be specified by giving them -different labels. +Multiple `loki.echo` components can be specified by giving them different labels. ## Usage @@ -35,8 +28,8 @@ loki.echo "LABEL" {} The following fields are exported and can be referenced by other components: -Name | Type | Description ----- | ---- | ----------- +Name | Type | Description +-----------|----------------|-------------------------------------------------------------- `receiver` | `LogsReceiver` | A value that other components can use to send log entries to. 
## Component health @@ -49,8 +42,7 @@ Name | Type | Description ## Example -This example creates a pipeline that reads log files from `/var/log` and -prints log lines to echo: +This example creates a pipeline that reads log files from `/var/log` and prints log lines to echo: ```river local.file_match "varlog" { diff --git a/docs/sources/flow/reference/components/loki.process.md b/docs/sources/reference/components/loki.process.md similarity index 81% rename from docs/sources/flow/reference/components/loki.process.md rename to docs/sources/reference/components/loki.process.md index f30efb5767..93f3455d0b 100644 --- a/docs/sources/flow/reference/components/loki.process.md +++ b/docs/sources/reference/components/loki.process.md @@ -4,27 +4,20 @@ aliases: - /docs/grafana-cloud/monitor-infrastructure/agent/flow/reference/components/loki.process/ - /docs/grafana-cloud/monitor-infrastructure/integrations/agent/flow/reference/components/loki.process/ - /docs/grafana-cloud/send-data/agent/flow/reference/components/loki.process/ -canonical: https://grafana.com/docs/agent/latest/flow/reference/components/loki.process/ +canonical: https://grafana.com/docs/alloy/latest/reference/components/loki.process/ description: Learn about loki.process title: loki.process --- # loki.process -`loki.process` receives log entries from other loki components, applies one or -more processing _stages_, and forwards the results to the list of receivers -in the component's arguments. +`loki.process` receives log entries from other loki components, applies one or more processing _stages_, and forwards the results to the list of receivers in the component's arguments. -A stage is a multi-purpose tool that can parse, transform, and filter log -entries before they're passed to a downstream component. These stages are -applied to each log entry in order of their appearance in the configuration -file. All stages within a `loki.process` block have access to the log entry's -label set, the log line, the log timestamp, as well as a shared map of -'extracted' values so that the results of one stage can be used in a subsequent -one. +A stage is a multi-purpose tool that can parse, transform, and filter log entries before they're passed to a downstream component. +These stages are applied to each log entry in order of their appearance in the configuration file. +All stages within a `loki.process` block have access to the log entry's label set, the log line, the log timestamp, as well as a shared map of 'extracted' values so that the results of one stage can be used in a subsequent one. -Multiple `loki.process` components can be specified by giving them -different labels. +Multiple `loki.process` components can be specified by giving them different labels. ## Usage @@ -79,9 +72,7 @@ The following blocks are supported inside the definition of `loki.process`: | stage.tenant | [stage.tenant][] | Configures a `tenant` processing stage. | no | | stage.timestamp | [stage.timestamp][] | Configures a `timestamp` processing stage. | no | -A user can provide any number of these stage blocks nested inside -`loki.process`; these will run in order of appearance in the configuration -file. +A user can provide any number of these stage blocks nested inside `loki.process`; these will run in order of appearance in the configuration file. [stage.cri]: #stagecri-block [stage.decolorize]: #stagedecolorize-block @@ -112,8 +103,7 @@ file. 
### stage.cri block -The `stage.cri` inner block enables a predefined pipeline which reads log lines using -the CRI logging format. +The `stage.cri` inner block enables a predefined pipeline which reads log lines using the CRI logging format. The following arguments are supported: @@ -123,23 +113,21 @@ The following arguments are supported: | `max_partial_line_size` | `number` | Maximum number of characters which a partial line can have. | `0` | no | | `max_partial_line_size_truncate` | `bool` | Truncate partial lines that are longer than `max_partial_line_size`. | `false` | no | -`max_partial_line_size` is only taken into account if -`max_partial_line_size_truncate` is set to `true`. +`max_partial_line_size` is only taken into account if `max_partial_line_size_truncate` is set to `true`. ```river stage.cri {} ``` -CRI specifies log lines as single space-delimited values with the following -components: +CRI specifies log lines as single space-delimited values with the following components: * `time`: The timestamp string of the log * `stream`: Either `stdout` or `stderr` * `flags`: CRI flags including `F` or `P` * `log`: The contents of the log line -Given the following log line, the subsequent key-value pairs are created in the -shared map of extracted data: +Given the following log line, the subsequent key-value pairs are created in the shared map of extracted data: + ``` "2019-04-30T02:12:41.8443515Z stdout F message" @@ -150,18 +138,15 @@ timestamp: 2019-04-30T02:12:41.8443515 ### stage.decolorize block -The `stage.decolorize` strips ANSI color codes from the log lines, thus making -it easier to parse logs further. +The `stage.decolorize` strips ANSI color codes from the log lines, thus making it easier to parse logs further. -The `stage.decolorize` block does not support any arguments or inner blocks, so -it is always empty. +The `stage.decolorize` block does not support any arguments or inner blocks, so it is always empty. ```river stage.decolorize {} ``` -`stage.decolorize` turns each line having a color code into a non-colored one, -for example: +`stage.decolorize` turns each line having a color code into a non-colored one, for example: ``` [2022-11-04 22:17:57.811] \033[0;32http\033[0m: GET /_health (0 ms) 204 @@ -175,11 +160,9 @@ is turned into ### stage.docker block -The `stage.docker` inner block enables a predefined pipeline which reads log lines in -the standard format of Docker log files. +The `stage.docker` inner block enables a predefined pipeline which reads log lines in the standard format of Docker log files. -The `stage.docker` block does not support any arguments or inner blocks, so it is -always empty. +The `stage.docker` block does not support any arguments or inner blocks, so it is always empty. ```river stage.docker {} @@ -191,8 +174,7 @@ Docker log entries are formatted as JSON with the following keys: * `stream`: Either `stdout` or `stderr` * `time`: The timestamp string of the log line -Given the following log line, the subsequent key-value pairs are created in the -shared map of extracted data: +Given the following log line, the subsequent key-value pairs are created in the shared map of extracted data: ``` {"log":"log message\n","stream":"stderr","time":"2019-04-30T02:12:41.8443515Z"} @@ -204,9 +186,8 @@ timestamp: 2019-04-30T02:12:41.8443515 ### stage.drop block -The `stage.drop` inner block configures a filtering stage that drops log entries -based on several options. 
If multiple options are provided, they're treated -as AND clauses and must _all_ be true for the log entry to be dropped. +The `stage.drop` inner block configures a filtering stage that drops log entries based on several options. +If multiple options are provided, they're treated as AND clauses and must _all_ be true for the log entry to be dropped. To drop entries with an OR clause, specify multiple `drop` blocks in sequence. The following arguments are supported: @@ -222,28 +203,17 @@ The following arguments are supported: | `drop_counter_reason` | `string` | A custom reason to report for dropped lines. | `"drop_stage"` | no | The `expression` field must be a RE2 regex string. -* If `source` is empty or not provided, the regex attempts to match the log -line itself. -* If `source` is a single name, the regex attempts to match the corresponding -value from the extracted map. -* If `source` is a comma-separated list of names, the corresponding values from -the extracted map are concatenated using `separator` and the regex attempts to -match the concatenated string. - -The `value` field can only work with values from the extracted map, and must be -specified together with `source`. -* If `source` is a single name, the entries are dropped when there is an exact -match between the corresponding value from the extracted map and the `value`. -* If `source` is a comma-separated list of names, the entries are dropped when -the `value` matches the `source` values from extracted data, concatenated using -the `separator`. - -Whenever an entry is dropped, the metric `loki_process_dropped_lines_total` -is incremented. By default, the reason label is `"drop_stage"`, but you can -provide a custom label using the `drop_counter_reason` argument. - -The following stage drops log entries that contain the word `debug` _and_ are -longer than 1KB. +* If `source` is empty or not provided, the regex attempts to match the log line itself. +* If `source` is a single name, the regex attempts to match the corresponding value from the extracted map. +* If `source` is a comma-separated list of names, the corresponding values from the extracted map are concatenated using `separator` and the regex attempts to match the concatenated string. + +The `value` field can only work with values from the extracted map, and must be specified together with `source`. +* If `source` is a single name, the entries are dropped when there is an exact match between the corresponding value from the extracted map and the `value`. +* If `source` is a comma-separated list of names, the entries are dropped when the `value` matches the `source` values from extracted data, concatenated using the `separator`. + +Whenever an entry is dropped, the metric `loki_process_dropped_lines_total` is incremented. By default, the reason label is `"drop_stage"`, but you can provide a custom label using the `drop_counter_reason` argument. + +The following stage drops log entries that contain the word `debug` _and_ are longer than 1KB. ```river stage.drop { @@ -252,9 +222,7 @@ stage.drop { } ``` -On the following example, we define multiple `drop` blocks so `loki.process` -drops entries that are either 24h or older, are longer than 8KB, _or_ the -extracted value of 'app' is equal to foo. +On the following example, we define multiple `drop` blocks so `loki.process` drops entries that are either 24h or older, are longer than 8KB, _or_ the extracted value of 'app' is equal to foo. 
```river stage.drop { @@ -275,8 +243,7 @@ stage.drop { ### stage.eventlogmessage block -The `eventlogmessage` stage extracts data from the Message string that appears -in the Windows Event Log. +The `eventlogmessage` stage extracts data from the Message string that appears in the Windows Event Log. The following arguments are supported: @@ -286,20 +253,18 @@ The following arguments are supported: | `overwrite_existing` | `bool` | Whether to overwrite existing extracted data fields. | `false` | no | | `drop_invalid_labels` | `bool` | Whether to drop fields that are not valid label names. | `false` | no | -When `overwrite_existing` is set to `true`, the stage overwrites existing extracted data -fields with the same name. If set to `false`, the `_extracted` suffix will be -appended to an already existing field name. +When `overwrite_existing` is set to `true`, the stage overwrites existing extracted data fields with the same name. +If set to `false`, the `_extracted` suffix will be appended to an already existing field name. -When `drop_invalid_labels` is set to `true`, the stage drops fields that are -not valid label names. If set to `false`, the stage will automatically convert -them into valid labels replacing invalid characters with underscores. +When `drop_invalid_labels` is set to `true`, the stage drops fields that are not valid label names. +If set to `false`, the stage will automatically convert them into valid labels replacing invalid characters with underscores. #### Example combined with `stage.json` ```river stage.json { - expressions = { - message = "", + expressions = { + message = "", Overwritten = "", } } @@ -315,14 +280,12 @@ Given the following log line: {"event_id": 1, "Overwritten": "old", "message": "Message type:\r\nOverwritten: new\r\nImage: C:\\Users\\User\\agent.exe"} ``` -The first stage would create the following key-value pairs in the set of -extracted data: +The first stage would create the following key-value pairs in the set of extracted data: - `message`: `Message type:\r\nOverwritten: new\r\nImage: C:\Users\User\agent.exe` - `Overwritten`: `old` -The second stage will parse the value of `message` from the extracted data -and append/overwrite the following key-value pairs to the set of extracted data: +The second stage will parse the value of `message` from the extracted data and append/overwrite the following key-value pairs to the set of extracted data: - `Image`: `C:\\Users\\User\\agent.exe` - `Message_type`: (empty string) @@ -330,10 +293,9 @@ and append/overwrite the following key-value pairs to the set of extracted data: ### stage.json block -The `stage.json` inner block configures a JSON processing stage that parses incoming -log lines or previously extracted values as JSON and uses -[JMESPath expressions](https://jmespath.org/tutorial.html) to extract new -values from them. +The `stage.json` inner block configures a JSON processing stage that parses incoming log lines or previously extracted values as JSON and uses [JMESPath expressions][] to extract new values from them. + +[JMESPath expressions]: https://jmespath.org/tutorial.html The following arguments are supported: @@ -343,13 +305,11 @@ The following arguments are supported: | `source` | `string` | Source of the data to parse as JSON. | `""` | no | | `drop_malformed` | `bool` | Drop lines whose input cannot be parsed as valid JSON. | `false` | no | -When configuring a JSON stage, the `source` field defines the source of data to -parse as JSON. 
By default, this is the log line itself, but it can also be a -previously extracted value. +When configuring a JSON stage, the `source` field defines the source of data to parse as JSON. +By default, this is the log line itself, but it can also be a previously extracted value. -The `expressions` field is the set of key-value pairs of JMESPath expressions to -run. The map key defines the name with which the data is extracted, while the -map value is the expression used to populate the value. +The `expressions` field is the set of key-value pairs of JMESPath expressions to run. +The map key defines the name with which the data is extracted, while the map value is the expression used to populate the value. Here's a given log line and two JSON stages to run. @@ -368,27 +328,22 @@ loki.process "username" { } ``` -In this example, the first stage uses the log line as the source and populates -these values in the shared map. An empty expression means using the same value -as the key (so `extra="extra"`). +In this example, the first stage uses the log line as the source and populates these values in the shared map. +An empty expression means using the same value as the key (so `extra="extra"`). ``` output: log message\n extra: {"user": "agent"} ``` -The second stage uses the value in `extra` as the input and appends the -following key-value pair to the set of extracted data. +The second stage uses the value in `extra` as the input and appends the following key-value pair to the set of extracted data. ``` username: agent ``` {{< admonition type="note" >}} -Due to a limitation of the upstream jmespath library, you must wrap any string -that contains a hyphen `-` in quotes so that it's not considered a numerical -expression. - -If you don't use quotes to wrap a string that contains a hyphen, you will get -errors like: `Unexpected token at the end of the expression: tNumber` +Due to a limitation of the upstream jmespath library, you must wrap any string that contains a hyphen `-` in quotes so that it's not considered a numerical expression. + +If you don't use quotes to wrap a string that contains a hyphen, you will get errors like: `Unexpected token at the end of the expression: tNumber` You can use one of two options to circumvent this issue: @@ -415,8 +370,7 @@ stage.label_drop { ### stage.label_keep block -The `stage.label_keep` inner block configures a processing stage that filters the -label set of an incoming log entry down to a subset. +The `stage.label_keep` inner block configures a processing stage that filters the label set of an incoming log entry down to a subset. The following arguments are supported: @@ -433,8 +387,7 @@ stage.label_keep { ### stage.labels block -The `stage.labels` inner block configures a labels processing stage that can read -data from the extracted values map and set new labels on incoming log entries. +The `stage.labels` inner block configures a labels processing stage that can read data from the extracted values map and set new labels on incoming log entries. The following arguments are supported: @@ -442,9 +395,8 @@ The following arguments are supported: | -------- | ------------- | --------------------------------------- | ------- | -------- | | `values` | `map(string)` | Configures a `labels` processing stage. | `{}` | no | -In a labels stage, the map's keys define the label to set and the values are -how to look them up. If the value is empty, it is inferred to be the same as -the key. 
+In a labels stage, the map's keys define the label to set and the values are how to look them up. +If the value is empty, it is inferred to be the same as the key. ```river stage.labels { @@ -457,8 +409,7 @@ stage.labels { ### stage.structured_metadata block -The `stage.structured_metadata` inner block configures a stage that can read -data from the extracted values map and add them to log entries as structured metadata. +The `stage.structured_metadata` inner block configures a stage that can read data from the extracted values map and add them to log entries as structured metadata. The following arguments are supported: @@ -466,9 +417,8 @@ The following arguments are supported: | -------- | ------------- |-----------------------------------------------------------------------------| ------- | -------- | | `values` | `map(string)` | Specifies the list of labels to add from extracted values map to log entry. | `{}` | no | -In a structured_metadata stage, the map's keys define the label to set and the values are -how to look them up. If the value is empty, it is inferred to be the same as -the key. +In a structured_metadata stage, the map's keys define the label to set and the values are how to look them up. +If the value is empty, it is inferred to be the same as the key. ```river stage.structured_metadata { @@ -481,8 +431,7 @@ stage.structured_metadata { ### stage.limit block -The `stage.limit` inner block configures a rate-limiting stage that throttles logs -based on several options. +The `stage.limit` inner block configures a rate-limiting stage that throttles logs based on several options. The following arguments are supported: @@ -494,10 +443,8 @@ The following arguments are supported: | `drop` | `bool` | Whether to discard or backpressure lines that exceed the rate limit. | `false` | no | | `max_distinct_labels` | `number` | The number of unique values to keep track of when rate-limiting `by_label_name`. | `10000` | no | -The rate limiting is implemented as a "token bucket" of size `burst`, initially -full and refilled at `rate` tokens per second. Each received log entry consumes one token from the bucket. When `drop` is set to true, incoming entries -that exceed the rate-limit are dropped, otherwise they are queued until -more tokens are available. +The rate limiting is implemented as a "token bucket" of size `burst`, initially full and refilled at `rate` tokens per second. +Each received log entry consumes one token from the bucket. When `drop` is set to true, incoming entries that exceed the rate-limit are dropped, otherwise they are queued until more tokens are available. ```river stage.limit { @@ -506,13 +453,13 @@ stage.limit { } ``` -If `by_label_name` is set, then `drop` must be set to `true`. This enables the -stage to rate-limit not by the number of lines but by the number of labels. +If `by_label_name` is set, then `drop` must be set to `true`. +This enables the stage to rate-limit not by the number of lines but by the number of labels. + +The following example rate-limits entries from each unique `namespace` value independently. +Any entries without the `namespace` label are not rate-limited. +The stage keeps track of up to `max_distinct_labels` unique values, defaulting at 10000. -The following example rate-limits entries from each unique `namespace` value -independently. Any entries without the `namespace` label are not rate-limited. -The stage keeps track of up to `max_distinct_labels` unique -values, defaulting at 10000. 
```river
stage.limit {
  rate = 10
  burst = 10
  drop = true
  by_label_name = "namespace"
}
```

@@ -525,8 +472,7 @@

### stage.logfmt block

-The `stage.logfmt` inner block configures a processing stage that reads incoming log
-lines as logfmt and extracts values from them.
+The `stage.logfmt` inner block configures a processing stage that reads incoming log lines as logfmt and extracts values from them.

The following arguments are supported:

@@ -536,14 +482,12 @@ The following arguments are supported:
| `source` | `string` | Source of the data to parse as logfmt. | `""` | no |


-The `source` field defines the source of data to parse as logfmt. When `source`
-is missing or empty, the stage parses the log line itself, but it can also be
-used to parse a previously extracted value.
+The `source` field defines the source of data to parse as logfmt. When `source` is missing or empty, the stage parses the log line itself, but it can also be used to parse a previously extracted value.
+
+This stage uses the [go-logfmt][] unmarshaler, so that numeric or boolean types are unmarshalled into their correct form. The stage does not perform any other type conversions.
+If the extracted value is a complex type, it is treated as a string.

-This stage uses the [go-logfmt](https://github.com/go-logfmt/logfmt)
-unmarshaler, so that numeric or boolean types are unmarshalled into their
-correct form. The stage does not perform any other type conversions. If the
-extracted value is a complex type, it is treated as a string.
+[go-logfmt]: https://github.com/go-logfmt/logfmt

Let's see how this works on the following log line and stages.

@@ -560,17 +504,13 @@ stage.logfmt {
}
```

-The first stage parses the log line itself and inserts the `extra` key in the
-set of extracted data, with the value of `user=foo`.
+The first stage parses the log line itself and inserts the `extra` key in the set of extracted data, with the value of `user=foo`.

-The second stage parses the contents of `extra` and appends the `username: foo`
-key-value pair to the set of extracted data.
+The second stage parses the contents of `extra` and appends the `username: foo` key-value pair to the set of extracted data.

### stage.match block

-The `stage.match` inner block configures a filtering stage that can conditionally
-either apply a nested set of processing stages or drop an entry when a log
-entry matches a configurable LogQL stream selector and filter expressions.
+The `stage.match` inner block configures a filtering stage that can conditionally either apply a nested set of processing stages or drop an entry when a log entry matches a configurable LogQL stream selector and filter expressions.

The following arguments are supported:

@@ -585,18 +525,16 @@ The following arguments are supported:
The filters do not include label filter expressions such as `| label == "foobar"`.
{{< /admonition >}}

-The `stage.match` block supports a number of `stage.*` inner blocks, like the top-level
-block. These are used to construct the nested set of stages to run if the
-selector matches the labels and content of the log entries. It supports all the
-same `stage.NAME` blocks as the in the top level of the loki.process component.
+The `stage.match` block supports a number of `stage.*` inner blocks, like the top-level block.
+These are used to construct the nested set of stages to run if the selector matches the labels and content of the log entries.
+It supports all the same `stage.NAME` blocks as in the top level of the `loki.process` component.
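+
+For example, a minimal sketch of the nested form (the `env` label and the `message` JSON field here are assumptions for illustration; the full worked example below shows a realistic pipeline) could look like this:
+
+```river
+stage.match {
+  // Only log entries whose label set matches this LogQL stream selector
+  // enter the nested stages below.
+  selector = "{env=\"dev\"}"
+
+  // Any stage.NAME block can be nested here, exactly as at the top level.
+  stage.json {
+    expressions = {
+      msg = "message",
+    }
+  }
+}
+```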
+
+If the specified action is `"drop"`, the metric `loki_process_dropped_lines_total` is incremented with every line dropped.
+By default, the reason label is `"match_stage"`, but a custom reason can be provided by using the `drop_counter_reason` argument.

-If the specified action is `"drop"`, the metric
-`loki_process_dropped_lines_total` is incremented with every line dropped.
-By default, the reason label is `"match_stage"`, but a custom reason can be
-provided by using the `drop_counter_reason` argument.
+Let's see this in action, with the following log lines and stages:

-Let's see this in action, with the following log lines and stages
```
{ "time":"2023-01-18T17:08:41+00:00", "app":"foo", "component": ["parser","type"], "level" : "WARN", "message" : "app1 log line" }
{ "time":"2023-01-18T17:08:42+00:00", "app":"bar", "component": ["parser","type"], "level" : "ERROR", "message" : "foo noisy error" }
@@ -636,35 +574,24 @@ stage.output {
}
```

-The first two stages parse the log lines as JSON, decode the `app` value into
-the shared extracted map as `appname`, and use its value as the `applbl` label.
+The first two stages parse the log lines as JSON, decode the `app` value into the shared extracted map as `appname`, and use its value as the `applbl` label.

-The third stage uses the LogQL selector to only execute the nested stages on
-lines where the `applbl="foo"`. So, for the first line, the nested JSON stage
-adds `msg="app1 log line"` into the extracted map.
+The third stage uses the LogQL selector to only execute the nested stages on lines where `applbl="foo"`.
+So, for the first line, the nested JSON stage adds `msg="app1 log line"` into the extracted map.

-The fourth stage uses the LogQL selector to only execute on lines where
-`applbl="qux"`; that means it won't match any of the input, and the nested
-JSON stage does not run.
+The fourth stage uses the LogQL selector to only execute on lines where `applbl="qux"`; that means it won't match any of the input, and the nested JSON stage does not run.

-The fifth stage drops entries from lines where `applbl` is set to 'bar' and the
-line contents matches the regex `.*noisy error.*`. It also increments the
-`loki_process_dropped_lines_total` metric with a label
-`drop_counter_reason="discard_noisy_errors"`.
+The fifth stage drops entries from lines where `applbl` is set to 'bar' and the line contents match the regex `.*noisy error.*`.
+It also increments the `loki_process_dropped_lines_total` metric with a label `drop_counter_reason="discard_noisy_errors"`.

-The final output stage changes the contents of the log line to be the value of
-`msg` from the extracted map. In this case, the first log entry's content is
-changed to `app1 log line`.
+The final output stage changes the contents of the log line to be the value of `msg` from the extracted map. In this case, the first log entry's content is changed to `app1 log line`.

### stage.metrics block

-The `stage.metrics` inner block configures stage that allows to define and
-update metrics based on values from the shared extracted map. The created
-metrics are available at the Agent's root /metrics endpoint.
+The `stage.metrics` inner block configures a stage that lets you define and update metrics based on values from the shared extracted map.
+The created metrics are available at the Agent's root `/metrics` endpoint.
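+
+As a minimal sketch of the block's shape (the metric name here is arbitrary, and `metric.counter` with its arguments is described below), a stage that counts every line passing through it could look like this:
+
+```river
+stage.metrics {
+  metric.counter {
+    // match_all counts every received entry without consulting the extracted map.
+    name        = "lines_total"
+    description = "total number of log lines"
+    match_all   = true
+    action      = "inc"
+  }
+}
+```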
-The `stage.metrics` block does not support any arguments and is only configured via -a number of nested inner `metric.*` blocks, one for each metric that should be -generated. +The `stage.metrics` block does not support any arguments and is only configured via a number of nested inner `metric.*` blocks, one for each metric that should be generated. The following blocks are supported inside the definition of `stage.metrics`: @@ -680,28 +607,27 @@ The following blocks are supported inside the definition of `stage.metrics`: #### metric.counter block + Defines a metric whose value only goes up. The following arguments are supported: -| Name | Type | Description | Default | Required | -|---------------------|------------|----------------------------------------------------------------------------------------------------------|--------------------------|----------| -| `name` | `string` | The metric name. | | yes | -| `action` | `string` | The action to take. Valid actions are `set`, `inc`, `dec`,` add`, or `sub`. | | yes | -| `description` | `string` | The metric's description and help text. | `""` | no | -| `source` | `string` | Key from the extracted data map to use for the metric. Defaults to the metric name. | `""` | no | -| `prefix` | `string` | The prefix to the metric name. | `"loki_process_custom_"` | no | -| `max_idle_duration` | `duration` | Maximum amount of time to wait until the metric is marked as 'stale' and removed. | `"5m"` | no | -| `value` | `string` | If set, the metric only changes if `source` exactly matches the `value`. | `""` | no | -| `match_all` | `bool` | If set to true, all log lines are counted, without attemptng to match the `source` to the extracted map. | `false` | no | -| `count_entry_bytes` | `bool` | If set to true, counts all log lines bytes. | `false` | no | +| Name | Type | Description | Default | Required | +|---------------------|------------|-----------------------------------------------------------------------------------------------------------|--------------------------|----------| +| `name` | `string` | The metric name. | | yes | +| `action` | `string` | The action to take. Valid actions are `set`, `inc`, `dec`,` add`, or `sub`. | | yes | +| `description` | `string` | The metric's description and help text. | `""` | no | +| `source` | `string` | Key from the extracted data map to use for the metric. Defaults to the metric name. | `""` | no | +| `prefix` | `string` | The prefix to the metric name. | `"loki_process_custom_"` | no | +| `max_idle_duration` | `duration` | Maximum amount of time to wait until the metric is marked as 'stale' and removed. | `"5m"` | no | +| `value` | `string` | If set, the metric only changes if `source` exactly matches the `value`. | `""` | no | +| `match_all` | `bool` | If set to true, all log lines are counted, without attempting to match the `source` to the extracted map. | `false` | no | +| `count_entry_bytes` | `bool` | If set to true, counts all log lines bytes. | `false` | no | A counter cannot set both `match_all` to true _and_ a `value`. -A counter cannot set `count_entry_bytes` without also setting `match_all=true` -_or_ `action=add`. -The valid `action` values are `inc` and `add`. The `inc` action increases the -metric value by 1 for each log line that passed the filter. The `add` action -converts the extracted value to a positive float and adds it to the metric. +A counter cannot set `count_entry_bytes` without also setting `match_all=true` _or_ `action=add`. +The valid `action` values are `inc` and `add`. 
The `inc` action increases the metric value by 1 for each log line that passed the filter. +The `add` action converts the extracted value to a positive float and adds it to the metric. #### metric.gauge block @@ -722,8 +648,7 @@ The following arguments are supported: The valid `action` values are `inc`, `dec`, `set`, `add`, or `sub`. `inc` and `dec` increment and decrement the metric's value by 1 respectively. -If `set`, `add, or `sub` is chosen, the extracted value must be convertible -to a positive float and is set, added to, or subtracted from the metric's value. +If `set`, `add, or `sub` is chosen, the extracted value must be convertible to a positive float and is set, added to, or subtracted from the metric's value. #### metric.histogram block @@ -746,14 +671,12 @@ The following arguments are supported: If `value` is not present, all incoming log entries match. -Label values on created metrics can be dynamic, which can cause exported -metrics to explode in cardinality or go stale, for example, when a stream stops -receiving new logs. To prevent unbounded growth of the `/metrics` endpoint, any -metrics which have not been updated within `max_idle_duration` are removed. The -`max_idle_duration` must be greater or equal to `"1s"`, and it defaults to `"5m"`. +Label values on created metrics can be dynamic, which can cause exported metrics to explode in cardinality or go stale, for example, when a stream stops receiving new logs. +To prevent unbounded growth of the `/metrics` endpoint, any metrics which have not been updated within `max_idle_duration` are removed. +The `max_idle_duration` must be greater or equal to `"1s"`, and it defaults to `"5m"`. -The metric values extracted from the log data are internally converted to -floats. The supported values are the following: +The metric values extracted from the log data are internally converted to floats. +The supported values are the following: * integer * floating point number @@ -764,9 +687,12 @@ floats. The supported values are the following: * true is converted to 1. * false is converted to 0. -The following pipeline creates a counter which increments every time any log line is received by using the `match_all` parameter. The pipeline creates a second counter which adds the byte size of these log lines by using the `count_entry_bytes` parameter. +The following pipeline creates a counter which increments every time any log line is received by using the `match_all` parameter. +The pipeline creates a second counter which adds the byte size of these log lines by using the `count_entry_bytes` parameter. + +These two metrics disappear after 24 hours if no new entries are received, to avoid building up metrics which no longer serve any use. +These two metrics are a good starting point to track the volume of log streams in both the number of entries and their byte size, to identify sources of high-volume or high-cardinality data. -These two metrics disappear after 24 hours if no new entries are received, to avoid building up metrics which no longer serve any use. These two metrics are a good starting point to track the volume of log streams in both the number of entries and their byte size, to identify sources of high-volume or high-cardinality data. ```river stage.metrics { metric.counter { @@ -793,8 +719,7 @@ stage.metrics { } ``` -Here, the first stage uses a regex to extract text in the format -`order_status=` in the log line. +Here, the first stage uses a regex to extract text in the format `order_status=` in the log line. 
The second stage defines a counter which increments the `successful_orders_total` and `failed_orders_total` based on the previously extracted values.

```river
stage.regex {
@@ -821,7 +746,8 @@ stage.metrics {
}
```

-In this example, the first stage extracts text in the format of `retries=<value>`, from the log line. The second stage creates a gauge whose current metric value is increased by the number extracted from the retries field.
+In this example, the first stage extracts text in the format `retries=<value>` from the log line.
+The second stage creates a gauge whose current metric value is increased by the number extracted from the retries field.

```river
stage.regex {
@@ -837,9 +763,7 @@ stage.metrics {
}
```

-The following example shows a histogram that reads `response_time` from the extracted
-map and places it into a bucket, both increasing the count of the bucket and
-the sum for that particular bucket:
+The following example shows a histogram that reads `response_time` from the extracted map and places it into a bucket, both increasing the count of the bucket and the sum for that particular bucket:

```river
stage.metrics {
@@ -854,8 +778,7 @@ stage.metrics {

### stage.multiline block

-The `stage.multiline` inner block merges multiple lines into a single block before
-passing it on to the next stage in the pipeline.
+The `stage.multiline` inner block merges multiple lines into a single block before passing it on to the next stage in the pipeline.

The following arguments are supported:

@@ -869,13 +792,12 @@ The following arguments are supported:

A new block is identified by the RE2 regular expression passed in `firstline`.

-Any line that does _not_ match the expression is considered to be part of the
-block of the previous match. If no new logs arrive with `max_wait_time`, the
-block is sent on. The `max_lines` field defines the maximum number of lines a
-block can have. If this is exceeded, a new block is started.
+Any line that does _not_ match the expression is considered to be part of the block of the previous match.
+If no new logs arrive within `max_wait_time`, the block is sent on.
+The `max_lines` field defines the maximum number of lines a block can have.
+If this is exceeded, a new block is started.

-Let's see how this works in practice with an example stage and a stream of log
-entries from a Flask web service.
+Let's see how this works in practice with an example stage and a stream of log entries from a Flask web service.

```
stage.multiline {
@@ -905,16 +827,12 @@ Exception: Sorry, this route always breaks
[2023-01-18 17:42:29] "GET /hello HTTP/1.1" 200 -
```

-All 'blocks' that form log entries of separate web requests start with a
-timestamp in square brackets. The stage detects this with the regular
-expression in `firstline` to collapse all lines of the traceback into a single
-block and thus a single Loki log entry.
+All 'blocks' that form log entries of separate web requests start with a timestamp in square brackets.
+The stage detects this with the regular expression in `firstline` to collapse all lines of the traceback into a single block and thus a single Loki log entry.

### stage.output block

-The `stage.output` inner block configures a processing stage that reads from the
-extracted map and changes the content of the log entry that is forwarded
-to the next component.
+The `stage.output` inner block configures a processing stage that reads from the extracted map and changes the content of the log entry that is forwarded to the next component.
The following arguments are supported: @@ -942,19 +860,17 @@ stage.output { ``` The first stage extracts the following key-value pairs into the shared map: + ``` user: John Doe message: hello, world! ``` -Then, the second stage adds `user="John Doe"` to the label set of the log -entry, and the final output stage changes the log line from the original -JSON to `hello, world!`. +Then, the second stage adds `user="John Doe"` to the label set of the log entry, and the final output stage changes the log line from the original JSON to `hello, world!`. ### stage.pack block -The `stage.pack` inner block configures a transforming stage that replaces the log -entry with a JSON object that embeds extracted values and labels with it. +The `stage.pack` inner block configures a transforming stage that replaces the log entry with a JSON object that embeds extracted values and labels with it. The following arguments are supported: @@ -963,16 +879,14 @@ The following arguments are supported: | `labels` | `list(string)` | The values from the extracted data and labels to pack with the log entry. | | yes | | `ingest_timestamp` | `bool` | Whether to replace the log entry timestamp with the time the `pack` stage runs. | `true` | no | -This stage lets you embed extracted values and labels together with the log -line, by packing them into a JSON object. The original message is stored under -the `_entry` key, and all other keys retain their values. This is useful in -cases where you _do_ want to keep a certain label or metadata, but you don't -want it to be indexed as a label due to high cardinality. +This stage lets you embed extracted values and labels together with the log line, by packing them into a JSON object. +The original message is stored under the `_entry` key, and all other keys retain their values. +This is useful in cases where you _do_ want to keep a certain label or metadata, but you don't want it to be indexed as a label due to high cardinality. -The querying capabilities of Loki make it easy to still access this data so it can -be filtered and aggregated at query time. +The querying capabilities of Loki make it easy to still access this data so it can be filtered and aggregated at query time. For example, consider the following log entry: + ``` log_line: "something went wrong" labels: { "level" = "error", "env" = "dev", "user_id" = "f8fas0r" } @@ -985,8 +899,8 @@ stage.pack { } ``` -The stage transforms the log entry into the following JSON object, where the two -embedded labels are removed from the original log entry: +The stage transforms the log entry into the following JSON object, where the two embedded labels are removed from the original log entry: + ```json { "_entry": "something went wrong", @@ -995,19 +909,15 @@ embedded labels are removed from the original log entry: } ``` -At query time, Loki's [`unpack` parser](/docs/loki/latest/logql/log_queries/#unpack) -can be used to access these embedded labels and replace the log line with the -original one stored in the `_entry` field automatically. +At query time, Loki's [`unpack` parser][unpack parser] can be used to access these embedded labels and replace the log line with the original one stored in the `_entry` field automatically. + +[unpack parser]: https://grafana.com/docs/loki/latest/logql/log_queries/#unpack -When combining several log streams to use with the `pack` stage, you can set -`ingest_timestamp` to true to avoid interlaced timestamps and -out-of-order ingestion issues. 
+When combining several log streams to use with the `pack` stage, you can set `ingest_timestamp` to `true` to avoid interlaced timestamps and out-of-order ingestion issues.

### stage.regex block

-The `stage.regex` inner block configures a processing stage that parses log lines
-using regular expressions and uses named capture groups for adding data into
-the shared extracted map of values.
+The `stage.regex` inner block configures a processing stage that parses log lines using regular expressions and uses named capture groups for adding data into the shared extracted map of values.

The following arguments are supported:

@@ -1017,19 +927,16 @@ The following arguments are supported:
| `source` | `string` | Name from extracted data to parse. If empty, uses the log message. | `""` | no |


-The `expression` field needs to be a RE2 regex string. Every matched capture
-group is added to the extracted map, so it must be named like: `(?P<name>re)`.
-The name of the capture group is then used as the key in the extracted map for
-the matched value.
+The `expression` field needs to be a RE2 regex string.
+Every matched capture group is added to the extracted map, so it must be named like: `(?P<name>re)`.
+The name of the capture group is then used as the key in the extracted map for the matched value.

-Because of how River strings work, any backslashes in `expression` must be
-escaped with a double backslash; for example `"\\w"` or `"\\S+"`.
+Because of how River strings work, any backslashes in `expression` must be escaped with a double backslash; for example `"\\w"` or `"\\S+"`.

If the `source` is empty or missing, then the stage parses the log line itself.
If it's set, the stage parses a previously extracted value with the same name.

-Given the following log line and regex stage, the extracted values are shown
-below:
+Given the following log line and regex stage, the extracted values are shown below:

```
2019-01-01T01:00:00.000000001Z stderr P i'm a log message!
@@ -1044,11 +951,10 @@ flags: P,
content: i'm a log message
```

-On the other hand, if the `source` value is set, then the regex is applied to
-the value stored in the shared map under that name.
+On the other hand, if the `source` value is set, then the regex is applied to the value stored in the shared map under that name.
+
+Let's see what happens when the following log line is put through this two-stage pipeline:

-Let's see what happens when the following log line is put through this
-two-stage pipeline:

```
{"timestamp":"2022-01-01T01:00:00.000000001Z"}
@@ -1062,21 +968,21 @@ stage.regex {
}
```

The first stage adds the following key-value pair into the extracted map:
+
```
time: 2022-01-01T01:00:00.000000001Z
```

-Then, the regex stage parses the value for time from the shared values and
-appends the subsequent key-value pair back into the extracted values map:
+Then, the regex stage parses the value for time from the shared values and appends the subsequent key-value pair back into the extracted values map:
+
```
year: 2022
```

### stage.replace block

-The `stage.replace` inner block configures a stage that parses a log line using a
-regular expression and replaces the log line contents. Named capture groups in
-the regex also support adding data into the shared extracted map.
+The `stage.replace` inner block configures a stage that parses a log line using a regular expression and replaces the log line contents.
+Named capture groups in the regex also support adding data into the shared extracted map.
The following arguments are supported:

@@ -1087,19 +993,16 @@ The following arguments are supported:
| `replace` | `string` | Value replaced by the capture group. | | no |


-The `source` field defines the source of data to parse using `expression`. When
-`source` is missing or empty, the stage parses the log line itself, but it can
-also be used to parse a previously extracted value. The replaced value is
-assigned back to the `source` key.
+The `source` field defines the source of data to parse using `expression`.
+When `source` is missing or empty, the stage parses the log line itself, but it can also be used to parse a previously extracted value.
+The replaced value is assigned back to the `source` key.

-The `expression` must be a valid RE2 regex. Every named capture group
-`(?P<name>re)` is set into the extracted map with its name.
+The `expression` must be a valid RE2 regex.
+Every named capture group `(?P<name>re)` is set into the extracted map with its name.

-Because of how River treats backslashes in double-quoted strings, note that all
-backslashes in a regex expression must be escaped like `"\\w*"`.
+Because of how River treats backslashes in double-quoted strings, note that all backslashes in a regex expression must be escaped like `"\\w*"`.

-Let's see how this works with the following log line and stage. Since `source`
-is omitted, the replacement occurs on the log line itself.
+Let's see how this works with the following log line and stage. Since `source` is omitted, the replacement occurs on the log line itself.

```
2023-01-01T01:00:00.000000001Z stderr P i'm a log message who has sensitive information with password xyz!
@@ -1111,6 +1014,7 @@ stage.replace {
```

The log line is transformed to
+
```
2023-01-01T01:00:00.000000001Z stderr P i'm a log message who has sensitive information with password *****!
```

If `replace` is empty, then the captured value is omitted instead.

In the following example, `source` is defined.
+
```
{"time":"2023-01-01T01:00:00.000000001Z", "level": "info", "msg":"11.11.11.11 - \"POST /loki/api/push/ HTTP/1.1\" 200 932 \"-\" \"Mozilla/5.0\"}
@@ -1133,25 +1038,27 @@ stage.replace {
```

The JSON stage adds the following key-value pairs into the extracted map:
+
```
time: 2023-01-01T01:00:00.000000001Z
level: info
msg: "11.11.11.11 - "POST /loki/api/push/ HTTP/1.1" 200 932 "-" "Mozilla/5.0"
```

-The `replace` stage acts on the `msg` value. The capture group matches against
-`/loki/api/push` and is replaced by `redacted_url`.
+The `replace` stage acts on the `msg` value. The capture group matches against `/loki/api/push` and is replaced by `redacted_url`.

The `msg` value is finally transformed into:
+
```
msg: "11.11.11.11 - "POST redacted_url HTTP/1.1" 200 932 "-" "Mozilla/5.0"
```

-The `replace` field can use a set of templating functions, by utilizing Go's
-[text/template](https://pkg.go.dev/text/template) package.
+The `replace` field can use a set of templating functions, by utilizing Go's [text/template][] package.
+
+[text/template]: https://pkg.go.dev/text/template
+
+Let's see how this works with named capture groups with a sample log line and stage.

-Let's see how this works with named capture groups with a sample log line
-and stage.
```
11.11.11.11 - agent [01/Jan/2023:00:00:01 +0200]
@@ -1161,9 +1068,9 @@ stage.replace {
}
```

-Since `source` is empty, the regex parses the log line itself and extracts the
-named capture groups to the shared map of values.
The `replace` field acts on -these extracted values and converts them to uppercase: +Since `source` is empty, the regex parses the log line itself and extracts the named capture groups to the shared map of values. +The `replace` field acts on these extracted values and converts them to uppercase: + ``` ip: 11.11.11.11 identd: - @@ -1172,12 +1079,13 @@ timestamp: 01/JAN/2023:00:00:01 +0200 ``` and the log line becomes: + ``` 11.11.11.11 - FRANK [01/JAN/2023:00:00:01 +0200] ``` -The following list contains available functions with examples of -more complex `replace` fields. +The following list contains available functions with examples of more complex `replace` fields. + ``` ToLower, ToUpper, Replace, Trim, TrimLeftTrimRight, TrimPrefix, TrimSuffix, TrimSpace, Hash, Sha2Hash, regexReplaceAll, regexReplaceAllLiteral @@ -1187,9 +1095,8 @@ ToLower, ToUpper, Replace, Trim, TrimLeftTrimRight, TrimPrefix, TrimSuffix, Trim ### stage.sampling block -The `sampling` stage is used to sample the logs. Configuring the value -`rate = 0.1` means that 10% of the logs will continue to be processed. The -remaining 90% of the logs will be dropped. +The `sampling` stage is used to sample the logs. Configuring the value `rate = 0.1` means that 10% of the logs will continue to be processed. +The remaining 90% of the logs will be dropped. The following arguments are supported: @@ -1198,9 +1105,8 @@ The following arguments are supported: | `rate` | `float` | The sampling rate in a range of `[0, 1]` | | yes | | `drop_counter_reason` | `string` | The label to add to `loki_process_dropped_lines_total` metric when logs are dropped by this stage. | sampling_stage | no | -For example, the configuration below will sample 25% of the logs and drop the -remaining 75%. When logs are dropped, the `loki_process_dropped_lines_total` -metric is incremented with an additional `reason=logs_sampling` label. +For example, the configuration below will sample 25% of the logs and drop the remaining 75%. +When logs are dropped, the `loki_process_dropped_lines_total` metric is incremented with an additional `reason=logs_sampling` label. ```river stage.sampling { @@ -1211,8 +1117,7 @@ stage.sampling { ### stage.static_labels block -The `stage.static_labels` inner block configures a static_labels processing stage -that adds a static set of labels to incoming log entries. +The `stage.static_labels` inner block configures a static_labels processing stage that adds a static set of labels to incoming log entries. The following arguments are supported: @@ -1232,13 +1137,11 @@ stage.static_labels { ### stage.template block -The `stage.template` inner block configures a transforming stage that allows users to -manipulate the values in the extracted map by using Go's `text/template` -[package](https://pkg.go.dev/text/template) syntax. This stage is primarily -useful for manipulating and standardizing data from previous stages before -setting them as labels in a subsequent stage. Example use cases are replacing -spaces with underscores, converting uppercase strings to lowercase, or hashing -a value. +The `stage.template` inner block configures a transforming stage that allows users to manipulate the values in the extracted map by using Go's `text/template` [package][] syntax. +This stage is primarily useful for manipulating and standardizing data from previous stages before setting them as labels in a subsequent stage. +Example use cases are replacing spaces with underscores, converting uppercase strings to lowercase, or hashing a value. 
+ +[package]: https://pkg.go.dev/text/template The template stage can also create new keys in the extracted map. @@ -1249,18 +1152,21 @@ The following arguments are supported: | `source` | `string` | Name from extracted data to parse. If the key doesn't exist, a new entry is created. | | yes | | `template` | `string` | Go template string to use. | | yes | -The template string can be any valid template that can be used by Go's `text/template`. It supports all functions from the [sprig package](http://masterminds.github.io/sprig/), as well as the following list of custom functions: +The template string can be any valid template that can be used by Go's `text/template`. +It supports all functions from the [sprig package][], as well as the following list of custom functions: + +[sprig package]: http://masterminds.github.io/sprig/ + ``` ToLower, ToUpper, Replace, Trim, TrimLeftTrimRight, TrimPrefix, TrimSuffix, TrimSpace, Hash, Sha2Hash, regexReplaceAll, regexReplaceAllLiteral ``` -More details on each of these functions can be found in the [supported -functions][] section below. +More details on each of these functions can be found in the [supported functions][] section below. [supported functions]: #supported-functions -Assuming no data is present on the extracted map, the following stage simply -adds the `new_key: "hello_world"`key-value pair to the shared map. +Assuming no data is present on the extracted map, the following stage simply adds the `new_key: "hello_world"` key-value pair to the shared map. + ```river stage.template { source = "new_key" @@ -1269,8 +1175,8 @@ stage.template { ``` If the `source` value exists in the extract fields, its value can be referred to as `.Value` in the template. -The next stage takes the current value of `app` from the extracted map, -converts it to lowercase, and adds a suffix to its value: +The next stage takes the current value of `app` from the extracted map, converts it to lowercase, and adds a suffix to its value: + ```river stage.template { source = "app" @@ -1279,8 +1185,8 @@ stage.template { ``` Any previously extracted keys are available for `template` to expand and use. -The next stage takes the current values for `level`, `app` and `module` and -creates a new key named `output_message`: +The next stage takes the current values for `level`, `app` and `module` and creates a new key named `output_message`: + ```river stage.template { source = "output_msg" @@ -1288,8 +1194,8 @@ stage.template { } ``` -A special key named `Entry` can be used to reference the current line; this can -be useful when you need to append/prepend something to the log line, like this snippet: +A special key named `Entry` can be used to reference the current line; this can be useful when you need to append/prepend something to the log line, like this snippet: + ```river stage.template { source = "message" @@ -1304,6 +1210,7 @@ stage.output { In addition to supporting all functions from the [sprig package](http://masterminds.github.io/sprig/), the `template` stage supports the following custom functions. ##### ToLower and ToUpper + `ToLower` and `ToUpper` convert the entire string to lowercase and uppercase, respectively. @@ -1320,6 +1227,7 @@ stage.template { ``` ##### Replace + The `Replace` function syntax is defined as `{{ Replace }}`. 
The function returns a copy of the input string, with instances of the `` @@ -1337,6 +1245,7 @@ stage.template { ``` ##### Trim, TrimLeft, TrimRight, TrimSpace, TrimPrefix, TrimSuffix + * `Trim` returns a slice of the string `s` with all leading and trailing Unicode code points contained in `cutset` removed. * `TrimLeft` and `TrimRight` are the same as Trim except that they @@ -1344,7 +1253,9 @@ stage.template { * `TrimSpace` returns a slice of the string s, with all leading and trailing white space removed, as defined by Unicode. * `TrimPrefix` and `TrimSuffix` trim the supplied prefix or suffix, respectively. + Examples: + ```river stage.template { source = "output" @@ -1361,6 +1272,7 @@ stage.template { ``` ##### Regex + `regexReplaceAll` returns a copy of the input string, replacing matches of the Regexp with the replacement string. Inside the replacement string, `$` characters are interpreted as in Expand functions, so for instance, $1 represents the first captured @@ -1382,7 +1294,9 @@ stage.template { ``` ##### Hash and Sha2Hash -`Hash` returns a `Sha3_256` hash of the string, represented as a hexadecimal number of 64 digits. You can use it to obfuscate sensitive data and PII in the logs. It requires a (fixed) salt value, to add complexity to low input domains (e.g., all possible social security numbers). + +`Hash` returns a `Sha3_256` hash of the string, represented as a hexadecimal number of 64 digits. You can use it to obfuscate sensitive data and PII in the logs. +It requires a (fixed) salt value, to add complexity to low input domains (e.g., all possible social security numbers). `Sha2Hash` returns a `Sha2_256` of the string which is faster and less CPU-intensive than `Hash`, however it is less secure. Examples: @@ -1423,6 +1337,7 @@ stage.tenant { This stage extracts the tenant ID from the `customer_id` field after parsing the log entry as JSON in the shared extracted map: + ```river stage.json { expressions = { "customer_id" = "" } @@ -1433,6 +1348,7 @@ stage.tenant { ``` The final example extracts the tenant ID from a label set by a previous stage: + ```river stage.labels { "namespace" = "k8s_namespace" @@ -1469,8 +1385,8 @@ the stage should attempt to parse as a timestamp. The `format` field defines _how_ that source should be parsed. -First off, the `format` can be set to one of the following shorthand values for -commonly-used forms: +First off, the `format` can be set to one of the following shorthand values for commonly-used forms: + ``` ANSIC: Mon Jan _2 15:04:05 2006 UnixDate: Mon Jan _2 15:04:05 MST 2006 @@ -1486,6 +1402,7 @@ RFC3339Nano: 2006-01-02T15:04:05.999999999-07:00 Additionally, support for common Unix timestamps is supported with the following format values: + ``` Unix: 1562708916 or with fractions 1562708916.000000123 UnixMs: 1562708916414 @@ -1710,7 +1627,7 @@ loki.process "example" { } } ``` -The `json` stage extracts the IP address from the `client_ip` key in the log line. +The `json` stage extracts the IP address from the `client_ip` key in the log line. Then the extracted `ip` value is given as source to geoip stage. The geoip stage performs a lookup on the IP and populates the shared map with the data from the city database results in addition to the custom lookups. Lastly, the custom lookup fields from the shared map are added as labels. ## Exported fields @@ -1730,6 +1647,7 @@ The following fields are exported and can be referenced by other components: `loki.process` does not expose any component-specific debug information. 
## Debug metrics + * `loki_process_dropped_lines_total` (counter): Number of lines dropped as part of a processing stage. * `loki_process_dropped_lines_by_label_total` (counter): Number of lines dropped when `by_label_name` is non-empty in [stage.limit][]. diff --git a/docs/sources/flow/reference/components/loki.relabel.md b/docs/sources/reference/components/loki.relabel.md similarity index 70% rename from docs/sources/flow/reference/components/loki.relabel.md rename to docs/sources/reference/components/loki.relabel.md index 04f548da51..389372467e 100644 --- a/docs/sources/flow/reference/components/loki.relabel.md +++ b/docs/sources/reference/components/loki.relabel.md @@ -1,10 +1,5 @@ --- -aliases: -- /docs/grafana-cloud/agent/flow/reference/components/loki.relabel/ -- /docs/grafana-cloud/monitor-infrastructure/agent/flow/reference/components/loki.relabel/ -- /docs/grafana-cloud/monitor-infrastructure/integrations/agent/flow/reference/components/loki.relabel/ -- /docs/grafana-cloud/send-data/agent/flow/reference/components/loki.relabel/ -canonical: https://grafana.com/docs/agent/latest/flow/reference/components/loki.relabel/ +canonical: https://grafana.com/docs/alloy/latest/reference/components/loki.relabel/ description: Learn about loki.relabel title: loki.relabel --- @@ -27,10 +22,9 @@ calling the function in the `rules` export field. If you're looking for a way to process the log entry contents, take a look at [the `loki.process` component][loki.process] instead. -[loki.process]: {{< relref "./loki.process.md" >}} +[loki.process]: ../loki.process/ -Multiple `loki.relabel` components can be specified by giving them -different labels. +Multiple `loki.relabel` components can be specified by giving them different labels. ## Usage @@ -50,32 +44,32 @@ loki.relabel "LABEL" { The following arguments are supported: -Name | Type | Description | Default | Required ----- | ---- | ----------- | ------- | -------- -`forward_to` | `list(receiver)` | Where to forward log entries after relabeling. | | yes -`max_cache_size` | `int` | The maximum number of elements to hold in the relabeling cache | 10,000 | no +Name | Type | Description | Default | Required +-----------------|------------------|----------------------------------------------------------------|---------|--------- +`forward_to` | `list(receiver)` | Where to forward log entries after relabeling. | | yes +`max_cache_size` | `int` | The maximum number of elements to hold in the relabeling cache | 10,000 | no ## Blocks The following blocks are supported inside the definition of `loki.relabel`: -Hierarchy | Name | Description | Required ---------- | ---- | ----------- | -------- -rule | [rule][] | Relabeling rules to apply to received log entries. | no +Hierarchy | Name | Description | Required +----------|----------|----------------------------------------------------|--------- +rule | [rule][] | Relabeling rules to apply to received log entries. | no [rule]: #rule-block ### rule block -{{< docs/shared lookup="flow/reference/components/rule-block-logs.md" source="agent" version="" >}} +{{< docs/shared lookup="reference/components/rule-block-logs.md" source="alloy" version="" >}} ## Exported fields The following fields are exported and can be referenced by other components: -Name | Type | Description ----- | ---- | ----------- -`receiver` | `receiver` | The input receiver where log lines are sent to be relabeled. 
+Name | Type | Description +-----------|----------------|------------------------------------------------------------- +`receiver` | `receiver` | The input receiver where log lines are sent to be relabeled. `rules` | `RelabelRules` | The currently configured relabeling rules. ## Component health @@ -97,8 +91,7 @@ In those cases, exported fields are kept at their last healthy values. ## Example -The following example creates a `loki.relabel` component that only forwards -entries whose 'level' value is set to 'error'. +The following example creates a `loki.relabel` component that only forwards entries whose 'level' value is set to 'error'. ```river loki.relabel "keep_error_only" { diff --git a/docs/sources/flow/reference/components/loki.rules.kubernetes.md b/docs/sources/reference/components/loki.rules.kubernetes.md similarity index 77% rename from docs/sources/flow/reference/components/loki.rules.kubernetes.md rename to docs/sources/reference/components/loki.rules.kubernetes.md index ffb932df24..3b3f6a28b8 100644 --- a/docs/sources/flow/reference/components/loki.rules.kubernetes.md +++ b/docs/sources/reference/components/loki.rules.kubernetes.md @@ -6,7 +6,7 @@ labels: # loki.rules.kubernetes -{{< docs/shared lookup="flow/stability/experimental.md" source="agent" version="" >}} +{{< docs/shared lookup="stability/experimental.md" source="alloy" version="" >}} `loki.rules.kubernetes` discovers `PrometheusRule` Kubernetes resources and loads them into a Loki instance. @@ -22,7 +22,7 @@ loads them into a Loki instance. This component requires [Role-based access control (RBAC)][] to be set up in Kubernetes for {{< param "PRODUCT_ROOT_NAME" >}} to access it via the Kubernetes REST API. -Role-based access control (RBAC)]: https://kubernetes.io/docs/reference/access-authn-authz/rbac/ +[Role-based access control (RBAC)]: https://kubernetes.io/docs/reference/access-authn-authz/rbac/ {{< /admonition >}} [Kubernetes label selectors]: https://kubernetes.io/docs/concepts/overview/working-with-objects/labels/#label-selectors @@ -41,18 +41,18 @@ loki.rules.kubernetes "LABEL" { `loki.rules.kubernetes` supports the following arguments: -Name | Type | Description | Default | Required --------------------------|------------|----------------------------------------------------------|---------|--------- -`address` | `string` | URL of the Loki ruler. | | yes -`tenant_id` | `string` | Loki tenant ID. | | no -`use_legacy_routes` | `bool` | Whether to use deprecated ruler API endpoints. | false | no -`sync_interval` | `duration` | Amount of time between reconciliations with Loki. | "30s" | no +Name | Type | Description | Default | Required +------------------------|------------|--------------------------------------------------------------------------------------|---------|--------- +`address` | `string` | URL of the Loki ruler. | | yes +`tenant_id` | `string` | Loki tenant ID. | | no +`use_legacy_routes` | `bool` | Whether to use deprecated ruler API endpoints. | false | no +`sync_interval` | `duration` | Amount of time between reconciliations with Loki. | "30s" | no `loki_namespace_prefix` | `string` | Prefix used to differentiate multiple {{< param "PRODUCT_ROOT_NAME" >}} deployments. | "agent" | no -`bearer_token` | `secret` | Bearer token to authenticate with. | | no -`bearer_token_file` | `string` | File containing a bearer token to authenticate with. | | no -`proxy_url` | `string` | HTTP proxy to proxy requests through. 
| | no -`follow_redirects` | `bool` | Whether redirects returned by the server should be followed. | `true` | no -`enable_http2` | `bool` | Whether HTTP2 is supported for requests. | `true` | no +`bearer_token` | `secret` | Bearer token to authenticate with. | | no +`bearer_token_file` | `string` | File containing a bearer token to authenticate with. | | no +`proxy_url` | `string` | HTTP proxy to proxy requests through. | | no +`follow_redirects` | `bool` | Whether redirects returned by the server should be followed. | `true` | no +`enable_http2` | `bool` | Whether HTTP2 is supported for requests. | `true` | no At most, one of the following can be provided: - [`bearer_token` argument](#arguments). @@ -109,9 +109,9 @@ The `label_selector` block describes a Kubernetes label selector for rule or nam The following arguments are supported: -Name | Type | Description | Default | Required ----------------|---------------|---------------------------------------------------|-----------------------------|--------- -`match_labels` | `map(string)` | Label keys and values used to discover resources. | `{}` | yes +Name | Type | Description | Default | Required +---------------|---------------|---------------------------------------------------|---------|--------- +`match_labels` | `map(string)` | Label keys and values used to discover resources. | `{}` | yes When the `match_labels` argument is empty, all resources will be matched. @@ -121,11 +121,11 @@ The `match_expression` block describes a Kubernetes label match expression for r The following arguments are supported: -Name | Type | Description | Default | Required ------------|----------------|----------------------------------------------------|---------|--------- -`key` | `string` | The label name to match against. | | yes -`operator` | `string` | The operator to use when matching. | | yes -`values` | `list(string)` | The values used when matching. | | no +Name | Type | Description | Default | Required +-----------|----------------|------------------------------------|---------|--------- +`key` | `string` | The label name to match against. | | yes +`operator` | `string` | The operator to use when matching. | | yes +`values` | `list(string)` | The values used when matching. | | no The `operator` argument should be one of the following strings: @@ -135,19 +135,19 @@ The `operator` argument should be one of the following strings: ### basic_auth block -{{< docs/shared lookup="flow/reference/components/basic-auth-block.md" source="agent" >}} +{{< docs/shared lookup="reference/components/basic-auth-block.md" source="alloy" version="" >}} ### authorization block -{{< docs/shared lookup="flow/reference/components/authorization-block.md" source="agent" >}} +{{< docs/shared lookup="reference/components/authorization-block.md" source="alloy" version="" >}} ### oauth2 block -{{< docs/shared lookup="flow/reference/components/oauth2-block.md" source="agent" >}} +{{< docs/shared lookup="reference/components/oauth2-block.md" source="alloy" version="" >}} ### tls_config block -{{< docs/shared lookup="flow/reference/components/tls-config-block.md" source="agent" >}} +{{< docs/shared lookup="reference/components/tls-config-block.md" source="alloy" version="" >}} ## Exported fields @@ -176,8 +176,8 @@ actually exist. 
## Debug metrics -Metric Name | Type | Description -----------------------------------------------|-------------|------------------------------------------------------------------------- +Metric Name | Type | Description +---------------------------------------------|-------------|------------------------------------------------------------------------- `loki_rules_config_updates_total` | `counter` | Number of times the configuration has been updated. `loki_rules_events_total` | `counter` | Number of events processed, partitioned by event type. `loki_rules_events_failed_total` | `counter` | Number of events that failed to be processed, partitioned by event type. diff --git a/docs/sources/flow/reference/components/loki.source.api.md b/docs/sources/reference/components/loki.source.api.md similarity index 83% rename from docs/sources/flow/reference/components/loki.source.api.md rename to docs/sources/reference/components/loki.source.api.md index cc508ad976..186a265514 100644 --- a/docs/sources/flow/reference/components/loki.source.api.md +++ b/docs/sources/reference/components/loki.source.api.md @@ -1,10 +1,5 @@ --- -aliases: -- /docs/grafana-cloud/agent/flow/reference/components/loki.source.api/ -- /docs/grafana-cloud/monitor-infrastructure/agent/flow/reference/components/loki.source.api/ -- /docs/grafana-cloud/monitor-infrastructure/integrations/agent/flow/reference/components/loki.source.api/ -- /docs/grafana-cloud/send-data/agent/flow/reference/components/loki.source.api/ -canonical: https://grafana.com/docs/agent/latest/flow/reference/components/loki.source.api/ +canonical: https://grafana.com/docs/alloy/latest/reference/components/loki.source.api/ description: Learn about loki.source.api title: loki.source.api --- @@ -13,9 +8,10 @@ title: loki.source.api `loki.source.api` receives log entries over HTTP and forwards them to other `loki.*` components. -The HTTP API exposed is compatible with [Loki push API][loki-push-api] and the `logproto` format. This means that other [`loki.write`][loki.write] components can be used as a client and send requests to `loki.source.api` which enables using the Agent as a proxy for logs. +The HTTP API exposed is compatible with [Loki push API][loki-push-api] and the `logproto` format. +This means that other [`loki.write`][loki.write] components can be used as a client and send requests to `loki.source.api` which enables using the Agent as a proxy for logs. -[loki.write]: {{< relref "./loki.write.md" >}} +[loki.write]: ../loki.write/ [loki-push-api]: https://grafana.com/docs/loki/latest/api/#push-log-entries-to-loki ## Usage @@ -39,7 +35,7 @@ The component will start HTTP server on the configured port and address with the - `/api/v1/raw` - internally reroutes to `/loki/api/v1/raw` -[promtail-push-api]: /docs/loki/latest/clients/promtail/configuration/#loki_push_api +[promtail-push-api]: https://grafana.com/docs/loki/latest/clients/promtail/configuration/#loki_push_api ## Arguments @@ -55,7 +51,7 @@ Name | Type | Description The `relabel_rules` field can make use of the `rules` export value from a [`loki.relabel`][loki.relabel] component to apply one or more relabeling rules to log entries before they're forwarded to the list of receivers in `forward_to`. 
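For illustration, a minimal sketch of this wiring (the listening port, the drop rule, and the `loki.write.local` component are placeholders, not part of this reference) could look like:

```river
// Holds relabeling rules only; entries are not forwarded through it.
loki.relabel "drop_debug" {
  forward_to = []

  rule {
    source_labels = ["level"]
    regex         = "debug"
    action        = "drop"
  }
}

loki.source.api "listener" {
  http {
    listen_address = "0.0.0.0"
    listen_port    = 9999
  }

  forward_to    = [loki.write.local.receiver]
  relabel_rules = loki.relabel.drop_debug.rules
}
```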
-[loki.relabel]: {{< relref "./loki.relabel.md" >}} +[loki.relabel]: ../loki.relabel/ ## Blocks @@ -69,7 +65,7 @@ Hierarchy | Name | Description | Requ ### http -{{< docs/shared lookup="flow/reference/components/loki-server-http.md" source="agent" version="" >}} +{{< docs/shared lookup="reference/components/loki-server-http.md" source="alloy" version="" >}} ## Exported fields diff --git a/docs/sources/flow/reference/components/loki.source.awsfirehose.md b/docs/sources/reference/components/loki.source.awsfirehose.md similarity index 63% rename from docs/sources/flow/reference/components/loki.source.awsfirehose.md rename to docs/sources/reference/components/loki.source.awsfirehose.md index 2d43d6f82b..3b25e9e2c1 100644 --- a/docs/sources/flow/reference/components/loki.source.awsfirehose.md +++ b/docs/sources/reference/components/loki.source.awsfirehose.md @@ -1,10 +1,5 @@ --- -aliases: -- /docs/grafana-cloud/agent/flow/reference/components/loki.source.awsfirehose/ -- /docs/grafana-cloud/monitor-infrastructure/agent/flow/reference/components/loki.source.awsfirehose/ -- /docs/grafana-cloud/monitor-infrastructure/integrations/agent/flow/reference/components/loki.source.awsfirehose/ -- /docs/grafana-cloud/send-data/agent/flow/reference/components/loki.source.awsfirehose/ -canonical: https://grafana.com/docs/agent/latest/flow/reference/components/loki.source.awsfirehose/ +canonical: https://grafana.com/docs/alloy/latest/reference/components/loki.source.awsfirehose/ description: Learn about loki.source.awsfirehose title: loki.source.awsfirehose --- @@ -36,21 +31,21 @@ the raw records to Loki. The decoding process goes as follows: The component exposes some internal labels, available for relabeling. The following tables describes internal labels available in records coming from any source. -| Name | Description | Example | -|-----------------------------|-----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------|--------------------------------------------------------------------------| -| `__aws_firehose_request_id` | Firehose request ID. | `a1af4300-6c09-4916-ba8f-12f336176246` | -| `__aws_firehose_source_arn` | Firehose delivery stream ARN. | `arn:aws:firehose:us-east-2:123:deliverystream/aws_firehose_test_stream` | +| Name | Description | Example | +|-----------------------------|-------------------------------|--------------------------------------------------------------------------| +| `__aws_firehose_request_id` | Firehose request ID. | `a1af4300-6c09-4916-ba8f-12f336176246` | +| `__aws_firehose_source_arn` | Firehose delivery stream ARN. | `arn:aws:firehose:us-east-2:123:deliverystream/aws_firehose_test_stream` | If the source of the Firehose record is CloudWatch logs, the request is further decoded and enriched with even more labels, exposed as follows: -| Name | Description | Example | -|-----------------------------|-----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------|--------------------------------------------------------------------------| -| `__aws_owner` | The AWS Account ID of the originating log data. | `111111111111` | -| `__aws_cw_log_group` | The log group name of the originating log data. | `CloudTrail/logs` | -| `__aws_cw_log_stream` | The log stream name of the originating log data. 
| `111111111111_CloudTrail/logs_us-east-1` | -| `__aws_cw_matched_filters` | The list of subscription filter names that match the originating log data. The list is encoded as a comma-separated list. | `Destination,Destination2` | -| `__aws_cw_msg_type` | Data messages will use the `DATA_MESSAGE` type. Sometimes CloudWatch Logs may emit Kinesis Data Streams records with a `CONTROL_MESSAGE` type, mainly for checking if the destination is reachable. | `DATA_MESSAGE` | +| Name | Description | Example | +|----------------------------|---------------------------------------------------------------------------------------------------------------------------|------------------------------------------| +| `__aws_owner` | The AWS Account ID of the originating log data. | `111111111111` | +| `__aws_cw_log_group` | The log group name of the originating log data. | `CloudTrail/logs` | +| `__aws_cw_log_stream` | The log stream name of the originating log data. | `111111111111_CloudTrail/logs_us-east-1` | +| `__aws_cw_matched_filters` | The list of subscription filter names that match the originating log data. The list is encoded as a comma-separated list. | `Destination,Destination2` | +| `__aws_cw_msg_type` | Data messages will use the `DATA_MESSAGE` type. Sometimes CloudWatch Logs may emit Kinesis Data Streams records with a `CONTROL_MESSAGE` type, mainly for checking if the destination is reachable. | `DATA_MESSAGE` | See [Examples](#example) for a full example configuration showing how to enrich each log entry with these labels. @@ -68,8 +63,7 @@ loki.source.awsfirehose "LABEL" { The component will start an HTTP server on the configured port and address with the following endpoints: -- `/awsfirehose/api/v1/push` - accepting `POST` requests compatible - with [AWS Firehose HTTP Specifications](https://docs.aws.amazon.com/firehose/latest/dev/httpdeliveryrequestresponse.html). +- `/awsfirehose/api/v1/push` - accepting `POST` requests compatible with [AWS Firehose HTTP Specifications](https://docs.aws.amazon.com/firehose/latest/dev/httpdeliveryrequestresponse.html). ## Arguments @@ -86,28 +80,27 @@ The `relabel_rules` field can make use of the `rules` export value from a [`loki.relabel`][loki.relabel] component to apply one or more relabeling rules to log entries before they're forwarded to the list of receivers in `forward_to`. -[loki.relabel]: {{< relref "./loki.relabel.md" >}} +[loki.relabel]: ../loki.relabel/ ## Blocks The following blocks are supported inside the definition of `loki.source.awsfirehose`: | Hierarchy | Name | Description | Required | - |-----------|----------|----------------------------------------------------|----------| +|-----------|----------|----------------------------------------------------|----------| | `http` | [http][] | Configures the HTTP server that receives requests. | no | | `grpc` | [grpc][] | Configures the gRPC server that receives requests. 
| no | [http]: #http - [grpc]: #grpc ### http -{{< docs/shared lookup="flow/reference/components/loki-server-http.md" source="agent" version="" >}} +{{< docs/shared lookup="reference/components/loki-server-http.md" source="alloy" version="" >}} ### grpc -{{< docs/shared lookup="flow/reference/components/loki-server-grpc.md" source="agent" version="" >}} +{{< docs/shared lookup="reference/components/loki-server-grpc.md" source="alloy" version="" >}} ## Exported fields @@ -119,7 +112,8 @@ The following blocks are supported inside the definition of `loki.source.awsfire ## Debug metrics -The following are some of the metrics that are exposed when this component is used. +The following are some of the metrics that are exposed when this component is used. + {{< admonition type="note" >}} The metrics include labels such as `status_code` where relevant, which you can use to measure request success rates. {{< /admonition >}} diff --git a/docs/sources/reference/components/loki.source.azure_event_hubs.md b/docs/sources/reference/components/loki.source.azure_event_hubs.md new file mode 100644 index 0000000000..667ebba912 --- /dev/null +++ b/docs/sources/reference/components/loki.source.azure_event_hubs.md @@ -0,0 +1,149 @@ +--- + +canonical: https://grafana.com/docs/alloy/latest/reference/components/loki.source.azure_event_hubs/ +description: Learn about loki.source.azure_event_hubs +title: loki.source.azure_event_hubs +--- + +# loki.source.azure_event_hubs + +`loki.source.azure_event_hubs` receives Azure Event Hubs messages by making use of an Apache Kafka +endpoint on Event Hubs. For more information, see +the [Azure Event Hubs documentation](https://learn.microsoft.com/en-us/azure/event-hubs/azure-event-hubs-kafka-overview). + +To learn more about streaming Azure logs to an Azure Event Hubs, refer to +Microsoft's tutorial on how to [Stream Azure Active Directory logs to an Azure event hub](https://learn.microsoft.com/en-us/azure/active-directory/reports-monitoring/tutorial-azure-monitor-stream-logs-to-event-hub). + +Note that an Apache Kafka endpoint is not available within the Basic pricing plan. For more information, see +the [Event Hubs pricing page](https://azure.microsoft.com/en-us/pricing/details/event-hubs/). + +Multiple `loki.source.azure_event_hubs` components can be specified by giving them +different labels. + +## Usage + +```river +loki.source.azure_event_hubs "LABEL" { + fully_qualified_namespace = "HOST:PORT" + event_hubs = EVENT_HUB_LIST + forward_to = RECEIVER_LIST + + authentication { + mechanism = "AUTHENTICATION_MECHANISM" + } +} +``` + +## Arguments + +`loki.source.azure_event_hubs` supports the following arguments: + +Name | Type | Description | Default | Required +----------------------------|----------------------|--------------------------------------------------------------------|----------------------------------|--------- +`fully_qualified_namespace` | `string` | Event hub namespace. | | yes +`event_hubs` | `list(string)` | Event Hubs to consume. | | yes +`group_id` | `string` | The Kafka consumer group id. | `"loki.source.azure_event_hubs"` | no +`assignor` | `string` | The consumer group rebalancing strategy to use. | `"range"` | no +`use_incoming_timestamp` | `bool` | Whether or not to use the timestamp received from Azure Event Hub. | `false` | no +`labels` | `map(string)` | The labels to associate with each received event. | `{}` | no +`forward_to` | `list(LogsReceiver)` | List of receivers to send log entries to. 
| | yes
+`relabel_rules` | `RelabelRules` | Relabeling rules to apply on log entries. | `{}` | no
+`disallow_custom_messages` | `bool` | Whether to ignore messages that don't match the [schema](https://learn.microsoft.com/en-us/azure/azure-monitor/essentials/resource-logs-schema) for Azure resource logs. | `false` | no
+
+The `fully_qualified_namespace` argument must refer to a full `HOST:PORT` that points to your event hub, such as `NAMESPACE.servicebus.windows.net:9093`.
+The `assignor` argument must be set to one of `"range"`, `"roundrobin"`, or `"sticky"`.
+
+The `relabel_rules` field can make use of the `rules` export value from a
+`loki.relabel` component to apply one or more relabeling rules to log entries
+before they're forwarded to the list of receivers in `forward_to`.
+
+### Labels
+
+The `labels` map is applied to every message that the component reads.
+
+The following internal labels prefixed with `__` are available but are discarded if not relabeled:
+
+- `__meta_kafka_message_key`
+- `__meta_kafka_topic`
+- `__meta_kafka_partition`
+- `__meta_kafka_member_id`
+- `__meta_kafka_group_id`
+- `__azure_event_hubs_category`
+
+## Blocks
+
+The following blocks are supported inside the definition of `loki.source.azure_event_hubs`:
+
+Hierarchy | Name | Description | Required
+---------------|------------------|----------------------------------------------------|---------
+authentication | [authentication] | Authentication configuration with Azure Event Hub. | yes
+
+[authentication]: #authentication-block
+
+### authentication block
+
+The `authentication` block defines the authentication method when communicating with Azure Event Hub.
+
+Name | Type | Description | Default | Required
+--------------------|----------------|---------------------------------------------------------------------------|---------|---------
+`mechanism` | `string` | Authentication mechanism. | | yes
+`connection_string` | `string` | Event Hubs ConnectionString for authentication on Azure Cloud. | | no
+`scopes` | `list(string)` | Access token scopes. Default is `fully_qualified_namespace` without port. | | no
+
+`mechanism` supports the values `"connection_string"` and `"oauth"`.
+If `"connection_string"` is used, you must set the `connection_string` attribute.
+If `"oauth"` is used, you must configure one of the [supported credential types](https://github.com/Azure/azure-sdk-for-go/blob/main/sdk/azidentity/README.md#credential-types) via environment variables or the Azure CLI.
+
+## Exported fields
+
+`loki.source.azure_event_hubs` does not export any fields.
+
+## Component health
+
+`loki.source.azure_event_hubs` is only reported as unhealthy if given an invalid configuration.
+
+## Debug information
+
+`loki.source.azure_event_hubs` does not expose additional debug information.
+
+## Example
+
+This example consumes messages from Azure Event Hub and uses OAuth to authenticate itself.
+ +```river +loki.source.azure_event_hubs "example" { + fully_qualified_namespace = "my-ns.servicebus.windows.net:9093" + event_hubs = ["gw-logs"] + forward_to = [loki.write.example.receiver] + + authentication { + mechanism = "oauth" + } +} + +loki.write "example" { + endpoint { + url = "loki:3100/api/v1/push" + } +} +``` + + + +## Compatible components + +`loki.source.azure_event_hubs` can accept arguments from the following components: + +- Components that export [Loki `LogsReceiver`](../../compatibility/#loki-logsreceiver-exporters) + + +{{< admonition type="note" >}} +Connecting some components may not be sensible or components may require further configuration to make the connection work correctly. +Refer to the linked documentation for more details. +{{< /admonition >}} + + diff --git a/docs/sources/flow/reference/components/loki.source.cloudflare.md b/docs/sources/reference/components/loki.source.cloudflare.md similarity index 82% rename from docs/sources/flow/reference/components/loki.source.cloudflare.md rename to docs/sources/reference/components/loki.source.cloudflare.md index dbbd2e57b1..ab556885f6 100644 --- a/docs/sources/flow/reference/components/loki.source.cloudflare.md +++ b/docs/sources/reference/components/loki.source.cloudflare.md @@ -1,10 +1,6 @@ --- -aliases: -- /docs/grafana-cloud/agent/flow/reference/components/loki.source.cloudflare/ -- /docs/grafana-cloud/monitor-infrastructure/agent/flow/reference/components/loki.source.cloudflare/ -- /docs/grafana-cloud/monitor-infrastructure/integrations/agent/flow/reference/components/loki.source.cloudflare/ -- /docs/grafana-cloud/send-data/agent/flow/reference/components/loki.source.cloudflare/ -canonical: https://grafana.com/docs/agent/latest/flow/reference/components/loki.source.cloudflare/ + +canonical: https://grafana.com/docs/alloy/latest/reference/components/loki.source.cloudflare/ description: Learn about loki.source.cloudflare title: loki.source.cloudflare --- @@ -36,16 +32,16 @@ loki.source.cloudflare "LABEL" { `loki.source.cloudflare` supports the following arguments: -Name | Type | Description | Default | Required ---------------- | -------------------- | -------------------- | ------- | -------- -`forward_to` | `list(LogsReceiver)` | List of receivers to send log entries to. | | yes -`api_token` | `string` | The API token to authenticate with. | | yes -`zone_id` | `string` | The Cloudflare zone ID to use. | | yes -`labels` | `map(string)` | The labels to associate with incoming log entries. | `{}` | no -`workers` | `int` | The number of workers to use for parsing logs. | `3` | no -`pull_range` | `duration` | The timeframe to fetch for each pull request. | `"1m"` | no -`fields_type` | `string` | The set of fields to fetch for log entries. | `"default"` | no -`additional_fields` | `list(string)` | The additional list of fields to supplement those provided via `fields_type`. | | no +Name | Type | Description | Default | Required +--------------------|----------------------|-------------------------------------------------------------------------------|-------------|--------- +`forward_to` | `list(LogsReceiver)` | List of receivers to send log entries to. | | yes +`api_token` | `string` | The API token to authenticate with. | | yes +`zone_id` | `string` | The Cloudflare zone ID to use. | | yes +`labels` | `map(string)` | The labels to associate with incoming log entries. | `{}` | no +`workers` | `int` | The number of workers to use for parsing logs. 
| `3` | no +`pull_range` | `duration` | The timeframe to fetch for each pull request. | `"1m"` | no +`fields_type` | `string` | The set of fields to fetch for log entries. | `"default"` | no +`additional_fields` | `list(string)` | The additional list of fields to supplement those provided via `fields_type`. | | no By default `loki.source.cloudflare` fetches logs with the `default` set of @@ -74,6 +70,7 @@ plus any extra fields provided via `additional_fields` argument. ``` "BotScore", "BotScoreSrc", "BotTags", "ClientRequestBytes", "ClientSrcPort", "ClientXRequestedWith", "CacheTieredFill", "EdgeResponseCompressionRatio", "EdgeServerIP", "FirewallMatchesSources", "FirewallMatchesActions", "FirewallMatchesRuleIDs", "OriginResponseBytes", "OriginResponseTime", "ClientDeviceType", "WAFFlags", "WAFMatchedVar", "EdgeColoID", "RequestHeaders", "ResponseHeaders", "ClientRequestSource"` ``` + plus any extra fields provided via `additional_fields` argument (this is still relevant in this case if new fields are made available via Cloudflare API but are not yet included in `all`). * `custom` includes only the fields defined in `additional_fields`. @@ -95,6 +92,7 @@ The last timestamp fetched by the component is recorded in the All incoming Cloudflare log entries are in JSON format. You can make use of the `loki.process` component and a JSON processing stage to extract more labels or change the log line format. A sample log looks like this: + ```json { "CacheCacheStatus": "miss", @@ -165,7 +163,6 @@ change the log line format. A sample log looks like this: } ``` - ## Exported fields `loki.source.cloudflare` does not export any fields. @@ -181,8 +178,7 @@ configuration. * Whether the target is ready and reading logs from the API. * The Cloudflare zone ID. * The last error reported, if any. -* The stored positions file entry, as the combination of zone_id, labels and - last fetched timestamp. +* The stored positions file entry, as the combination of zone_id, labels and last fetched timestamp. * The last timestamp fetched. * The set of fields being fetched. diff --git a/docs/sources/flow/reference/components/loki.source.docker.md b/docs/sources/reference/components/loki.source.docker.md similarity index 55% rename from docs/sources/flow/reference/components/loki.source.docker.md rename to docs/sources/reference/components/loki.source.docker.md index 09b88a7436..c8f16e2d90 100644 --- a/docs/sources/flow/reference/components/loki.source.docker.md +++ b/docs/sources/reference/components/loki.source.docker.md @@ -1,23 +1,14 @@ --- -aliases: -- /docs/agent/latest/flow/reference/components/loki.source.docker/ -- /docs/grafana-cloud/agent/flow/reference/components/loki.source.docker/ -- /docs/grafana-cloud/monitor-infrastructure/agent/flow/reference/components/loki.source.docker/ -- /docs/grafana-cloud/monitor-infrastructure/integrations/agent/flow/reference/components/loki.source.docker/ -- /docs/grafana-cloud/send-data/agent/flow/reference/components/loki.source.docker/ -canonical: https://grafana.com/docs/agent/latest/flow/reference/components/loki.source.docker/ +canonical: https://grafana.com/docs/alloy/latest/reference/components/loki.source.docker/ description: Learn about loki.source.docker title: loki.source.docker --- # loki.source.docker -`loki.source.docker` reads log entries from Docker containers and forwards them -to other `loki.*` components. Each component can read from a single Docker -daemon. 
+`loki.source.docker` reads log entries from Docker containers and forwards them to other `loki.*` components. Each component can read from a single Docker daemon. -Multiple `loki.source.docker` components can be specified by giving them -different labels. +Multiple `loki.source.docker` components can be specified by giving them different labels. ## Usage @@ -30,32 +21,31 @@ loki.source.docker "LABEL" { ``` ## Arguments -The component starts a new reader for each of the given `targets` and fans out -log entries to the list of receivers passed in `forward_to`. +The component starts a new reader for each of the given `targets` and fans out log entries to the list of receivers passed in `forward_to`. `loki.source.docker` supports the following arguments: -Name | Type | Description | Default | Required ---------------- | -------------------- | -------------------- | ------- | -------- -`host` | `string` | Address of the Docker daemon. | | yes -`targets` | `list(map(string))` | List of containers to read logs from. | | yes -`forward_to` | `list(LogsReceiver)` | List of receivers to send log entries to. | | yes -`labels` | `map(string)` | The default set of labels to apply on entries. | `"{}"` | no -`relabel_rules` | `RelabelRules` | Relabeling rules to apply on log entries. | `"{}"` | no -`refresh_interval` | `duration` | The refresh interval to use when connecting to the Docker daemon over HTTP(S). | `"60s"` | no +Name | Type | Description | Default | Required +-------------------|----------------------|--------------------------------------------------------------------------------|---------|--------- +`host` | `string` | Address of the Docker daemon. | | yes +`targets` | `list(map(string))` | List of containers to read logs from. | | yes +`forward_to` | `list(LogsReceiver)` | List of receivers to send log entries to. | | yes +`labels` | `map(string)` | The default set of labels to apply on entries. | `"{}"` | no +`relabel_rules` | `RelabelRules` | Relabeling rules to apply on log entries. | `"{}"` | no +`refresh_interval` | `duration` | The refresh interval to use when connecting to the Docker daemon over HTTP(S). | `"60s"` | no ## Blocks The following blocks are supported inside the definition of `loki.source.docker`: -Hierarchy | Block | Description | Required ---------- | ----- | ----------- | -------- -client | [client][] | HTTP client settings when connecting to the endpoint. | no -client > basic_auth | [basic_auth][] | Configure basic_auth for authenticating to the endpoint. | no -client > authorization | [authorization][] | Configure generic authorization to the endpoint. | no -client > oauth2 | [oauth2][] | Configure OAuth2 for authenticating to the endpoint. | no -client > oauth2 > tls_config | [tls_config][] | Configure TLS settings for connecting to the endpoint. | no -client > tls_config | [tls_config][] | Configure TLS settings for connecting to the endpoint. | no +Hierarchy | Block | Description | Required +-----------------------------|-------------------|----------------------------------------------------------|--------- +client | [client][] | HTTP client settings when connecting to the endpoint. | no +client > basic_auth | [basic_auth][] | Configure basic_auth for authenticating to the endpoint. | no +client > authorization | [authorization][] | Configure generic authorization to the endpoint. | no +client > oauth2 | [oauth2][] | Configure OAuth2 for authenticating to the endpoint. 
| no
+client > oauth2 > tls_config | [tls_config][] | Configure TLS settings for connecting to the endpoint. | no
+client > tls_config | [tls_config][] | Configure TLS settings for connecting to the endpoint. | no

The `>` symbol indicates deeper levels of nesting. For example, `client > basic_auth` refers to a `basic_auth` block defined inside a `client` block.
@@ -71,38 +61,33 @@ or HTTPS and has no effect when connecting via a `unix:///` socket

### client block

-The `client` block configures settings used to connect to HTTP(S) Docker
-daemons.
+The `client` block configures settings used to connect to HTTP(S) Docker daemons.

-{{< docs/shared lookup="flow/reference/components/http-client-config-block.md" source="agent" version="" >}}
+{{< docs/shared lookup="reference/components/http-client-config-block.md" source="alloy" version="" >}}

### basic_auth block

-The `basic_auth` block configures basic authentication for HTTP(S) Docker
-daemons.
+The `basic_auth` block configures basic authentication for HTTP(S) Docker daemons.

-{{< docs/shared lookup="flow/reference/components/basic-auth-block.md" source="agent" version="" >}}
+{{< docs/shared lookup="reference/components/basic-auth-block.md" source="alloy" version="" >}}

### authorization block

-The `authorization` block configures custom authorization to use for the Docker
-daemon.
+The `authorization` block configures custom authorization to use for the Docker daemon.

-{{< docs/shared lookup="flow/reference/components/authorization-block.md" source="agent" version="" >}}
+{{< docs/shared lookup="reference/components/authorization-block.md" source="alloy" version="" >}}

### oauth2 block

-The `oauth2` block configures OAuth2 authorization to use for the Docker
-daemon.
+The `oauth2` block configures OAuth2 authorization to use for the Docker daemon.

-{{< docs/shared lookup="flow/reference/components/oauth2-block.md" source="agent" version="" >}}
+{{< docs/shared lookup="reference/components/oauth2-block.md" source="alloy" version="" >}}

### tls_config block

-The `tls_config` block configures TLS settings for connecting to HTTPS Docker
-daemons.
+The `tls_config` block configures TLS settings for connecting to HTTPS Docker daemons.

-{{< docs/shared lookup="flow/reference/components/tls-config-block.md" source="agent" version="" >}}
+{{< docs/shared lookup="reference/components/tls-config-block.md" source="alloy" version="" >}}

## Exported fields
@@ -135,7 +120,7 @@ If the `targets` argument contains multiple entries with the same container ID
(for example as a result of `discovery.docker` picking up multiple exposed
ports or networks), `loki.source.docker` will deduplicate them, and only keep
the first instance of each container ID, based on the
-`__meta_docker_container_id` label. As such, the Docker daemon is queried
+`__meta_docker_container_id` label. As such, the Docker daemon is queried
for each container ID only once, and only one target will be available in
the component's debug info.
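As an illustration of the `client` and `tls_config` blocks together, a sketch along these lines connects to a TLS-protected remote daemon. The daemon address, the certificate paths, and the `discovery.docker.remote` and `loki.write.local` components are assumptions for the example, not defaults:

```river
loki.source.docker "remote" {
  // A Docker daemon exposed over TLS on its conventional port.
  host       = "tcp://docker-host:2376"
  targets    = discovery.docker.remote.targets
  forward_to = [loki.write.local.receiver]

  client {
    tls_config {
      ca_file   = "/etc/docker/certs/ca.pem"
      cert_file = "/etc/docker/certs/cert.pem"
      key_file  = "/etc/docker/certs/key.pem"
    }
  }
}
```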
@@ -151,7 +136,7 @@ discovery.docker "linux" { loki.source.docker "default" { host = "unix:///var/run/docker.sock" - targets = discovery.docker.linux.targets + targets = discovery.docker.linux.targets forward_to = [loki.write.local.receiver] } diff --git a/docs/sources/flow/reference/components/loki.source.file.md b/docs/sources/reference/components/loki.source.file.md similarity index 87% rename from docs/sources/flow/reference/components/loki.source.file.md rename to docs/sources/reference/components/loki.source.file.md index 683b66cabf..aba7803e26 100644 --- a/docs/sources/flow/reference/components/loki.source.file.md +++ b/docs/sources/reference/components/loki.source.file.md @@ -1,24 +1,19 @@ --- -aliases: -- /docs/grafana-cloud/agent/flow/reference/components/loki.source.file/ -- /docs/grafana-cloud/monitor-infrastructure/agent/flow/reference/components/loki.source.file/ -- /docs/grafana-cloud/monitor-infrastructure/integrations/agent/flow/reference/components/loki.source.file/ -- /docs/grafana-cloud/send-data/agent/flow/reference/components/loki.source.file/ -canonical: https://grafana.com/docs/agent/latest/flow/reference/components/loki.source.file/ +canonical: https://grafana.com/docs/alloy/latest/reference/components/loki.source.file/ description: Learn about loki.source.file title: loki.source.file --- # loki.source.file -`loki.source.file` reads log entries from files and forwards them to other -`loki.*` components. +`loki.source.file` reads log entries from files and forwards them to other `loki.*` components. Multiple `loki.source.file` components can be specified by giving them different labels. {{< admonition type="note" >}} -`loki.source.file` does not handle file discovery. You can use `local.file_match` for file discovery. Refer to the [File Globbing](#file-globbing) example for more information. +`loki.source.file` does not handle file discovery. You can use `local.file_match` for file discovery. +Refer to the [File Globbing](#file-globbing) example for more information. {{< /admonition >}} ## Usage @@ -32,8 +27,7 @@ loki.source.file "LABEL" { ## Arguments -The component starts a new reader for each of the given `targets` and fans out -log entries to the list of receivers passed in `forward_to`. +The component starts a new reader for each of the given `targets` and fans out log entries to the list of receivers passed in `forward_to`. `loki.source.file` supports the following arguments: @@ -54,10 +48,10 @@ When set to true, only new logs will be read, ignoring the existing ones. The following blocks are supported inside the definition of `loki.source.file`: -| Hierarchy | Name | Description | Required | -| -------------- | ------------------ | ----------------------------------------------------------------- | -------- | -| decompression | [decompression][] | Configure reading logs from compressed files. | no | -| file_watch | [file_watch][] | Configure how often files should be polled from disk for changes. | no | +| Hierarchy | Name | Description | Required | +|---------------|-------------------|-------------------------------------------------------------------|----------| +| decompression | [decompression][] | Configure reading logs from compressed files. | no | +| file_watch | [file_watch][] | Configure how often files should be polled from disk for changes. | no | [decompression]: #decompression-block [file_watch]: #file_watch-block @@ -130,8 +124,7 @@ configuration. 
If the decompression feature is deactivated, the component will continuously monitor and 'tail' the files.
In this mode, upon reaching the end of a file, the component remains active, awaiting and reading new entries in real-time as they are appended.

-Each element in the list of `targets` as a set of key-value pairs called
-_labels_.
+Each element in the list of `targets` is a set of key-value pairs called _labels_.
The set of targets can either be _static_, or dynamically provided periodically
by a service discovery component. The special label `__path__` _must always_ be present and must point to the absolute path of the file to read from.
@@ -154,7 +147,7 @@ If a file is removed from the `targets` list, its positions file entry is also
removed. When it's added back on, `loki.source.file` starts reading it from
the beginning.

-[cmd-args]: {{< relref "../cli/run.md" >}}
+[cmd-args]: ../../cli/run/

## Examples

diff --git a/docs/sources/flow/reference/components/loki.source.gcplog.md b/docs/sources/reference/components/loki.source.gcplog.md
similarity index 84%
rename from docs/sources/flow/reference/components/loki.source.gcplog.md
rename to docs/sources/reference/components/loki.source.gcplog.md
index d57cf28cc0..77c7ebb8c3 100644
--- a/docs/sources/flow/reference/components/loki.source.gcplog.md
+++ b/docs/sources/reference/components/loki.source.gcplog.md
@@ -1,10 +1,5 @@
 ---
-aliases:
-- /docs/grafana-cloud/agent/flow/reference/components/loki.source.gcplog/
-- /docs/grafana-cloud/monitor-infrastructure/agent/flow/reference/components/loki.source.gcplog/
-- /docs/grafana-cloud/monitor-infrastructure/integrations/agent/flow/reference/components/loki.source.gcplog/
-- /docs/grafana-cloud/send-data/agent/flow/reference/components/loki.source.gcplog/
-canonical: https://grafana.com/docs/agent/latest/flow/reference/components/loki.source.gcplog/
+canonical: https://grafana.com/docs/alloy/latest/reference/components/loki.source.gcplog/
 description: Learn about loki.source.gcplog
 title: loki.source.gcplog
---
@@ -18,8 +13,7 @@ load balancers, or Kubernetes clusters running on GCP by making use of Pub/Sub

The component uses either the 'push' or 'pull' strategy to retrieve log entries and forward them to the list of receivers in `forward_to`.
-Multiple `loki.source.gcplog` components can be specified by giving them
-different labels.
+Multiple `loki.source.gcplog` components can be specified by giving them different labels.

## Usage
@@ -99,30 +93,28 @@ push requests from GCP's Pub/Sub servers.
The following arguments can be used to configure the `push` block. Any omitted fields take their default values.

-| Name | Type | Description | Default | Required |
-|-----------------------------|---------------|------------------------------------------------------------------------------------|---------|----------|
-| `graceful_shutdown_timeout` | `duration` | Timeout for servers graceful shutdown. If configured, should be greater than zero. | "30s" | no |
-| `push_timeout` | `duration` | Sets a maximum processing time for each incoming GCP log entry. | `"0s"` | no |
-| `labels` | `map(string)` | Additional labels to associate with incoming entries. | `"{}"` | no |
-| `use_incoming_timestamp` | `bool` | Whether to use the incoming entry timestamp. | `false` | no |
-| `use_full_line` | `bool` | Send the full line from Cloud Logging even if `textPayload` is available.
By default, if `textPayload` is present in the line, then it's used as log line | `false` | no | +| Name | Type | Description | Default | Required | +|-----------------------------|---------------|------------------------------------------------------------------------------------|---------|----------| +| `graceful_shutdown_timeout` | `duration` | Timeout for servers graceful shutdown. If configured, should be greater than zero. | "30s" | no | +| `push_timeout` | `duration` | Sets a maximum processing time for each incoming GCP log entry. | `"0s"` | no | +| `labels` | `map(string)` | Additional labels to associate with incoming entries. | `"{}"` | no | +| `use_incoming_timestamp` | `bool` | Whether to use the incoming entry timestamp. | `false` | no | +| `use_full_line` | `bool` | Send the full line from Cloud Logging even if `textPayload` is available. By default, if `textPayload` is present in the line, then it's used as log line | `false` | no | -The server listens for POST requests from GCP's Push subscriptions on -`HOST:PORT/gcp/api/v1/push`. +The server listens for POST requests from GCP's Push subscriptions on `HOST:PORT/gcp/api/v1/push`. By default, for both strategies the component assigns the log entry timestamp -as the time it was processed, except if `use_incoming_timestamp` is set to -true. +as the time it was processed, except if `use_incoming_timestamp` is set to true. The `labels` map is applied to every entry that passes through the component. ### http -{{< docs/shared lookup="flow/reference/components/loki-server-http.md" source="agent" version="" >}} +{{< docs/shared lookup="reference/components/loki-server-http.md" source="alloy" version="" >}} ### grpc -{{< docs/shared lookup="flow/reference/components/loki-server-grpc.md" source="agent" version="" >}} +{{< docs/shared lookup="reference/components/loki-server-grpc.md" source="alloy" version="" >}} ## Exported fields diff --git a/docs/sources/flow/reference/components/loki.source.gelf.md b/docs/sources/reference/components/loki.source.gelf.md similarity index 69% rename from docs/sources/flow/reference/components/loki.source.gelf.md rename to docs/sources/reference/components/loki.source.gelf.md index eec3ef5c9a..0d3c508c51 100644 --- a/docs/sources/flow/reference/components/loki.source.gelf.md +++ b/docs/sources/reference/components/loki.source.gelf.md @@ -1,10 +1,5 @@ --- -aliases: -- /docs/grafana-cloud/agent/flow/reference/components/loki.source.gelf/ -- /docs/grafana-cloud/monitor-infrastructure/agent/flow/reference/components/loki.source.gelf/ -- /docs/grafana-cloud/monitor-infrastructure/integrations/agent/flow/reference/components/loki.source.gelf/ -- /docs/grafana-cloud/send-data/agent/flow/reference/components/loki.source.gelf/ -canonical: https://grafana.com/docs/agent/latest/flow/reference/components/loki.source.gelf/ +canonical: https://grafana.com/docs/alloy/latest/reference/components/loki.source.gelf/ description: Learn about loki.source.gelf title: loki.source.gelf --- @@ -31,11 +26,11 @@ log entries to the list of receivers passed in `forward_to`. `loki.source.gelf` supports the following arguments: -Name | Type | Description | Default | Required ------------- |----------------------|--------------------------------------------------------------------------------|----------------------------| -------- -`listen_address` | `string` | UDP address and port to listen for Graylog messages. 
| `0.0.0.0:12201` | no -`use_incoming_timestamp` | `bool` | When false, assigns the current timestamp to the log when it was processed | `false` | no -`relabel_rules` | `RelabelRules` | Relabeling rules to apply on log entries. | "{}" | no +Name | Type | Description | Default | Required +-------------------------|----------------|----------------------------------------------------------------------------|-----------------|--------- +`listen_address` | `string` | UDP address and port to listen for Graylog messages. | `0.0.0.0:12201` | no +`use_incoming_timestamp` | `bool` | When false, assigns the current timestamp to the log when it was processed | `false` | no +`relabel_rules` | `RelabelRules` | Relabeling rules to apply on log entries. | "{}" | no > **NOTE**: GELF logs can be sent uncompressed or compressed with GZIP or ZLIB. @@ -56,7 +51,7 @@ All labels starting with `__` are removed prior to forwarding log entries. To keep these labels, relabel them using a [loki.relabel][] component and pass its `rules` export to the `relabel_rules` argument. -[loki.relabel]: {{< relref "./loki.relabel.md" >}} +[loki.relabel]: ../loki.relabel/ ## Component health diff --git a/docs/sources/flow/reference/components/loki.source.heroku.md b/docs/sources/reference/components/loki.source.heroku.md similarity index 84% rename from docs/sources/flow/reference/components/loki.source.heroku.md rename to docs/sources/reference/components/loki.source.heroku.md index 62aaff4db7..888f2ab99e 100644 --- a/docs/sources/flow/reference/components/loki.source.heroku.md +++ b/docs/sources/reference/components/loki.source.heroku.md @@ -1,10 +1,5 @@ --- -aliases: -- /docs/grafana-cloud/agent/flow/reference/components/loki.source.heroku/ -- /docs/grafana-cloud/monitor-infrastructure/agent/flow/reference/components/loki.source.heroku/ -- /docs/grafana-cloud/monitor-infrastructure/integrations/agent/flow/reference/components/loki.source.heroku/ -- /docs/grafana-cloud/send-data/agent/flow/reference/components/loki.source.heroku/ -canonical: https://grafana.com/docs/agent/latest/flow/reference/components/loki.source.heroku/ +canonical: https://grafana.com/docs/alloy/latest/reference/components/loki.source.heroku/ description: Learn about loki.source.heroku title: loki.source.heroku --- @@ -17,7 +12,8 @@ and forwards them to other `loki.*` components. The component starts a new heroku listener for the given `listener` block and fans out incoming entries to the list of receivers in `forward_to`. -Before using `loki.source.heroku`, Heroku should be configured with the URL where the Agent will be listening. Follow the steps in [Heroku HTTPS Drain docs](https://devcenter.heroku.com/articles/log-drains#https-drains) for using the Heroku CLI with a command like the following: +Before using `loki.source.heroku`, Heroku should be configured with the URL where the Agent will be listening. 
+Follow the steps in [Heroku HTTPS Drain docs](https://devcenter.heroku.com/articles/log-drains#https-drains) for using the Heroku CLI with a command like the following: ```shell heroku drains:add [http|https]://HOSTNAME:PORT/heroku/api/v1/drain -a HEROKU_APP_NAME @@ -68,11 +64,11 @@ Hierarchy | Name | Description | Requ ### http -{{< docs/shared lookup="flow/reference/components/loki-server-http.md" source="agent" version="" >}} +{{< docs/shared lookup="reference/components/loki-server-http.md" source="alloy" version="" >}} ### grpc -{{< docs/shared lookup="flow/reference/components/loki-server-grpc.md" source="agent" version="" >}} +{{< docs/shared lookup="reference/components/loki-server-grpc.md" source="alloy" version="" >}} ## Labels diff --git a/docs/sources/flow/reference/components/loki.source.journal.md b/docs/sources/reference/components/loki.source.journal.md similarity index 59% rename from docs/sources/flow/reference/components/loki.source.journal.md rename to docs/sources/reference/components/loki.source.journal.md index de776c97b7..3b8b1905c3 100644 --- a/docs/sources/flow/reference/components/loki.source.journal.md +++ b/docs/sources/reference/components/loki.source.journal.md @@ -1,21 +1,14 @@ --- -aliases: -- /docs/grafana-cloud/agent/flow/reference/components/loki.source.journal/ -- /docs/grafana-cloud/monitor-infrastructure/agent/flow/reference/components/loki.source.journal/ -- /docs/grafana-cloud/monitor-infrastructure/integrations/agent/flow/reference/components/loki.source.journal/ -- /docs/grafana-cloud/send-data/agent/flow/reference/components/loki.source.journal/ -canonical: https://grafana.com/docs/agent/latest/flow/reference/components/loki.source.journal/ +canonical: https://grafana.com/docs/alloy/latest/reference/components/loki.source.journal/ description: Learn about loki.source.journal title: loki.source.journal --- # loki.source.journal -`loki.source.journal` reads from the systemd journal and forwards them to other -`loki.*` components. +`loki.source.journal` reads from the systemd journal and forwards them to other `loki.*` components. -Multiple `loki.source.journal` components can be specified by giving them -different labels. +Multiple `loki.source.journal` components can be specified by giving them different labels. ## Usage @@ -31,15 +24,15 @@ log entries to the list of receivers passed in `forward_to`. `loki.source.journal` supports the following arguments: -Name | Type | Description | Default | Required ----- | ---- | ----------- | ------- | -------- -`format_as_json` | `bool` | Whether to forward the original journal entry as JSON. | `false` | no -`max_age` | `duration` | The oldest relative time from process start that will be read. | `"7h"` | no -`path` | `string` | Path to a directory to read entries from. | `""` | no -`matches` | `string` | Journal matches to filter. The `+` character is not supported, only logical AND matches will be added. | `""` | no -`forward_to` | `list(LogsReceiver)` | List of receivers to send log entries to. | | yes -`relabel_rules` | `RelabelRules` | Relabeling rules to apply on log entries. | `{}` | no -`labels` | `map(string)` | The labels to apply to every log coming out of the journal. | `{}` | no +Name | Type | Description | Default | Required +-----------------|----------------------|--------------------------------------------------------------------------------------------------------|---------|--------- +`format_as_json` | `bool` | Whether to forward the original journal entry as JSON. 
| `false` | no +`max_age` | `duration` | The oldest relative time from process start that will be read. | `"7h"` | no +`path` | `string` | Path to a directory to read entries from. | `""` | no +`matches` | `string` | Journal matches to filter. The `+` character is not supported, only logical AND matches will be added. | `""` | no +`forward_to` | `list(LogsReceiver)` | List of receivers to send log entries to. | | yes +`relabel_rules` | `RelabelRules` | Relabeling rules to apply on log entries. | `{}` | no +`labels` | `map(string)` | The labels to apply to every log coming out of the journal. | `{}` | no > **NOTE**: A `job` label is added with the full name of the component `loki.source.journal.LABEL`. @@ -60,17 +53,16 @@ pattern of `__journal_FIELDNAME` and will be dropped before sending to the list of receivers specified in `forward_to`. To keep these labels, use the `relabel_rules` argument and relabel them to not be prefixed with `__`. -> **NOTE**: many field names from journald start with an `_`, such as -> `_systemd_unit`. The final internal label name would be -> `__journal__systemd_unit`, with _two_ underscores between `__journal` and -> `systemd_unit`. +{{< admonition type="note" >}} +Many field names from journald start with an `_`, such as `_systemd_unit`. +The final internal label name would be `__journal__systemd_unit`, with _two_ underscores between `__journal` and `systemd_unit`. +{{< /admonition >}} -[loki.relabel]: {{< relref "./loki.relabel.md" >}} +[loki.relabel]: ../loki.relabel/ ## Component health -`loki.source.journal` is only reported as unhealthy if given an invalid -configuration. +`loki.source.journal` is only reported as unhealthy if given an invalid configuration. ## Debug Metrics diff --git a/docs/sources/flow/reference/components/loki.source.kafka.md b/docs/sources/reference/components/loki.source.kafka.md similarity index 92% rename from docs/sources/flow/reference/components/loki.source.kafka.md rename to docs/sources/reference/components/loki.source.kafka.md index e7aaa2e599..bb85c6d981 100644 --- a/docs/sources/flow/reference/components/loki.source.kafka.md +++ b/docs/sources/reference/components/loki.source.kafka.md @@ -1,10 +1,5 @@ --- -aliases: -- /docs/grafana-cloud/agent/flow/reference/components/loki.source.kafka/ -- /docs/grafana-cloud/monitor-infrastructure/agent/flow/reference/components/loki.source.kafka/ -- /docs/grafana-cloud/monitor-infrastructure/integrations/agent/flow/reference/components/loki.source.kafka/ -- /docs/grafana-cloud/send-data/agent/flow/reference/components/loki.source.kafka/ -canonical: https://grafana.com/docs/agent/latest/flow/reference/components/loki.source.kafka/ +canonical: https://grafana.com/docs/alloy/latest/reference/components/loki.source.kafka/ description: Learn about loki.source.kafka title: loki.source.kafka --- @@ -22,8 +17,7 @@ writing events to at least one topic. Follow the steps in the [Kafka Quick Start](https://kafka.apache.org/documentation/#quickstart) to get started with Kafka. -Multiple `loki.source.kafka` components can be specified by giving them -different labels. +Multiple `loki.source.kafka` components can be specified by giving them different labels. ## Usage @@ -72,7 +66,7 @@ All labels starting with `__` are removed prior to forwarding log entries. To keep these labels, relabel them using a [loki.relabel][] component and pass its `rules` export to the `relabel_rules` argument. 
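For example, a sketch like the following keeps the Kafka topic as a `topic` label. The broker address, the topic name, and the `loki.write.local` component are placeholders for this illustration:

```river
// Holds relabeling rules only; entries are not forwarded through it.
loki.relabel "kafka" {
  forward_to = []

  rule {
    source_labels = ["__meta_kafka_topic"]
    target_label  = "topic"
  }
}

loki.source.kafka "local" {
  brokers       = ["localhost:9092"]
  topics        = ["app-logs"]
  forward_to    = [loki.write.local.receiver]
  relabel_rules = loki.relabel.kafka.rules
}
```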
-[loki.relabel]: {{< relref "./loki.relabel.md" >}} +[loki.relabel]: ../loki.relabel/ ## Blocks @@ -87,11 +81,8 @@ The following blocks are supported inside the definition of `loki.source.kafka`: authentication > sasl_config > oauth_config | [oauth_config] | Optional authentication configuration with Kafka brokers. | no [authentication]: #authentication-block - [tls_config]: #tls_config-block - [sasl_config]: #sasl_config-block - [oauth_config]: #oauth_config-block ### authentication block @@ -107,7 +98,7 @@ you must set the `tls_config` block. If `"sasl"` is used, you must set the `sasl ### tls_config block -{{< docs/shared lookup="flow/reference/components/tls-config-block.md" source="agent" version="" >}} +{{< docs/shared lookup="reference/components/tls-config-block.md" source="alloy" version="" >}} ### sasl_config block @@ -136,8 +127,7 @@ The `oauth_config` is required when the SASL mechanism is set to `OAUTHBEARER`. ## Component health -`loki.source.kafka` is only reported as unhealthy if given an invalid -configuration. +`loki.source.kafka` is only reported as unhealthy if given an invalid configuration. ## Debug information diff --git a/docs/sources/flow/reference/components/loki.source.kubernetes.md b/docs/sources/reference/components/loki.source.kubernetes.md similarity index 56% rename from docs/sources/flow/reference/components/loki.source.kubernetes.md rename to docs/sources/reference/components/loki.source.kubernetes.md index 66194a3db4..1729137117 100644 --- a/docs/sources/flow/reference/components/loki.source.kubernetes.md +++ b/docs/sources/reference/components/loki.source.kubernetes.md @@ -1,10 +1,5 @@ --- -aliases: -- /docs/grafana-cloud/agent/flow/reference/components/loki.source.kubernetes/ -- /docs/grafana-cloud/monitor-infrastructure/agent/flow/reference/components/loki.source.kubernetes/ -- /docs/grafana-cloud/monitor-infrastructure/integrations/agent/flow/reference/components/loki.source.kubernetes/ -- /docs/grafana-cloud/send-data/agent/flow/reference/components/loki.source.kubernetes/ -canonical: https://grafana.com/docs/agent/latest/flow/reference/components/loki.source.kubernetes/ +canonical: https://grafana.com/docs/alloy/latest/reference/components/loki.source.kubernetes/ description: Learn about loki.source.kubernetes labels: stage: experimental @@ -13,7 +8,7 @@ title: loki.source.kubernetes # loki.source.kubernetes -{{< docs/shared lookup="flow/stability/experimental.md" source="agent" version="" >}} +{{< docs/shared lookup="stability/experimental.md" source="alloy" version="" >}} `loki.source.kubernetes` tails logs from Kubernetes containers using the Kubernetes API. It has the following benefits over `loki.source.file`: @@ -24,12 +19,11 @@ Kubernetes API. It has the following benefits over `loki.source.file`: * It doesn't require a DaemonSet to collect logs, so one {{< param "PRODUCT_ROOT_NAME" >}} could collect logs for the whole cluster. -> **NOTE**: Because `loki.source.kubernetes` uses the Kubernetes API to tail -> logs, it uses more network traffic and CPU consumption of Kubelets than -> `loki.source.file`. +{{< admonition type="note" >}} +Because `loki.source.kubernetes` uses the Kubernetes API to tail logs, it uses more network traffic and CPU consumption of Kubelets than `loki.source.file`. +{{< /admonition >}} -Multiple `loki.source.kubernetes` components can be specified by giving them -different labels. +Multiple `loki.source.kubernetes` components can be specified by giving them different labels. 
## Usage @@ -47,21 +41,17 @@ log entries to the list of receivers passed in `forward_to`. `loki.source.kubernetes` supports the following arguments: -Name | Type | Description | Default | Required ----- | ---- | ----------- | ------- | -------- -`targets` | `list(map(string))` | List of files to read from. | | yes -`forward_to` | `list(LogsReceiver)` | List of receivers to send log entries to. | | yes +Name | Type | Description | Default | Required +-------------|----------------------|-------------------------------------------|---------|--------- +`targets` | `list(map(string))` | List of files to read from. | | yes +`forward_to` | `list(LogsReceiver)` | List of receivers to send log entries to. | | yes Each target in `targets` must have the following labels: -* `__meta_kubernetes_namespace` or `__pod_namespace__` to specify the namespace - of the pod to tail. -* `__meta_kubernetes_pod_name` or `__pod_name__` to specify the name of the pod - to tail. -* `__meta_kubernetes_pod_container_name` or `__pod_container_name__` to specify - the container within the pod to tail. -* `__meta_kubernetes_pod_uid` or `__pod_uid__` to specify the UID of the pod to - tail. +* `__meta_kubernetes_namespace` or `__pod_namespace__` to specify the namespace of the pod to tail. +* `__meta_kubernetes_pod_name` or `__pod_name__` to specify the name of the pod to tail. +* `__meta_kubernetes_pod_container_name` or `__pod_container_name__` to specify the container within the pod to tail. +* `__meta_kubernetes_pod_uid` or `__pod_uid__` to specify the UID of the pod to tail. By default, all of these labels are present when the output `discovery.kubernetes` is used. @@ -75,15 +65,15 @@ before the container has permanently terminated. The following blocks are supported inside the definition of `loki.source.kubernetes`: -Hierarchy | Block | Description | Required ---------- | ----- | ----------- | -------- -client | [client][] | Configures Kubernetes client used to tail logs. | no -client > basic_auth | [basic_auth][] | Configure basic_auth for authenticating to the endpoint. | no -client > authorization | [authorization][] | Configure generic authorization to the endpoint. | no -client > oauth2 | [oauth2][] | Configure OAuth2 for authenticating to the endpoint. | no -client > oauth2 > tls_config | [tls_config][] | Configure TLS settings for connecting to the endpoint. | no -client > tls_config | [tls_config][] | Configure TLS settings for connecting to the endpoint. | no -clustering | [clustering][] | Configure the component for when {{< param "PRODUCT_NAME" >}} is running in clustered mode. | no +Hierarchy | Block | Description | Required +-----------------------------|-------------------|---------------------------------------------------------------------------------------------|--------- +client | [client][] | Configures Kubernetes client used to tail logs. | no +client > basic_auth | [basic_auth][] | Configure basic_auth for authenticating to the endpoint. | no +client > authorization | [authorization][] | Configure generic authorization to the endpoint. | no +client > oauth2 | [oauth2][] | Configure OAuth2 for authenticating to the endpoint. | no +client > oauth2 > tls_config | [tls_config][] | Configure TLS settings for connecting to the endpoint. | no +client > tls_config | [tls_config][] | Configure TLS settings for connecting to the endpoint. | no +clustering | [clustering][] | Configure the component for when {{< param "PRODUCT_NAME" >}} is running in clustered mode. 
| no The `>` symbol indicates deeper levels of nesting. For example, `client > basic_auth` refers to a `basic_auth` block defined @@ -105,18 +95,18 @@ used. The following arguments are supported: -Name | Type | Description | Default | Required ------------------------- | ------------------- | ------------------------------------------------------------- | ------- | -------- -`api_server` | `string` | URL of the Kubernetes API server. | | no -`kubeconfig_file` | `string` | Path of the `kubeconfig` file to use for connecting to Kubernetes. | | no -`bearer_token_file` | `string` | File containing a bearer token to authenticate with. | | no -`bearer_token` | `secret` | Bearer token to authenticate with. | | no -`enable_http2` | `bool` | Whether HTTP2 is supported for requests. | `true` | no -`follow_redirects` | `bool` | Whether redirects returned by the server should be followed. | `true` | no -`proxy_url` | `string` | HTTP proxy to send requests through. | | no -`no_proxy` | `string` | Comma-separated list of IP addresses, CIDR notations, and domain names to exclude from proxying. | | no -`proxy_from_environment` | `bool` | Use the proxy URL indicated by environment variables. | `false` | no -`proxy_connect_header` | `map(list(secret))` | Specifies headers to send to proxies during CONNECT requests. | | no +Name | Type | Description | Default | Required +-------------------------|---------------------|--------------------------------------------------------------------------------------------------|---------|--------- +`api_server` | `string` | URL of the Kubernetes API server. | | no +`kubeconfig_file` | `string` | Path of the `kubeconfig` file to use for connecting to Kubernetes. | | no +`bearer_token_file` | `string` | File containing a bearer token to authenticate with. | | no +`bearer_token` | `secret` | Bearer token to authenticate with. | | no +`enable_http2` | `bool` | Whether HTTP2 is supported for requests. | `true` | no +`follow_redirects` | `bool` | Whether redirects returned by the server should be followed. | `true` | no +`proxy_url` | `string` | HTTP proxy to send requests through. | | no +`no_proxy` | `string` | Comma-separated list of IP addresses, CIDR notations, and domain names to exclude from proxying. | | no +`proxy_from_environment` | `bool` | Use the proxy URL indicated by environment variables. | `false` | no +`proxy_connect_header` | `map(list(secret))` | Specifies headers to send to proxies during CONNECT requests. | | no At most, one of the following can be provided: - [`bearer_token` argument][client]. @@ -125,29 +115,29 @@ Name | Type | Description - [`authorization` block][authorization]. - [`oauth2` block][oauth2]. 
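+As a minimal sketch, a `client` block could authenticate with a bearer token. The API server URL and token path below are placeholders:
+
+```river
+loki.source.kubernetes "example" {
+  targets    = discovery.kubernetes.pods.targets
+  forward_to = [loki.write.default.receiver]
+
+  client {
+    // Overrides the default in-cluster configuration.
+    api_server        = "https://kubernetes.example.com:6443"
+    bearer_token_file = "/var/run/secrets/kubernetes.io/serviceaccount/token"
+  }
+}
+```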
-{{< docs/shared lookup="flow/reference/components/http-client-proxy-config-description.md" source="agent" version="" >}}
+{{< docs/shared lookup="reference/components/http-client-proxy-config-description.md" source="alloy" version="" >}}

### basic_auth block

-{{< docs/shared lookup="flow/reference/components/basic-auth-block.md" source="agent" version="" >}}
+{{< docs/shared lookup="reference/components/basic-auth-block.md" source="alloy" version="" >}}

### authorization block

-{{< docs/shared lookup="flow/reference/components/authorization-block.md" source="agent" version="" >}}
+{{< docs/shared lookup="reference/components/authorization-block.md" source="alloy" version="" >}}

### oauth2 block

-{{< docs/shared lookup="flow/reference/components/oauth2-block.md" source="agent" version="" >}}
+{{< docs/shared lookup="reference/components/oauth2-block.md" source="alloy" version="" >}}

### tls_config block

-{{< docs/shared lookup="flow/reference/components/tls-config-block.md" source="agent" version="" >}}
+{{< docs/shared lookup="reference/components/tls-config-block.md" source="alloy" version="" >}}

### clustering (beta)

-Name | Type | Description | Default | Required
----- | ---- | ----------- | ------- | --------
-`enabled` | `bool` | Distribute log collection with other cluster nodes. | | yes
+Name | Type | Description | Default | Required
+----------|--------|-----------------------------------------------------|---------|---------
+`enabled` | `bool` | Distribute log collection with other cluster nodes. | | yes

When {{< param "PRODUCT_ROOT_NAME" >}} is [using clustering][], and `enabled` is set to true,
then this `loki.source.kubernetes` component instance opts in to participating in the
cluster to distribute the load of log collection between all cluster nodes.

@@ -157,7 +147,7 @@ If {{< param "PRODUCT_ROOT_NAME" >}} is _not_ running in clustered mode, then th
`loki.source.kubernetes` collects logs from every target it receives in its
arguments.

-[using clustering]: {{< relref "../../concepts/clustering.md" >}}
+[using clustering]: ../../../concepts/clustering/

## Exported fields

@@ -175,8 +165,7 @@ target:

* The labels associated with the target.
* The full set of labels which were found during service discovery.
-* The most recent time a log line was read and forwarded to the next components
-  in the pipeline.
+* The most recent time a log line was read and forwarded to the next components in the pipeline.
* The most recent error from tailing, if any.
## Debug metrics diff --git a/docs/sources/flow/reference/components/loki.source.kubernetes_events.md b/docs/sources/reference/components/loki.source.kubernetes_events.md similarity index 54% rename from docs/sources/flow/reference/components/loki.source.kubernetes_events.md rename to docs/sources/reference/components/loki.source.kubernetes_events.md index 85a1d59637..faae42fcc3 100644 --- a/docs/sources/flow/reference/components/loki.source.kubernetes_events.md +++ b/docs/sources/reference/components/loki.source.kubernetes_events.md @@ -1,10 +1,5 @@ --- -aliases: -- /docs/grafana-cloud/agent/flow/reference/components/loki.source.kubernetes_events/ -- /docs/grafana-cloud/monitor-infrastructure/agent/flow/reference/components/loki.source.kubernetes_events/ -- /docs/grafana-cloud/monitor-infrastructure/integrations/agent/flow/reference/components/loki.source.kubernetes_events/ -- /docs/grafana-cloud/send-data/agent/flow/reference/components/loki.source.kubernetes_events/ -canonical: https://grafana.com/docs/agent/latest/flow/reference/components/loki.source.kubernetes_events/ +canonical: https://grafana.com/docs/alloy/latest/reference/components/loki.source.kubernetes_events/ description: Learn about loki.source.kubernetes_events title: loki.source.kubernetes_events --- @@ -14,8 +9,7 @@ title: loki.source.kubernetes_events `loki.source.kubernetes_events` tails events from the Kubernetes API and converts them into log lines to forward to other `loki` components. -Multiple `loki.source.kubernetes_events` components can be specified by giving them -different labels. +Multiple `loki.source.kubernetes_events` components can be specified by giving them different labels. ## Usage @@ -32,28 +26,27 @@ log entries to the list of receivers passed in `forward_to`. `loki.source.kubernetes_events` supports the following arguments: -Name | Type | Description | Default | Required ----- | ---- | ----------- | ------- | -------- -`job_name` | `string` | Value to use for `job` label for generated logs. | `"loki.source.kubernetes_events"` | no -`log_format` | `string` | Format of the log. | `"logfmt"` | no -`namespaces` | `list(string)` | Namespaces to watch for Events in. | `[]` | no -`forward_to` | `list(LogsReceiver)` | List of receivers to send log entries to. | | yes +Name | Type | Description | Default | Required +-------------|----------------------|--------------------------------------------------|-----------------------------------|--------- +`job_name` | `string` | Value to use for `job` label for generated logs. | `"loki.source.kubernetes_events"` | no +`log_format` | `string` | Format of the log. | `"logfmt"` | no +`namespaces` | `list(string)` | Namespaces to watch for Events in. | `[]` | no +`forward_to` | `list(LogsReceiver)` | List of receivers to send log entries to. | | yes By default, `loki.source.kubernetes_events` will watch for events in all namespaces. A list of explicit namespaces to watch can be provided in the `namespaces` argument. -By default, the generated log lines will be in the `logfmt` format. Use the -`log_format` argument to change it to `json`. These formats are also names of -LogQL parsers, which can be used for processing the logs. +By default, the generated log lines will be in the `logfmt` format. +Use the `log_format` argument to change it to `json`. +These formats are also names of LogQL parsers, which can be used for processing the logs. 
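+For example, this sketch watches two namespaces and emits JSON-formatted lines. The namespaces and receiver are illustrative:
+
+```river
+loki.source.kubernetes_events "example" {
+  namespaces = ["kube-system", "default"]
+  log_format = "json"
+  forward_to = [loki.write.default.receiver]
+}
+```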
-> **NOTE**: When watching all namespaces, {{< param "PRODUCT_NAME" >}} must have permissions
-> to watch events at the cluster scope (such as using a ClusterRoleBinding). If
-> an explicit list of namespaces is provided, {{< param "PRODUCT_NAME" >}} only needs
-> permissions to watch events for those namespaces.
+{{< admonition type="note" >}}
+When watching all namespaces, {{< param "PRODUCT_NAME" >}} must have permissions to watch events at the cluster scope (such as using a ClusterRoleBinding).
+If an explicit list of namespaces is provided, {{< param "PRODUCT_NAME" >}} only needs permissions to watch events for those namespaces.
+{{< /admonition >}}

-Log lines generated by `loki.source.kubernetes_events` have the following
-labels:
+Log lines generated by `loki.source.kubernetes_events` have the following labels:

* `namespace`: Namespace of the Kubernetes object involved in the event.
* `job`: Value specified by the `job_name` argument.
@@ -66,21 +59,21 @@ remove the job label, forward the output of `loki.source.kubernetes_events` to

For compatibility with the `eventhandler` integration from static mode,
`job_name` can be set to `"integrations/kubernetes/eventhandler"`.

-[loki.relabel]: {{< relref "./loki.relabel.md" >}}
+[loki.relabel]: ../loki.relabel/

## Blocks

The following blocks are supported inside the definition of
`loki.source.kubernetes_events`:

-Hierarchy | Block | Description | Required
---------- | ----- | ----------- | --------
-client | [client][] | Configures Kubernetes client used to tail logs. | no
-client > basic_auth | [basic_auth][] | Configure basic_auth for authenticating to the endpoint. | no
-client > authorization | [authorization][] | Configure generic authorization to the endpoint. | no
-client > oauth2 | [oauth2][] | Configure OAuth2 for authenticating to the endpoint. | no
-client > oauth2 > tls_config | [tls_config][] | Configure TLS settings for connecting to the endpoint. | no
-client > tls_config | [tls_config][] | Configure TLS settings for connecting to the endpoint. | no
+Hierarchy | Block | Description | Required
+-----------------------------|-------------------|------------------------------------------------------------|---------
+client | [client][] | Configures the Kubernetes client used to watch for events. | no
+client > basic_auth | [basic_auth][] | Configure basic_auth for authenticating to the endpoint. | no
+client > authorization | [authorization][] | Configure generic authorization to the endpoint. | no
+client > oauth2 | [oauth2][] | Configure OAuth2 for authenticating to the endpoint. | no
+client > oauth2 > tls_config | [tls_config][] | Configure TLS settings for connecting to the endpoint. | no
+client > tls_config | [tls_config][] | Configure TLS settings for connecting to the endpoint. | no

The `>` symbol indicates deeper levels of nesting. For example, `client >
basic_auth` refers to a `basic_auth` block defined
@@ -96,23 +89,22 @@ inside a `client` block.

The `client` block configures the Kubernetes client used to watch for events.
If the `client` block isn't provided, the default in-cluster
-configuration with the service account of the running {{< param "PRODUCT_ROOT_NAME" >}} pod is
-used.
+configuration with the service account of the running {{< param "PRODUCT_ROOT_NAME" >}} pod is used.
The following arguments are supported: -Name | Type | Description | Default | Required ------------------------- | ------------------- | ------------------------------------------------------------- | ------- | -------- -`api_server` | `string` | URL of the Kubernetes API server. | | no -`kubeconfig_file` | `string` | Path of the `kubeconfig` file to use for connecting to Kubernetes. | | no -`bearer_token_file` | `string` | File containing a bearer token to authenticate with. | | no -`bearer_token` | `secret` | Bearer token to authenticate with. | | no -`enable_http2` | `bool` | Whether HTTP2 is supported for requests. | `true` | no -`follow_redirects` | `bool` | Whether redirects returned by the server should be followed. | `true` | no -`proxy_url` | `string` | HTTP proxy to send requests through. | | no -`no_proxy` | `string` | Comma-separated list of IP addresses, CIDR notations, and domain names to exclude from proxying. | | no -`proxy_from_environment` | `bool` | Use the proxy URL indicated by environment variables. | `false` | no -`proxy_connect_header` | `map(list(secret))` | Specifies headers to send to proxies during CONNECT requests. | | no +Name | Type | Description | Default | Required +-------------------------|---------------------|--------------------------------------------------------------------------------------------------|---------|--------- +`api_server` | `string` | URL of the Kubernetes API server. | | no +`kubeconfig_file` | `string` | Path of the `kubeconfig` file to use for connecting to Kubernetes. | | no +`bearer_token_file` | `string` | File containing a bearer token to authenticate with. | | no +`bearer_token` | `secret` | Bearer token to authenticate with. | | no +`enable_http2` | `bool` | Whether HTTP2 is supported for requests. | `true` | no +`follow_redirects` | `bool` | Whether redirects returned by the server should be followed. | `true` | no +`proxy_url` | `string` | HTTP proxy to send requests through. | | no +`no_proxy` | `string` | Comma-separated list of IP addresses, CIDR notations, and domain names to exclude from proxying. | | no +`proxy_from_environment` | `bool` | Use the proxy URL indicated by environment variables. | `false` | no +`proxy_connect_header` | `map(list(secret))` | Specifies headers to send to proxies during CONNECT requests. | | no At most, one of the following can be provided: - [`bearer_token` argument][client]. @@ -121,23 +113,23 @@ Name | Type | Description - [`authorization` block][authorization]. - [`oauth2` block][oauth2]. 
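+For instance, when running outside the cluster, the `client` block could point at a kubeconfig instead of the in-cluster defaults. The path below is a placeholder:
+
+```river
+loki.source.kubernetes_events "example" {
+  forward_to = [loki.write.default.receiver]
+
+  client {
+    kubeconfig_file = "/etc/alloy/kubeconfig"
+  }
+}
+```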
-{{< docs/shared lookup="flow/reference/components/http-client-proxy-config-description.md" source="agent" version="" >}}
+{{< docs/shared lookup="reference/components/http-client-proxy-config-description.md" source="alloy" version="" >}}

### basic_auth block

-{{< docs/shared lookup="flow/reference/components/basic-auth-block.md" source="agent" version="" >}}
+{{< docs/shared lookup="reference/components/basic-auth-block.md" source="alloy" version="" >}}

### authorization block

-{{< docs/shared lookup="flow/reference/components/authorization-block.md" source="agent" version="" >}}
+{{< docs/shared lookup="reference/components/authorization-block.md" source="alloy" version="" >}}

### oauth2 block

-{{< docs/shared lookup="flow/reference/components/oauth2-block.md" source="agent" version="" >}}
+{{< docs/shared lookup="reference/components/oauth2-block.md" source="alloy" version="" >}}

### tls_config block

-{{< docs/shared lookup="flow/reference/components/tls-config-block.md" source="agent" version="" >}}
+{{< docs/shared lookup="reference/components/tls-config-block.md" source="alloy" version="" >}}

## Exported fields

@@ -145,13 +137,11 @@ Name | Type | Description

## Component health

-`loki.source.kubernetes_events` is only reported as unhealthy if given an invalid
-configuration.
+`loki.source.kubernetes_events` is only reported as unhealthy if given an invalid configuration.

## Debug information

-`loki.source.kubernetes_events` exposes the most recently read timestamp for
-events in each watched namespace.
+`loki.source.kubernetes_events` exposes the most recently read timestamp for events in each watched namespace.

## Debug metrics

@@ -169,13 +159,12 @@ The data path is inside the directory configured by the `--storage.path` [comman

In the Static mode's [eventhandler integration][eventhandler-integration], a
`cache_path` argument is used to configure a positions file. In Flow mode, this
argument is no longer necessary.

-[cmd-args]: {{< relref "../cli/run.md" >}}
-[eventhandler-integration]: {{< relref "../../../static/configuration/integrations/integrations-next/eventhandler-config.md" >}}
+[cmd-args]: ../../cli/run/
+[eventhandler-integration]: https://grafana.com/docs/agent/latest/static/configuration/integrations/integrations-next/eventhandler-config/

## Example

-This example collects watches events in the `kube-system` namespace and
-forwards them to a `loki.write` component so they are written to Loki.
+This example watches for events in the `kube-system` namespace and forwards them to a `loki.write` component so they are written to Loki.
```river
loki.source.kubernetes_events "example" {
diff --git a/docs/sources/flow/reference/components/loki.source.podlogs.md b/docs/sources/reference/components/loki.source.podlogs.md
similarity index 53%
rename from docs/sources/flow/reference/components/loki.source.podlogs.md
rename to docs/sources/reference/components/loki.source.podlogs.md
index 7c204593b2..884e31beb2 100644
--- a/docs/sources/flow/reference/components/loki.source.podlogs.md
+++ b/docs/sources/reference/components/loki.source.podlogs.md
@@ -1,10 +1,5 @@
 ---
-aliases:
-- /docs/grafana-cloud/agent/flow/reference/components/loki.source.podlogs/
-- /docs/grafana-cloud/monitor-infrastructure/agent/flow/reference/components/loki.source.podlogs/
-- /docs/grafana-cloud/monitor-infrastructure/integrations/agent/flow/reference/components/loki.source.podlogs/
-- /docs/grafana-cloud/send-data/agent/flow/reference/components/loki.source.podlogs/
-canonical: https://grafana.com/docs/agent/latest/flow/reference/components/loki.source.podlogs/
+canonical: https://grafana.com/docs/alloy/latest/reference/components/loki.source.podlogs/
 description: Learn about loki.source.podlogs
 labels:
   stage: experimental
@@ -13,7 +8,7 @@ title: loki.source.podlogs

 # loki.source.podlogs

-{{< docs/shared lookup="flow/stability/experimental.md" source="agent" version="" >}}
+{{< docs/shared lookup="stability/experimental.md" source="alloy" version="" >}}

 `loki.source.podlogs` discovers `PodLogs` resources on Kubernetes and, using
 the Kubernetes API, tails logs from Kubernetes containers of Pods specified by
 the discovered resources.

 `loki.source.podlogs` is similar to `loki.source.kubernetes`, but uses custom
 resources rather than being fed targets from another Flow component.

-> **NOTE**: Unlike `loki.source.kubernetes`, it is not possible to distribute
-> responsibility of collecting logs across multiple {{< param "PRODUCT_ROOT_NAME" >}}s. To avoid collecting
-> duplicate logs, only one {{< param "PRODUCT_ROOT_NAME" >}} should be running a `loki.source.podlogs`
-> component.
+{{< admonition type="note" >}}
+Unlike `loki.source.kubernetes`, it is not possible to distribute responsibility of collecting logs across multiple {{< param "PRODUCT_ROOT_NAME" >}}s.
+To avoid collecting duplicate logs, only one {{< param "PRODUCT_ROOT_NAME" >}} should be running a `loki.source.podlogs` component.
+{{< /admonition >}}

-> **NOTE**: Because `loki.source.podlogs` uses the Kubernetes API to tail logs,
-> it uses more network traffic and CPU consumption of Kubelets than
-> `loki.source.file`.
+{{< admonition type="note" >}}
+Because `loki.source.podlogs` uses the Kubernetes API to tail logs, it increases network traffic and Kubelet CPU consumption compared to `loki.source.file`.
+{{< /admonition >}}

 Multiple `loki.source.podlogs` components can be specified by giving them
 different labels.

@@ -49,9 +44,9 @@ log entries to the list of receivers passed in `forward_to`.

 `loki.source.podlogs` supports the following arguments:

-Name | Type | Description | Default | Required
----- | ---- | ----------- | ------- | --------
-`forward_to` | `list(LogsReceiver)` | List of receivers to send log entries to. | | yes
+Name | Type | Description | Default | Required
+-------------|----------------------|-------------------------------------------|---------|---------
+`forward_to` | `list(LogsReceiver)` | List of receivers to send log entries to. | | yes

 `loki.source.podlogs` searches for `PodLogs` resources on Kubernetes.
Each `PodLogs` resource describes a set of pods to tail logs from.

@@ -64,12 +59,12 @@ The `PodLogs` resource describes a set of Pods to collect logs from.

> `monitoring.grafana.com/v1alpha2`, and is not compatible with `PodLogs` from
> the {{< param "PRODUCT_ROOT_NAME" >}} Operator, which are version `v1alpha1`.

-Field | Type | Description
------ | ---- | -----------
-`apiVersion` | string | `monitoring.grafana.com/v1alpha2`
-`kind` | string | `PodLogs`
-`metadata` | [ObjectMeta][] | Metadata for the PodLogs.
-`spec` | [PodLogsSpec][] | Definition of what Pods to collect logs from.
+Field | Type | Description
+-------------|-----------------|----------------------------------------------
+`apiVersion` | string | `monitoring.grafana.com/v1alpha2`
+`kind` | string | `PodLogs`
+`metadata` | [ObjectMeta][] | Metadata for the PodLogs.
+`spec` | [PodLogsSpec][] | Definition of what Pods to collect logs from.

[ObjectMeta]: https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.24/#objectmeta-v1-meta
[PodLogsSpec]: #podlogsspec

@@ -78,39 +73,31 @@ Field | Type | Description

`PodLogsSpec` describes a set of Pods to collect logs from.

-Field | Type | Description
------ | ---- | -----------
-`selector` | [LabelSelector][] | Label selector of Pods to collect logs from.
+Field | Type | Description
+--------------------|-------------------|-------------------------------------------------------------
+`selector` | [LabelSelector][] | Label selector of Pods to collect logs from.
 `namespaceSelector` | [LabelSelector][] | Label selector of Namespaces that Pods can be discovered in.
-`relabelings` | [RelabelConfig][] | Relabel rules to apply to discovered Pods.
+`relabelings` | [RelabelConfig][] | Relabel rules to apply to discovered Pods.

-If `selector` is left as the default value, all Pods are discovered. If
-`namespaceSelector` is left as the default value, all Namespaces are used for
-Pod discovery.
+If `selector` is left as the default value, all Pods are discovered.
+If `namespaceSelector` is left as the default value, all Namespaces are used for Pod discovery.

-The `relabelings` field can be used to modify labels from discovered Pods. The
-following meta labels are available for relabeling:
+The `relabelings` field can be used to modify labels from discovered Pods.
+The following meta labels are available for relabeling:

* `__meta_kubernetes_namespace`: The namespace of the Pod.
* `__meta_kubernetes_pod_name`: The name of the Pod.
* `__meta_kubernetes_pod_ip`: The pod IP of the Pod.
* `__meta_kubernetes_pod_label_<labelname>`: Each label from the Pod.
-* `__meta_kubernetes_pod_labelpresent_<labelname>`: `true` for each label from
-  the Pod.
-* `__meta_kubernetes_pod_annotation_<annotationname>`: Each annotation from the
-  Pod.
-* `__meta_kubernetes_pod_annotationpresent_<annotationname>`: `true` for each
-  annotation from the Pod.
-* `__meta_kubernetes_pod_container_init`: `true` if the container is an
-  `InitContainer`.
+* `__meta_kubernetes_pod_labelpresent_<labelname>`: `true` for each label from the Pod.
+* `__meta_kubernetes_pod_annotation_<annotationname>`: Each annotation from the Pod.
+* `__meta_kubernetes_pod_annotationpresent_<annotationname>`: `true` for each annotation from the Pod.
+* `__meta_kubernetes_pod_container_init`: `true` if the container is an `InitContainer`.
* `__meta_kubernetes_pod_container_name`: Name of the container.
* `__meta_kubernetes_pod_container_image`: The image the container is using.
-* `__meta_kubernetes_pod_ready`: Set to `true` or `false` for the Pod's ready
-  state.
-* `__meta_kubernetes_pod_phase`: Set to `Pending`, `Running`, `Succeeded`, `Failed` or - `Unknown` in the lifecycle. -* `__meta_kubernetes_pod_node_name`: The name of the node the pod is scheduled - onto. +* `__meta_kubernetes_pod_ready`: Set to `true` or `false` for the Pod's ready state. +* `__meta_kubernetes_pod_phase`: Set to `Pending`, `Running`, `Succeeded`, `Failed` or `Unknown` in the lifecycle. +* `__meta_kubernetes_pod_node_name`: The name of the node the pod is scheduled onto. * `__meta_kubernetes_pod_host_ip`: The current host IP of the pod object. * `__meta_kubernetes_pod_uid`: The UID of the Pod. * `__meta_kubernetes_pod_controller_kind`: Object kind of the Pod's controller. @@ -132,23 +119,22 @@ In addition to the meta labels, the following labels are exposed to tell The following blocks are supported inside the definition of `loki.source.podlogs`: -Hierarchy | Block | Description | Required ---------- | ----- | ----------- | -------- -client | [client][] | Configures Kubernetes client used to tail logs. | no -client > basic_auth | [basic_auth][] | Configure basic_auth for authenticating to the endpoint. | no -client > authorization | [authorization][] | Configure generic authorization to the endpoint. | no -client > oauth2 | [oauth2][] | Configure OAuth2 for authenticating to the endpoint. | no -client > oauth2 > tls_config | [tls_config][] | Configure TLS settings for connecting to the endpoint. | no -client > tls_config | [tls_config][] | Configure TLS settings for connecting to the endpoint. | no -selector | [selector][] | Label selector for which `PodLogs` to discover. | no -selector > match_expression | [match_expression][] | Label selector expression for which `PodLogs` to discover. | no -namespace_selector | [selector][] | Label selector for which namespaces to discover `PodLogs` in. | no -namespace_selector > match_expression | [match_expression][] | Label selector expression for which namespaces to discover `PodLogs` in. | no -clustering | [clustering][] | Configure the component for when {{< param "PRODUCT_ROOT_NAME" >}} is running in clustered mode. | no - -The `>` symbol indicates deeper levels of nesting. For example, `client > -basic_auth` refers to a `basic_auth` block defined -inside a `client` block. +Hierarchy | Block | Description | Required +--------------------------------------|----------------------|--------------------------------------------------------------------------------------------------|--------- +client | [client][] | Configures Kubernetes client used to tail logs. | no +client > basic_auth | [basic_auth][] | Configure basic_auth for authenticating to the endpoint. | no +client > authorization | [authorization][] | Configure generic authorization to the endpoint. | no +client > oauth2 | [oauth2][] | Configure OAuth2 for authenticating to the endpoint. | no +client > oauth2 > tls_config | [tls_config][] | Configure TLS settings for connecting to the endpoint. | no +client > tls_config | [tls_config][] | Configure TLS settings for connecting to the endpoint. | no +selector | [selector][] | Label selector for which `PodLogs` to discover. | no +selector > match_expression | [match_expression][] | Label selector expression for which `PodLogs` to discover. | no +namespace_selector | [selector][] | Label selector for which namespaces to discover `PodLogs` in. | no +namespace_selector > match_expression | [match_expression][] | Label selector expression for which namespaces to discover `PodLogs` in. 
| no +clustering | [clustering][] | Configure the component for when {{< param "PRODUCT_ROOT_NAME" >}} is running in clustered mode. | no + +The `>` symbol indicates deeper levels of nesting. +For example, `client > basic_auth` refers to a `basic_auth` block defined inside a `client` block. [client]: #client-block [basic_auth]: #basic_auth-block @@ -163,23 +149,22 @@ inside a `client` block. The `client` block configures the Kubernetes client used to tail logs from containers. If the `client` block isn't provided, the default in-cluster -configuration with the service account of the running {{< param "PRODUCT_ROOT_NAME" >}} pod is -used. +configuration with the service account of the running {{< param "PRODUCT_ROOT_NAME" >}} pod is used. The following arguments are supported: -Name | Type | Description | Default | Required ------------------------- | ------------------- | ------------------------------------------------------------- | ------- | -------- -`api_server` | `string` | URL of the Kubernetes API server. | | no -`kubeconfig_file` | `string` | Path of the `kubeconfig` file to use for connecting to Kubernetes. | | no -`bearer_token_file` | `string` | File containing a bearer token to authenticate with. | | no -`bearer_token` | `secret` | Bearer token to authenticate with. | | no -`enable_http2` | `bool` | Whether HTTP2 is supported for requests. | `true` | no -`follow_redirects` | `bool` | Whether redirects returned by the server should be followed. | `true` | no -`proxy_url` | `string` | HTTP proxy to send requests through. | | no -`no_proxy` | `string` | Comma-separated list of IP addresses, CIDR notations, and domain names to exclude from proxying. | | no -`proxy_from_environment` | `bool` | Use the proxy URL indicated by environment variables. | `false` | no -`proxy_connect_header` | `map(list(secret))` | Specifies headers to send to proxies during CONNECT requests. | | no +Name | Type | Description | Default | Required +-------------------------|---------------------|--------------------------------------------------------------------------------------------------|---------|--------- +`api_server` | `string` | URL of the Kubernetes API server. | | no +`kubeconfig_file` | `string` | Path of the `kubeconfig` file to use for connecting to Kubernetes. | | no +`bearer_token_file` | `string` | File containing a bearer token to authenticate with. | | no +`bearer_token` | `secret` | Bearer token to authenticate with. | | no +`enable_http2` | `bool` | Whether HTTP2 is supported for requests. | `true` | no +`follow_redirects` | `bool` | Whether redirects returned by the server should be followed. | `true` | no +`proxy_url` | `string` | HTTP proxy to send requests through. | | no +`no_proxy` | `string` | Comma-separated list of IP addresses, CIDR notations, and domain names to exclude from proxying. | | no +`proxy_from_environment` | `bool` | Use the proxy URL indicated by environment variables. | `false` | no +`proxy_connect_header` | `map(list(secret))` | Specifies headers to send to proxies during CONNECT requests. | | no At most, one of the following can be provided: - [`bearer_token` argument][client]. @@ -188,49 +173,47 @@ Name | Type | Description - [`authorization` block][authorization]. - [`oauth2` block][oauth2]. 
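+As an illustrative sketch, the `client` block can also route Kubernetes API traffic through an HTTP proxy. The proxy address and exclusion list below are placeholders:
+
+```river
+loki.source.podlogs "example" {
+  forward_to = [loki.write.default.receiver]
+
+  client {
+    proxy_url = "http://proxy.example.com:8080"
+    no_proxy  = "10.0.0.0/8,.cluster.local"
+  }
+}
+```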
-{{< docs/shared lookup="flow/reference/components/http-client-proxy-config-description.md" source="agent" version="" >}} +{{< docs/shared lookup="reference/components/http-client-proxy-config-description.md" source="alloy" version="" >}} ### basic_auth block -{{< docs/shared lookup="flow/reference/components/basic-auth-block.md" source="agent" version="" >}} +{{< docs/shared lookup="reference/components/basic-auth-block.md" source="alloy" version="" >}} ### authorization block -{{< docs/shared lookup="flow/reference/components/authorization-block.md" source="agent" version="" >}} +{{< docs/shared lookup="reference/components/authorization-block.md" source="alloy" version="" >}} ### oauth2 block -{{< docs/shared lookup="flow/reference/components/oauth2-block.md" source="agent" version="" >}} +{{< docs/shared lookup="reference/components/oauth2-block.md" source="alloy" version="" >}} ### tls_config block -{{< docs/shared lookup="flow/reference/components/tls-config-block.md" source="agent" version="" >}} +{{< docs/shared lookup="reference/components/tls-config-block.md" source="alloy" version="" >}} ### selector block -The `selector` block describes a Kubernetes label selector for `PodLogs` or -Namespace discovery. +The `selector` block describes a Kubernetes label selector for `PodLogs` or Namespace discovery. The following arguments are supported: -Name | Type | Description | Default | Required ----- | ---- | ----------- | ------- | -------- -`match_labels` | `map(string)` | Label keys and values used to discover resources. | `{}` | no +Name | Type | Description | Default | Required +---------------|---------------|---------------------------------------------------|---------|--------- +`match_labels` | `map(string)` | Label keys and values used to discover resources. | `{}` | no When the `match_labels` argument is empty, all resources will be matched. ### match_expression block -The `match_expression` block describes a Kubernetes label match expression for -`PodLogs` or Namespace discovery. +The `match_expression` block describes a Kubernetes label match expression for `PodLogs` or Namespace discovery. The following arguments are supported: -Name | Type | Description | Default | Required ----- | ---- | ----------- | ------- | -------- -`key` | `string` | The label name to match against. | | yes -`operator` | `string` | The operator to use when matching. | | yes -`values`| `list(string)` | The values used when matching. | | no +Name | Type | Description | Default | Required +-----------|----------------|------------------------------------|---------|--------- +`key` | `string` | The label name to match against. | | yes +`operator` | `string` | The operator to use when matching. | | yes +`values` | `list(string)` | The values used when matching. | | no The `operator` argument must be one of the following strings: @@ -244,9 +227,9 @@ Both `selector` and `namespace_selector` can make use of multiple ### clustering (beta) -Name | Type | Description | Default | Required ----- | ---- | ----------- | ------- | -------- -`enabled` | `bool` | Distribute log collection with other cluster nodes. | | yes +Name | Type | Description | Default | Required +----------|--------|-----------------------------------------------------|---------|--------- +`enabled` | `bool` | Distribute log collection with other cluster nodes. 
| | yes

When {{< param "PRODUCT_NAME" >}} is [using clustering][], and `enabled` is set to true, then this
`loki.source.podlogs` component instance opts in to participating in the
@@ -255,7 +238,7 @@ cluster to distribute the load of log collection between all cluster nodes.
If {{< param "PRODUCT_NAME" >}} is _not_ running in clustered mode, then the block is a no-op and
`loki.source.podlogs` collects logs based on every PodLogs resource discovered.

-[using clustering]: {{< relref "../../concepts/clustering.md" >}}
+[using clustering]: ../../../concepts/clustering/

## Exported fields

diff --git a/docs/sources/flow/reference/components/loki.source.syslog.md b/docs/sources/reference/components/loki.source.syslog.md
similarity index 68%
rename from docs/sources/flow/reference/components/loki.source.syslog.md
rename to docs/sources/reference/components/loki.source.syslog.md
index b1b08bd675..19fed5694d 100644
--- a/docs/sources/flow/reference/components/loki.source.syslog.md
+++ b/docs/sources/reference/components/loki.source.syslog.md
@@ -1,10 +1,5 @@
 ---
-aliases:
-- /docs/grafana-cloud/agent/flow/reference/components/loki.source.syslog/
-- /docs/grafana-cloud/monitor-infrastructure/agent/flow/reference/components/loki.source.syslog/
-- /docs/grafana-cloud/monitor-infrastructure/integrations/agent/flow/reference/components/loki.source.syslog/
-- /docs/grafana-cloud/send-data/agent/flow/reference/components/loki.source.syslog/
-canonical: https://grafana.com/docs/agent/latest/flow/reference/components/loki.source.syslog/
+canonical: https://grafana.com/docs/alloy/latest/reference/components/loki.source.syslog/
 description: Learn about loki.source.syslog
 title: loki.source.syslog
---
@@ -18,8 +13,7 @@ with the [RFC5424](https://www.rfc-editor.org/rfc/rfc5424) format.

The component starts a new syslog listener for each of the given `config`
blocks and fans out incoming entries to the list of receivers in `forward_to`.

-Multiple `loki.source.syslog` components can be specified by giving them
-different labels.
+Multiple `loki.source.syslog` components can be specified by giving them different labels.

## Usage

@@ -38,25 +32,25 @@ loki.source.syslog "LABEL" {

`loki.source.syslog` supports the following arguments:

-Name | Type | Description | Default | Required
---------------- | ---------------------- | -------------------- | ------- | --------
-`forward_to` | `list(LogsReceiver)` | List of receivers to send log entries to. | | yes
-`relabel_rules` | `RelabelRules` | Relabeling rules to apply on log entries. | "{}" | no
+Name | Type | Description | Default | Required
+----------------|----------------------|-------------------------------------------|---------|---------
+`forward_to` | `list(LogsReceiver)` | List of receivers to send log entries to. | | yes
+`relabel_rules` | `RelabelRules` | Relabeling rules to apply on log entries. | "{}" | no

The `relabel_rules` field can make use of the `rules` export value from a
[loki.relabel][] component to apply one or more relabeling rules to log entries
before they're forwarded to the list of receivers in `forward_to`.

-[loki.relabel]: {{< relref "./loki.relabel.md" >}}
+[loki.relabel]: ../loki.relabel/

## Blocks

The following blocks are supported inside the definition of
`loki.source.syslog`:

-Hierarchy | Name | Description | Required
---------- | ---- | ----------- | --------
-listener | [listener][] | Configures a listener for IETF Syslog (RFC5424) messages.
| no
+Hierarchy | Name | Description | Required
+----------------------|----------------|------------------------------------------------------------------------------|---------
+listener | [listener][] | Configures a listener for IETF Syslog (RFC5424) messages. | no
 listener > tls_config | [tls_config][] | Configures TLS settings for connecting to the endpoint for TCP connections. | no

The `>` symbol indicates deeper levels of nesting. For example, `config > tls_config`
@@ -75,34 +69,30 @@ The following arguments can be used to configure a `listener`. Only the
`address` field is required and any omitted fields take their default
values.

-Name | Type | Description | Default | Required
------------------------- | ------------- | ----------- | ------- | --------
-`address` | `string` | The `<host:port>` address to listen to for syslog messages. | | yes
-`protocol` | `string` | The protocol to listen to for syslog messages. Must be either `tcp` or `udp`. | `tcp` | no
-`idle_timeout` | `duration` | The idle timeout for tcp connections. | `"120s"` | no
-`label_structured_data` | `bool` | Whether to translate syslog structured data to loki labels. | `false` | no
-`labels` | `map(string)` | The labels to associate with each received syslog record. | `{}` | no
-`use_incoming_timestamp` | `bool` | Whether to set the timestamp to the incoming syslog record timestamp. | `false` | no
-`use_rfc5424_message` | `bool` | Whether to forward the full RFC5424-formatted syslog message. | `false` | no
-`max_message_length` | `int` | The maximum limit to the length of syslog messages. | `8192` | no
+Name | Type | Description | Default | Required
+-------------------------|---------------|---------------------------------------------------------------------------------|----------|---------
+`address` | `string` | The `<host:port>` address to listen to for syslog messages. | | yes
+`protocol` | `string` | The protocol to listen to for syslog messages. Must be either `tcp` or `udp`. | `tcp` | no
+`idle_timeout` | `duration` | The idle timeout for tcp connections. | `"120s"` | no
+`label_structured_data` | `bool` | Whether to translate syslog structured data to loki labels. | `false` | no
+`labels` | `map(string)` | The labels to associate with each received syslog record. | `{}` | no
+`use_incoming_timestamp` | `bool` | Whether to set the timestamp to the incoming syslog record timestamp. | `false` | no
+`use_rfc5424_message` | `bool` | Whether to forward the full RFC5424-formatted syslog message. | `false` | no
+`max_message_length` | `int` | The maximum limit to the length of syslog messages. | `8192` | no

-By default, the component assigns the log entry timestamp as the time it
-was processed.
+By default, the component assigns the log entry timestamp as the time it was processed.

The `labels` map is applied to every message that the component reads.

All header fields from the parsed RFC5424 messages are brought in as
internal labels, prefixed with `__syslog_`.

-If `label_structured_data` is set, structured data in the syslog header is also
-translated to internal labels in the form of
-`__syslog_message_sd_<ID>_<KEY>`. For example, a structured data entry of
-`[example@99999 test="yes"]` becomes the label
-`__syslog_message_sd_example_99999_test` with the value `"yes"`.
+If `label_structured_data` is set, structured data in the syslog header is also translated to internal labels in the form of `__syslog_message_sd_<ID>_<KEY>`.
+For example, a structured data entry of `[example@99999 test="yes"]` becomes the label `__syslog_message_sd_example_99999_test` with the value `"yes"`.

### tls_config block

-{{< docs/shared lookup="flow/reference/components/tls-config-block.md" source="agent" version="" >}}
+{{< docs/shared lookup="reference/components/tls-config-block.md" source="alloy" version="" >}}

## Exported fields

diff --git a/docs/sources/flow/reference/components/loki.source.windowsevent.md b/docs/sources/reference/components/loki.source.windowsevent.md
similarity index 78%
rename from docs/sources/flow/reference/components/loki.source.windowsevent.md
rename to docs/sources/reference/components/loki.source.windowsevent.md
index 522e9e683e..b0b7f4ee4e 100644
--- a/docs/sources/flow/reference/components/loki.source.windowsevent.md
+++ b/docs/sources/reference/components/loki.source.windowsevent.md
@@ -1,21 +1,14 @@
 ---
-aliases:
-- /docs/grafana-cloud/agent/flow/reference/components/loki.source.windowsevent/
-- /docs/grafana-cloud/monitor-infrastructure/agent/flow/reference/components/loki.source.windowsevent/
-- /docs/grafana-cloud/monitor-infrastructure/integrations/agent/flow/reference/components/loki.source.windowsevent/
-- /docs/grafana-cloud/send-data/agent/flow/reference/components/loki.source.windowsevent/
-canonical: https://grafana.com/docs/agent/latest/flow/reference/components/loki.source.windowsevent/
+canonical: https://grafana.com/docs/alloy/latest/reference/components/loki.source.windowsevent/
 description: Learn about loki.source.windowsevent
 title: loki.source.windowsevent
 ---

 # loki.source.windowsevent

-`loki.source.windowsevent` reads events from Windows Event Logs and forwards them to other
-`loki.*` components.
+`loki.source.windowsevent` reads events from Windows Event Logs and forwards them to other `loki.*` components.

-Multiple `loki.source.windowsevent` components can be specified by giving them
-different labels.
+Multiple `loki.source.windowsevent` components can be specified by giving them different labels.

 ## Usage

@@ -44,19 +37,18 @@ Name | Type | Description
 `exclude_event_message` | `bool` | Exclude the human-friendly event message. | `false` | no
 `use_incoming_timestamp` | `bool` | When false, assigns the timestamp at which the log entry was processed. | `false` | no
 `forward_to` | `list(LogsReceiver)` | List of receivers to send log entries to. | | yes
-`labels` | `map(string)` | The labels to associate with incoming logs. | | no
-
-
-> **NOTE**: `eventlog_name` is required if `xpath_query` does not specify the event log.
-> You can define `xpath_query` in [short or XML form](https://docs.microsoft.com/en-us/windows/win32/wes/consuming-events).
-> When using the XML form you can specify `event_log` in the `xpath_query`.
-> If using short form, you must define `eventlog_name`.
+`labels` | `map(string)` | The labels to associate with incoming logs. | | no
+
+{{< admonition type="note" >}}
+`eventlog_name` is required if `xpath_query` does not specify the event log.
+You can define `xpath_query` in [short or XML form](https://docs.microsoft.com/en-us/windows/win32/wes/consuming-events).
+When using the XML form you can specify `event_log` in the `xpath_query`.
+If using short form, you must define `eventlog_name`.
+{{< /admonition >}}

 ## Component health

-`loki.source.windowsevent` is only reported as unhealthy if given an invalid
-configuration.
+`loki.source.windowsevent` is only reported as unhealthy if given an invalid configuration.
## Example diff --git a/docs/sources/flow/reference/components/loki.write.md b/docs/sources/reference/components/loki.write.md similarity index 61% rename from docs/sources/flow/reference/components/loki.write.md rename to docs/sources/reference/components/loki.write.md index bb50817385..3ce6e0554a 100644 --- a/docs/sources/flow/reference/components/loki.write.md +++ b/docs/sources/reference/components/loki.write.md @@ -1,21 +1,14 @@ --- -aliases: -- /docs/grafana-cloud/agent/flow/reference/components/loki.write/ -- /docs/grafana-cloud/monitor-infrastructure/agent/flow/reference/components/loki.write/ -- /docs/grafana-cloud/monitor-infrastructure/integrations/agent/flow/reference/components/loki.write/ -- /docs/grafana-cloud/send-data/agent/flow/reference/components/loki.write/ -canonical: https://grafana.com/docs/agent/latest/flow/reference/components/loki.write/ +canonical: https://grafana.com/docs/alloy/latest/reference/components/loki.write/ description: Learn about loki.write title: loki.write --- # loki.write -`loki.write` receives log entries from other loki components and sends them -over the network using Loki's `logproto` format. +`loki.write` receives log entries from other loki components and sends them over the network using Loki's `logproto` format. -Multiple `loki.write` components can be specified by giving them -different labels. +Multiple `loki.write` components can be specified by giving them different labels. ## Usage @@ -31,25 +24,25 @@ loki.write "LABEL" { `loki.write` supports the following arguments: -Name | Type | Description | Default | Required ------------------ | ------------- | ------------------------------------------------ | ------- | -------- -`max_streams` | `int` | Maximum number of active streams. | 0 (no limit) | no -`external_labels` | `map(string)` | Labels to add to logs sent over the network. | | no +Name | Type | Description | Default | Required +------------------|---------------|----------------------------------------------|--------------|--------- +`max_streams` | `int` | Maximum number of active streams. | 0 (no limit) | no +`external_labels` | `map(string)` | Labels to add to logs sent over the network. | | no ## Blocks The following blocks are supported inside the definition of `loki.write`: -Hierarchy | Block | Description | Required ---------- | ----- | ----------- | -------- -endpoint | [endpoint][] | Location to send logs to. | no -wal | [wal][] | Write-ahead log configuration. | no -endpoint > basic_auth | [basic_auth][] | Configure basic_auth for authenticating to the endpoint. | no -endpoint > authorization | [authorization][] | Configure generic authorization to the endpoint. | no -endpoint > oauth2 | [oauth2][] | Configure OAuth2 for authenticating to the endpoint. | no -endpoint > oauth2 > tls_config | [tls_config][] | Configure TLS settings for connecting to the endpoint. | no -endpoint > tls_config | [tls_config][] | Configure TLS settings for connecting to the endpoint. | no +Hierarchy | Block | Description | Required +-------------------------------|-------------------|----------------------------------------------------------|--------- +endpoint | [endpoint][] | Location to send logs to. | no +wal | [wal][] | Write-ahead log configuration. | no +endpoint > basic_auth | [basic_auth][] | Configure basic_auth for authenticating to the endpoint. | no +endpoint > authorization | [authorization][] | Configure generic authorization to the endpoint. 
| no +endpoint > oauth2 | [oauth2][] | Configure OAuth2 for authenticating to the endpoint. | no +endpoint > oauth2 > tls_config | [tls_config][] | Configure TLS settings for connecting to the endpoint. | no +endpoint > tls_config | [tls_config][] | Configure TLS settings for connecting to the endpoint. | no | endpoint > queue_config | [queue_config][] | When WAL is enabled, configures the queue client. | no | The `>` symbol indicates deeper levels of nesting. For example, `endpoint > @@ -71,27 +64,27 @@ The `endpoint` block describes a single location to send logs to. Multiple The following arguments are supported: -Name | Type | Description | Default | Required ------------------------- | ------------------- | ------------------------------------------------------------- | --------- | -------- -`url` | `string` | Full URL to send logs to. | | yes -`name` | `string` | Optional name to identify this endpoint with. | | no -`headers` | `map(string)` | Extra headers to deliver with the request. | | no -`batch_wait` | `duration` | Maximum amount of time to wait before sending a batch. | `"1s"` | no -`batch_size` | `string` | Maximum batch size of logs to accumulate before sending. | `"1MiB"` | no -`remote_timeout` | `duration` | Timeout for requests made to the URL. | `"10s"` | no -`tenant_id` | `string` | The tenant ID used by default to push logs. | | no -`min_backoff_period` | `duration` | Initial backoff time between retries. | `"500ms"` | no -`max_backoff_period` | `duration` | Maximum backoff time between retries. | `"5m"` | no -`max_backoff_retries` | `int` | Maximum number of retries. | 10 | no -`retry_on_http_429` | `bool` | Retry when an HTTP 429 status code is received. | `true` | no -`bearer_token_file` | `string` | File containing a bearer token to authenticate with. | | no -`bearer_token` | `secret` | Bearer token to authenticate with. | | no -`enable_http2` | `bool` | Whether HTTP2 is supported for requests. | `true` | no -`follow_redirects` | `bool` | Whether redirects returned by the server should be followed. | `true` | no -`proxy_url` | `string` | HTTP proxy to send requests through. | | no -`no_proxy` | `string` | Comma-separated list of IP addresses, CIDR notations, and domain names to exclude from proxying. | | no -`proxy_from_environment` | `bool` | Use the proxy URL indicated by environment variables. | `false` | no -`proxy_connect_header` | `map(list(secret))` | Specifies headers to send to proxies during CONNECT requests. | | no +Name | Type | Description | Default | Required +-------------------------|---------------------|--------------------------------------------------------------------------------------------------|-----------|--------- +`url` | `string` | Full URL to send logs to. | | yes +`name` | `string` | Optional name to identify this endpoint with. | | no +`headers` | `map(string)` | Extra headers to deliver with the request. | | no +`batch_wait` | `duration` | Maximum amount of time to wait before sending a batch. | `"1s"` | no +`batch_size` | `string` | Maximum batch size of logs to accumulate before sending. | `"1MiB"` | no +`remote_timeout` | `duration` | Timeout for requests made to the URL. | `"10s"` | no +`tenant_id` | `string` | The tenant ID used by default to push logs. | | no +`min_backoff_period` | `duration` | Initial backoff time between retries. | `"500ms"` | no +`max_backoff_period` | `duration` | Maximum backoff time between retries. | `"5m"` | no +`max_backoff_retries` | `int` | Maximum number of retries. 
| 10 | no +`retry_on_http_429` | `bool` | Retry when an HTTP 429 status code is received. | `true` | no +`bearer_token_file` | `string` | File containing a bearer token to authenticate with. | | no +`bearer_token` | `secret` | Bearer token to authenticate with. | | no +`enable_http2` | `bool` | Whether HTTP2 is supported for requests. | `true` | no +`follow_redirects` | `bool` | Whether redirects returned by the server should be followed. | `true` | no +`proxy_url` | `string` | HTTP proxy to send requests through. | | no +`no_proxy` | `string` | Comma-separated list of IP addresses, CIDR notations, and domain names to exclude from proxying. | | no +`proxy_from_environment` | `bool` | Use the proxy URL indicated by environment variables. | `false` | no +`proxy_connect_header` | `map(list(secret))` | Specifies headers to send to proxies during CONNECT requests. | | no At most, one of the following can be provided: - [`bearer_token` argument](#endpoint-block). @@ -100,7 +93,7 @@ Name | Type | Description - [`authorization` block][authorization]. - [`oauth2` block][oauth2]. -{{< docs/shared lookup="flow/reference/components/http-client-proxy-config-description.md" source="agent" version="" >}} +{{< docs/shared lookup="reference/components/http-client-proxy-config-description.md" source="alloy" version="" >}} If no `tenant_id` is provided, the component assumes that the Loki instance at `endpoint` is running in single-tenant mode and no X-Scope-OrgID header is @@ -122,19 +115,19 @@ enabled, the retry mechanism will be governed by the backoff configuration speci ### basic_auth block -{{< docs/shared lookup="flow/reference/components/basic-auth-block.md" source="agent" version="" >}} +{{< docs/shared lookup="reference/components/basic-auth-block.md" source="alloy" version="" >}} ### authorization block -{{< docs/shared lookup="flow/reference/components/authorization-block.md" source="agent" version="" >}} +{{< docs/shared lookup="reference/components/authorization-block.md" source="alloy" version="" >}} ### oauth2 block -{{< docs/shared lookup="flow/reference/components/oauth2-block.md" source="agent" version="" >}} +{{< docs/shared lookup="reference/components/oauth2-block.md" source="alloy" version="" >}} ### tls_config block -{{< docs/shared lookup="flow/reference/components/tls-config-block.md" source="agent" version="" >}} +{{< docs/shared lookup="reference/components/tls-config-block.md" source="alloy" version="" >}} ### queue_config block (experimental) @@ -143,9 +136,9 @@ underlying client queues batches of logs to be sent to Loki. The following arguments are supported: -| Name | Type | Description | Default | Required | -| --------------- | ---------- | -------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | ------- | -------- | -| `capacity` | `string` | Controls the size of the underlying send queue buffer. This setting should be considered a worst-case scenario of memory consumption, in which all enqueued batches are full. | `10MiB` | no | +| Name | Type | Description | Default | Required | +|-----------------|------------|---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------|---------|----------| +| `capacity` | `string` | Controls the size of the underlying send queue buffer. 
This setting should be considered a worst-case scenario of memory consumption, in which all enqueued batches are full. | `10MiB` | no | | `drain_timeout` | `duration` | Configures the maximum time the client can take to drain the send queue upon shutdown. During that time, it will enqueue pending batches and drain the send queue sending each. | `"1m"` | no | ### wal block (experimental) @@ -165,28 +158,27 @@ storage path {{< param "PRODUCT_NAME" >}} is configured to use. See the The following arguments are supported: -Name | Type | Description | Default | Required ---------------------- |------------|--------------------------------------------------------------------------------------------------------------------|-----------| -------- -`enabled` | `bool` | Whether to enable the WAL. | false | no -`max_segment_age` | `duration` | Maximum time a WAL segment should be allowed to live. Segments older than this setting will be eventually deleted. | `"1h"` | no -`min_read_frequency` | `duration` | Minimum backoff time in the backup read mechanism. | `"250ms"` | no -`max_read_frequency` | `duration` | Maximum backoff time in the backup read mechanism. | `"1s"` | no -`drain_timeout` | `duration` | Maximum time the WAL drain procedure can take, before being forcefully stopped. | `"30s"` | no +Name | Type | Description | Default | Required +---------------------|------------|--------------------------------------------------------------------------------------------------------------------|-----------|--------- +`enabled` | `bool` | Whether to enable the WAL. | false | no +`max_segment_age` | `duration` | Maximum time a WAL segment should be allowed to live. Segments older than this setting will be eventually deleted. | `"1h"` | no +`min_read_frequency` | `duration` | Minimum backoff time in the backup read mechanism. | `"250ms"` | no +`max_read_frequency` | `duration` | Maximum backoff time in the backup read mechanism. | `"1s"` | no +`drain_timeout` | `duration` | Maximum time the WAL drain procedure can take, before being forcefully stopped. | `"30s"` | no -[run]: {{< relref "../cli/run.md" >}} +[run]: ../../cli/run/ ## Exported fields The following fields are exported and can be referenced by other components: -Name | Type | Description ----- | ---- | ----------- +Name | Type | Description +-----------|----------------|-------------------------------------------------------------- `receiver` | `LogsReceiver` | A value that other components can use to send log entries to. ## Component health -`loki.write` is only reported as unhealthy if given an invalid -configuration. +`loki.write` is only reported as unhealthy if given an invalid configuration. 
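+Tying together the `endpoint` and `wal` options from the tables above, a minimal sketch might look like the following. The URL, tenant ID, and label values are placeholders:
+
+```river
+loki.write "default" {
+  endpoint {
+    url       = "https://loki.example.com/loki/api/v1/push"
+    tenant_id = "tenant-1"
+
+    // Batching settings, shown here with their default values.
+    batch_wait = "1s"
+    batch_size = "1MiB"
+  }
+
+  // Experimental write-ahead log for extra robustness.
+  wal {
+    enabled         = true
+    max_segment_age = "1h"
+  }
+
+  external_labels = { cluster = "dev" }
+}
+```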
## Debug information diff --git a/docs/sources/flow/reference/components/mimir.rules.kubernetes.md b/docs/sources/reference/components/mimir.rules.kubernetes.md similarity index 81% rename from docs/sources/flow/reference/components/mimir.rules.kubernetes.md rename to docs/sources/reference/components/mimir.rules.kubernetes.md index 9a8672005b..7451a5f8ae 100644 --- a/docs/sources/flow/reference/components/mimir.rules.kubernetes.md +++ b/docs/sources/reference/components/mimir.rules.kubernetes.md @@ -1,10 +1,5 @@ --- -aliases: -- /docs/grafana-cloud/agent/flow/reference/components/mimir.rules.kubernetes/ -- /docs/grafana-cloud/monitor-infrastructure/agent/flow/reference/components/mimir.rules.kubernetes/ -- /docs/grafana-cloud/monitor-infrastructure/integrations/agent/flow/reference/components/mimir.rules.kubernetes/ -- /docs/grafana-cloud/send-data/agent/flow/reference/components/mimir.rules.kubernetes/ -canonical: https://grafana.com/docs/agent/latest/flow/reference/components/mimir.rules.kubernetes/ +canonical: https://grafana.com/docs/alloy/latest/reference/components/mimir.rules.kubernetes/ description: Learn about mimir.rules.kubernetes labels: stage: beta @@ -13,7 +8,7 @@ title: mimir.rules.kubernetes # mimir.rules.kubernetes -{{< docs/shared lookup="flow/stability/beta.md" source="agent" version="" >}} +{{< docs/shared lookup="stability/beta.md" source="alloy" version="" >}} `mimir.rules.kubernetes` discovers `PrometheusRule` Kubernetes resources and loads them into a Mimir instance. @@ -47,22 +42,22 @@ mimir.rules.kubernetes "LABEL" { `mimir.rules.kubernetes` supports the following arguments: -Name | Type | Description | Default | Required ------------------------- | ------------------- | --------------------------------------------------------------- | ------------- | -------- -`address` | `string` | URL of the Mimir ruler. | | yes -`tenant_id` | `string` | Mimir tenant ID. | | no -`use_legacy_routes` | `bool` | Whether to use [deprecated][gem-2_2] ruler API endpoints. | false | no -`prometheus_http_prefix` | `string` | Path prefix for [Mimir's Prometheus endpoint][gem-path-prefix]. | `/prometheus` | no -`sync_interval` | `duration` | Amount of time between reconciliations with Mimir. | "30s" | no -`mimir_namespace_prefix` | `string` | Prefix used to differentiate multiple {{< param "PRODUCT_NAME" >}} deployments. | "agent" | no -`bearer_token_file` | `string` | File containing a bearer token to authenticate with. | | no -`bearer_token` | `secret` | Bearer token to authenticate with. | | no -`enable_http2` | `bool` | Whether HTTP2 is supported for requests. | `true` | no -`follow_redirects` | `bool` | Whether redirects returned by the server should be followed. | `true` | no -`proxy_url` | `string` | HTTP proxy to send requests through. | | no -`no_proxy` | `string` | Comma-separated list of IP addresses, CIDR notations, and domain names to exclude from proxying. | | no -`proxy_from_environment` | `bool` | Use the proxy URL indicated by environment variables. | `false` | no -`proxy_connect_header` | `map(list(secret))` | Specifies headers to send to proxies during CONNECT requests. | | no +Name | Type | Description | Default | Required +-------------------------|---------------------|--------------------------------------------------------------------------------------------------|---------------|--------- +`address` | `string` | URL of the Mimir ruler. | | yes +`tenant_id` | `string` | Mimir tenant ID. 
| | no +`use_legacy_routes` | `bool` | Whether to use [deprecated][gem-2_2] ruler API endpoints. | false | no +`prometheus_http_prefix` | `string` | Path prefix for [Mimir's Prometheus endpoint][gem-path-prefix]. | `/prometheus` | no +`sync_interval` | `duration` | Amount of time between reconciliations with Mimir. | "30s" | no +`mimir_namespace_prefix` | `string` | Prefix used to differentiate multiple {{< param "PRODUCT_NAME" >}} deployments. | "agent" | no +`bearer_token_file` | `string` | File containing a bearer token to authenticate with. | | no +`bearer_token` | `secret` | Bearer token to authenticate with. | | no +`enable_http2` | `bool` | Whether HTTP2 is supported for requests. | `true` | no +`follow_redirects` | `bool` | Whether redirects returned by the server should be followed. | `true` | no +`proxy_url` | `string` | HTTP proxy to send requests through. | | no +`no_proxy` | `string` | Comma-separated list of IP addresses, CIDR notations, and domain names to exclude from proxying. | | no +`proxy_from_environment` | `bool` | Use the proxy URL indicated by environment variables. | `false` | no +`proxy_connect_header` | `map(list(secret))` | Specifies headers to send to proxies during CONNECT requests. | | no At most, one of the following can be provided: - [`bearer_token` argument](#arguments). @@ -73,7 +68,7 @@ Name | Type | Description [arguments]: #arguments -{{< docs/shared lookup="flow/reference/components/http-client-proxy-config-description.md" source="agent" version="" >}} +{{< docs/shared lookup="reference/components/http-client-proxy-config-description.md" source="alloy" version="" >}} If no `tenant_id` is provided, the component assumes that the Mimir instance at `address` is running in single-tenant mode and no `X-Scope-OrgID` header is sent. 
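+
+As a brief, hedged sketch of the arguments above, a configuration might look like the following. The ruler address, tenant ID, and credentials are placeholders, and the `basic_auth` block used here is documented later on this page:
+
+```river
+mimir.rules.kubernetes "primary" {
+  address   = "https://mimir.example.com"
+  tenant_id = "team-a"
+
+  // Reconcile rules once per minute instead of the default "30s".
+  sync_interval          = "1m"
+  mimir_namespace_prefix = "alloy"
+
+  basic_auth {
+    username = "ruler"
+    password = env("MIMIR_PASSWORD")
+  }
+}
+```
+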
@@ -157,19 +152,19 @@ The `values` argument must not be provided when `operator` is set to `"Exists"` ### basic_auth block -{{< docs/shared lookup="flow/reference/components/basic-auth-block.md" source="agent" version="" >}} +{{< docs/shared lookup="reference/components/basic-auth-block.md" source="alloy" version="" >}} ### authorization block -{{< docs/shared lookup="flow/reference/components/authorization-block.md" source="agent" version="" >}} +{{< docs/shared lookup="reference/components/authorization-block.md" source="alloy" version="" >}} ### oauth2 block -{{< docs/shared lookup="flow/reference/components/oauth2-block.md" source="agent" version="" >}} +{{< docs/shared lookup="reference/components/oauth2-block.md" source="alloy" version="" >}} ### tls_config block -{{< docs/shared lookup="flow/reference/components/tls-config-block.md" source="agent" version="" >}} +{{< docs/shared lookup="reference/components/tls-config-block.md" source="alloy" version="" >}} ## Exported fields diff --git a/docs/sources/flow/reference/components/module.file.md b/docs/sources/reference/components/module.file.md similarity index 73% rename from docs/sources/flow/reference/components/module.file.md rename to docs/sources/reference/components/module.file.md index 0e4b8b19d2..78dc24e8ad 100644 --- a/docs/sources/flow/reference/components/module.file.md +++ b/docs/sources/reference/components/module.file.md @@ -1,10 +1,5 @@ --- -aliases: -- /docs/grafana-cloud/agent/flow/reference/components/module.file/ -- /docs/grafana-cloud/monitor-infrastructure/agent/flow/reference/components/module.file/ -- /docs/grafana-cloud/monitor-infrastructure/integrations/agent/flow/reference/components/module.file/ -- /docs/grafana-cloud/send-data/agent/flow/reference/components/module.file/ -canonical: https://grafana.com/docs/agent/latest/flow/reference/components/module.file/ +canonical: https://grafana.com/docs/alloy/latest/reference/components/module.file/ description: Learn about module.file labels: stage: beta @@ -13,7 +8,7 @@ title: module.file # module.file -{{< docs/shared lookup="flow/stability/beta.md" source="agent" version="" >}} +{{< docs/shared lookup="stability/beta.md" source="alloy" version="" >}} `module.file` is a *module loader* component. A module loader is a {{< param "PRODUCT_NAME" >}} component which retrieves a [module][] and runs the components defined inside of it. @@ -22,9 +17,9 @@ component which retrieves a [module][] and runs the components defined inside of a [local.file][] component. This allows a single module loader to do the equivalence of using the more generic [module.string][] paired with a [local.file][] component. 
-[module]: {{< relref "../../concepts/modules.md" >}} -[local.file]: {{< relref "./local.file.md" >}} -[module.string]: {{< relref "./module.string.md" >}} +[module]: ../../../concepts/modules/ +[local.file]: ../local.file/ +[module.string]: ../module.string/ ## Usage @@ -44,23 +39,23 @@ module.file "LABEL" { The following arguments are supported: -Name | Type | Description | Default | Required ----- | ---- | ----------- | ------- | -------- -`filename` | `string` | Path of the file on disk to watch | | yes +Name | Type | Description | Default | Required +-----------------|------------|----------------------------------------------------|--------------|--------- +`filename` | `string` | Path of the file on disk to watch | | yes `detector` | `string` | Which file change detector to use (fsnotify, poll) | `"fsnotify"` | no -`poll_frequency` | `duration` | How often to poll for file changes | `"1m"` | no -`is_secret` | `bool` | Marks the file as containing a [secret][] | `false` | no +`poll_frequency` | `duration` | How often to poll for file changes | `"1m"` | no +`is_secret` | `bool` | Marks the file as containing a [secret][] | `false` | no -[secret]: {{< relref "../../concepts/config-language/expressions/types_and_values.md#secrets" >}} +[secret]: ../../../concepts/config-language/expressions/types_and_values/#secrets -{{< docs/shared lookup="flow/reference/components/local-file-arguments-text.md" source="agent" version="" >}} +{{< docs/shared lookup="reference/components/local-file-arguments-text.md" source="alloy" version="" >}} ## Blocks The following blocks are supported inside the definition of `module.file`: -Hierarchy | Block | Description | Required ----------------- | ---------- | ----------- | -------- +Hierarchy | Block | Description | Required +----------|---------------|----------------------------------|--------- arguments | [arguments][] | Arguments to pass to the module. | no [arguments]: #arguments-block @@ -79,23 +74,22 @@ The attributes provided in the `arguments` block are validated based on the * Attributes in the `argument` block of the module loader will be rejected if they are not defined in the module source. -[argument blocks]: {{< relref "../config-blocks/argument.md" >}} +[argument blocks]: ../../config-blocks/argument/ ## Exported fields The following fields are exported and can be referenced by other components: -Name | Type | Description ----- | ---- | ----------- +Name | Type | Description +----------|------------|---------------------------------- `exports` | `map(any)` | The exports of the Module loader. `exports` exposes the `export` config block inside a module. It can be accessed from the parent config via `module.file.LABEL.exports.EXPORT_LABEL`. -Values in `exports` correspond to [export blocks][] defined in the module -source. +Values in `exports` correspond to [export blocks][] defined in the module source. 
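+
+As a hedged sketch of this access path, the example below loads a module from disk, passes it one argument, and forwards log entries to one of its exports. The file path, the `tenant` argument, and the `receiver` export are hypothetical and must be declared inside the module source:
+
+```river
+module.file "logs" {
+  filename = "/etc/alloy/modules/logs.river"
+
+  arguments {
+    // Hypothetical: the module must declare "tenant" in an argument block.
+    tenant = "team-a"
+  }
+}
+
+loki.source.file "app" {
+  targets    = [{"__path__" = "/var/log/app/*.log"}]
+  // Hypothetical: the module must declare "receiver" in an export block.
+  forward_to = [module.file.logs.exports.receiver]
+}
+```
+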
-[export blocks]: {{< relref "../config-blocks/export.md" >}} +[export blocks]: ../../config-blocks/export/ ## Component health diff --git a/docs/sources/flow/reference/components/module.git.md b/docs/sources/reference/components/module.git.md similarity index 68% rename from docs/sources/flow/reference/components/module.git.md rename to docs/sources/reference/components/module.git.md index 44bdee36a0..a0a821a008 100644 --- a/docs/sources/flow/reference/components/module.git.md +++ b/docs/sources/reference/components/module.git.md @@ -1,10 +1,5 @@ --- -aliases: -- /docs/grafana-cloud/agent/flow/reference/components/module.git/ -- /docs/grafana-cloud/monitor-infrastructure/agent/flow/reference/components/module.git/ -- /docs/grafana-cloud/monitor-infrastructure/integrations/agent/flow/reference/components/module.git/ -- /docs/grafana-cloud/send-data/agent/flow/reference/components/module.git/ -canonical: https://grafana.com/docs/agent/latest/flow/reference/components/module.git/ +canonical: https://grafana.com/docs/alloy/latest/reference/components/module.git/ description: Learn about module.git labels: stage: beta @@ -13,14 +8,14 @@ title: module.git # module.git -{{< docs/shared lookup="flow/stability/beta.md" source="agent" version="" >}} +{{< docs/shared lookup="stability/beta.md" source="alloy" version="" >}} `module.git` is a *module loader* component. A module loader is a {{< param "PRODUCT_NAME" >}} component which retrieves a [module][] and runs the components defined inside of it. `module.git` retrieves a module source from a file in a Git repository. -[module]: {{< relref "../../concepts/modules.md" >}} +[module]: ../../../concepts/modules/ ## Usage @@ -41,12 +36,12 @@ module.git "LABEL" { The following arguments are supported: -Name | Type | Description | Default | Required ----- | ---- | ----------- | ------- | -------- -`repository` | `string` | The Git repository address to retrieve the module from. | | yes -`revision` | `string` | The Git revision to retrieve the module from. | `"HEAD"` | no -`path` | `string` | The path in the repository where the module is stored. | | yes -`pull_frequency` | `duration` | The frequency to pull the repository for updates. | `"60s"` | no +Name | Type | Description | Default | Required +-----------------|------------|---------------------------------------------------------|----------|--------- +`repository` | `string` | The Git repository address to retrieve the module from. | | yes +`revision` | `string` | The Git revision to retrieve the module from. | `"HEAD"` | no +`path` | `string` | The path in the repository where the module is stored. | | yes +`pull_frequency` | `duration` | The frequency to pull the repository for updates. | `"60s"` | no The `repository` attribute must be set to a repository address that would be recognized by Git with a `git clone REPOSITORY_ADDRESS` command, such as @@ -66,11 +61,11 @@ the retrieved changes. The following blocks are supported inside the definition of `module.git`: -Hierarchy | Block | Description | Required ----------------- | ---------- | ----------- | -------- +Hierarchy | Block | Description | Required +-----------|----------------|------------------------------------------------------|--------- basic_auth | [basic_auth][] | Configure basic_auth for authenticating to the repo. | no -ssh_key | [ssh_key][] | Configure a SSH Key for authenticating to the repo. | no -arguments | [arguments][] | Arguments to pass to the module. 
| no
+ssh_key | [ssh_key][] | Configure an SSH key for authenticating to the repo. | no
+arguments | [arguments][] | Arguments to pass to the module. | no
 
 [basic_auth]: #basic_auth-block
 [ssh_key]: #ssh_key-block
@@ -78,16 +73,16 @@ arguments | [arguments][] | Arguments to pass to the module. | no
 
 ### basic_auth block
 
-{{< docs/shared lookup="flow/reference/components/basic-auth-block.md" source="agent" version="" >}}
+{{< docs/shared lookup="reference/components/basic-auth-block.md" source="alloy" version="" >}}
 
 ### ssh_key block
 
-Name | Type | Description | Default | Required
---- | ---- | ----------- | ------- | --------
-`username` | `string` | SSH username. | | yes
-`key` | `secret` | SSH private key | | no
-`key_file` | `string` | SSH private key path. | | no
-`passphrase` | `secret` | Passphrase for SSH key if needed. | | no
+Name | Type | Description | Default | Required
+-------------|----------|-----------------------------------|---------|---------
+`username` | `string` | SSH username. | | yes
+`key` | `secret` | SSH private key. | | no
+`key_file` | `string` | SSH private key path. | | no
+`passphrase` | `secret` | Passphrase for SSH key if needed. | | no
 
 ### arguments block
 
@@ -103,14 +98,14 @@ The attributes provided in the `arguments` block are validated based on the
 * Attributes in the `argument` block of the module loader will be rejected if
   they are not defined in the module source.
 
-[argument blocks]: {{< relref "../config-blocks/argument.md" >}}
+[argument blocks]: ../../config-blocks/argument/
 
 ## Exported fields
 
 The following fields are exported and can be referenced by other components:
 
-Name | Type | Description
---- | ---- | -----------
+Name | Type | Description
+----------|------------|----------------------------------
 `exports` | `map(any)` | The exports of the Module loader.
 
 `exports` exposes the `export` config block inside a module. It can be accessed
@@ -119,7 +114,7 @@ from the parent config via `module.git.COMPONENT_LABEL.exports.EXPORT_LABEL`.
 
 Values in `exports` correspond to [export blocks][] defined in the module source.
 
-[export blocks]: {{< relref "../config-blocks/export.md" >}}
+[export blocks]: ../../config-blocks/export/
 
 ## Component health
 
diff --git a/docs/sources/flow/reference/components/module.http.md b/docs/sources/reference/components/module.http.md
similarity index 69%
rename from docs/sources/flow/reference/components/module.http.md
rename to docs/sources/reference/components/module.http.md
index 24e140f794..b0ccdf67b6 100644
--- a/docs/sources/flow/reference/components/module.http.md
+++ b/docs/sources/reference/components/module.http.md
@@ -1,10 +1,5 @@
 ---
-aliases:
-- /docs/grafana-cloud/agent/flow/reference/components/module.http/
-- /docs/grafana-cloud/monitor-infrastructure/agent/flow/reference/components/module.http/
-- /docs/grafana-cloud/monitor-infrastructure/integrations/agent/flow/reference/components/module.http/
-- /docs/grafana-cloud/send-data/agent/flow/reference/components/module.http/
-canonical: https://grafana.com/docs/agent/latest/flow/reference/components/module.http/
+canonical: https://grafana.com/docs/alloy/latest/reference/components/module.http/
 description: Learn about module.http
 labels:
   stage: beta
@@ -13,7 +8,7 @@ title: module.http
 
 # module.http
 
-{{< docs/shared lookup="flow/stability/beta.md" source="agent" version="" >}}
+{{< docs/shared lookup="stability/beta.md" source="alloy" version="" >}}
 
 `module.http` is a [module loader][] component.
 
 `module.http` retrieves a module source from an HTTP server.
This allows you to use a single module loader, rather than a `remote.http` component paired with a [module.string][] component. -[module]: {{< relref "../../concepts/modules.md" >}} -[remote.http]: {{< relref "./remote.http.md" >}} -[module.string]: {{< relref "./module.string.md" >}} -[module loader]: {{< relref "../../concepts/modules.md#module-loaders" >}} +[module]: ../../../concepts/modules/ +[remote.http]: ../remote.http/ +[module.string]: ../module.string/ +[module loader]: ../../../concepts/modules/#module-loaders ## Usage @@ -43,23 +38,23 @@ module.http "LABEL" { The following arguments are supported: -Name | Type | Description | Default | Required ----- | ---- | ----------- | ------- | -------- -`url` | `string` | URL to poll. | | yes -`method` | `string` | Define HTTP method for the request | `"GET"` | no -`headers` | `map(string)` | Custom headers for the request. | `{}` | no -`poll_frequency` | `duration` | Frequency to poll the URL. | `"1m"` | no -`poll_timeout` | `duration` | Timeout when polling the URL. | `"10s"` | no -`is_secret` | `bool` | Whether the response body should be treated as a secret. | false | no +Name | Type | Description | Default | Required +-----------------|---------------|--------------------------------------------------------------|---------|--------- +`url` | `string` | URL to poll. | | yes +`method` | `string` | Define HTTP method for the request | `"GET"` | no +`headers` | `map(string)` | Custom headers for the request. | `{}` | no +`poll_frequency` | `duration` | Frequency to poll the URL. | `"1m"` | no +`poll_timeout` | `duration` | Timeout when polling the URL. | `"10s"` | no +`is_secret` | `bool` | Whether the response body should be treated as a [secret][]. | false | no -[secret]: {{< relref "../../concepts/config-language/expressions/types_and_values.md#secrets" >}} +[secret]: ../../../concepts/config-language/expressions/types_and_values/#secrets ## Blocks The following blocks are supported inside the definition of `module.http`: -Hierarchy | Block | Description | Required ----------------- | ---------- | ----------- | -------- +Hierarchy | Block | Description | Required +----------|---------------|----------------------------------|--------- arguments | [arguments][] | Arguments to pass to the module. | no [arguments]: #arguments-block @@ -78,14 +73,14 @@ The attributes provided in the `arguments` block are validated based on the * Attributes in the `argument` block of the module loader are rejected if they are not defined in the module source. -[argument blocks]: {{< relref "../config-blocks/argument.md" >}} +[argument blocks]: ../../config-blocks/argument/ ## Exported fields The following fields are exported and can be referenced by other components: -Name | Type | Description ----- | ---- | ----------- +Name | Type | Description +----------|------------|---------------------------------- `exports` | `map(any)` | The exports of the Module loader. `exports` exposes the `export` config block inside a module. It can be accessed @@ -94,7 +89,7 @@ from the parent config via `module.http.LABEL.exports.EXPORT_LABEL`. Values in `exports` correspond to [export blocks][] defined in the module source. 
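+
+As a hedged sketch of this access path for `module.http`, the example below polls a module from a placeholder URL and references one of its exports. The URL and the `receiver` export are hypothetical:
+
+```river
+module.http "remote" {
+  url            = "https://config.example.com/modules/metrics.river"
+  poll_frequency = "5m"
+}
+
+prometheus.scrape "app" {
+  targets    = [{"__address__" = "127.0.0.1:8080"}]
+  // Hypothetical: the loaded module must declare "receiver" in an export block.
+  forward_to = [module.http.remote.exports.receiver]
+}
+```
+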
-[export blocks]: {{< relref "../config-blocks/export.md" >}} +[export blocks]: ../../config-blocks/export/ ## Component health diff --git a/docs/sources/flow/reference/components/module.string.md b/docs/sources/reference/components/module.string.md similarity index 76% rename from docs/sources/flow/reference/components/module.string.md rename to docs/sources/reference/components/module.string.md index ef8c5e0b88..ee4fbd2a8d 100644 --- a/docs/sources/flow/reference/components/module.string.md +++ b/docs/sources/reference/components/module.string.md @@ -1,10 +1,5 @@ --- -aliases: -- /docs/grafana-cloud/agent/flow/reference/components/module.string/ -- /docs/grafana-cloud/monitor-infrastructure/agent/flow/reference/components/module.string/ -- /docs/grafana-cloud/monitor-infrastructure/integrations/agent/flow/reference/components/module.string/ -- /docs/grafana-cloud/send-data/agent/flow/reference/components/module.string/ -canonical: https://grafana.com/docs/agent/latest/flow/reference/components/module.string/ +canonical: https://grafana.com/docs/alloy/latest/reference/components/module.string/ description: Learn about module.string labels: stage: beta @@ -13,12 +8,12 @@ title: module.string # module.string -{{< docs/shared lookup="flow/stability/beta.md" source="agent" version="" >}} +{{< docs/shared lookup="stability/beta.md" source="alloy" version="" >}} `module.string` is a *module loader* component. A module loader is a {{< param "PRODUCT_NAME" >}} component which retrieves a [module][] and runs the components defined inside of it. -[module]: {{< relref "../../concepts/modules.md" >}} +[module]: ../../../concepts/modules/ ## Usage @@ -38,9 +33,9 @@ module.string "LABEL" { The following arguments are supported: -Name | Type | Description | Default | Required ----- | ---- | ----------- | ------- | -------- -`content` | `secret` or `string` | The contents of the module to load as a secret or string. | | yes +Name | Type | Description | Default | Required +----------|----------------------|-----------------------------------------------------------|---------|--------- +`content` | `secret` or `string` | The contents of the module to load as a secret or string. | | yes `content` is a string that contains the configuration of the module to load. `content` is typically loaded by using the exports of another component. For example, @@ -53,8 +48,8 @@ Name | Type | Description | Default | Required The following blocks are supported inside the definition of `module.string`: -Hierarchy | Block | Description | Required ----------------- | ---------- | ----------- | -------- +Hierarchy | Block | Description | Required +----------|---------------|----------------------------------|--------- arguments | [arguments][] | Arguments to pass to the module. | no [arguments]: #arguments-block @@ -73,14 +68,14 @@ The attributes provided in the `arguments` block are validated based on the * Attributes in the `argument` block of the module loader will be rejected if they are not defined in the module source. -[argument blocks]: {{< relref "../config-blocks/argument.md" >}} +[argument blocks]: ../../config-blocks/argument/ ## Exported fields The following fields are exported and can be referenced by other components: -Name | Type | Description ----- | ---- | ----------- +Name | Type | Description +----------|------------|---------------------------------- `exports` | `map(any)` | The exports of the Module loader. `exports` exposes the `export` config block inside a module. 
It can be accessed @@ -89,7 +84,7 @@ from the parent config via `module.string.LABEL.exports.EXPORT_LABEL`. Values in `exports` correspond to [export blocks][] defined in the module source. -[export blocks]: {{< relref "../config-blocks/export.md" >}} +[export blocks]: ../../config-blocks/export/ ## Component health diff --git a/docs/sources/flow/reference/components/otelcol.auth.basic.md b/docs/sources/reference/components/otelcol.auth.basic.md similarity index 69% rename from docs/sources/flow/reference/components/otelcol.auth.basic.md rename to docs/sources/reference/components/otelcol.auth.basic.md index 885eb53f09..97dbaf0e08 100644 --- a/docs/sources/flow/reference/components/otelcol.auth.basic.md +++ b/docs/sources/reference/components/otelcol.auth.basic.md @@ -1,10 +1,5 @@ --- -aliases: -- /docs/grafana-cloud/agent/flow/reference/components/otelcol.auth.basic/ -- /docs/grafana-cloud/monitor-infrastructure/agent/flow/reference/components/otelcol.auth.basic/ -- /docs/grafana-cloud/monitor-infrastructure/integrations/agent/flow/reference/components/otelcol.auth.basic/ -- /docs/grafana-cloud/send-data/agent/flow/reference/components/otelcol.auth.basic/ -canonical: https://grafana.com/docs/agent/latest/flow/reference/components/otelcol.auth.basic/ +canonical: https://grafana.com/docs/alloy/latest/reference/components/otelcol.auth.basic/ description: Learn about otelcol.auth.basic title: otelcol.auth.basic --- @@ -36,17 +31,17 @@ otelcol.auth.basic "LABEL" { `otelcol.auth.basic` supports the following arguments: -Name | Type | Description | Default | Required ----- | ---- | ----------- | ------- | -------- -`username` | `string` | Username to use for basic authentication requests. | | yes -`password` | `secret` | Password to use for basic authentication requests. | | yes +Name | Type | Description | Default | Required +-----------|----------|----------------------------------------------------|---------|--------- +`username` | `string` | Username to use for basic authentication requests. | | yes +`password` | `secret` | Password to use for basic authentication requests. | | yes ## Exported fields The following fields are exported and can be referenced by other components: -Name | Type | Description ----- | ---- | ----------- +Name | Type | Description +----------|----------------------------|---------------------------------------------------------------- `handler` | `capsule(otelcol.Handler)` | A value that other components can use to authenticate requests. 
## Component health @@ -76,4 +71,4 @@ otelcol.auth.basic "creds" { } ``` -[otelcol.exporter.otlp]: {{< relref "./otelcol.exporter.otlp.md" >}} +[otelcol.exporter.otlp]: ../otelcol.exporter.otlp/ diff --git a/docs/sources/flow/reference/components/otelcol.auth.bearer.md b/docs/sources/reference/components/otelcol.auth.bearer.md similarity index 62% rename from docs/sources/flow/reference/components/otelcol.auth.bearer.md rename to docs/sources/reference/components/otelcol.auth.bearer.md index 718789603b..1bdcea1885 100644 --- a/docs/sources/flow/reference/components/otelcol.auth.bearer.md +++ b/docs/sources/reference/components/otelcol.auth.bearer.md @@ -1,10 +1,5 @@ --- -aliases: -- /docs/grafana-cloud/agent/flow/reference/components/otelcol.auth.bearer/ -- /docs/grafana-cloud/monitor-infrastructure/agent/flow/reference/components/otelcol.auth.bearer/ -- /docs/grafana-cloud/monitor-infrastructure/integrations/agent/flow/reference/components/otelcol.auth.bearer/ -- /docs/grafana-cloud/send-data/agent/flow/reference/components/otelcol.auth.bearer/ -canonical: https://grafana.com/docs/agent/latest/flow/reference/components/otelcol.auth.bearer/ +canonical: https://grafana.com/docs/alloy/latest/reference/components/otelcol.auth.bearer/ description: Learn about otelcol.auth.bearer title: otelcol.auth.bearer --- @@ -16,12 +11,12 @@ components to authenticate requests using bearer token authentication. This extension supports both server and client authentication. -> **NOTE**: `otelcol.auth.bearer` is a wrapper over the upstream OpenTelemetry -> Collector `bearertokenauth` extension. Bug reports or feature requests will -> be redirected to the upstream repository, if necessary. +{{< admonition type="note" >}} +`otelcol.auth.bearer` is a wrapper over the upstream OpenTelemetry Collector `bearertokenauth` extension. +Bug reports or feature requests will be redirected to the upstream repository, if necessary. +{{< /admonition >}} -Multiple `otelcol.auth.bearer` components can be specified by giving them -different labels. +Multiple `otelcol.auth.bearer` components can be specified by giving them different labels. ## Usage @@ -35,10 +30,10 @@ otelcol.auth.bearer "LABEL" { `otelcol.auth.bearer` supports the following arguments: -Name | Type | Description | Default | Required ----- | ---- | ----------- | ------- | -------- -`token` | `secret` | Bearer token to use for authenticating requests. | | yes -`scheme` | `string` | Authentication scheme name. | "Bearer" | no +Name | Type | Description | Default | Required +---------|----------|--------------------------------------------------|----------|--------- +`token` | `secret` | Bearer token to use for authenticating requests. | | yes +`scheme` | `string` | Authentication scheme name. | "Bearer" | no When sending the token, the value of `scheme` is prepended to the `token` value. The string is then sent out as either a header (in case of HTTP) or as metadata (in case of gRPC). @@ -47,8 +42,8 @@ The string is then sent out as either a header (in case of HTTP) or as metadata The following fields are exported and can be referenced by other components: -Name | Type | Description ----- | ---- | ----------- +Name | Type | Description +----------|----------------------------|---------------------------------------------------------------- `handler` | `capsule(otelcol.Handler)` | A value that other components can use to authenticate requests. ## Component health @@ -66,8 +61,7 @@ configuration. 
The example below configures [otelcol.exporter.otlp][] to use a bearer token authentication.
 
-If we assume that the value of the `API_KEY` environment variable is `SECRET_API_KEY`, then
-the `Authorization` RPC metadata is set to `Bearer SECRET_API_KEY`.
+If we assume that the value of the `API_KEY` environment variable is `SECRET_API_KEY`, then the `Authorization` RPC metadata is set to `Bearer SECRET_API_KEY`.
 
 ```river
 otelcol.exporter.otlp "example" {
   client {
     endpoint = "my-otlp-grpc-server:4317"
     auth = otelcol.auth.bearer.creds.handler
   }
 }
@@ -103,5 +97,5 @@ otelcol.auth.bearer "creds" {
 }
 ```
 
-[otelcol.exporter.otlp]: {{< relref "./otelcol.exporter.otlp.md" >}}
-[otelcol.exporter.otlphttp]: {{< relref "./otelcol.exporter.otlphttp.md" >}}
+[otelcol.exporter.otlp]: ../otelcol.exporter.otlp/
+[otelcol.exporter.otlphttp]: ../otelcol.exporter.otlphttp/
diff --git a/docs/sources/flow/reference/components/otelcol.auth.headers.md b/docs/sources/reference/components/otelcol.auth.headers.md
similarity index 71%
rename from docs/sources/flow/reference/components/otelcol.auth.headers.md
rename to docs/sources/reference/components/otelcol.auth.headers.md
index 6b70a021de..734b24c992 100644
--- a/docs/sources/flow/reference/components/otelcol.auth.headers.md
+++ b/docs/sources/reference/components/otelcol.auth.headers.md
@@ -1,10 +1,5 @@
 ---
-aliases:
-- /docs/grafana-cloud/agent/flow/reference/components/otelcol.auth.headers/
-- /docs/grafana-cloud/monitor-infrastructure/agent/flow/reference/components/otelcol.auth.headers/
-- /docs/grafana-cloud/monitor-infrastructure/integrations/agent/flow/reference/components/otelcol.auth.headers/
-- /docs/grafana-cloud/send-data/agent/flow/reference/components/otelcol.auth.headers/
-canonical: https://grafana.com/docs/agent/latest/flow/reference/components/otelcol.auth.headers/
+canonical: https://grafana.com/docs/alloy/latest/reference/components/otelcol.auth.headers/
 description: Learn about otelcol.auth.headers
 title: otelcol.auth.headers
 ---
@@ -14,12 +9,12 @@ title: otelcol.auth.headers
 `otelcol.auth.headers` exposes a `handler` that can be used by other `otelcol`
 components to authenticate requests using custom headers.
 
-> **NOTE**: `otelcol.auth.headers` is a wrapper over the upstream OpenTelemetry
-> Collector `headerssetter` extension. Bug reports or feature requests will be
-> redirected to the upstream repository, if necessary.
+{{< admonition type="note" >}}
+`otelcol.auth.headers` is a wrapper over the upstream OpenTelemetry Collector `headerssetter` extension.
+Bug reports or feature requests will be redirected to the upstream repository, if necessary.
+{{< /admonition >}}
 
-Multiple `otelcol.auth.headers` components can be specified by giving them
-different labels.
+Multiple `otelcol.auth.headers` components can be specified by giving them different labels.
 
 ## Usage
 
@@ -42,9 +37,9 @@ through inner blocks.
 The following blocks are supported inside the definition of `otelcol.auth.headers`:
 
-Hierarchy | Block | Description | Required
--------- | ----- | ----------- | --------
-header | [header][] | Custom header to attach to requests. | no
+Hierarchy | Block | Description | Required
+----------|------------|--------------------------------------|---------
+header | [header][] | Custom header to attach to requests. | no
 
 [header]: #header-block
 
@@ -53,12 +48,12 @@ header | [header][] | Custom header to attach to requests. | no
 
 ### header block
 
 The `header` block defines a custom header to attach to requests. It is valid
 to provide multiple `header` blocks to set more than one header.
-Name | Type | Description | Default | Required ----- | ---- | ----------- | ------- | -------- -`key` | `string` | Name of the header to set. | | yes -`value` | `string` or `secret` | Value of the header. | | no -`from_context` | `string` | Metadata name to get header value from. | | no -`action` | `string` | An action to perform on the header | "upsert" | no +Name | Type | Description | Default | Required +---------------|----------------------|-----------------------------------------|----------|--------- +`key` | `string` | Name of the header to set. | | yes +`value` | `string` or `secret` | Value of the header. | | no +`from_context` | `string` | Metadata name to get header value from. | | no +`action` | `string` | An action to perform on the header | "upsert" | no The supported values for `action` are: * `insert`: Inserts the new header if it does not exist. @@ -145,4 +140,4 @@ otelcol.exporter.otlp "production" { } ``` -[otelcol.exporter.otlp]: {{< relref "./otelcol.exporter.otlp.md" >}} +[otelcol.exporter.otlp]: ../otelcol.exporter.otlp/ diff --git a/docs/sources/flow/reference/components/otelcol.auth.oauth2.md b/docs/sources/reference/components/otelcol.auth.oauth2.md similarity index 63% rename from docs/sources/flow/reference/components/otelcol.auth.oauth2.md rename to docs/sources/reference/components/otelcol.auth.oauth2.md index 4584f47eb7..28e7cc8e20 100644 --- a/docs/sources/flow/reference/components/otelcol.auth.oauth2.md +++ b/docs/sources/reference/components/otelcol.auth.oauth2.md @@ -1,10 +1,5 @@ --- -aliases: -- /docs/grafana-cloud/agent/flow/reference/components/otelcol.auth.oauth2/ -- /docs/grafana-cloud/monitor-infrastructure/agent/flow/reference/components/otelcol.auth.oauth2/ -- /docs/grafana-cloud/monitor-infrastructure/integrations/agent/flow/reference/components/otelcol.auth.oauth2/ -- /docs/grafana-cloud/send-data/agent/flow/reference/components/otelcol.auth.oauth2/ -canonical: https://grafana.com/docs/agent/latest/flow/reference/components/otelcol.auth.oauth2/ +canonical: https://grafana.com/docs/alloy/latest/reference/components/otelcol.auth.oauth2/ description: Learn about otelcol.auth.oauth2 title: otelcol.auth.oauth2 --- @@ -37,14 +32,14 @@ otelcol.auth.oauth2 "LABEL" { ## Arguments -Name | Type | Description | Default | Required ----- | ---- | ----------- | ------- | -------- -`client_id` | `string` | The client identifier issued to the client. | | yes -`client_secret` | `secret` | The secret string associated with the client identifier. | | yes -`token_url` | `string` | The server endpoint URL from which to get tokens. | | yes -`endpoint_params` | `map(list(string))` | Additional parameters that are sent to the token endpoint. | `{}` | no -`scopes` | `list(string)` | Requested permissions associated for the client. | `[]` | no -`timeout` | `duration` | The timeout on the client connecting to `token_url`. | `"0s"` | no +Name | Type | Description | Default | Required +------------------|---------------------|------------------------------------------------------------|---------|--------- +`client_id` | `string` | The client identifier issued to the client. | | yes +`client_secret` | `secret` | The secret string associated with the client identifier. | | yes +`token_url` | `string` | The server endpoint URL from which to get tokens. | | yes +`endpoint_params` | `map(list(string))` | Additional parameters that are sent to the token endpoint. | `{}` | no +`scopes` | `list(string)` | Requested permissions associated for the client. 
| `[]` | no +`timeout` | `duration` | The timeout on the client connecting to `token_url`. | `"0s"` | no The `timeout` argument is used both for requesting initial tokens and for refreshing tokens. `"0s"` implies no timeout. @@ -53,25 +48,24 @@ The `timeout` argument is used both for requesting initial tokens and for refres The following blocks are supported inside the definition of `otelcol.auth.oauth2`: -Hierarchy | Block | Description | Required ---------- | ----- | ----------- | -------- -tls | [tls][] | TLS settings for the token client. | no +Hierarchy | Block | Description | Required +----------|---------|------------------------------------|--------- +tls | [tls][] | TLS settings for the token client. | no [tls]: #tls-block ### tls block -The `tls` block configures TLS settings used for connecting to the token client. If the `tls` block isn't provided, -TLS won't be used for communication. +The `tls` block configures TLS settings used for connecting to the token client. If the `tls` block isn't provided, TLS won't be used for communication. -{{< docs/shared lookup="flow/reference/components/otelcol-tls-config-block.md" source="agent" version="" >}} +{{< docs/shared lookup="reference/components/otelcol-tls-config-block.md" source="alloy" version="" >}} ## Exported fields The following fields are exported and can be referenced by other components: -Name | Type | Description ----- | ---- | ----------- +Name | Type | Description +----------|----------------------------|---------------------------------------------------------------- `handler` | `capsule(otelcol.Handler)` | A value that other components can use to authenticate requests. ## Component health @@ -121,4 +115,4 @@ otelcol.auth.oauth2 "creds" { } ``` -[otelcol.exporter.otlp]: {{< relref "./otelcol.exporter.otlp.md" >}} +[otelcol.exporter.otlp]: ../otelcol.exporter.otlp/ diff --git a/docs/sources/flow/reference/components/otelcol.auth.sigv4.md b/docs/sources/reference/components/otelcol.auth.sigv4.md similarity index 64% rename from docs/sources/flow/reference/components/otelcol.auth.sigv4.md rename to docs/sources/reference/components/otelcol.auth.sigv4.md index e4fc91df28..8ac55e2918 100644 --- a/docs/sources/flow/reference/components/otelcol.auth.sigv4.md +++ b/docs/sources/reference/components/otelcol.auth.sigv4.md @@ -1,10 +1,5 @@ --- -aliases: -- /docs/grafana-cloud/agent/flow/reference/components/otelcol.auth.sigv4/ -- /docs/grafana-cloud/monitor-infrastructure/agent/flow/reference/components/otelcol.auth.sigv4/ -- /docs/grafana-cloud/monitor-infrastructure/integrations/agent/flow/reference/components/otelcol.auth.sigv4/ -- /docs/grafana-cloud/send-data/agent/flow/reference/components/otelcol.auth.sigv4/ -canonical: https://grafana.com/docs/agent/latest/flow/reference/components/otelcol.auth.sigv4/ +canonical: https://grafana.com/docs/alloy/latest/reference/components/otelcol.auth.sigv4/ description: Learn about otelcol.auth.sigv4 title: otelcol.auth.sigv4 --- @@ -12,9 +7,10 @@ title: otelcol.auth.sigv4 # otelcol.auth.sigv4 `otelcol.auth.sigv4` exposes a `handler` that can be used by other `otelcol` -components to authenticate requests to AWS services using the AWS Signature Version 4 (SigV4) protocol. -For more information about SigV4 see the AWS documentation about -[Signing AWS API requests](https://docs.aws.amazon.com/general/latest/gr/signing-aws-api-requests.html) . +components to authenticate requests to AWS services using the AWS Signature Version 4 (SigV4) protocol. 
+For more information about SigV4 see the AWS documentation about [Signing AWS API requests][]. + +[Signing AWS API requests]: https://docs.aws.amazon.com/general/latest/gr/signing-aws-api-requests.html > **NOTE**: `otelcol.auth.sigv4` is a wrapper over the upstream OpenTelemetry > Collector `sigv4auth` extension. Bug reports or feature requests will be @@ -23,8 +19,11 @@ For more information about SigV4 see the AWS documentation about Multiple `otelcol.auth.sigv4` components can be specified by giving them different labels. -> **NOTE**: The Agent must have valid AWS credentials as used by the -[AWS SDK for Go](https://aws.github.io/aws-sdk-go-v2/docs/configuring-sdk/#specifying-credentials). +{{< admonition type="note" >}} +{{< param "PRODUCT_NAME" >}} must have valid AWS credentials as used by the [AWS SDK for Go][]. + +[AWS SDK for Go]: https://aws.github.io/aws-sdk-go-v2/docs/configuring-sdk/#specifying-credentials +{{< /admonition >}} ## Usage @@ -35,31 +34,31 @@ otelcol.auth.sigv4 "LABEL" { ## Arguments -Name | Type | Description | Default | Required ----- | ---- | ----------- | ------- | -------- -`region` | `string` | The AWS region to sign with. | "" | no -`service` | `string` | The AWS service to sign with. | "" | no +Name | Type | Description | Default | Required +----------|----------|-------------------------------|---------|--------- +`region` | `string` | The AWS region to sign with. | "" | no +`service` | `string` | The AWS service to sign with. | "" | no If `region` and `service` are left empty, their values are inferred from the URL of the exporter using the following rules: * If the exporter URL starts with `aps-workspaces` and `service` is empty, `service` will be set to `aps`. * If the exporter URL starts with `search-` and `service` is empty, `service` will be set to `es`. -* If the exporter URL starts with either `aps-workspaces` or `search-` and `region` is empty, `region` . -will be set to the value between the first and second `.` character in the exporter URL. +* If the exporter URL starts with either `aps-workspaces` or `search-` and `region` is empty, `region` will be set to the value between the first and second `.` character in the exporter URL. If none of the above rules apply, then `region` and `service` must be specified. -A list of valid AWS regions can be found on Amazon's documentation for -[Regions, Availability Zones, and Local Zones](https://docs.aws.amazon.com/AmazonRDS/latest/UserGuide/Concepts.RegionsAndAvailabilityZones.html). +A list of valid AWS regions can be found on Amazon's documentation for [Regions, Availability Zones, and Local Zones][]. + +[Regions, Availability Zones, and Local Zones]: https://docs.aws.amazon.com/AmazonRDS/latest/UserGuide/Concepts.RegionsAndAvailabilityZones.html ## Blocks The following blocks are supported inside the definition of `otelcol.auth.sigv4`: -Hierarchy | Block | Description | Required ---------- | ----- | ----------- | -------- +Hierarchy | Block | Description | Required +------------|-----------------|------------------------------------|--------- assume_role | [assume_role][] | Configuration for assuming a role. | no [assume_role]: #assume_role-block @@ -68,14 +67,13 @@ assume_role | [assume_role][] | Configuration for assuming a role. | no The `assume_role` block specifies the configuration needed to assume a role. -Name | Type | Description | Default | Required ----- | ---- | ----------- | ------- | -------- -`arn` | `string` | The Amazon Resource Name (ARN) of a role to assume. 
| "" | no -`session_name` | `string` | The name of a role session. | "" | no -`sts_region` | `string` | The AWS region where STS is used to assume the configured role. | "" | no +Name | Type | Description | Default | Required +---------------|----------|-----------------------------------------------------------------|---------|--------- +`arn` | `string` | The Amazon Resource Name (ARN) of a role to assume. | "" | no +`session_name` | `string` | The name of a role session. | "" | no +`sts_region` | `string` | The AWS region where STS is used to assume the configured role. | "" | no -If the `assume_role` block is specified in the config and `sts_region` is not set, then `sts_region` -will default to the value for `region`. +If the `assume_role` block is specified in the config and `sts_region` is not set, then `sts_region` will default to the value for `region`. For cross region authentication, `region` and `sts_region` can be set different to different values. @@ -83,8 +81,8 @@ For cross region authentication, `region` and `sts_region` can be set different The following fields are exported and can be referenced by other components: -Name | Type | Description ----- | ---- | ----------- +Name | Type | Description +----------|----------------------------|---------------------------------------------------------------- `handler` | `capsule(otelcol.Handler)` | A value that other components can use to authenticate requests. ## Component health @@ -153,8 +151,7 @@ otelcol.auth.sigv4 "creds" { ### Specifying "region" and "service" explicitly and adding a "role" to assume -In this example we have also specified configuration to assume a role. `sts_region` has not been -provided, so it will default to the value of `region` which is `example_region`. +In this example we have also specified configuration to assume a role. `sts_region` hasn't been provided, so it will default to the value of `region` which is `example_region`. 
```river otelcol.exporter.otlp "example" { @@ -167,7 +164,7 @@ otelcol.exporter.otlp "example" { otelcol.auth.sigv4 "creds" { region = "example_region" service = "example_service" - + assume_role { session_name = "role_session_name" } diff --git a/docs/sources/flow/reference/components/otelcol.connector.host_info.md b/docs/sources/reference/components/otelcol.connector.host_info.md similarity index 87% rename from docs/sources/flow/reference/components/otelcol.connector.host_info.md rename to docs/sources/reference/components/otelcol.connector.host_info.md index 53d8a1663a..81c4bf1a0e 100644 --- a/docs/sources/flow/reference/components/otelcol.connector.host_info.md +++ b/docs/sources/reference/components/otelcol.connector.host_info.md @@ -1,8 +1,5 @@ --- -aliases: - - /docs/grafana-cloud/monitor-infrastructure/agent/flow/reference/components/otelcol.connector.host_info/ - - /docs/grafana-cloud/send-data/agent/flow/reference/components/otelcol.connector.host_info/ -canonical: https://grafana.com/docs/agent/latest/flow/reference/components/otelcol.connector.host_info/ +canonical: https://grafana.com/docs/alloy/latest/reference/components/otelcol.connector.host_info/ description: Learn about otelcol.connector.host_info labels: stage: experimental @@ -11,7 +8,7 @@ title: otelcol.connector.host_info # otelcol.connector.host_info -{{< docs/shared lookup="flow/stability/experimental.md" source="agent" version="" >}} +{{< docs/shared lookup="stability/experimental.md" source="alloy" version="" >}} `otel.connector.host_info` accepts span data from other `otelcol` components and generates usage metrics. @@ -47,7 +44,7 @@ The following blocks are supported inside the definition of ### output block -{{< docs/shared lookup="flow/reference/components/output-block-metrics.md" source="agent" version="" >}} +{{< docs/shared lookup="reference/components/output-block-metrics.md" source="alloy" version="" >}} ## Exported fields diff --git a/docs/sources/flow/reference/components/otelcol.connector.servicegraph.md b/docs/sources/reference/components/otelcol.connector.servicegraph.md similarity index 74% rename from docs/sources/flow/reference/components/otelcol.connector.servicegraph.md rename to docs/sources/reference/components/otelcol.connector.servicegraph.md index 06f20833f0..48f9b0e39d 100644 --- a/docs/sources/flow/reference/components/otelcol.connector.servicegraph.md +++ b/docs/sources/reference/components/otelcol.connector.servicegraph.md @@ -1,8 +1,5 @@ --- -aliases: -- /docs/grafana-cloud/monitor-infrastructure/agent/flow/reference/components/otelcol.connector.servicegraph/ -- /docs/grafana-cloud/send-data/agent/flow/reference/components/otelcol.connector.servicegraph/ -canonical: https://grafana.com/docs/agent/latest/flow/reference/components/otelcol.connector.servicegraph/ +canonical: https://grafana.com/docs/alloy/latest/reference/components/otelcol.connector.servicegraph/ description: Learn about otelcol.connector.servicegraph labels: stage: experimental @@ -11,14 +8,13 @@ title: otelcol.connector.servicegraph # otelcol.connector.servicegraph -{{< docs/shared lookup="flow/stability/experimental.md" source="agent" version="" >}} +{{< docs/shared lookup="stability/experimental.md" source="alloy" version="" >}} -`otelcol.connector.servicegraph` accepts span data from other `otelcol` components and -outputs metrics representing the relationship between various services in a system. 
+`otelcol.connector.servicegraph` accepts span data from other `otelcol` components and outputs metrics representing the relationship between various services in a system. A metric represents an edge in the service graph. -Those metrics can then be used by a data visualization application (e.g. -[Grafana](/docs/grafana/latest/explore/trace-integration/#service-graph)) -to draw the service graph. +Those metrics can then be used by a data visualization application (e.g. [Grafana][]) to draw the service graph. + +[Grafana]: https://grafana.com/docs/grafana/latest/explore/trace-integration/#service-graph > **NOTE**: `otelcol.connector.servicegraph` is a wrapper over the upstream > OpenTelemetry Collector `servicegraph` connector. Bug reports or feature requests @@ -31,7 +27,7 @@ This component is based on [Grafana Tempo's service graph processor](https://git Service graphs are useful for a number of use-cases: -* Infer the topology of a distributed system. As distributed systems grow, they become more complex. +* Infer the topology of a distributed system. As distributed systems grow, they become more complex. Service graphs can help you understand the structure of the system. * Provide a high level overview of the health of your system. Service graphs show error rates, latencies, and other relevant data. @@ -42,9 +38,11 @@ Service graphs are useful for a number of use-cases: Since `otelcol.connector.servicegraph` has to process both sides of an edge, it needs to process all spans of a trace to function properly. If spans of a trace are spread out over multiple Agent instances, spans cannot be paired reliably. -A solution to this problem is using [otelcol.exporter.loadbalancing]({{< relref "./otelcol.exporter.loadbalancing.md" >}}) +A solution to this problem is using [otelcol.exporter.loadbalancing][] in front of Agent instances running `otelcol.connector.servicegraph`. +[otelcol.exporter.loadbalancing]: ../otelcol.exporter.loadbalancing/ + ## Usage ```river @@ -59,28 +57,26 @@ otelcol.connector.servicegraph "LABEL" { `otelcol.connector.servicegraph` supports the following arguments: -Name | Type | Description | Default | Required ----- | ---- | ----------- | ------- | -------- -`latency_histogram_buckets` | `list(duration)` | Buckets for latency histogram metrics. | `["2ms", "4ms", "6ms", "8ms", "10ms", "50ms", "100ms", "200ms", "400ms", "800ms", "1s", "1400ms", "2s", "5s", "10s", "15s"]` | no -`dimensions` | `list(string)` | A list of dimensions to add with the default dimensions. | `[]` | no -`cache_loop` | `duration` | Configures how often to delete series which have not been updated. | `"1m"` | no -`store_expiration_loop` | `duration` | The time to expire old entries from the store periodically. | `"2s"` | no - -Service graphs work by inspecting traces and looking for spans with -parent-children relationship that represent a request. -`otelcol.connector.servicegraph` uses OpenTelemetry semantic conventions -to detect a myriad of requests. +Name | Type | Description | Default | Required +----------------------------|------------------|--------------------------------------------------------------------|---------|--------- +`latency_histogram_buckets` | `list(duration)` | Buckets for latency histogram metrics. | `["2ms", "4ms", "6ms", "8ms", "10ms", "50ms", "100ms", "200ms", "400ms", "800ms", "1s", "1400ms", "2s", "5s", "10s", "15s"]` | no +`dimensions` | `list(string)` | A list of dimensions to add with the default dimensions. 
| `[]` | no
+`cache_loop` | `duration` | Configures how often to delete series which have not been updated. | `"1m"` | no
+`store_expiration_loop` | `duration` | The time to expire old entries from the store periodically. | `"2s"` | no
+
+Service graphs work by inspecting traces and looking for spans with a parent-child relationship that represent a request.
+`otelcol.connector.servicegraph` uses OpenTelemetry semantic conventions to detect a myriad of requests.
 
 The following requests are currently supported:
 
 * A direct request between two services, where the outgoing and the incoming span
   must have a [Span Kind][] value of `client` and `server` respectively.
-* A request across a messaging system, where the outgoing and the incoming span
+* A request across a messaging system, where the outgoing and the incoming span
   must have a [Span Kind][] value of `producer` and `consumer` respectively.
 * A database request, where spans have a [Span Kind][] with a value of `client`,
   as well as an attribute with a key of `db.name`.
 
 Every span which can be paired up to form a request is kept in an in-memory store:
 
-* If the TTL of the span expires before it can be paired, it is deleted from the store.
+* If the TTL of the span expires before it can be paired, it is deleted from the store.
   TTL is configured in the [store][] block.
 * If the span is paired prior to its expiration, a metric is recorded and the span is deleted from the store.
 
@@ -97,12 +93,11 @@ The following metrics are emitted by the processor:
 
 Duration is measured both from the client and the server sides.
 
-The `latency_histogram_buckets` argument controls the buckets for
+The `latency_histogram_buckets` argument controls the buckets for
 `traces_service_graph_request_server_seconds` and `traces_service_graph_request_client_seconds`.
 
-Each emitted metrics series have a `client` and a `server` label corresponding with the
-service doing the request and the service receiving the request. The value of the label
-is derived from the `service.name` resource attribute of the two spans.
+Each emitted metric series has a `client` and a `server` label corresponding to the service doing the request and the service receiving the request.
+The value of the label is derived from the `service.name` resource attribute of the two spans.
 
 The `connection_type` label may not be set. If it is set, its value will be either `messaging_system` or `database`.
 
 Additional labels can be included using the `dimensions` configuration option:
 
 * Those labels will have a prefix to mark where they originate (client or server span kinds).
   The `client_` prefix relates to the dimensions coming from spans with a [Span Kind][] of `client`.
   The `server_` prefix relates to the dimensions coming from spans with a [Span Kind][] of `server`.
-* Firstly the resource attributes will be searched. If the attribute is not found,
-  the span attributes will be searched.
+* Firstly, the resource attributes will be searched. If the attribute is not found, the span attributes will be searched.
 
 [Span Kind]: https://opentelemetry.io/docs/concepts/signals/traces/#span-kind
 
 ## Blocks
 
 The following blocks are supported inside the definition of
 `otelcol.connector.servicegraph`:
 
-Hierarchy | Block | Description | Required
---------- | ----- | ----------- | --------
-store | [store][] | Configures the in-memory store for spans.
| no -output | [output][] | Configures where to send telemetry data. | yes +Hierarchy | Block | Description | Required +----------|------------|-------------------------------------------|--------- +store | [store][] | Configures the in-memory store for spans. | no +output | [output][] | Configures where to send telemetry data. | yes [store]: #store-block [output]: #output-block @@ -132,21 +126,21 @@ output | [output][] | Configures where to send telemetry data. | yes The `store` block configures the in-memory store for spans. -Name | Type | Description | Default | Required ----- | ---- | ----------- | ------- | -------- -`max_items` | `number` | Maximum number of items to keep in the store. | `1000` | no -`ttl` | `duration` | The time to live for spans in the store. | `"2s"` | no +Name | Type | Description | Default | Required +------------|------------|-----------------------------------------------|---------|--------- +`max_items` | `number` | Maximum number of items to keep in the store. | `1000` | no +`ttl` | `duration` | The time to live for spans in the store. | `"2s"` | no ### output block -{{< docs/shared lookup="flow/reference/components/output-block-metrics.md" source="agent" version="" >}} +{{< docs/shared lookup="reference/components/output-block-metrics.md" source="alloy" version="" >}} ## Exported fields The following fields are exported and can be referenced by other components: -Name | Type | Description ----- | ---- | ----------- +Name | Type | Description +--------|--------------------|----------------------------------------------------------------- `input` | `otelcol.Consumer` | A value that other components can use to send telemetry data to. `input` accepts `otelcol.Consumer` traces telemetry data. It does not accept metrics and logs. @@ -166,15 +160,14 @@ information. The example below accepts traces, creates service graph metrics from them, and writes the metrics to Mimir. The traces are written to Tempo. -`otelcol.connector.servicegraph` also adds a label to each metric with the value of the "http.method" -span/resource attribute. +`otelcol.connector.servicegraph` also adds a label to each metric with the value of the "http.method" span/resource attribute. 
```river otelcol.receiver.otlp "default" { grpc { endpoint = "0.0.0.0:4320" } - + output { traces = [otelcol.connector.servicegraph.default.input,otelcol.exporter.otlp.grafana_cloud_tempo.input] } @@ -194,7 +187,7 @@ otelcol.exporter.prometheus "default" { prometheus.remote_write "mimir" { endpoint { url = "https://prometheus-xxx.grafana.net/api/prom/push" - + basic_auth { username = env("PROMETHEUS_USERNAME") password = env("GRAFANA_CLOUD_API_KEY") @@ -216,10 +209,13 @@ otelcol.auth.basic "grafana_cloud_tempo" { ``` Some of the metrics in Mimir may look like this: + ``` traces_service_graph_request_total{client="shop-backend",failed="false",server="article-service",client_http_method="DELETE",server_http_method="DELETE"} traces_service_graph_request_failed_total{client="shop-backend",client_http_method="POST",failed="false",server="auth-service",server_http_method="POST"} -``` +``` + + ## Compatible components diff --git a/docs/sources/flow/reference/components/otelcol.connector.spanlogs.md b/docs/sources/reference/components/otelcol.connector.spanlogs.md similarity index 88% rename from docs/sources/flow/reference/components/otelcol.connector.spanlogs.md rename to docs/sources/reference/components/otelcol.connector.spanlogs.md index ec49e0509c..266bdd778e 100644 --- a/docs/sources/flow/reference/components/otelcol.connector.spanlogs.md +++ b/docs/sources/reference/components/otelcol.connector.spanlogs.md @@ -1,10 +1,5 @@ --- -aliases: -- /docs/grafana-cloud/agent/flow/reference/components/otelcol.connector.spanlogs/ -- /docs/grafana-cloud/monitor-infrastructure/agent/flow/reference/components/otelcol.connector.spanlogs/ -- /docs/grafana-cloud/monitor-infrastructure/integrations/agent/flow/reference/components/otelcol.connector.spanlogs/ -- /docs/grafana-cloud/send-data/agent/flow/reference/components/otelcol.connector.spanlogs/ -canonical: https://grafana.com/docs/agent/latest/flow/reference/components/otelcol.connector.spanlogs/ +canonical: https://grafana.com/docs/alloy/latest/reference/components/otelcol.connector.spanlogs/ description: Learn about otelcol.connector.spanlogs title: otelcol.connector.spanlogs --- @@ -15,12 +10,14 @@ title: otelcol.connector.spanlogs components and outputs logs telemetry data for each span, root, or process. This allows you to automatically build a mechanism for trace discovery. -> **NOTE**: `otelcol.connector.spanlogs` is a custom component unrelated -> to any components from the OpenTelemetry Collector. It is based on the -> `automatic_logging` component in the [traces]({{< relref "../../../static/configuration/traces-config" >}}) subsystem of the Agent static mode. +{{< admonition type="note" >}} +`otelcol.connector.spanlogs` is a custom component unrelated to any components from the OpenTelemetry Collector. +It is based on the `automatic_logging` component in the [traces][] subsystem of Grafana Agent Static. + +[traces]: https://grafana.com/docs/agent/latest/static/configuration/traces-config +{{< /admonition >}} -You can specify multiple `otelcol.connector.spanlogs` components by giving them -different labels. +You can specify multiple `otelcol.connector.spanlogs` components by giving them different labels. ## Usage @@ -47,7 +44,9 @@ otelcol.connector.spanlogs "LABEL" { The values listed in `labels` should be the values of either span or process attributes. -> **WARNING**: Setting `spans` to `true` could lead to a high volume of logs. +{{< admonition type="warning" >}} +Setting `spans` to `true` could lead to a high volume of logs. 
+{{< /admonition >}} ## Blocks @@ -79,7 +78,7 @@ The following attributes are supported: ### output block -{{< docs/shared lookup="flow/reference/components/output-block-logs.md" source="agent" version="" >}} +{{< docs/shared lookup="reference/components/output-block-logs.md" source="alloy" version="" >}} ## Exported fields @@ -89,18 +88,15 @@ The following fields are exported and can be referenced by other components: | ------- | ------------------ | ---------------------------------------------------------------- | | `input` | `otelcol.Consumer` | A value that other components can use to send telemetry data to. | -`input` accepts `otelcol.Consumer` data for any telemetry signal (metrics, -logs, or traces). +`input` accepts `otelcol.Consumer` data for any telemetry signal (metrics, logs, or traces). ## Component health -`otelcol.connector.spanlogs` is only reported as unhealthy if given an invalid -configuration. +`otelcol.connector.spanlogs` is only reported as unhealthy if given an invalid configuration. ## Debug information -`otelcol.connector.spanlogs` does not expose any component-specific debug -information. +`otelcol.connector.spanlogs` does not expose any component-specific debug information. ## Example diff --git a/docs/sources/flow/reference/components/otelcol.connector.spanmetrics.md b/docs/sources/reference/components/otelcol.connector.spanmetrics.md similarity index 96% rename from docs/sources/flow/reference/components/otelcol.connector.spanmetrics.md rename to docs/sources/reference/components/otelcol.connector.spanmetrics.md index ffc5f408cc..d010475c90 100644 --- a/docs/sources/flow/reference/components/otelcol.connector.spanmetrics.md +++ b/docs/sources/reference/components/otelcol.connector.spanmetrics.md @@ -1,10 +1,5 @@ --- -aliases: -- /docs/grafana-cloud/agent/flow/reference/components/otelcol.connector.spanmetrics/ -- /docs/grafana-cloud/monitor-infrastructure/agent/flow/reference/components/otelcol.connector.spanmetrics/ -- /docs/grafana-cloud/monitor-infrastructure/integrations/agent/flow/reference/components/otelcol.connector.spanmetrics/ -- /docs/grafana-cloud/send-data/agent/flow/reference/components/otelcol.connector.spanmetrics/ -canonical: https://grafana.com/docs/agent/latest/flow/reference/components/otelcol.connector.spanmetrics/ +canonical: https://grafana.com/docs/alloy/latest/reference/components/otelcol.connector.spanmetrics/ description: Learn about otelcol.connector.spanmetrics labels: stage: experimental @@ -13,7 +8,7 @@ title: otelcol.connector.spanmetrics # otelcol.connector.spanmetrics -{{< docs/shared lookup="flow/stability/experimental.md" source="agent" version="" >}} +{{< docs/shared lookup="stability/experimental.md" source="alloy" version="" >}} `otelcol.connector.spanmetrics` accepts span data from other `otelcol` components and aggregates Request, Error and Duration (R.E.D) OpenTelemetry metrics from the spans: @@ -171,8 +166,8 @@ The `explicit` block configures a histogram with explicit buckets. The following attributes are supported: -| Name | Type | Description | Default | Required | -| --------- | ---------------- | -------------------------- | ---------------------------------------------------------------------------------------------------------------------------- | -------- | +| Name | Type | Description | Default | Required | +| --------- | ---------------- | -------------------------- | ------------------------------------------ | -------- | | `buckets` | `list(duration)` | List of histogram buckets. 
| `["2ms", "4ms", "6ms", "8ms", "10ms", "50ms", "100ms", "200ms", "400ms", "800ms", "1s", "1400ms", "2s", "5s", "10s", "15s"]` | no | ### exemplars block @@ -187,7 +182,7 @@ The following attributes are supported: ### output block -{{< docs/shared lookup="flow/reference/components/output-block-metrics.md" source="agent" version="" >}} +{{< docs/shared lookup="reference/components/output-block-metrics.md" source="alloy" version="" >}} ## Exported fields @@ -626,6 +621,7 @@ but different resource attributes, `otelcol.exporter.prometheus` will convert th This problem can be solved by doing **either** of the following: - **Recommended approach:** Prior to `otelcol.connector.spanmetrics`, remove all resource attributes from the incoming spans which are not needed by `otelcol.connector.spanmetrics`. + {{< collapse title="Example River configuration to remove unnecessary resource attributes." >}} ```river otelcol.receiver.otlp "default" { diff --git a/docs/sources/flow/reference/components/otelcol.exporter.loadbalancing.md b/docs/sources/reference/components/otelcol.exporter.loadbalancing.md similarity index 79% rename from docs/sources/flow/reference/components/otelcol.exporter.loadbalancing.md rename to docs/sources/reference/components/otelcol.exporter.loadbalancing.md index f25e28bfa3..58595b58e1 100644 --- a/docs/sources/flow/reference/components/otelcol.exporter.loadbalancing.md +++ b/docs/sources/reference/components/otelcol.exporter.loadbalancing.md @@ -1,10 +1,5 @@ --- -aliases: -- /docs/grafana-cloud/agent/flow/reference/components/otelcol.exporter.loadbalancing/ -- /docs/grafana-cloud/monitor-infrastructure/agent/flow/reference/components/otelcol.exporter.loadbalancing/ -- /docs/grafana-cloud/monitor-infrastructure/integrations/agent/flow/reference/components/otelcol.exporter.loadbalancing/ -- /docs/grafana-cloud/send-data/agent/flow/reference/components/otelcol.exporter.loadbalancing/ -canonical: https://grafana.com/docs/agent/latest/flow/reference/components/otelcol.exporter.loadbalancing/ +canonical: https://grafana.com/docs/alloy/latest/reference/components/otelcol.exporter.loadbalancing/ description: Learn about otelcol.exporter.loadbalancing labels: stage: beta @@ -13,29 +8,30 @@ title: otelcol.exporter.loadbalancing # otelcol.exporter.loadbalancing -{{< docs/shared lookup="flow/stability/beta.md" source="agent" version="" >}} +{{< docs/shared lookup="stability/beta.md" source="alloy" version="" >}} `otelcol.exporter.loadbalancing` accepts logs and traces from other `otelcol` components -and writes them over the network using the OpenTelemetry Protocol (OTLP) protocol. +and writes them over the network using the OpenTelemetry Protocol (OTLP) protocol. -> **NOTE**: `otelcol.exporter.loadbalancing` is a wrapper over the upstream -> OpenTelemetry Collector `loadbalancing` exporter. Bug reports or feature requests will -> be redirected to the upstream repository, if necessary. +{{< admonition type="note" >}} +`otelcol.exporter.loadbalancing` is a wrapper over the upstream OpenTelemetry Collector `loadbalancing` exporter. +Bug reports or feature requests will be redirected to the upstream repository, if necessary. +{{< /admonition >}} Multiple `otelcol.exporter.loadbalancing` components can be specified by giving them different labels. -The decision which backend to use depends on the trace ID or the service name. -The backend load doesn't influence the choice. 
Even though this load-balancer won't do
-round-robin balancing of the batches, the load distribution should be very similar among backends, 
+The decision of which backend to use depends on the trace ID or the service name.
+The backend load doesn't influence the choice. Even though this load-balancer won't do
+round-robin balancing of the batches, the load distribution should be very similar among backends,
 with a standard deviation under 5% at the current configuration.
 
 `otelcol.exporter.loadbalancing` is especially useful for backends configured with tail-based
 samplers which choose a backend based on the view of the full trace.
 
-When a list of backends is updated, some of the signals will be rerouted to different backends. 
+When a list of backends is updated, some of the signals will be rerouted to different backends.
 Around R/N of the "routes" will be rerouted differently, where:
 
 * A "route" is either a trace ID or a service name mapped to a certain backend.
@@ -63,13 +59,13 @@ otelcol.exporter.loadbalancing "LABEL" {
 
 `otelcol.exporter.loadbalancing` supports the following arguments:
 
-Name | Type | Description | Default | Required
----- | ---- | ----------- | ------- | --------
+Name | Type | Description | Default | Required
+--------------|----------|--------------------------------------|-------------|---------
 `routing_key` | `string` | Routing strategy for load balancing. | `"traceID"` | no
 
 The `routing_key` attribute determines how to route signals across endpoints. Its value could be one of the following:
 
 * `"service"`: spans with the same `service.name` will be exported to the same backend.
-This is useful when using processors like the span metrics, so all spans for each service are sent to consistent Agent instances 
+This is useful when using processors like the span metrics connector, so all spans for each service are sent to consistent Agent instances
 for metric collection. Otherwise, metrics for the same services would be sent to different Agents, making aggregations inaccurate.
 * `"traceID"`: spans belonging to the same traceID will be exported to the same backend.
@@ -78,20 +74,20 @@ for metric collection. Otherwise, metrics for the same services would be sent to
 
 The following blocks are supported inside the definition of
 `otelcol.exporter.loadbalancing`:
 
-Hierarchy | Block | Description | Required
---------- | ----- | ----------- | --------
-resolver | [resolver][] | Configures discovering the endpoints to export to. | yes
-resolver > static | [static][] | Static list of endpoints to export to. | no
-resolver > dns | [dns][] | DNS-sourced list of endpoints to export to. | no
-resolver > kubernetes | [kubernetes][] | Kubernetes-sourced list of endpoints to export to. | no
-protocol | [protocol][] | Protocol settings. Only OTLP is supported at the moment. | no
-protocol > otlp | [otlp][] | Configures an OTLP exporter. | no
-protocol > otlp > client | [client][] | Configures the exporter gRPC client. | no
-protocol > otlp > client > tls | [tls][] | Configures TLS for the gRPC client. | no
-protocol > otlp > client > keepalive | [keepalive][] | Configures keepalive settings for the gRPC client. | no
-protocol > otlp > queue | [queue][] | Configures batching of data before sending. | no
-protocol > otlp > retry | [retry][] | Configures retry mechanism for failed requests. | no
-debug_metrics | [debug_metrics][] | Configures the metrics that this component generates to monitor its state. 
| no +Hierarchy | Block | Description | Required +-------------------------------------|-------------------|----------------------------------------------------------------------------|--------- +resolver | [resolver][] | Configures discovering the endpoints to export to. | yes +resolver > static | [static][] | Static list of endpoints to export to. | no +resolver > dns | [dns][] | DNS-sourced list of endpoints to export to. | no +resolver > kubernetes | [kubernetes][] | Kubernetes-sourced list of endpoints to export to. | no +protocol | [protocol][] | Protocol settings. Only OTLP is supported at the moment. | no +protocol > otlp | [otlp][] | Configures an OTLP exporter. | no +protocol > otlp > client | [client][] | Configures the exporter gRPC client. | no +protocol > otlp > client > tls | [tls][] | Configures TLS for the gRPC client. | no +protocol > otlp > client > keepalive | [keepalive][] | Configures keepalive settings for the gRPC client. | no +protocol > otlp > queue | [queue][] | Configures batching of data before sending. | no +protocol > otlp > retry | [retry][] | Configures retry mechanism for failed requests. | no +debug_metrics | [debug_metrics][] | Configures the metrics that this component generates to monitor its state. | no The `>` symbol indicates deeper levels of nesting. For example, `resolver > static` refers to a `static` block defined inside a `resolver` block. @@ -113,8 +109,8 @@ refers to a `static` block defined inside a `resolver` block. The `resolver` block configures how to retrieve the endpoint to which this exporter will send data. -Inside the `resolver` block, either the [dns][] block or the [static][] block -should be specified. If both `dns` and `static` are specified, `dns` takes precedence. +Inside the `resolver` block, either the [dns][] block or the [static][] block should be specified. +If both `dns` and `static` are specified, `dns` takes precedence. ### static block @@ -122,9 +118,9 @@ The `static` block configures a list of endpoints which this exporter will send The following arguments are supported: -Name | Type | Description | Default | Required ----- | ---- | ----------- | ------- | -------- -`hostnames` | `list(string)` | List of endpoints to export to. | | yes +Name | Type | Description | Default | Required +------------|----------------|---------------------------------|---------|--------- +`hostnames` | `list(string)` | List of endpoints to export to. | | yes ### dns block @@ -134,11 +130,11 @@ as the endpoint to which to export data to. The following arguments are supported: -Name | Type | Description | Default | Required ----- | ---- | ----------- | ------- | -------- -`hostname` | `string` | DNS hostname to resolve. | | yes -`interval` | `duration` | Resolver interval. | `"5s"` | no -`timeout` | `duration` | Resolver timeout. | `"1s"` | no +Name | Type | Description | Default | Required +-----------|------------|-----------------------------------------------------------------------|----------|--------- +`hostname` | `string` | DNS hostname to resolve. | | yes +`interval` | `duration` | Resolver interval. | `"5s"` | no +`timeout` | `duration` | Resolver timeout. | `"1s"` | no `port` | `string` | Port to be used with the IP addresses resolved from the DNS hostname. 
| `"4317"` | no ### kubernetes block @@ -149,9 +145,9 @@ The `kubernetes` resolver has a much faster response time than the `dns` resolve The following arguments are supported: -Name | Type | Description | Default | Required ----- | ---- | ----------- | ------- | -------- -`service` | `string` | Kubernetes service to resolve. | | yes +Name | Type | Description | Default | Required +----------|----------------|-------------------------------------------------------------|----------|--------- +`service` | `string` | Kubernetes service to resolve. | | yes `ports` | `list(number)` | Ports to use with the IP addresses resolved from `service`. | `[4317]` | no If no namespace is specified inside `service`, an attempt will be made to infer the namespace for this Agent. @@ -178,22 +174,22 @@ The endpoints used by the client block are the ones from the `resolver` block The following arguments are supported: -Name | Type | Description | Default | Required ----- | ---- | ----------- | ------- | -------- -`compression` | `string` | Compression mechanism to use for requests. | `"gzip"` | no -`read_buffer_size` | `string` | Size of the read buffer the gRPC client to use for reading server responses. | | no -`write_buffer_size` | `string` | Size of the write buffer the gRPC client to use for writing requests. | `"512KiB"` | no -`wait_for_ready` | `boolean` | Waits for gRPC connection to be in the `READY` state before sending data. | `false` | no -`headers` | `map(string)` | Additional headers to send with the request. | `{}` | no -`balancer_name` | `string` | Which gRPC client-side load balancer to use for requests. | `pick_first` | no -`authority` | `string` | Overrides the default `:authority` header in gRPC requests from the gRPC client. | | no -`auth` | `capsule(otelcol.Handler)` | Handler from an `otelcol.auth` component to use for authenticating requests. | | no +Name | Type | Description | Default | Required +--------------------|----------------------------|----------------------------------------------------------------------------------|--------------|--------- +`compression` | `string` | Compression mechanism to use for requests. | `"gzip"` | no +`read_buffer_size` | `string` | Size of the read buffer the gRPC client to use for reading server responses. | | no +`write_buffer_size` | `string` | Size of the write buffer the gRPC client to use for writing requests. | `"512KiB"` | no +`wait_for_ready` | `boolean` | Waits for gRPC connection to be in the `READY` state before sending data. | `false` | no +`headers` | `map(string)` | Additional headers to send with the request. | `{}` | no +`balancer_name` | `string` | Which gRPC client-side load balancer to use for requests. | `pick_first` | no +`authority` | `string` | Overrides the default `:authority` header in gRPC requests from the gRPC client. | | no +`auth` | `capsule(otelcol.Handler)` | Handler from an `otelcol.auth` component to use for authenticating requests. 
| | no -{{< docs/shared lookup="flow/reference/components/otelcol-compression-field.md" source="agent" version="" >}} +{{< docs/shared lookup="reference/components/otelcol-compression-field.md" source="alloy" version="" >}} -{{< docs/shared lookup="flow/reference/components/otelcol-grpc-balancer-name.md" source="agent" version="" >}} +{{< docs/shared lookup="reference/components/otelcol-grpc-balancer-name.md" source="alloy" version="" >}} -{{< docs/shared lookup="flow/reference/components/otelcol-grpc-authority.md" source="agent" version="" >}} +{{< docs/shared lookup="reference/components/otelcol-grpc-authority.md" source="alloy" version="" >}} You can configure an HTTP proxy with the following environment variables: @@ -219,10 +215,9 @@ able to handle and proxy HTTP/2 traffic. ### tls block -The `tls` block configures TLS settings used for the connection to the gRPC -server. +The `tls` block configures TLS settings used for the connection to the gRPC server. -{{< docs/shared lookup="flow/reference/components/otelcol-tls-config-block.md" source="agent" version="" >}} +{{< docs/shared lookup="reference/components/otelcol-tls-config-block.md" source="alloy" version="" >}} ### keepalive block @@ -231,36 +226,34 @@ connections. The following arguments are supported: -Name | Type | Description | Default | Required ----- | ---- | ----------- | ------- | -------- -`ping_wait` | `duration` | How often to ping the server after no activity. | | no -`ping_response_timeout` | `duration` | Time to wait before closing inactive connections if the server does not respond to a ping. | | no -`ping_without_stream` | `boolean` | Send pings even if there is no active stream request. | | no +Name | Type | Description | Default | Required +------------------------|------------|--------------------------------------------------------------------------------------------|---------|--------- +`ping_wait` | `duration` | How often to ping the server after no activity. | | no +`ping_response_timeout` | `duration` | Time to wait before closing inactive connections if the server does not respond to a ping. | | no +`ping_without_stream` | `boolean` | Send pings even if there is no active stream request. | | no ### queue block -The `queue` block configures an in-memory buffer of batches before data is sent -to the gRPC server. +The `queue` block configures an in-memory buffer of batches before data is sent to the gRPC server. -{{< docs/shared lookup="flow/reference/components/otelcol-queue-block.md" source="agent" version="" >}} +{{< docs/shared lookup="reference/components/otelcol-queue-block.md" source="alloy" version="" >}} ### retry block -The `retry` block configures how failed requests to the gRPC server are -retried. +The `retry` block configures how failed requests to the gRPC server are retried. 
-{{< docs/shared lookup="flow/reference/components/otelcol-retry-block.md" source="agent" version="" >}} +{{< docs/shared lookup="reference/components/otelcol-retry-block.md" source="alloy" version="" >}} ### debug_metrics block -{{< docs/shared lookup="flow/reference/components/otelcol-debug-metrics-block.md" source="agent" version="" >}} +{{< docs/shared lookup="reference/components/otelcol-debug-metrics-block.md" source="alloy" version="" >}} ## Exported fields The following fields are exported and can be referenced by other components: -Name | Type | Description ----- | ---- | ----------- +Name | Type | Description +--------|--------------------|----------------------------------------------------------------- `input` | `otelcol.Consumer` | A value that other components can use to send telemetry data to. `input` accepts `otelcol.Consumer` OTLP-formatted data for telemetry signals of these types: @@ -275,7 +268,7 @@ Name | Type | Description Different {{< param "PRODUCT_NAME" >}} components require different load-balancing strategies. The use of `otelcol.exporter.loadbalancing` is only necessary for [stateful Flow components][stateful-and-stateless-components]. -[stateful-and-stateless-components]: {{< relref "../../get-started/deploy-agent.md#stateful-and-stateless-components" >}} +[stateful-and-stateless-components]: ../../../get-started/deploy-alloy/#stateful-and-stateless-components ### otelcol.processor.tail_sampling diff --git a/docs/sources/flow/reference/components/otelcol.exporter.logging.md b/docs/sources/reference/components/otelcol.exporter.logging.md similarity index 70% rename from docs/sources/flow/reference/components/otelcol.exporter.logging.md rename to docs/sources/reference/components/otelcol.exporter.logging.md index 51a044b130..942f7e6bf0 100644 --- a/docs/sources/flow/reference/components/otelcol.exporter.logging.md +++ b/docs/sources/reference/components/otelcol.exporter.logging.md @@ -1,10 +1,5 @@ --- -aliases: -- /docs/grafana-cloud/agent/flow/reference/components/otelcol.exporter.logging/ -- /docs/grafana-cloud/monitor-infrastructure/agent/flow/reference/components/otelcol.exporter.logging/ -- /docs/grafana-cloud/monitor-infrastructure/integrations/agent/flow/reference/components/otelcol.exporter.logging/ -- /docs/grafana-cloud/send-data/agent/flow/reference/components/otelcol.exporter.logging/ -canonical: https://grafana.com/docs/agent/latest/flow/reference/components/otelcol.exporter.logging/ +canonical: https://grafana.com/docs/alloy/latest/reference/components/otelcol.exporter.logging/ description: Learn about otelcol.exporter.logging title: otelcol.exporter.logging --- @@ -17,14 +12,15 @@ and writes them to the console. This component writes logs at the info level. The [logging config block][] must be configured to write logs at the info level. -> **NOTE**: `otelcol.exporter.logging` is a wrapper over the upstream -> OpenTelemetry Collector `logging` exporter. Bug reports or feature requests will -> be redirected to the upstream repository, if necessary. +{{< admonition type="note" >}} +`otelcol.exporter.logging` is a wrapper over the upstream OpenTelemetry Collector `logging` exporter. +Bug reports or feature requests will be redirected to the upstream repository, if necessary. +{{< /admonition >}} Multiple `otelcol.exporter.logging` components can be specified by giving them different labels. 
-[logging config block]: {{< relref "../config-blocks/logging.md" >}} +[logging config block]: ../../config-blocks/logging/ ## Usage @@ -36,11 +32,11 @@ otelcol.exporter.logging "LABEL" { } `otelcol.exporter.logging` supports the following arguments: -Name | Type | Description | Default | Required ----- | ---- | ----------- | ------- | -------- -`verbosity` | `string` | Verbosity of the generated logs. | `"normal"` | no -`sampling_initial` | `int` | Number of messages initially logged each second. | `2` | no -`sampling_thereafter` | `int` | Sampling rate after the initial messages are logged. | `500` | no +Name | Type | Description | Default | Required +----------------------|----------|------------------------------------------------------|------------|--------- +`verbosity` | `string` | Verbosity of the generated logs. | `"normal"` | no +`sampling_initial` | `int` | Number of messages initially logged each second. | `2` | no +`sampling_thereafter` | `int` | Sampling rate after the initial messages are logged. | `500` | no The `verbosity` argument must be one of `"basic"`, `"normal"`, or `"detailed"`. @@ -49,8 +45,8 @@ The `verbosity` argument must be one of `"basic"`, `"normal"`, or `"detailed"`. The following blocks are supported inside the definition of `otelcol.exporter.logging`: -Hierarchy | Block | Description | Required ---------- | ----- | ----------- | -------- +Hierarchy | Block | Description | Required +--------------|-------------------|----------------------------------------------------------------------------|--------- debug_metrics | [debug_metrics][] | Configures the metrics that this component generates to monitor its state. | no The `>` symbol indicates deeper levels of nesting. For example, `client > tls` @@ -60,14 +56,14 @@ refers to a `tls` block defined inside a `client` block. ### debug_metrics block -{{< docs/shared lookup="flow/reference/components/otelcol-debug-metrics-block.md" source="agent" version="" >}} +{{< docs/shared lookup="reference/components/otelcol-debug-metrics-block.md" source="alloy" version="" >}} ## Exported fields The following fields are exported and can be referenced by other components: -Name | Type | Description ----- | ---- | ----------- +Name | Type | Description +--------|--------------------|----------------------------------------------------------------- `input` | `otelcol.Consumer` | A value that other components can use to send telemetry data to. 
`input` accepts `otelcol.Consumer` data for any telemetry signal (metrics, diff --git a/docs/sources/flow/reference/components/otelcol.exporter.loki.md b/docs/sources/reference/components/otelcol.exporter.loki.md similarity index 70% rename from docs/sources/flow/reference/components/otelcol.exporter.loki.md rename to docs/sources/reference/components/otelcol.exporter.loki.md index 8fe0d1ec83..f6e73c7b22 100644 --- a/docs/sources/flow/reference/components/otelcol.exporter.loki.md +++ b/docs/sources/reference/components/otelcol.exporter.loki.md @@ -1,10 +1,5 @@ --- -aliases: -- /docs/grafana-cloud/agent/flow/reference/components/otelcol.exporter.loki/ -- /docs/grafana-cloud/monitor-infrastructure/agent/flow/reference/components/otelcol.exporter.loki/ -- /docs/grafana-cloud/monitor-infrastructure/integrations/agent/flow/reference/components/otelcol.exporter.loki/ -- /docs/grafana-cloud/send-data/agent/flow/reference/components/otelcol.exporter.loki/ -canonical: https://grafana.com/docs/agent/latest/flow/reference/components/otelcol.exporter.loki/ +canonical: https://grafana.com/docs/alloy/latest/reference/components/otelcol.exporter.loki/ description: Learn about otelcol.exporter.loki title: otelcol.exporter.loki --- @@ -15,8 +10,9 @@ title: otelcol.exporter.loki components, converts them to Loki-formatted log entries, and forwards them to `loki` components. -> **NOTE**: `otelcol.exporter.loki` is a custom component unrelated to the -> `lokiexporter` from the OpenTelemetry Collector. +{{< admonition type="note" >}} +`otelcol.exporter.loki` is a custom component unrelated to the `lokiexporter` from the OpenTelemetry Collector. +{{< /admonition >}} The attributes of the OTLP log are not converted to Loki attributes by default. To convert them, the OTLP log should contain special "hint" attributes: @@ -25,12 +21,10 @@ To convert them, the OTLP log should contain special "hint" attributes: * To convert OTLP log attributes to Loki labels, use the `loki.attribute.labels` hint attribute. -Labels will be translated to a [Prometheus format][], which is more constrained -than the OTLP format. For examples on label translation, see the -[Converting OTLP attributes to Loki labels][] section. +Labels will be translated to a [Prometheus format][], which is more constrained than the OTLP format. +For examples on label translation, see the [Converting OTLP attributes to Loki labels][] section. -Multiple `otelcol.exporter.loki` components can be specified by giving them -different labels. +Multiple `otelcol.exporter.loki` components can be specified by giving them different labels. [Converting OTLP attributes to Loki labels]: #converting-otlp-attributes-to-loki-labels @@ -46,16 +40,16 @@ otelcol.exporter.loki "LABEL" { `otelcol.exporter.loki` supports the following arguments: -Name | Type | Description | Default | Required ----- | ---- | ----------- | ------- | -------- -`forward_to` | `list(receiver)` | Where to forward converted Loki logs. | | yes +Name | Type | Description | Default | Required +-------------|------------------|---------------------------------------|---------|--------- +`forward_to` | `list(receiver)` | Where to forward converted Loki logs. 
| | yes ## Exported fields The following fields are exported and can be referenced by other components: -Name | Type | Description ----- | ---- | ----------- +Name | Type | Description +--------|--------------------|----------------------------------------------------------------- `input` | `otelcol.Consumer` | A value that other components can use to send telemetry data to. `input` accepts `otelcol.Consumer` data for logs. Other telemetry signals are ignored. @@ -108,16 +102,16 @@ The example below will convert the following attributes to Loki labels: Labels will be translated to a [Prometheus format][]. For example: -| OpenTelemetry Attribute | Prometheus Label | -|---|---| -| `name` | `name` | -| `host.name` | `host_name` | -| `host_name` | `host_name` | -| `name (of the host)` | `name__of_the_host_` | -| `2 cents` | `key_2_cents` | -| `__name` | `__name` | -| `_name` | `key_name` | -| `_name` | `_name` (if `PermissiveLabelSanitization` is enabled) | +| OpenTelemetry Attribute | Prometheus Label | +|-------------------------|-------------------------------------------------------| +| `name` | `name` | +| `host.name` | `host_name` | +| `host_name` | `host_name` | +| `name (of the host)` | `name__of_the_host_` | +| `2 cents` | `key_2_cents` | +| `__name` | `__name` | +| `_name` | `key_name` | +| `_name` | `_name` (if `PermissiveLabelSanitization` is enabled) | ```river otelcol.receiver.otlp "default" { @@ -134,13 +128,13 @@ otelcol.processor.attributes "default" { action = "insert" value = "event.domain, event.name" } - + action { key = "loki.resource.labels" action = "insert" value = "service.name, service.namespace" } - + output { logs = [otelcol.exporter.loki.default.input] } diff --git a/docs/sources/flow/reference/components/otelcol.exporter.otlp.md b/docs/sources/reference/components/otelcol.exporter.otlp.md similarity index 51% rename from docs/sources/flow/reference/components/otelcol.exporter.otlp.md rename to docs/sources/reference/components/otelcol.exporter.otlp.md index fce2576d8e..6230d97b35 100644 --- a/docs/sources/flow/reference/components/otelcol.exporter.otlp.md +++ b/docs/sources/reference/components/otelcol.exporter.otlp.md @@ -1,10 +1,5 @@ --- -aliases: -- /docs/grafana-cloud/agent/flow/reference/components/otelcol.exporter.otlp/ -- /docs/grafana-cloud/monitor-infrastructure/agent/flow/reference/components/otelcol.exporter.otlp/ -- /docs/grafana-cloud/monitor-infrastructure/integrations/agent/flow/reference/components/otelcol.exporter.otlp/ -- /docs/grafana-cloud/send-data/agent/flow/reference/components/otelcol.exporter.otlp/ -canonical: https://grafana.com/docs/agent/latest/flow/reference/components/otelcol.exporter.otlp/ +canonical: https://grafana.com/docs/alloy/latest/reference/components/otelcol.exporter.otlp/ description: Learn about otelcol.exporter.otlp title: otelcol.exporter.otlp --- @@ -35,23 +30,23 @@ otelcol.exporter.otlp "LABEL" { `otelcol.exporter.otlp` supports the following arguments: -Name | Type | Description | Default | Required ----- | ---- | ----------- | ------- | -------- -`timeout` | `duration` | Time to wait before marking a request as failed. | `"5s"` | no +Name | Type | Description | Default | Required +----------|------------|--------------------------------------------------|---------|--------- +`timeout` | `duration` | Time to wait before marking a request as failed. 
| `"5s"` | no ## Blocks The following blocks are supported inside the definition of `otelcol.exporter.otlp`: -Hierarchy | Block | Description | Required ---------- | ----- | ----------- | -------- -client | [client][] | Configures the gRPC server to send telemetry data to. | yes -client > tls | [tls][] | Configures TLS for the gRPC client. | no -client > keepalive | [keepalive][] | Configures keepalive settings for the gRPC client. | no -sending_queue | [sending_queue][] | Configures batching of data before sending. | no -retry_on_failure | [retry_on_failure][] | Configures retry mechanism for failed requests. | no -debug_metrics | [debug_metrics][] | Configures the metrics that this component generates to monitor its state. | no +Hierarchy | Block | Description | Required +-------------------|----------------------|----------------------------------------------------------------------------|--------- +client | [client][] | Configures the gRPC server to send telemetry data to. | yes +client > tls | [tls][] | Configures TLS for the gRPC client. | no +client > keepalive | [keepalive][] | Configures keepalive settings for the gRPC client. | no +sending_queue | [sending_queue][] | Configures batching of data before sending. | no +retry_on_failure | [retry_on_failure][] | Configures retry mechanism for failed requests. | no +debug_metrics | [debug_metrics][] | Configures the metrics that this component generates to monitor its state. | no The `>` symbol indicates deeper levels of nesting. For example, `client > tls` refers to a `tls` block defined inside a `client` block. @@ -69,23 +64,23 @@ The `client` block configures the gRPC client used by the component. The following arguments are supported: -Name | Type | Description | Default | Required ----- | ---- | ----------- | ------- | -------- -`endpoint` | `string` | `host:port` to send telemetry data to. | | yes -`compression` | `string` | Compression mechanism to use for requests. | `"gzip"` | no -`read_buffer_size` | `string` | Size of the read buffer the gRPC client to use for reading server responses. | | no -`write_buffer_size` | `string` | Size of the write buffer the gRPC client to use for writing requests. | `"512KiB"` | no -`wait_for_ready` | `boolean` | Waits for gRPC connection to be in the `READY` state before sending data. | `false` | no -`headers` | `map(string)` | Additional headers to send with the request. | `{}` | no -`balancer_name` | `string` | Which gRPC client-side load balancer to use for requests. | `pick_first` | no -`authority` | `string` | Overrides the default `:authority` header in gRPC requests from the gRPC client. | | no -`auth` | `capsule(otelcol.Handler)` | Handler from an `otelcol.auth` component to use for authenticating requests. | | no +Name | Type | Description | Default | Required +--------------------|----------------------------|----------------------------------------------------------------------------------|--------------|--------- +`endpoint` | `string` | `host:port` to send telemetry data to. | | yes +`compression` | `string` | Compression mechanism to use for requests. | `"gzip"` | no +`read_buffer_size` | `string` | Size of the read buffer the gRPC client to use for reading server responses. | | no +`write_buffer_size` | `string` | Size of the write buffer the gRPC client to use for writing requests. | `"512KiB"` | no +`wait_for_ready` | `boolean` | Waits for gRPC connection to be in the `READY` state before sending data. 
| `false` | no +`headers` | `map(string)` | Additional headers to send with the request. | `{}` | no +`balancer_name` | `string` | Which gRPC client-side load balancer to use for requests. | `pick_first` | no +`authority` | `string` | Overrides the default `:authority` header in gRPC requests from the gRPC client. | | no +`auth` | `capsule(otelcol.Handler)` | Handler from an `otelcol.auth` component to use for authenticating requests. | | no -{{< docs/shared lookup="flow/reference/components/otelcol-compression-field.md" source="agent" version="" >}} +{{< docs/shared lookup="reference/components/otelcol-compression-field.md" source="alloy" version="" >}} -{{< docs/shared lookup="flow/reference/components/otelcol-grpc-balancer-name.md" source="agent" version="" >}} +{{< docs/shared lookup="reference/components/otelcol-grpc-balancer-name.md" source="alloy" version="" >}} -{{< docs/shared lookup="flow/reference/components/otelcol-grpc-authority.md" source="agent" version="" >}} +{{< docs/shared lookup="reference/components/otelcol-grpc-authority.md" source="alloy" version="" >}} An HTTP proxy can be configured through the following environment variables: @@ -114,50 +109,50 @@ able to handle and proxy HTTP/2 traffic. The `tls` block configures TLS settings used for the connection to the gRPC server. -{{< docs/shared lookup="flow/reference/components/otelcol-tls-config-block.md" source="agent" version="" >}} +{{< docs/shared lookup="reference/components/otelcol-tls-config-block.md" source="alloy" version="" >}} -> **NOTE**: `otelcol.exporter.otlp` uses gRPC, which does not allow you to send sensitive credentials (like `auth`) over insecure channels. -> Sending sensitive credentials over insecure non-TLS connections is supported by non-gRPC exporters such as [otelcol.exporter.otlphttp][]. +{{< admonition type="note" >}} +`otelcol.exporter.otlp` uses gRPC, which does not allow you to send sensitive credentials (like `auth`) over insecure channels. +Sending sensitive credentials over insecure non-TLS connections is supported by non-gRPC exporters such as [otelcol.exporter.otlphttp][]. -[otelcol.exporter.otlphttp]: {{< relref "./otelcol.exporter.otlphttp.md" >}} +[otelcol.exporter.otlphttp]: ../otelcol.exporter.otlphttp/ +{{< /admonition >}} ### keepalive block -The `keepalive` block configures keepalive settings for gRPC client -connections. +The `keepalive` block configures keepalive settings for gRPC client connections. The following arguments are supported: -Name | Type | Description | Default | Required ----- | ---- | ----------- | ------- | -------- -`ping_wait` | `duration` | How often to ping the server after no activity. | | no -`ping_response_timeout` | `duration` | Time to wait before closing inactive connections if the server does not respond to a ping. | | no -`ping_without_stream` | `boolean` | Send pings even if there is no active stream request. | | no +Name | Type | Description | Default | Required +------------------------|------------|--------------------------------------------------------------------------------------------|---------|--------- +`ping_wait` | `duration` | How often to ping the server after no activity. | | no +`ping_response_timeout` | `duration` | Time to wait before closing inactive connections if the server does not respond to a ping. | | no +`ping_without_stream` | `boolean` | Send pings even if there is no active stream request. 
| | no ### sending_queue block The `sending_queue` block configures an in-memory buffer of batches before data is sent to the gRPC server. -{{< docs/shared lookup="flow/reference/components/otelcol-queue-block.md" source="agent" version="" >}} +{{< docs/shared lookup="reference/components/otelcol-queue-block.md" source="alloy" version="" >}} ### retry_on_failure block -The `retry_on_failure` block configures how failed requests to the gRPC server are -retried. +The `retry_on_failure` block configures how failed requests to the gRPC server are retried. -{{< docs/shared lookup="flow/reference/components/otelcol-retry-block.md" source="agent" version="" >}} +{{< docs/shared lookup="reference/components/otelcol-retry-block.md" source="alloy" version="" >}} ### debug_metrics block -{{< docs/shared lookup="flow/reference/components/otelcol-debug-metrics-block.md" source="agent" version="" >}} +{{< docs/shared lookup="reference/components/otelcol-debug-metrics-block.md" source="alloy" version="" >}} ## Exported fields The following fields are exported and can be referenced by other components: -Name | Type | Description ----- | ---- | ----------- +Name | Type | Description +--------|--------------------|----------------------------------------------------------------- `input` | `otelcol.Consumer` | A value that other components can use to send telemetry data to. `input` accepts `otelcol.Consumer` data for any telemetry signal (metrics, diff --git a/docs/sources/reference/components/otelcol.exporter.otlphttp.md b/docs/sources/reference/components/otelcol.exporter.otlphttp.md new file mode 100644 index 0000000000..aa643c15d3 --- /dev/null +++ b/docs/sources/reference/components/otelcol.exporter.otlphttp.md @@ -0,0 +1,166 @@ +--- +canonical: https://grafana.com/docs/alloy/latest/reference/components/otelcol.exporter.otlphttp/ +description: Learn about otelcol.exporter.otlphttp +title: otelcol.exporter.otlphttp +--- + +# otelcol.exporter.otlphttp + +`otelcol.exporter.otlphttp` accepts telemetry data from other `otelcol` +components and writes them over the network using the OTLP HTTP protocol. + +> **NOTE**: `otelcol.exporter.otlphttp` is a wrapper over the upstream +> OpenTelemetry Collector `otlphttp` exporter. Bug reports or feature requests +> will be redirected to the upstream repository, if necessary. + +Multiple `otelcol.exporter.otlphttp` components can be specified by giving them +different labels. + +## Usage + +```river +otelcol.exporter.otlphttp "LABEL" { + client { + endpoint = "HOST:PORT" + } +} +``` + +## Arguments + +`otelcol.exporter.otlphttp` supports the following arguments: + +Name | Type | Description | Default | Required +-------------------|----------|----------------------------------|-----------------------------------|--------- +`metrics_endpoint` | `string` | The endpoint to send metrics to. | `client.endpoint + "/v1/metrics"` | no +`logs_endpoint` | `string` | The endpoint to send logs to. | `client.endpoint + "/v1/logs"` | no +`traces_endpoint` | `string` | The endpoint to send traces to. | `client.endpoint + "/v1/traces"` | no + +The default value depends on the `endpoint` field set in the required `client` +block. If set, these arguments override the `client.endpoint` field for the +corresponding signal. 
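+
+For example, a minimal sketch that keeps the default endpoints for metrics and logs but overrides the
+endpoint for traces. The `tempo.example.com` host and the `/otlp/v1/traces` path are hypothetical
+placeholders, not values taken from any real deployment:
+
+```river
+otelcol.exporter.otlphttp "custom_traces" {
+  client {
+    endpoint = "https://tempo.example.com"
+  }
+
+  // Overrides the default client.endpoint + "/v1/traces" for the traces signal only.
+  traces_endpoint = "https://tempo.example.com/otlp/v1/traces"
+}
+```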
+
+## Blocks
+
+The following blocks are supported inside the definition of
+`otelcol.exporter.otlphttp`:
+
+Hierarchy | Block | Description | Required
+-----------------|----------------------|----------------------------------------------------------------------------|---------
+client | [client][] | Configures the HTTP server to send telemetry data to. | yes
+client > tls | [tls][] | Configures TLS for the HTTP client. | no
+sending_queue | [sending_queue][] | Configures batching of data before sending. | no
+retry_on_failure | [retry_on_failure][] | Configures retry mechanism for failed requests. | no
+debug_metrics | [debug_metrics][] | Configures the metrics that this component generates to monitor its state. | no
+
+The `>` symbol indicates deeper levels of nesting. For example, `client > tls`
+refers to a `tls` block defined inside a `client` block.
+
+[client]: #client-block
+[tls]: #tls-block
+[sending_queue]: #sending_queue-block
+[retry_on_failure]: #retry_on_failure-block
+[debug_metrics]: #debug_metrics-block
+
+### client block
+
+The `client` block configures the HTTP client used by the component.
+
+The following arguments are supported:
+
+Name | Type | Description | Default | Required
+--------------------------|----------------------------|--------------------------------------------------------------------------------|------------|---------
+`endpoint` | `string` | The target URL to send telemetry data to. | | yes
+`read_buffer_size` | `string` | Size of the read buffer the HTTP client uses for reading server responses. | `0` | no
+`write_buffer_size` | `string` | Size of the write buffer the HTTP client uses for writing requests. | `"512KiB"` | no
+`timeout` | `duration` | Time to wait before marking a request as failed. | `"30s"` | no
+`headers` | `map(string)` | Additional headers to send with the request. | `{}` | no
+`compression` | `string` | Compression mechanism to use for requests. | `"gzip"` | no
+`max_idle_conns` | `int` | Limits the number of idle HTTP connections the client can keep open. | `100` | no
+`max_idle_conns_per_host` | `int` | Limits the number of idle HTTP connections the host can keep open. | `0` | no
+`max_conns_per_host` | `int` | Limits the total (dialing, active, and idle) number of connections per host. | `0` | no
+`idle_conn_timeout` | `duration` | Time to wait before an idle connection closes itself. | `"90s"` | no
+`disable_keep_alives` | `bool` | Disable HTTP keep-alive. | `false` | no
+`auth` | `capsule(otelcol.Handler)` | Handler from an `otelcol.auth` component to use for authenticating requests. | | no
+
+Setting `disable_keep_alives` to `true` will result in significant overhead because a new HTTP(S) connection must be established for every request.
+Before enabling this option, consider whether changes to idle connection settings can achieve your goal.
+
+{{< docs/shared lookup="reference/components/otelcol-compression-field.md" source="alloy" version="" >}}
+
+### tls block
+
+The `tls` block configures TLS settings used for the connection to the HTTP
+server.
+
+{{< docs/shared lookup="reference/components/otelcol-tls-config-block.md" source="alloy" version="" >}}
+
+### sending_queue block
+
+The `sending_queue` block configures an in-memory buffer of batches before data is sent
+to the HTTP server.
+
+{{< docs/shared lookup="reference/components/otelcol-queue-block.md" source="alloy" version="" >}}
+
+### retry_on_failure block
+
+The `retry_on_failure` block configures how failed requests to the HTTP server are
+retried.
+ +{{< docs/shared lookup="reference/components/otelcol-retry-block.md" source="alloy" version="" >}} + +### debug_metrics block + +{{< docs/shared lookup="reference/components/otelcol-debug-metrics-block.md" source="alloy" version="" >}} + +## Exported fields + +The following fields are exported and can be referenced by other components: + +Name | Type | Description +--------|--------------------|----------------------------------------------------------------- +`input` | `otelcol.Consumer` | A value that other components can use to send telemetry data to. + +`input` accepts `otelcol.Consumer` data for any telemetry signal (metrics, +logs, or traces). + +## Component health + +`otelcol.exporter.otlphttp` is only reported as unhealthy if given an invalid +configuration. + +## Debug information + +`otelcol.exporter.otlphttp` does not expose any component-specific debug +information. + +## Example + +This example creates an exporter to send data to a locally running Grafana +Tempo without TLS: + +```river +otelcol.exporter.otlphttp "tempo" { + client { + endpoint = "http://tempo:4317" + tls { + insecure = true + insecure_skip_verify = true + } + } +} +``` + + +## Compatible components + +`otelcol.exporter.otlphttp` has exports that can be consumed by the following components: + +- Components that consume [OpenTelemetry `otelcol.Consumer`](../../compatibility/#opentelemetry-otelcolconsumer-consumers) + +{{< admonition type="note" >}} +Connecting some components may not be sensible or components may require further configuration to make the connection work correctly. +Refer to the linked documentation for more details. +{{< /admonition >}} + + \ No newline at end of file diff --git a/docs/sources/flow/reference/components/otelcol.exporter.prometheus.md b/docs/sources/reference/components/otelcol.exporter.prometheus.md similarity index 69% rename from docs/sources/flow/reference/components/otelcol.exporter.prometheus.md rename to docs/sources/reference/components/otelcol.exporter.prometheus.md index 33328e6d2a..fc21c7cf30 100644 --- a/docs/sources/flow/reference/components/otelcol.exporter.prometheus.md +++ b/docs/sources/reference/components/otelcol.exporter.prometheus.md @@ -1,10 +1,5 @@ --- -aliases: -- /docs/grafana-cloud/agent/flow/reference/components/otelcol.exporter.prometheus/ -- /docs/grafana-cloud/monitor-infrastructure/agent/flow/reference/components/otelcol.exporter.prometheus/ -- /docs/grafana-cloud/monitor-infrastructure/integrations/agent/flow/reference/components/otelcol.exporter.prometheus/ -- /docs/grafana-cloud/send-data/agent/flow/reference/components/otelcol.exporter.prometheus/ -canonical: https://grafana.com/docs/agent/latest/flow/reference/components/otelcol.exporter.prometheus/ +canonical: https://grafana.com/docs/alloy/latest/reference/components/otelcol.exporter.prometheus/ description: Learn about otelcol.exporter.prometheus title: otelcol.exporter.prometheus --- @@ -15,16 +10,16 @@ title: otelcol.exporter.prometheus `otelcol` components, converts metrics to Prometheus-formatted metrics, and forwards the resulting metrics to `prometheus` components. -> **NOTE**: `otelcol.exporter.prometheus` is a custom component unrelated to the -> `prometheus` exporter from OpenTelemetry Collector. -> -> Conversion of metrics are done according to the OpenTelemetry -> [Metrics Data Model][] specification. +{{< admonition type="note" >}} +`otelcol.exporter.prometheus` is a custom component unrelated to the `prometheus` exporter from OpenTelemetry Collector. 
-Multiple `otelcol.exporter.prometheus` components can be specified by giving them -different labels. +Conversion of metrics are done according to the OpenTelemetry [Metrics Data Model][] specification. [Metrics Data Model]: https://opentelemetry.io/docs/reference/specification/metrics/data-model/ +{{< /admonition >}} + +Multiple `otelcol.exporter.prometheus` components can be specified by giving them +different labels. ## Usage @@ -38,17 +33,17 @@ otelcol.exporter.prometheus "LABEL" { `otelcol.exporter.prometheus` supports the following arguments: -Name | Type | Description | Default | Required ----- | ---- |-----------------------------------------------------------| ------- | -------- -`include_target_info` | `boolean` | Whether to include `target_info` metrics. | `true` | no -`include_scope_info` | `boolean` | Whether to include `otel_scope_info` metrics. | `false` | no -`include_scope_labels` | `boolean` | Whether to include additional OTLP labels in all metrics. | `true` | no -`add_metric_suffixes` | `boolean` | Whether to add type and unit suffixes to metrics names. | `true` | no -`gc_frequency` | `duration` | How often to clean up stale metrics from memory. | `"5m"` | no -`forward_to` | `list(MetricsReceiver)` | Where to forward converted Prometheus metrics. | | yes -`resource_to_telemetry_conversion` | `boolean` | Whether to convert OTel resource attributes to Prometheus labels. | `false` | no - -By default, OpenTelemetry resources are converted into `target_info` metrics. +Name | Type | Description | Default | Required +-----------------------------------|-------------------------|-------------------------------------------------------------------|---------|--------- +`include_target_info` | `boolean` | Whether to include `target_info` metrics. | `true` | no +`include_scope_info` | `boolean` | Whether to include `otel_scope_info` metrics. | `false` | no +`include_scope_labels` | `boolean` | Whether to include additional OTLP labels in all metrics. | `true` | no +`add_metric_suffixes` | `boolean` | Whether to add type and unit suffixes to metrics names. | `true` | no +`gc_frequency` | `duration` | How often to clean up stale metrics from memory. | `"5m"` | no +`forward_to` | `list(MetricsReceiver)` | Where to forward converted Prometheus metrics. | | yes +`resource_to_telemetry_conversion` | `boolean` | Whether to convert OTel resource attributes to Prometheus labels. | `false` | no + +By default, OpenTelemetry resources are converted into `target_info` metrics. OpenTelemetry instrumentation scopes are converted into `otel_scope_info` metrics. Set the `include_scope_info` and `include_target_info` arguments to `false`, respectively, to disable the custom metrics. @@ -59,23 +54,21 @@ When `include_scope_labels` is `true` the `otel_scope_name` and When `include_target_info` is true, OpenTelemetry Collector resources are converted into `target_info` metrics. {{< admonition type="note" >}} - -OTLP metrics can have a lot of resource attributes. +OTLP metrics can have a lot of resource attributes. Setting `resource_to_telemetry_conversion` to `true` would convert all of them to Prometheus labels, which may not be what you want. -Instead of using `resource_to_telemetry_conversion`, most users need to use `otelcol.processor.transform` -to convert OTLP resource attributes to OTLP metric datapoint attributes before using `otelcol.exporter.prometheus`. 
+Instead of using `resource_to_telemetry_conversion`, most users need to use `otelcol.processor.transform`
+to convert OTLP resource attributes to OTLP metric datapoint attributes before using `otelcol.exporter.prometheus`.
 See [Creating Prometheus labels from OTLP resource attributes][] for an example.
 
 [Creating Prometheus labels from OTLP resource attributes]: #creating-prometheus-labels-from-otlp-resource-attributes
-
 {{< /admonition >}}
 
 ## Exported fields
 
 The following fields are exported and can be referenced by other components:
 
-Name | Type | Description
----- | ---- | -----------
+Name | Type | Description
+--------|--------------------|-----------------------------------------------------------------
 `input` | `otelcol.Consumer` | A value that other components can use to send telemetry data to.
 
 `input` accepts `otelcol.Consumer` data for metrics. Other telemetry signals are ignored.
 
@@ -89,13 +82,11 @@ The following are dropped during the conversion process:
 
 ## Component health
 
-`otelcol.exporter.prometheus` is only reported as unhealthy if given an invalid
-configuration.
+`otelcol.exporter.prometheus` is only reported as unhealthy if given an invalid configuration.
 
 ## Debug information
 
-`otelcol.exporter.prometheus` does not expose any component-specific debug
-information.
+`otelcol.exporter.prometheus` does not expose any component-specific debug information.
 
 ## Example
 
@@ -127,7 +118,7 @@ prometheus.remote_write "mimir" {
 ## Create Prometheus labels from OTLP resource attributes
 
 This example uses `otelcol.processor.transform` to add extra `key1` and `key2` OTLP metric datapoint attributes from the
-`key1` and `key2` OTLP resource attributes.
+`key1` and `key2` OTLP resource attributes.
 
 `otelcol.exporter.prometheus` then converts `key1` and `key2` to Prometheus labels along with any other OTLP metric datapoint attributes.
 
diff --git a/docs/sources/reference/components/otelcol.extension.jaeger_remote_sampling.md b/docs/sources/reference/components/otelcol.extension.jaeger_remote_sampling.md
new file mode 100644
index 0000000000..140c2d2c6d
--- /dev/null
+++ b/docs/sources/reference/components/otelcol.extension.jaeger_remote_sampling.md
@@ -0,0 +1,301 @@
+---
+canonical: https://grafana.com/docs/alloy/latest/reference/components/otelcol.extension.jaeger_remote_sampling/
+description: Learn about otelcol.extension.jaeger_remote_sampling
+labels:
+  stage: experimental
+title: otelcol.extension.jaeger_remote_sampling
+---
+
+# otelcol.extension.jaeger_remote_sampling
+
+{{< docs/shared lookup="stability/experimental.md" source="alloy" version="" >}}
+
+`otelcol.extension.jaeger_remote_sampling` serves a specified Jaeger remote sampling document.
+
+{{< admonition type="note" >}}
+`otelcol.extension.jaeger_remote_sampling` is a wrapper over the upstream OpenTelemetry Collector `jaegerremotesampling` extension.
+Bug reports or feature requests will be redirected to the upstream repository, if necessary.
+{{< /admonition >}}
+
+Multiple `otelcol.extension.jaeger_remote_sampling` components can be specified by giving them different labels.
+
+## Usage
+
+```river
+otelcol.extension.jaeger_remote_sampling "LABEL" {
+  source {
+  }
+}
+```
+
+## Arguments
+
+`otelcol.extension.jaeger_remote_sampling` doesn't support any arguments and is configured fully through inner blocks.
+
+## Blocks
+
+The following blocks are supported inside the definition of
+`otelcol.extension.jaeger_remote_sampling`:
+
+Hierarchy | Block | Description | Required
+--------------------------------------|-------------------------------|------------------------------------------------------------------------------------|---------
+http | [http][] | Configures the HTTP server to serve Jaeger remote sampling. | no
+http > tls | [tls][] | Configures TLS for the HTTP server. | no
+http > cors | [cors][] | Configures CORS for the HTTP server. | no
+grpc | [grpc][] | Configures the gRPC server to serve Jaeger remote sampling. | no
+grpc > tls | [tls][] | Configures TLS for the gRPC server. | no
+grpc > keepalive | [keepalive][] | Configures keepalive settings for the configured server. | no
+grpc > keepalive > server_parameters | [server_parameters][] | Server parameters used to configure keepalive settings. | no
+grpc > keepalive > enforcement_policy | [enforcement_policy][] | Enforcement policy for keepalive settings. | no
+source | [source][] | Configures the Jaeger remote sampling document. | yes
+source > remote | [remote][] | Configures the gRPC client used to retrieve the Jaeger remote sampling document. | no
+source > remote > tls | [tls][tls_client] | Configures TLS for the gRPC client. | no
+source > remote > keepalive | [keepalive][keepalive_client] | Configures keepalive settings for the gRPC client. | no
+
+The `>` symbol indicates deeper levels of nesting. For example, `grpc > tls`
+refers to a `tls` block defined inside a `grpc` block.
+
+[http]: #http-block
+[tls]: #tls-block
+[cors]: #cors-block
+[grpc]: #grpc-block
+[keepalive]: #keepalive-block
+[server_parameters]: #server_parameters-block
+[enforcement_policy]: #enforcement_policy-block
+[source]: #source-block
+[remote]: #remote-block
+[tls_client]: #tls-client-block
+[keepalive_client]: #keepalive-client-block
+
+### http block
+
+The `http` block configures an HTTP server which serves the Jaeger remote sampling document.
+
+The following arguments are supported:
+
+Name | Type | Description | Default | Required
+------------------------|-----------|-------------------------------------------------------------------------|------------------|---------
+`endpoint` | `string` | `host:port` to listen for traffic on. | `"0.0.0.0:5778"` | no
+`max_request_body_size` | `string` | Maximum request body size the server will allow. No limit when unset. | | no
+`include_metadata` | `boolean` | Propagate incoming connection metadata to downstream consumers. | | no
+
+### tls block
+
+The `tls` block configures TLS settings used for a server. If the `tls` block
+isn't provided, TLS won't be used for connections to the server.
+
+The following arguments are supported:
+
+Name | Type | Description | Default | Required
+------------------|------------|----------------------------------------------------------------|-------------|---------
+`ca_file` | `string` | Path to the CA file. | | no
+`cert_file` | `string` | Path to the TLS certificate. | | no
+`key_file` | `string` | Path to the TLS certificate key. | | no
+`min_version` | `string` | Minimum acceptable TLS version for connections. | `"TLS 1.2"` | no
+`max_version` | `string` | Maximum acceptable TLS version for connections. | `"TLS 1.3"` | no
+`reload_interval` | `duration` | Frequency to reload the certificates. | | no
+`client_ca_file` | `string` | Path to the CA file used to authenticate client certificates. | | no
+
+### cors block
+
+The `cors` block configures CORS settings for an HTTP server.
+ +The following arguments are supported: + +Name | Type | Description | Default | Required +------------------|----------------|----------------------------------------------------------|------------------------|--------- +`allowed_origins` | `list(string)` | Allowed values for the `Origin` header. | | no +`allowed_headers` | `list(string)` | Accepted headers from CORS requests. | `["X-Requested-With"]` | no +`max_age` | `number` | Configures the `Access-Control-Max-Age` response header. | | no + +The `allowed_headers` specifies which headers are acceptable from a CORS +request. The following headers are always implicitly allowed: + +* `Accept` +* `Accept-Language` +* `Content-Type` +* `Content-Language` + +If `allowed_headers` includes `"*"`, all headers will be permitted. + +### grpc block + +The `grpc` block configures a gRPC server which serves the Jaeger remote + sampling document. + +The following arguments are supported: + +Name | Type | Description | Default | Required +-------------------------|-----------|----------------------------------------------------------------------------|-------------------|--------- +`endpoint` | `string` | `host:port` to listen for traffic on. | `"0.0.0.0:14250"` | no +`transport` | `string` | Transport to use for the gRPC server. | `"tcp"` | no +`max_recv_msg_size` | `string` | Maximum size of messages the server will accept. 0 disables a limit. | | no +`max_concurrent_streams` | `number` | Limit the number of concurrent streaming RPC calls. | | no +`read_buffer_size` | `string` | Size of the read buffer the gRPC server will use for reading from clients. | `"512KiB"` | no +`write_buffer_size` | `string` | Size of the write buffer the gRPC server will use for writing to clients. | | no +`include_metadata` | `boolean` | Propagate incoming connection metadata to downstream consumers. | | no + +### keepalive block + +The `keepalive` block configures keepalive settings for connections to a gRPC +server. + +`keepalive` doesn't support any arguments and is configured fully through inner +blocks. + +### server_parameters block + +The `server_parameters` block controls keepalive and maximum age settings for gRPC +servers. + +The following arguments are supported: + +Name | Type | Description | Default | Required +---------------------------|------------|--------------------------------------------------------------------------------------|--------------|--------- +`max_connection_idle` | `duration` | Maximum age for idle connections. | `"infinity"` | no +`max_connection_age` | `duration` | Maximum age for non-idle connections. | `"infinity"` | no +`max_connection_age_grace` | `duration` | Time to wait before forcibly closing connections. | `"infinity"` | no +`time` | `duration` | How often to ping inactive clients to check for liveness. | `"2h"` | no +`timeout` | `duration` | Time to wait before closing inactive clients that do not respond to liveness checks. | `"20s"` | no + +### enforcement_policy block + +The `enforcement_policy` block configures the keepalive enforcement policy for +gRPC servers. The server will close connections from clients that violate the +configured policy. + +The following arguments are supported: + +Name | Type | Description | Default | Required +------------------------|------------|-------------------------------------------------------------------------|---------|--------- +`min_time` | `duration` | Minimum time clients should wait before sending a keepalive ping. 
| `"5m"` | no +`permit_without_stream` | `boolean` | Allow clients to send keepalive pings when there are no active streams. | `false` | no + +### source block + +The `source` block configures the method of retrieving the Jaeger remote sampling document +that is served by the servers specified in the `grpc` and `http` blocks. + +The following arguments are supported: + +Name | Type | Description | Default | Required +------------------|------------|---------------------------------------------------------------------------------|---------|--------- +`file` | `string` | A local file containing a Jaeger remote sampling document. | `""` | no +`reload_interval` | `duration` | The interval at which to reload the specified file. Leave at 0 to never reload. | `0` | no +`content` | `string` | A string containing the Jaeger remote sampling contents directly. | `""` | no + +Exactly one of the `file` argument, `content` argument or `remote` block must be specified. + +### remote block + +The `remote` block configures the gRPC client used by the component. + +The following arguments are supported: + +Name | Type | Description | Default | Required +--------------------|----------------------------|----------------------------------------------------------------------------------|--------------|--------- +`endpoint` | `string` | `host:port` to send telemetry data to. | | yes +`compression` | `string` | Compression mechanism to use for requests. | `"gzip"` | no +`read_buffer_size` | `string` | Size of the read buffer the gRPC client to use for reading server responses. | | no +`write_buffer_size` | `string` | Size of the write buffer the gRPC client to use for writing requests. | `"512KiB"` | no +`wait_for_ready` | `boolean` | Waits for gRPC connection to be in the `READY` state before sending data. | `false` | no +`headers` | `map(string)` | Additional headers to send with the request. | `{}` | no +`balancer_name` | `string` | Which gRPC client-side load balancer to use for requests. | `pick_first` | no +`authority` | `string` | Overrides the default `:authority` header in gRPC requests from the gRPC client. | | no +`auth` | `capsule(otelcol.Handler)` | Handler from an `otelcol.auth` component to use for authenticating requests. | | no + +{{< docs/shared lookup="reference/components/otelcol-compression-field.md" source="alloy" version="" >}} + +{{< docs/shared lookup="reference/components/otelcol-grpc-balancer-name.md" source="alloy" version="" >}} + +{{< docs/shared lookup="reference/components/otelcol-grpc-authority.md" source="alloy" version="" >}} + +An HTTP proxy can be configured through the following environment variables: + +* `HTTPS_PROXY` +* `NO_PROXY` + +The `HTTPS_PROXY` environment variable specifies a URL to use for proxying +requests. Connections to the proxy are established via [the `HTTP CONNECT` +method][HTTP CONNECT]. + +The `NO_PROXY` environment variable is an optional list of comma-separated +hostnames for which the HTTPS proxy should _not_ be used. Each hostname can be +provided as an IP address (`1.2.3.4`), an IP address in CIDR notation +(`1.2.3.4/8`), a domain name (`example.com`), or `*`. A domain name matches +that domain and all subdomains. A domain name with a leading "." +(`.example.com`) matches subdomains only. `NO_PROXY` is only read when +`HTTPS_PROXY` is set. + +Because `otelcol.extension.jaeger_remote_sampling` uses gRPC, the configured proxy server must be +able to handle and proxy HTTP/2 traffic. 
+
+[HTTP CONNECT]: https://developer.mozilla.org/en-US/docs/Web/HTTP/Methods/CONNECT
+
+### tls client block
+
+The `tls` block configures TLS settings used for the connection to the gRPC
+server.
+
+{{< docs/shared lookup="reference/components/otelcol-tls-config-block.md" source="alloy" version="" >}}
+
+### keepalive client block
+
+The `keepalive` block configures keepalive settings for gRPC client
+connections.
+
+The following arguments are supported:
+
+Name | Type | Description | Default | Required
+------------------------|------------|----------------------------------------------------------------------------------------------|---------|---------
+`ping_wait` | `duration` | How often to ping the server after no activity. | | no
+`ping_response_timeout` | `duration` | Time to wait before closing inactive connections if the server does not respond to a ping. | | no
+`ping_without_stream` | `boolean` | Send pings even if there is no active stream request. | | no
+
+## Component health
+
+`otelcol.extension.jaeger_remote_sampling` is only reported as unhealthy if given an invalid
+configuration.
+
+## Debug information
+
+`otelcol.extension.jaeger_remote_sampling` does not expose any component-specific debug information.
+
+## Examples
+
+### Serving from a file
+
+This example configures the Jaeger remote sampling extension to load a local JSON document and
+serve it over the default HTTP port of 5778. This configuration style currently exists for consistency
+with upstream OpenTelemetry Collector components and may be removed.
+
+```river
+otelcol.extension.jaeger_remote_sampling "example" {
+  http {
+  }
+  source {
+    file = "/path/to/jaeger-sampling.json"
+    reload_interval = "10s"
+  }
+}
+```
+
+### Serving from another component
+
+This example uses the output of a component to determine what sampling
+rules to serve:
+
+```river
+local.file "sampling" {
+  filename = "/path/to/jaeger-sampling.json"
+}
+
+otelcol.extension.jaeger_remote_sampling "example" {
+  http {
+  }
+  source {
+    content = local.file.sampling.content
+  }
+}
+```
diff --git a/docs/sources/flow/reference/components/otelcol.processor.attributes.md b/docs/sources/reference/components/otelcol.processor.attributes.md
similarity index 80%
rename from docs/sources/flow/reference/components/otelcol.processor.attributes.md
rename to docs/sources/reference/components/otelcol.processor.attributes.md
index 6c07d1c713..c76c385b79 100644
--- a/docs/sources/flow/reference/components/otelcol.processor.attributes.md
+++ b/docs/sources/reference/components/otelcol.processor.attributes.md
@@ -1,10 +1,5 @@
 ---
-aliases:
-- /docs/grafana-cloud/agent/flow/reference/components/otelcol.processor.attributes/
-- /docs/grafana-cloud/monitor-infrastructure/agent/flow/reference/components/otelcol.processor.attributes/
-- /docs/grafana-cloud/monitor-infrastructure/integrations/agent/flow/reference/components/otelcol.processor.attributes/
-- /docs/grafana-cloud/send-data/agent/flow/reference/components/otelcol.processor.attributes/
-canonical: https://grafana.com/docs/agent/latest/flow/reference/components/otelcol.processor.attributes/
+canonical: https://grafana.com/docs/alloy/latest/reference/components/otelcol.processor.attributes/
 description: Learn about otelcol.processor.attributes
 title: otelcol.processor.attributes
 ---
@@ -13,15 +8,14 @@ title: otelcol.processor.attributes
 
 `otelcol.processor.attributes` accepts telemetry data from other `otelcol`
 components and modifies attributes of a span, log, or metric.
-It also supports the ability to filter and match input data to determine if
-it should be included or excluded for attribute modifications.
+It also supports the ability to filter and match input data to determine if it should be included or excluded for attribute modifications.
 
-> **NOTE**: `otelcol.processor.attributes` is a wrapper over the upstream
-> OpenTelemetry Collector `attributes` processor. Bug reports or feature requests
-> will be redirected to the upstream repository, if necessary.
+{{< admonition type="note" >}}
+`otelcol.processor.attributes` is a wrapper over the upstream OpenTelemetry Collector `attributes` processor.
+Bug reports or feature requests will be redirected to the upstream repository, if necessary.
+{{< /admonition >}}
 
-You can specify multiple `otelcol.processor.attributes` components by giving them
-different labels.
+You can specify multiple `otelcol.processor.attributes` components by giving them different labels.
 
 ## Usage
 
@@ -37,29 +31,29 @@ otelcol.processor.attributes "LABEL" {
 
 ## Arguments
 
-`otelcol.processor.attributes` doesn't support any arguments and is configured fully
-through inner blocks.
+`otelcol.processor.attributes` doesn't support any arguments and is configured fully through inner blocks.
 
 ## Blocks
 
 The following blocks are supported inside the definition of
 `otelcol.processor.attributes`:
 
-Hierarchy | Block | Description | Required
---------- | ----- | ----------- | --------
-output | [output][] | Configures where to send received telemetry data. | yes
-action | [action][] | Actions to take on the attributes of incoming metrics/logs/traces. | no
-include | [include][] | Filter for data included in this processor's actions. | no
-include > regexp | [regexp][] | Regex cache settings. | no
-include > attribute | [attribute][] | A list of attributes to match against. | no
-include > resource | [resource][] | A list of items to match the resources against. | no
-include > library | [library][] | A list of items to match the implementation library against. | no
-include > log_severity | [library][] | How to match against a log record's SeverityNumber, if defined. | no
-exclude | [exclude][] | Filter for data excluded from this processor's actions | no
-exclude > regexp | [regexp][] | Regex cache settings. | no
-exclude > attribute | [attribute][] | A list of attributes to match against. | no
-exclude > resource | [resource][] | A list of items to match the resources against. | no
-exclude > library | [library][] | A list of items to match the implementation library against. | no
-exclude > log_severity | [log_severity][] | How to match against a log record's SeverityNumber, if defined. | no
+
+Hierarchy | Block | Description | Required
+-----------------------|------------------|--------------------------------------------------------------------|---------
+output | [output][] | Configures where to send received telemetry data. | yes
+action | [action][] | Actions to take on the attributes of incoming metrics/logs/traces. | no
+include | [include][] | Filter for data included in this processor's actions. | no
+include > regexp | [regexp][] | Regex cache settings. | no
+include > attribute | [attribute][] | A list of attributes to match against. | no
+include > resource | [resource][] | A list of items to match the resources against. | no
+include > library | [library][] | A list of items to match the implementation library against. | no
+include > log_severity | [log_severity][] | How to match against a log record's SeverityNumber, if defined.
| no
+exclude | [exclude][] | Filter for data excluded from this processor's actions. | no
+exclude > regexp | [regexp][] | Regex cache settings. | no
+exclude > attribute | [attribute][] | A list of attributes to match against. | no
+exclude > resource | [resource][] | A list of items to match the resources against. | no
+exclude > library | [library][] | A list of items to match the implementation library against. | no
+exclude > log_severity | [log_severity][] | How to match against a log record's SeverityNumber, if defined. | no
 
 The `>` symbol indicates deeper levels of nesting. For example, `include > attribute`
 refers to an `attribute` block defined inside an `include` block.
@@ -82,15 +76,15 @@ The `action` block configures how to modify the span, log, or metric.
 
 The following attributes are supported:
 
-Name | Type | Description | Default | Required
----- | ---- | ----------- | ------- | --------
-`key` | `string` | The attribute that the action relates to. | | yes
-`action` | `string` | The type of action performed. | | yes
-`value` | `any` | The value to populate for the key. | | no
-`pattern` | `string` | A regex pattern. | `""` | no
-`from_attribute` | `string` | The attribute from the input data used to populate the attribute value. | `""` | no
-`from_context` | `string` | The context value used to populate the attribute value. | `""` | no
-`converted_type` | `string` | The type to convert the attribute value to. | `""` | no
+Name | Type | Description | Default | Required
+-----------------|----------|--------------------------------------------------------------------------|---------|---------
+`key` | `string` | The attribute that the action relates to. | | yes
+`action` | `string` | The type of action performed. | | yes
+`value` | `any` | The value to populate for the key. | | no
+`pattern` | `string` | A regex pattern. | `""` | no
+`from_attribute` | `string` | The attribute from the input data used to populate the attribute value. | `""` | no
+`from_context` | `string` | The context value used to populate the attribute value. | `""` | no
+`converted_type` | `string` | The type to convert the attribute value to. | `""` | no
 
 The `value` data type must be either a number, string, or boolean.
 
@@ -106,7 +100,7 @@ The supported values for `action` are:
 
   * The `key` attribute is required. It specifies the attribute to act upon.
   * One of the `value`, `from_attribute` or `from_context` attributes is required.
 
-* `upsert`: Either inserts a new attribute in input data where the key does not already exist
+* `upsert`: Either inserts a new attribute in input data where the key does not already exist
   or updates an attribute in input data where the key does exist.
 
   * The `key` attribute is required. It specifies the attribute to act upon.
   * `value` specifies the value to populate for the key.
   * `from_attribute` specifies the attribute from the input data used to populate
    the value. If the attribute doesn't exist, no action is performed.
   * `from_context` specifies the context value used to populate the attribute
    value. If the key is prefixed with `metadata.`, the values are searched
-  in the receiver's transport protocol for additional information like gRPC Metadata or HTTP Headers.
+  in the receiver's transport protocol for additional information like gRPC Metadata or HTTP Headers.
   If the key is prefixed with `auth.`, the values are searched
-  in the authentication information set by the server authenticator.
-  Refer to the server authenticator's documentation part of your pipeline
+  in the authentication information set by the server authenticator.
+  Refer to the documentation of the server authenticator in your pipeline
   for more information about which attributes are available.
   If the key doesn't exist, no action is performed.
   If the key has multiple values, the values will be joined with a `;` separator.
@@ -128,12 +122,12 @@ The supported values for `action` are:
 
   * The `key` attribute and/or the `pattern` attribute is required.
 
-* `extract`: Extracts values using a regular expression rule from the input key to target keys specified in the rule.
-  If a target key already exists, it will be overridden. Note: It behaves similarly to the Span Processor `to_attributes`
+* `extract`: Extracts values using a regular expression rule from the input key to target keys specified in the rule.
+  If a target key already exists, it will be overridden. Note: It behaves similarly to the Span Processor `to_attributes`
   setting with the existing attribute as the source.
 
   * The `key` attribute is required. It specifies the attribute to extract values from. The value of `key` is NOT altered.
-  * The `pattern` attribute is required. It is the regex pattern used to extract attributes from the value of `key`.
+  * The `pattern` attribute is required. It is the regex pattern used to extract attributes from the value of `key`.
     The submatchers must be named. If attributes already exist, they will be overwritten.
 
 * `convert`: Converts an existing attribute to a specified type.
@@ -149,14 +143,14 @@ The supported values for `action` are:
 
 The `include` block provides an option to include data being fed into the [action] blocks based on the properties of span, log, or metric records.
 
-{{< docs/shared lookup="flow/reference/components/match-properties-block.md" source="agent" version="" >}}
+{{< docs/shared lookup="reference/components/match-properties-block.md" source="alloy" version="" >}}
 
 One of the following is also required:
 
-* For spans, one of `services`, `span_names`, `span_kinds`, [attribute][], [resource][], or [library][] must be specified
+* For spans, one of `services`, `span_names`, `span_kinds`, [attribute][], [resource][], or [library][] must be specified
   with a non-empty value for a valid configuration. The `log_bodies`, `log_severity_texts`, `log_severity`, and `metric_names` attributes are invalid.
-* For logs, one of `log_bodies`, `log_severity_texts`, `log_severity`, [attribute][], [resource][], or [library][] must be
+* For logs, one of `log_bodies`, `log_severity_texts`, `log_severity`, [attribute][], [resource][], or [library][] must be
   specified with a non-empty value for a valid configuration. The `span_names`, `span_kinds`, `metric_names`, and `services` attributes are invalid.
-* For metrics, `metric_names` must be specified with a valid non-empty value for a valid configuration. The `span_names`,
+* For metrics, `metric_names` must be specified with a valid non-empty value for a valid configuration. The `span_names`,
   `span_kinds`, `log_bodies`, `log_severity_texts`, `log_severity`, `services`, [attribute][], [resource][], and [library][] attributes are invalid.
 
 If the configuration includes filters which are specific to a particular signal type, it is best to include only that signal type in the component's output.
@@ -169,10 +163,12 @@ The `exclude` block provides an option to exclude data from being fed into the [
 
 {{< admonition type="note" >}}
 Signals excluded by the `exclude` block will still be propagated to downstream components as-is.
If you would like to not propagate certain signals to downstream components, -consider a processor such as [otelcol.processor.tail_sampling]({{< relref "./otelcol.processor.tail_sampling.md" >}}). +consider a processor such as [otelcol.processor.tail_sampling][]. + +[otelcol.processor.tail_sampling]: ../otelcol.processor.tail_sampling/ {{< /admonition >}} -{{< docs/shared lookup="flow/reference/components/match-properties-block.md" source="agent" version="" >}} +{{< docs/shared lookup="reference/components/match-properties-block.md" source="alloy" version="" >}} One of the following is also required: * For spans, one of `services`, `span_names`, `span_kinds`, [attribute][], [resource][], or [library][] must be specified @@ -187,34 +183,34 @@ For example, adding a `span_names` filter could cause the component to error if ### regexp block -{{< docs/shared lookup="flow/reference/components/otelcol-filter-regexp-block.md" source="agent" version="" >}} +{{< docs/shared lookup="reference/components/otelcol-filter-regexp-block.md" source="alloy" version="" >}} ### attribute block -{{< docs/shared lookup="flow/reference/components/otelcol-filter-attribute-block.md" source="agent" version="" >}} +{{< docs/shared lookup="reference/components/otelcol-filter-attribute-block.md" source="alloy" version="" >}} ### resource block -{{< docs/shared lookup="flow/reference/components/otelcol-filter-resource-block.md" source="agent" version="" >}} +{{< docs/shared lookup="reference/components/otelcol-filter-resource-block.md" source="alloy" version="" >}} ### library block -{{< docs/shared lookup="flow/reference/components/otelcol-filter-library-block.md" source="agent" version="" >}} +{{< docs/shared lookup="reference/components/otelcol-filter-library-block.md" source="alloy" version="" >}} ### log_severity block -{{< docs/shared lookup="flow/reference/components/otelcol-filter-log-severity-block.md" source="agent" version="" >}} +{{< docs/shared lookup="reference/components/otelcol-filter-log-severity-block.md" source="alloy" version="" >}} ### output block -{{< docs/shared lookup="flow/reference/components/output-block.md" source="agent" version="" >}} +{{< docs/shared lookup="reference/components/output-block.md" source="alloy" version="" >}} ## Exported fields The following fields are exported and can be referenced by other components: -Name | Type | Description ----- | ---- | ----------- +Name | Type | Description +--------|--------------------|----------------------------------------------------------------- `input` | `otelcol.Consumer` | A value that other components can use to send telemetry data to. `input` accepts `otelcol.Consumer` data for any telemetry signal (metrics, logs, or traces). 
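+
+As an illustrative sketch of the `action` block described above, the following
+configuration upserts one attribute and deletes another before forwarding data.
+The component labels, the attribute keys, and the downstream
+`otelcol.exporter.otlp.default` reference are assumptions for this example, not
+part of the component's contract:
+
+```river
+otelcol.processor.attributes "default" {
+  // Insert the attribute, or update it if the key already exists.
+  action {
+    key    = "deployment.environment"
+    value  = "production"
+    action = "upsert"
+  }
+
+  // Remove a potentially sensitive attribute before exporting.
+  action {
+    key    = "credit_card"
+    action = "delete"
+  }
+
+  output {
+    traces = [otelcol.exporter.otlp.default.input]
+  }
+}
+```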
diff --git a/docs/sources/flow/reference/components/otelcol.processor.batch.md b/docs/sources/reference/components/otelcol.processor.batch.md similarity index 78% rename from docs/sources/flow/reference/components/otelcol.processor.batch.md rename to docs/sources/reference/components/otelcol.processor.batch.md index 7b461c1168..13821b0253 100644 --- a/docs/sources/flow/reference/components/otelcol.processor.batch.md +++ b/docs/sources/reference/components/otelcol.processor.batch.md @@ -1,10 +1,5 @@ --- -aliases: -- /docs/grafana-cloud/agent/flow/reference/components/otelcol.processor.batch/ -- /docs/grafana-cloud/monitor-infrastructure/agent/flow/reference/components/otelcol.processor.batch/ -- /docs/grafana-cloud/monitor-infrastructure/integrations/agent/flow/reference/components/otelcol.processor.batch/ -- /docs/grafana-cloud/send-data/agent/flow/reference/components/otelcol.processor.batch/ -canonical: https://grafana.com/docs/agent/latest/flow/reference/components/otelcol.processor.batch/ +canonical: https://grafana.com/docs/alloy/latest/reference/components/otelcol.processor.batch/ description: Learn about otelcol.processor.batch title: otelcol.processor.batch --- @@ -16,11 +11,9 @@ components and places them into batches. Batching improves the compression of data and reduces the number of outgoing network requests required to transmit data. This processor supports both size and time based batching. -We strongly recommend that you configure the batch processor on every Agent that -uses OpenTelemetry (otelcol) Flow components. The batch processor should be -defined in the pipeline after the `otelcol.processor.memory_limiter` as well -as any sampling processors. This is because batching should happen after any -data drops such as sampling. +We strongly recommend that you configure the batch processor on every {{< param "PRODUCT_NAME" >}} that uses OpenTelemetry (otelcol) {{< param "PRODUCT_ROOT_NAME" >}} components. +The batch processor should be defined in the pipeline after the `otelcol.processor.memory_limiter` as well as any sampling processors. +This is because batching should happen after any data drops such as sampling. > **NOTE**: `otelcol.processor.batch` is a wrapper over the upstream > OpenTelemetry Collector `batch` processor. Bug reports or feature requests @@ -45,13 +38,13 @@ otelcol.processor.batch "LABEL" { `otelcol.processor.batch` supports the following arguments: -Name | Type | Description | Default | Required ----- | ---- | ----------- | ------- | -------- -`timeout` | `duration` | How long to wait before flushing the batch. | `"200ms"` | no -`send_batch_size` | `number` | Amount of data to buffer before flushing the batch. | `8192` | no -`send_batch_max_size` | `number` | Upper limit of a batch size. | `0` | no -`metadata_keys` | `list(string)` | Creates a different batcher for each key/value combination of metadata. | `[]` | no -`metadata_cardinality_limit` | `number` | Limit of the unique metadata key/value combinations. | `1000` | no +Name | Type | Description | Default | Required +-----------------------------|----------------|-------------------------------------------------------------------------|-----------|--------- +`timeout` | `duration` | How long to wait before flushing the batch. | `"200ms"` | no +`send_batch_size` | `number` | Amount of data to buffer before flushing the batch. | `8192` | no +`send_batch_max_size` | `number` | Upper limit of a batch size. 
| `0` | no +`metadata_keys` | `list(string)` | Creates a different batcher for each key/value combination of metadata. | `[]` | no +`metadata_cardinality_limit` | `number` | Limit of the unique metadata key/value combinations. | `1000` | no `otelcol.processor.batch` accumulates data into a batch until one of the following events happens: @@ -102,22 +95,22 @@ which defaults to 1000 to limit memory impact. The following blocks are supported inside the definition of `otelcol.processor.batch`: -Hierarchy | Block | Description | Required ---------- | ----- | ----------- | -------- -output | [output][] | Configures where to send received telemetry data. | yes +Hierarchy | Block | Description | Required +----------|------------|---------------------------------------------------|--------- +output | [output][] | Configures where to send received telemetry data. | yes [output]: #output-block ### output block -{{< docs/shared lookup="flow/reference/components/output-block.md" source="agent" version="" >}} +{{< docs/shared lookup="reference/components/output-block.md" source="alloy" version="" >}} ## Exported fields The following fields are exported and can be referenced by other components: -Name | Type | Description ----- | ---- | ----------- +Name | Type | Description +--------|--------------------|----------------------------------------------------------------- `input` | `otelcol.Consumer` | A value that other components can use to send telemetry data to. `input` accepts `otelcol.Consumer` data for any telemetry signal (metrics, @@ -226,7 +219,8 @@ otelcol.exporter.otlp "production" { } ``` -[otelcol.exporter.otlp]: {{< relref "./otelcol.exporter.otlp.md" >}} +[otelcol.exporter.otlp]: ../otelcol.exporter.otlp/ + ## Compatible components diff --git a/docs/sources/flow/reference/components/otelcol.processor.discovery.md b/docs/sources/reference/components/otelcol.processor.discovery.md similarity index 80% rename from docs/sources/flow/reference/components/otelcol.processor.discovery.md rename to docs/sources/reference/components/otelcol.processor.discovery.md index a294c8440d..4ce4b76e70 100644 --- a/docs/sources/flow/reference/components/otelcol.processor.discovery.md +++ b/docs/sources/reference/components/otelcol.processor.discovery.md @@ -1,10 +1,6 @@ --- aliases: -- /docs/grafana-cloud/agent/flow/reference/components/otelcol.processor.discovery/ -- /docs/grafana-cloud/monitor-infrastructure/agent/flow/reference/components/otelcol.processor.discovery/ -- /docs/grafana-cloud/monitor-infrastructure/integrations/agent/flow/reference/components/otelcol.processor.discovery/ -- /docs/grafana-cloud/send-data/agent/flow/reference/components/otelcol.processor.discovery/ -canonical: https://grafana.com/docs/agent/latest/flow/reference/components/otelcol.processor.discovery/ +canonical: https://grafana.com/docs/alloy/latest/reference/components/otelcol.processor.discovery/ description: Learn about otelcol.processor.discovery title: otelcol.processor.discovery --- @@ -18,12 +14,10 @@ of labels for each discovered target. matching the one in the `__address__` label provided by the `discovery.*` component. {{< admonition type="note" >}} -`otelcol.processor.discovery` is a custom component unrelated to any -processors from the OpenTelemetry Collector. +`otelcol.processor.discovery` is a custom component unrelated to any processors from the OpenTelemetry Collector. {{< /admonition >}} -Multiple `otelcol.processor.discovery` components can be specified by giving them -different labels. 
+Multiple `otelcol.processor.discovery` components can be specified by giving them different labels. {{< admonition type="note" >}} It can be difficult to follow [OpenTelemetry semantic conventions][OTEL sem conv] when @@ -46,7 +40,7 @@ from Static mode's `prom_sd_operation_type`/`prom_sd_pod_associations` [configur [Prometheus data model]: https://prometheus.io/docs/concepts/data_model/#metric-names-and-labels [OTEL sem conv]: https://github.com/open-telemetry/semantic-conventions/blob/main/docs/README.md -[Traces]: {{< relref "../../../static/configuration/traces-config.md" >}} +[Traces]: http://grafana.com/docs/agent/latest/static/configuration/traces-config/ {{< /admonition >}} ## Usage @@ -64,11 +58,11 @@ otelcol.processor.discovery "LABEL" { `otelcol.processor.discovery` supports the following arguments: -Name | Type | Description | Default | Required ----- | ---- | ----------- | ------- | -------- -`targets` | `list(map(string))` | List of target labels to apply to the spans. | | yes -`operation_type` | `string` | Configures whether to update a span's attribute if it already exists. | `upsert` | no -`pod_associations` | `list(string)` | Configures how to decide the hostname of the span. | `["ip", "net.host.ip", "k8s.pod.ip", "hostname", "connection"]` | no +Name | Type | Description | Default | Required +-----------------|---------------------|-----------------------------------------------------------------------|----------|--------- +`targets` | `list(map(string))` | List of target labels to apply to the spans. | | yes +`operation_type` | `string` | Configures whether to update a span's attribute if it already exists. | `upsert` | no +`pod_associations` | `list(string)` | Configures how to decide the hostname of the span. | `["ip", "net.host.ip", "k8s.pod.ip", "hostname", "connection"]` | no `targets` could come from `discovery.*` components: 1. The `__address__` label will be matched against the IP address of incoming spans. @@ -98,22 +92,22 @@ only if `"ip"` has not already matched. The following blocks are supported inside the definition of `otelcol.processor.discovery`: -Hierarchy | Block | Description | Required ---------- | ----- | ----------- | -------- -output | [output][] | Configures where to send received telemetry data. | yes +Hierarchy | Block | Description | Required +----------|------------|---------------------------------------------------|--------- +output | [output][] | Configures where to send received telemetry data. | yes [output]: #output-block ### output block -{{< docs/shared lookup="flow/reference/components/output-block-traces.md" source="agent" version="" >}} +{{< docs/shared lookup="reference/components/output-block-traces.md" source="alloy" version="" >}} ## Exported fields The following fields are exported and can be referenced by other components: -Name | Type | Description ----- | ---- | ----------- +Name | Type | Description +--------|--------------------|----------------------------------------------------------------- `input` | `otelcol.Consumer` | A value that other components can use to send telemetry data to. `input` accepts `otelcol.Consumer` OTLP-formatted data for telemetry signals of these types: @@ -173,7 +167,7 @@ otelcol.processor.discovery "default" { ### Using a preconfigured list of attributes -It is not necessary to use a discovery component. In the example below, both a `test_label` and +It's not necessary to use a discovery component. 
In the example below, both the `test_label` and
`test.label.with.dots` resource attributes will be added to a span if its IP address is "1.2.2.2". The `__internal_label__` will not be added to the span, because it begins with
a double underscore (`__`).
 
 ```river
 otelcol.processor.discovery "default" {
   targets = [{
-    "__address__" = "1.2.2.2",
+    "__address__"          = "1.2.2.2",
     "__internal_label__" = "test_val",
     "test_label" = "test_val2",
     "test.label.with.dots" = "test.val2.with.dots"}]
diff --git a/docs/sources/flow/reference/components/otelcol.processor.filter.md b/docs/sources/reference/components/otelcol.processor.filter.md
similarity index 89%
rename from docs/sources/flow/reference/components/otelcol.processor.filter.md
rename to docs/sources/reference/components/otelcol.processor.filter.md
index c82be95aa0..75746831b1 100644
--- a/docs/sources/flow/reference/components/otelcol.processor.filter.md
+++ b/docs/sources/reference/components/otelcol.processor.filter.md
@@ -1,10 +1,5 @@
 ---
-aliases:
-- /docs/grafana-cloud/agent/flow/reference/components/otelcol.processor.filter/
-- /docs/grafana-cloud/monitor-infrastructure/agent/flow/reference/components/otelcol.processor.filter/
-- /docs/grafana-cloud/monitor-infrastructure/integrations/agent/flow/reference/components/otelcol.processor.filter/
-- /docs/grafana-cloud/send-data/agent/flow/reference/components/otelcol.processor.filter/
-canonical: https://grafana.com/docs/agent/latest/flow/reference/components/otelcol.processor.filter/
+canonical: https://grafana.com/docs/alloy/latest/reference/components/otelcol.processor.filter/
 description: Learn about otelcol.processor.filter
 labels:
   stage: experimental
@@ -13,7 +8,7 @@ title: otelcol.processor.filter
 
 # otelcol.processor.filter
 
-{{< docs/shared lookup="flow/stability/experimental.md" source="agent" version="" >}}
+{{< docs/shared lookup="stability/experimental.md" source="alloy" version="" >}}
 
 `otelcol.processor.filter` accepts and filters telemetry data from other
 `otelcol` components using the [OpenTelemetry Transformation Language (OTTL)][OTTL].
 
 A path is a reference to telemetry data such as:
 
 * Instrumentation scope name.
 * Span attributes.
 
-In addition to the [standard OTTL Converter functions][OTTL Converter functions],
-the following metrics-only functions are used exclusively by the processor:
+In addition to the [standard OTTL Converter functions][OTTL Converter functions], the following metrics-only functions are used exclusively by the processor:
 
 * [HasAttrKeyOnDataPoint][]
 * [HasAttrOnDataPoint][]
 
@@ -41,15 +35,12 @@ the following metrics-only functions are used exclusively by the processor:
 
 {{< admonition type="note" >}}
 Raw River strings can be used to write OTTL statements.
-For example, the OTTL statement `attributes["grpc"] == true`
-is written in River as \`attributes["grpc"] == true\`
-
+For example, the OTTL statement `attributes["grpc"] == true` is written in River as \`attributes["grpc"] == true\`.
 {{< /admonition >}}
 
 {{< admonition type="note" >}}
-`otelcol.processor.filter` is a wrapper over the upstream
-OpenTelemetry Collector `filter` processor. If necessary, bug reports or feature requests
-will be redirected to the upstream repository.
+`otelcol.processor.filter` is a wrapper over the upstream OpenTelemetry Collector `filter` processor.
+If necessary, bug reports or feature requests will be redirected to the upstream repository.
{{< /admonition >}}
 
 You can specify multiple `otelcol.processor.filter` components by giving them different labels.
 
@@ -133,7 +124,7 @@ If all span events for a span are dropped, the span will be left intact.
 
 ### metrics block
 
-The `metrics` block specifies statements that filter metric telemetry signals.
+The `metrics` block specifies statements that filter metric telemetry signals.
 Only one `metrics` block can be specified.
 
 Name | Type | Description | Default | Required
 ------------ | -------------- | ---------------------------------------------------------- | ------- | --------
 `metric` | `list(string)` | List of OTTL statements filtering OTLP metrics. | | no
 `datapoint` | `list(string)` | List of OTTL statements filtering OTLP metric datapoints. | | no
 
-The syntax of OTTL statements depends on the OTTL context. See the OpenTelemetry
-documentation for more information:
+The syntax of OTTL statements depends on the OTTL context. See the OpenTelemetry documentation for more information:
 
 * [OTTL metric context][]
 * [OTTL datapoint context][]
 
 Only one of the statements in the list has to be satisfied.
 
 If all datapoints for a metric are dropped, the metric will also be dropped.
 
 ### logs block
 
-The `logs` block specifies statements that filter log telemetry signals.
+The `logs` block specifies statements that filter log telemetry signals.
 Only one `logs` block can be specified.
 
 Name | Type | Description | Default | Required
 --------------- | -------------- | ----------------------------------------------------- | ------- | --------
 `log_record` | `list(string)` | List of OTTL statements filtering OTLP log records. | | no
 
-The syntax of OTTL statements depends on the OTTL context. See the OpenTelemetry
-documentation for more information:
+The syntax of OTTL statements depends on the OTTL context. See the OpenTelemetry documentation for more information:
 
 * [OTTL log context][]
 
 Only one of the statements in the list has to be satisfied.
 
-
 ### output block
 
-{{< docs/shared lookup="flow/reference/components/output-block.md" source="agent" version="" >}}
+{{< docs/shared lookup="reference/components/output-block.md" source="alloy" version="" >}}
 
 ## Exported fields
 
 The following fields are exported and can be referenced by other components:
 
 Name | Type | Description
-------- | ------------------ | -----------
+--------|--------------------|-----------------------------------------------------------------
 `input` | `otelcol.Consumer` | A value that other components can use to send telemetry data to.
`input` accepts `otelcol.Consumer` data for any telemetry signal (metrics, @@ -290,7 +278,7 @@ Some values in the River strings are [escaped][river-strings]: * `\` is escaped with `\\` * `"` is escaped with `\"` -[river-strings]: {{< relref "../../concepts/config-language/expressions/types_and_values.md/#strings" >}} +[river-strings]: ../../../concepts/config-language/expressions/types_and_values/#strings [OTTL]: https://github.com/open-telemetry/opentelemetry-collector-contrib/blob/v0.85.0/pkg/ottl/README.md diff --git a/docs/sources/flow/reference/components/otelcol.processor.k8sattributes.md b/docs/sources/reference/components/otelcol.processor.k8sattributes.md similarity index 72% rename from docs/sources/flow/reference/components/otelcol.processor.k8sattributes.md rename to docs/sources/reference/components/otelcol.processor.k8sattributes.md index fb2f1c785a..1622902877 100644 --- a/docs/sources/flow/reference/components/otelcol.processor.k8sattributes.md +++ b/docs/sources/reference/components/otelcol.processor.k8sattributes.md @@ -1,10 +1,5 @@ --- -aliases: -- /docs/grafana-cloud/agent/flow/reference/components/otelcol.processor.k8sattributes/ -- /docs/grafana-cloud/monitor-infrastructure/agent/flow/reference/components/otelcol.processor.k8sattributes/ -- /docs/grafana-cloud/monitor-infrastructure/integrations/agent/flow/reference/components/otelcol.processor.k8sattributes/ -- /docs/grafana-cloud/send-data/agent/flow/reference/components/otelcol.processor.k8sattributes/ -canonical: https://grafana.com/docs/agent/latest/flow/reference/components/otelcol.processor.k8sattributes/ +canonical: https://grafana.com/docs/alloy/latest/reference/components/otelcol.processor.k8sattributes/ description: Learn about otelcol.processor.k8sattributes title: otelcol.processor.k8sattributes --- @@ -15,13 +10,11 @@ title: otelcol.processor.k8sattributes components and adds Kubernetes metadata to the resource attributes of spans, logs, or metrics. {{< admonition type="note" >}} -`otelcol.processor.k8sattributes` is a wrapper over the upstream OpenTelemetry -Collector `k8sattributes` processor. If necessary, bug reports or feature requests -will be redirected to the upstream repository. +`otelcol.processor.k8sattributes` is a wrapper over the upstream OpenTelemetry Collector `k8sattributes` processor. +If necessary, bug reports or feature requests will be redirected to the upstream repository. {{< /admonition >}} -You can specify multiple `otelcol.processor.k8sattributes` components by giving them -different labels. +You can specify multiple `otelcol.processor.k8sattributes` components by giving them different labels. ## Usage @@ -39,10 +32,10 @@ otelcol.processor.k8sattributes "LABEL" { The following arguments are supported: -Name | Type | Description | Default | Required ----- | ---- |--------------------------------------------|-----------------| -------- -`auth_type` | `string` | Authentication method when connecting to the Kubernetes API. | `serviceAccount` | no -`passthrough` | `bool` | Passthrough signals as-is, only adding a `k8s.pod.ip` resource attribute. | `false` | no +Name | Type | Description | Default | Required +--------------|----------|---------------------------------------------------------------------------|------------------|--------- +`auth_type` | `string` | Authentication method when connecting to the Kubernetes API. | `serviceAccount` | no +`passthrough` | `bool` | Passthrough signals as-is, only adding a `k8s.pod.ip` resource attribute. 
| `false` | no The supported values for `auth_type` are: * `none`: No authentication is required. @@ -65,19 +58,20 @@ you can configure the DaemonSet {{< param "PRODUCT_ROOT_NAME" >}}s with `passthr The following blocks are supported inside the definition of `otelcol.processor.k8sattributes`: -Hierarchy | Block | Description | Required ---------- | ----- | ----------- | -------- -output | [output][] | Configures where to send received telemetry data. | yes -extract | [extract][] | Rules for extracting data from Kubernetes. | no -extract > annotation | [annotation][] | Creating resource attributes from Kubernetes annotations. | no -extract > label | [extract_label][] | Creating resource attributes from Kubernetes labels. | no -filter | [filter][] | Filters the data loaded from Kubernetes. | no -filter > field | [field][] | Filter pods by generic Kubernetes fields. | no -filter > label | [filter_label][] | Filter pods by Kubernetes labels. | no -pod_association | [pod_association][] | Rules to associate pod metadata with telemetry signals. | no -pod_association > source | [source][] | Source information to identify a pod. | no -exclude | [exclude][] | Exclude pods from being processed. | no -exclude > pod | [pod][] | Pod information. | no + +Hierarchy | Block | Description | Required +-------------------------|---------------------|-----------------------------------------------------------|--------- +output | [output][] | Configures where to send received telemetry data. | yes +extract | [extract][] | Rules for extracting data from Kubernetes. | no +extract > annotation | [annotation][] | Creating resource attributes from Kubernetes annotations. | no +extract > label | [extract_label][] | Creating resource attributes from Kubernetes labels. | no +filter | [filter][] | Filters the data loaded from Kubernetes. | no +filter > field | [field][] | Filter pods by generic Kubernetes fields. | no +filter > label | [filter_label][] | Filter pods by Kubernetes labels. | no +pod_association | [pod_association][] | Rules to associate pod metadata with telemetry signals. | no +pod_association > source | [source][] | Source information to identify a pod. | no +exclude | [exclude][] | Exclude pods from being processed. | no +exclude > pod | [pod][] | Pod information. | no The `>` symbol indicates deeper levels of nesting. For example, `extract > annotation` @@ -101,8 +95,8 @@ The `extract` block configures which metadata, annotations, and labels to extrac The following attributes are supported: -Name | Type | Description | Default | Required ----- |----------------|--------------------------------------|-------------| -------- +Name | Type | Description | Default | Required +-----------|----------------|--------------------------------------|-------------|--------- `metadata` | `list(string)` | Pre-configured metadata keys to add. | _See below_ | no The currently supported `metadata` keys are: @@ -143,13 +137,13 @@ By default, if `metadata` is not specified, the following fields are extracted a The `annotation` block configures how to extract Kubernetes annotations. -{{< docs/shared lookup="flow/reference/components/extract-field-block.md" source="agent" version="" >}} +{{< docs/shared lookup="reference/components/extract-field-block.md" source="alloy" version="" >}} ### label block {#extract-label-block} The `label` block configures how to extract Kubernetes labels. 
-{{< docs/shared lookup="flow/reference/components/extract-field-block.md" source="agent" version="" >}} +{{< docs/shared lookup="reference/components/extract-field-block.md" source="alloy" version="" >}} ### filter block @@ -157,10 +151,10 @@ The `filter` block configures which nodes to get data from and which fields and The following attributes are supported: -Name | Type | Description | Default | Required ----- |----------|-------------------------------------------------------------------------| ------- | -------- -`node` | `string` | Configures a Kubernetes node name or host name. | `""` | no -`namespace` | `string` | Filters all pods by the provided namespace. All other pods are ignored. | `""` | no +Name | Type | Description | Default | Required +------------|----------|-------------------------------------------------------------------------|---------|--------- +`node` | `string` | Configures a Kubernetes node name or host name. | `""` | no +`namespace` | `string` | Filters all pods by the provided namespace. All other pods are ignored. | `""` | no If `node` is specified, then any pods not running on the specified node will be ignored by `otelcol.processor.k8sattributes`. @@ -168,13 +162,13 @@ If `node` is specified, then any pods not running on the specified node will be The `field` block allows you to filter pods by generic Kubernetes fields. -{{< docs/shared lookup="flow/reference/components/field-filter-block.md" source="agent" version="" >}} +{{< docs/shared lookup="reference/components/field-filter-block.md" source="alloy" version="" >}} ### label block {#filter-label-block} The `label` block allows you to filter pods by generic Kubernetes labels. -{{< docs/shared lookup="flow/reference/components/field-filter-block.md" source="agent" version="" >}} +{{< docs/shared lookup="reference/components/field-filter-block.md" source="alloy" version="" >}} ### pod_association block @@ -215,10 +209,10 @@ pod to be associated with the telemetry signal. The following attributes are supported: -Name | Type | Description | Default | Required ----- |----------|----------------------------------------------------------------------------------| ------- | -------- -`from` | `string` | The association method. Currently supports `resource_attribute` and `connection` | | yes -`name` | `string` | Name represents extracted key name. For example, `ip`, `pod_uid`, `k8s.pod.ip` | | no +Name | Type | Description | Default | Required +-------|----------|----------------------------------------------------------------------------------|---------|--------- +`from` | `string` | The association method. Currently supports `resource_attribute` and `connection` | | yes +`name` | `string` | Name represents extracted key name. For example, `ip`, `pod_uid`, `k8s.pod.ip` | | no ### exclude block @@ -231,20 +225,20 @@ The `pod` block configures a pod to be excluded from the processor. 
The following attributes are supported: -Name | Type | Description | Default | Required ----- |----------|---------------------| ------- | -------- -`name` | `string` | The name of the pod | | yes +Name | Type | Description | Default | Required +-------|----------|---------------------|---------|--------- +`name` | `string` | The name of the pod | | yes ### output block -{{< docs/shared lookup="flow/reference/components/output-block.md" source="agent" version="" >}} +{{< docs/shared lookup="reference/components/output-block.md" source="alloy" version="" >}} ## Exported fields The following fields are exported and can be referenced by other components: -Name | Type | Description ----- | ---- | ----------- +Name | Type | Description +--------|--------------------|----------------------------------------------------------------- `input` | `otelcol.Consumer` | A value that other components can use to send telemetry data to. `input` accepts `otelcol.Consumer` data for any telemetry signal (metrics, logs, or traces). diff --git a/docs/sources/flow/reference/components/otelcol.processor.memory_limiter.md b/docs/sources/reference/components/otelcol.processor.memory_limiter.md similarity index 69% rename from docs/sources/flow/reference/components/otelcol.processor.memory_limiter.md rename to docs/sources/reference/components/otelcol.processor.memory_limiter.md index a7c5a90ab3..9bf3ec2f9f 100644 --- a/docs/sources/flow/reference/components/otelcol.processor.memory_limiter.md +++ b/docs/sources/reference/components/otelcol.processor.memory_limiter.md @@ -1,10 +1,5 @@ --- -aliases: -- /docs/grafana-cloud/agent/flow/reference/components/otelcol.processor.memory_limiter/ -- /docs/grafana-cloud/monitor-infrastructure/agent/flow/reference/components/otelcol.processor.memory_limiter/ -- /docs/grafana-cloud/monitor-infrastructure/integrations/agent/flow/reference/components/otelcol.processor.memory_limiter/ -- /docs/grafana-cloud/send-data/agent/flow/reference/components/otelcol.processor.memory_limiter/ -canonical: https://grafana.com/docs/agent/latest/flow/reference/components/otelcol.processor.memory_limiter/ +canonical: https://grafana.com/docs/alloy/latest/reference/components/otelcol.processor.memory_limiter/ description: Learn about otelcol.processor.memory_limiter title: otelcol.processor.memory_limiter --- @@ -36,7 +31,7 @@ giving them different labels. ```river otelcol.processor.memory_limiter "LABEL" { check_interval = "1s" - + limit = "50MiB" // alternatively, set `limit_percentage` and `spike_limit_percentage` output { @@ -51,14 +46,13 @@ otelcol.processor.memory_limiter "LABEL" { `otelcol.processor.memory_limiter` supports the following arguments: - -Name | Type | Description | Default | Required ----- | ---- | ----------- | ------- | -------- -`check_interval` | `duration` | How often to check memory usage. | | yes -`limit` | `string` | Maximum amount of memory targeted to be allocated by the process heap. | `"0MiB"` | no -`spike_limit` | `string` | Maximum spike expected between the measurements of memory usage. | 20% of `limit` | no -`limit_percentage` | `int` | Maximum amount of total available memory targeted to be allocated by the process heap. | `0` | no -`spike_limit_percentage` |` int` | Maximum spike expected between the measurements of memory usage. 
| `0` | no
+Name                     | Type       | Description                                                                             | Default        | Required
+-------------------------|------------|-----------------------------------------------------------------------------------------|----------------|---------
+`check_interval`         | `duration` | How often to check memory usage.                                                        |                | yes
+`limit`                  | `string`   | Maximum amount of memory targeted to be allocated by the process heap.                  | `"0MiB"`       | no
+`spike_limit`            | `string`   | Maximum spike expected between the measurements of memory usage.                        | 20% of `limit` | no
+`limit_percentage`       | `int`      | Maximum amount of total available memory targeted to be allocated by the process heap.  | `0`            | no
+`spike_limit_percentage` | `int`      | Maximum spike expected between the measurements of memory usage.                        | `0`            | no

 The arguments must define either `limit` or the `limit_percentage, spike_limit_percentage` pair, but not both.

@@ -79,22 +73,22 @@ The `limit` and `spike_limit` values must be larger than 1 MiB.

 The following blocks are supported inside the definition of
 `otelcol.processor.memory_limiter`:

-Hierarchy | Block | Description | Required
---------- | ----- | ----------- | --------
-output | [output][] | Configures where to send received telemetry data. | yes
+Hierarchy | Block      | Description                                        | Required
+----------|------------|----------------------------------------------------|---------
+output    | [output][] | Configures where to send received telemetry data.  | yes

 [output]: #output-block

 ### output block

-{{< docs/shared lookup="flow/reference/components/output-block.md" source="agent" version="" >}}
+{{< docs/shared lookup="reference/components/output-block.md" source="alloy" version="" >}}

 ## Exported fields

 The following fields are exported and can be referenced by other components:

-Name | Type | Description
---- | ---- | -----------
+Name    | Type               | Description
+--------|--------------------|------------------------------------------------------------------
 `input` | `otelcol.Consumer` | A value that other components can use to send telemetry data to.
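+
+As a rough sketch of wiring this exported field, a receiver can forward its data to the limiter. The `otelcol.receiver.otlp` instance and both `default` labels shown here are assumptions:
+
+```river
+otelcol.receiver.otlp "default" {
+  grpc {}
+
+  // Every signal is routed through the memory limiter before export.
+  output {
+    metrics = [otelcol.processor.memory_limiter.default.input]
+    logs    = [otelcol.processor.memory_limiter.default.input]
+    traces  = [otelcol.processor.memory_limiter.default.input]
+  }
+}
+```
+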
`input` accepts `otelcol.Consumer` data for any telemetry signal (metrics,

diff --git a/docs/sources/flow/reference/components/otelcol.processor.probabilistic_sampler.md b/docs/sources/reference/components/otelcol.processor.probabilistic_sampler.md
similarity index 70%
rename from docs/sources/flow/reference/components/otelcol.processor.probabilistic_sampler.md
rename to docs/sources/reference/components/otelcol.processor.probabilistic_sampler.md
index 70dfbf8ba6..b221d276df 100644
--- a/docs/sources/flow/reference/components/otelcol.processor.probabilistic_sampler.md
+++ b/docs/sources/reference/components/otelcol.processor.probabilistic_sampler.md
@@ -1,8 +1,5 @@
 ---
-aliases:
-- /docs/grafana-cloud/monitor-infrastructure/agent/flow/reference/components/otelcol.processor.probabilistic_sampler/
-- /docs/grafana-cloud/send-data/agent/flow/reference/components/otelcol.processor.probabilistic_sampler/
-canonical: https://grafana.com/docs/agent/latest/flow/reference/components/otelcol.processor.probabilistic_sampler/
+canonical: https://grafana.com/docs/alloy/latest/reference/components/otelcol.processor.probabilistic_sampler/
 description: Learn about otelcol.processor.probabilistic_sampler
 labels:
   stage: experimental
@@ -11,18 +8,16 @@ title: otelcol.processor.probabilistic_sampler

 # otelcol.processor.probabilistic_sampler

-{{< docs/shared lookup="flow/stability/experimental.md" source="agent" version="" >}}
+{{< docs/shared lookup="stability/experimental.md" source="alloy" version="" >}}

 `otelcol.processor.probabilistic_sampler` accepts logs and traces data from other otelcol components and applies probabilistic sampling based on configuration options.

 {{< admonition type="note" >}}
-`otelcol.processor.probabilistic_sampler` is a wrapper over the upstream
-OpenTelemetry Collector Contrib `probabilistic_sampler` processor. If necessary,
-bug reports or feature requests will be redirected to the upstream repository.
+`otelcol.processor.probabilistic_sampler` is a wrapper over the upstream OpenTelemetry Collector Contrib `probabilistic_sampler` processor.
+If necessary, bug reports or feature requests will be redirected to the upstream repository.
 {{< /admonition >}}

-You can specify multiple `otelcol.processor.probabilistic_sampler` components by giving them
-different labels.
+You can specify multiple `otelcol.processor.probabilistic_sampler` components by giving them different labels.

 ## Usage

@@ -39,30 +34,29 @@ otelcol.processor.probabilistic_sampler "LABEL" {

 `otelcol.processor.probabilistic_sampler` supports the following arguments:

-Name | Type | Description | Default | Required
---- |-----------|----------------------------------------------------------------------------------------------------------------------|-------------| --------
-`hash_seed` | `uint32` | An integer used to compute the hash algorithm. | `0` | no
-`sampling_percentage` | `float32` | Percentage of traces or logs sampled. | `0` | no
-`attribute_source` | `string` | Defines where to look for the attribute in `from_attribute`. | `"traceID"` | no
-`from_attribute` | `string` | The name of a log record attribute used for sampling purposes. | `""` | no
-`sampling_priority` | `string` | The name of a log record attribute used to set a different sampling priority from the `sampling_percentage` setting.
| `""` | no +Name | Type | Description | Default | Required +----------------------|-----------|----------------------------------------------------------------------------------------------------------------------|-------------|--------- +`hash_seed` | `uint32` | An integer used to compute the hash algorithm. | `0` | no +`sampling_percentage` | `float32` | Percentage of traces or logs sampled. | `0` | no +`attribute_source` | `string` | Defines where to look for the attribute in `from_attribute`. | `"traceID"` | no +`from_attribute` | `string` | The name of a log record attribute used for sampling purposes. | `""` | no +`sampling_priority` | `string` | The name of a log record attribute used to set a different sampling priority from the `sampling_percentage` setting. | `""` | no `hash_seed` determines an integer to compute the hash algorithm. This argument could be used for both traces and logs. When used for logs, it computes the hash of a log record. -For hashing to work, all collectors for a given tier, for example, behind the same load balancer, must have the same `hash_seed`. -It is also possible to leverage a different `hash_seed` at different collector tiers to support additional sampling requirements. +For hashing to work, all collectors for a given tier, for example, behind the same load balancer, must have the same `hash_seed`. +It is also possible to leverage a different `hash_seed` at different collector tiers to support additional sampling requirements. `sampling_percentage` determines the percentage at which traces or logs are sampled. All traces or logs are sampled if you set this argument to a value greater than or equal to 100. -`attribute_source` (logs only) determines where to look for the attribute in `from_attribute`. The allowed values are `traceID` or `record`. +`attribute_source` (logs only) determines where to look for the attribute in `from_attribute`. The allowed values are `traceID` or `record`. `from_attribute` (logs only) determines the name of a log record attribute used for sampling purposes, such as a unique log record ID. The value of the attribute is only used if the trace ID is absent or if `attribute_source` is set to `record`. `sampling_priority` (logs only) determines the name of a log record attribute used to set a different sampling priority from the `sampling_percentage` setting. 0 means to never sample the log record, and greater than or equal to 100 means to always sample the log record. The `probabilistic_sampler` supports two types of sampling for traces: -1. `sampling.priority` [semantic - convention](https://github.com/opentracing/specification/blob/master/semantic_conventions.md#span-tags-table) as defined by OpenTracing. +1. `sampling.priority` [semantic convention](https://github.com/opentracing/specification/blob/master/semantic_conventions.md#span-tags-table) as defined by OpenTracing. 2. Trace ID hashing. The `sampling.priority` semantic convention takes priority over trace ID hashing. @@ -74,8 +68,8 @@ The `probabilistic_sampler` supports sampling logs according to their trace ID, The following fields are exported and can be referenced by other components: -Name | Type | Description ----- | ---- | ----------- +Name | Type | Description +--------|--------------------|----------------------------------------------------------------- `input` | `otelcol.Consumer` | A value that other components can use to send telemetry data to. 
`input` accepts `otelcol.Consumer` OTLP-formatted data for any telemetry signal of these types: @@ -133,7 +127,7 @@ otelcol.processor.probabilistic_sampler "default" { } ``` -### Sample logs according to a "priority" attribute +### Sample logs according to a "priority" attribute ```river otelcol.processor.probabilistic_sampler "default" { diff --git a/docs/sources/flow/reference/components/otelcol.processor.resourcedetection.md b/docs/sources/reference/components/otelcol.processor.resourcedetection.md similarity index 95% rename from docs/sources/flow/reference/components/otelcol.processor.resourcedetection.md rename to docs/sources/reference/components/otelcol.processor.resourcedetection.md index 2cc2224fa6..f28898eb09 100644 --- a/docs/sources/flow/reference/components/otelcol.processor.resourcedetection.md +++ b/docs/sources/reference/components/otelcol.processor.resourcedetection.md @@ -1,10 +1,5 @@ --- -aliases: -- /docs/grafana-cloud/agent/flow/reference/components/otelcol.processor.resourcedetection/ -- /docs/grafana-cloud/monitor-infrastructure/agent/flow/reference/components/otelcol.processor.resourcedetection/ -- /docs/grafana-cloud/monitor-infrastructure/integrations/agent/flow/reference/components/otelcol.processor.resourcedetection/ -- /docs/grafana-cloud/send-data/agent/flow/reference/components/otelcol.processor.resourcedetection/ -canonical: https://grafana.com/docs/agent/latest/flow/reference/components/otelcol.processor.resourcedetection/ +canonical: https://grafana.com/docs/alloy/latest/reference/components/otelcol.processor.resourcedetection/ labels: stage: beta title: otelcol.processor.resourcedetection @@ -13,11 +8,11 @@ description: Learn about otelcol.processor.resourcedetection # otelcol.processor.resourcedetection -{{< docs/shared lookup="flow/stability/beta.md" source="agent" version="" >}} +{{< docs/shared lookup="stability/beta.md" source="alloy" version="" >}} -`otelcol.processor.resourcedetection` detects resource information from the host -in a format that conforms to the [OpenTelemetry resource semantic conventions](https://github.com/open-telemetry/opentelemetry-specification/tree/main/specification/resource/semantic_conventions/), and appends or -overrides the resource values in the telemetry data with this information. +`otelcol.processor.resourcedetection` detects resource information from the host in a format that conforms to the [OpenTelemetry resource semantic conventions][], and appends or overrides the resource values in the telemetry data with this information. + +[OpenTelemetry resource semantic conventions]: https://github.com/open-telemetry/opentelemetry-specification/tree/main/specification/resource/semantic_conventions/ {{< admonition type="note" >}} `otelcol.processor.resourcedetection` is a wrapper over the upstream @@ -123,16 +118,15 @@ kubernetes_node | [kubernetes_node][] | [system]: #system [openshift]: #openshift [kubernetes_node]: #kubernetes_node - [res-attr-cfg]: #resource-attribute-config ### output -{{< docs/shared lookup="flow/reference/components/output-block.md" source="agent" version="" >}} +{{< docs/shared lookup="reference/components/output-block.md" source="alloy" version="" >}} ### ec2 -The `ec2` block reads resource information from the [EC2 instance metadata API] using the [AWS SDK for Go][]. +The `ec2` block reads resource information from the [EC2 instance metadata API][] using the [AWS SDK for Go][]. 
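+
+As a rough sketch of enabling the detector, with the supported attributes listed below. The detector list and the exporter target are assumptions:
+
+```river
+otelcol.processor.resourcedetection "default" {
+  detectors = ["env", "ec2"]
+
+  // Only EC2 tags matching these regular expressions are added as resource attributes.
+  ec2 {
+    tags = ["^tag1$", "^tag2$"]
+  }
+
+  output {
+    traces = [otelcol.exporter.otlp.default.input]
+  }
+}
+```
+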
The `ec2` block supports the following attributes: @@ -155,9 +149,9 @@ To fetch EC2 tags, the IAM role assigned to the EC2 instance must have a policy The `ec2` block supports the following blocks: -Block | Description | Required ----------------------------------------------- | ------------------------------------------------- | -------- -[resource_attributes](#ec2--resource_attributes) | Configures which resource attributes to add. | no +Block | Description | Required +-------------------------------------------------|----------------------------------------------|--------- +[resource_attributes](#ec2--resource_attributes) | Configures which resource attributes to add. | no ##### ec2 > resource_attributes @@ -177,7 +171,7 @@ Block | Description ### ecs -The `ecs` block queries the Task Metadata Endpoint (TMDE) to record information about the current ECS Task. Only TMDE V4 and V3 are supported. +The `ecs` block queries the [Task Metadata Endpoint][] (TMDE) to record information about the current ECS Task. Only TMDE V4 and V3 are supported. [Task Metadata Endpoint]: https://docs.aws.amazon.com/AmazonECS/latest/developerguide/task-metadata-endpoint.html @@ -582,9 +576,7 @@ For more information, see the [Heroku cloud provider documentation][] under the The `system` block queries the host machine to retrieve various resource attributes. {{< admonition type="note" >}} - Use the [Docker](#docker) detector if running {{< param "PRODUCT_ROOT_NAME" >}} as a Docker container. - {{< /admonition >}} The `system` block supports the following attributes: @@ -659,17 +651,17 @@ The determination of the API address, `ca_file`, and the service token is skippe The `openshift` block supports the following blocks: -Block | Description | Required ----------------------------------------------- | ---------------------------------------------------- | -------- -[resource_attributes](#openshift--resource_attributes) | Configures which resource attributes to add. | no -[tls](#openshift--tls) | TLS settings for the connection with the OpenShift API. | yes +Block | Description | Required +-------------------------------------------------------|---------------------------------------------------------|--------- +[resource_attributes](#openshift--resource_attributes) | Configures which resource attributes to add. | no +[tls](#openshift--tls) | TLS settings for the connection with the OpenShift API. | yes #### openshift > tls The `tls` block configures TLS settings used for the connection to the gRPC server. -{{< docs/shared lookup="flow/reference/components/otelcol-tls-config-block.md" source="agent" version="" >}} +{{< docs/shared lookup="reference/components/otelcol-tls-config-block.md" source="alloy" version="" >}} #### openshift > resource_attributes @@ -713,9 +705,9 @@ rules: The `kubernetes_node` block supports the following blocks: -Block | Description | Required ----------------------------------------------- | ------------------------------------------------- | -------- -[resource_attributes](#kubernetes_node--resource_attributes) | Configures which resource attributes to add. | no +Block | Description | Required +-------------------------------------------------------------|----------------------------------------------|--------- +[resource_attributes](#kubernetes_node--resource_attributes) | Configures which resource attributes to add. 
| no #### kubernetes_node > resource_attributes diff --git a/docs/sources/flow/reference/components/otelcol.processor.span.md b/docs/sources/reference/components/otelcol.processor.span.md similarity index 65% rename from docs/sources/flow/reference/components/otelcol.processor.span.md rename to docs/sources/reference/components/otelcol.processor.span.md index 71c7357fec..7709b5b180 100644 --- a/docs/sources/flow/reference/components/otelcol.processor.span.md +++ b/docs/sources/reference/components/otelcol.processor.span.md @@ -1,10 +1,5 @@ --- -aliases: -- /docs/grafana-cloud/agent/flow/reference/components/otelcol.processor.span/ -- /docs/grafana-cloud/monitor-infrastructure/agent/flow/reference/components/otelcol.processor.span/ -- /docs/grafana-cloud/monitor-infrastructure/integrations/agent/flow/reference/components/otelcol.processor.span/ -- /docs/grafana-cloud/send-data/agent/flow/reference/components/otelcol.processor.span/ -canonical: https://grafana.com/docs/agent/latest/flow/reference/components/otelcol.processor.span/ +canonical: https://grafana.com/docs/alloy/latest/reference/components/otelcol.processor.span/ description: Learn about otelcol.processor.span labels: stage: experimental @@ -13,19 +8,17 @@ title: otelcol.processor.span # otelcol.processor.span -{{< docs/shared lookup="flow/stability/experimental.md" source="agent" version="" >}} +{{< docs/shared lookup="stability/experimental.md" source="alloy" version="" >}} -`otelcol.processor.span` accepts traces telemetry data from other `otelcol` -components and modifies the names and attributes of the spans. -It also supports the ability to filter input data to determine if -it should be included or excluded from this processor. +`otelcol.processor.span` accepts traces telemetry data from other `otelcol` components and modifies the names and attributes of the spans. +It also supports the ability to filter input data to determine if it should be included or excluded from this processor. -> **NOTE**: `otelcol.processor.span` is a wrapper over the upstream -> OpenTelemetry Collector `span` processor. Bug reports or feature requests -> will be redirected to the upstream repository, if necessary. +{{< admonition type="note" >}} +`otelcol.processor.span` is a wrapper over the upstream OpenTelemetry Collector `span` processor. +Bug reports or feature requests will be redirected to the upstream repository, if necessary. +{{< /admonition >}} -You can specify multiple `otelcol.processor.span` components by giving them -different labels. +You can specify multiple `otelcol.processor.span` components by giving them different labels. ## Usage @@ -39,29 +32,29 @@ otelcol.processor.span "LABEL" { ## Arguments -`otelcol.processor.span` doesn't support any arguments and is configured fully -through inner blocks. +`otelcol.processor.span` doesn't support any arguments and is configured fully through inner blocks. ## Blocks The following blocks are supported inside the definition of `otelcol.processor.span`: -Hierarchy | Block | Description | Required ---------- | ----- | ----------- | -------- -output | [output][] | Configures where to send received telemetry data. | yes -name | [name][] | Configures how to rename a span and add attributes. | no -name > to_attributes | [to-attributes][] | Configuration to create attributes from a span name. | no -status | [status][] | Specifies a status which should be set for this span. | no -include | [include][] | Filter for data included in this processor's actions. 
| no
-include > regexp | [regexp][] | Regex cache settings. | no
-include > attribute | [attribute][] | A list of attributes to match against. | no
-include > resource | [resource][] | A list of items to match the resources against. | no
-include > library | [library][] | A list of items to match the implementation library against. | no
-exclude | [exclude][] | Filter for data excluded from this processor's actions | no
-exclude > regexp | [regexp][] | Regex cache settings. | no
-exclude > attribute | [attribute][] | A list of attributes to match against. | no
-exclude > resource | [resource][] | A list of items to match the resources against. | no
-exclude > library | [library][] | A list of items to match the implementation library against. | no
+
+Hierarchy            | Block             | Description                                                   | Required
+---------------------|-------------------|---------------------------------------------------------------|---------
+output               | [output][]        | Configures where to send received telemetry data.             | yes
+name                 | [name][]          | Configures how to rename a span and add attributes.           | no
+name > to_attributes | [to-attributes][] | Configuration to create attributes from a span name.          | no
+status               | [status][]        | Specifies a status which should be set for this span.         | no
+include              | [include][]       | Filter for data included in this processor's actions.         | no
+include > regexp     | [regexp][]        | Regex cache settings.                                         | no
+include > attribute  | [attribute][]     | A list of attributes to match against.                        | no
+include > resource   | [resource][]      | A list of items to match the resources against.               | no
+include > library    | [library][]       | A list of items to match the implementation library against.  | no
+exclude              | [exclude][]       | Filter for data excluded from this processor's actions.       | no
+exclude > regexp     | [regexp][]        | Regex cache settings.                                         | no
+exclude > attribute  | [attribute][]     | A list of attributes to match against.                        | no
+exclude > resource   | [resource][]      | A list of items to match the resources against.               | no
+exclude > library    | [library][]       | A list of items to match the implementation library against.  | no

 The `>` symbol indicates deeper levels of nesting. For example, `include > attribute`
 refers to an `attribute` block defined inside an `include` block.

@@ -81,13 +74,13 @@ If both an `include` block and an `exclude` block are specified, the `include` pr

 ### name block

-The `name` block configures how to rename a span and add attributes.
+The `name` block configures how to rename a span and add attributes.

 The following attributes are supported:

-Name | Type | Description | Default | Required
---- | ---- | ----------- | ------- | --------
-`from_attributes` | `list(string)` | Attribute keys to pull values from, to generate a new span name. | `[]` | no
+Name              | Type           | Description                                                        | Default | Required
+------------------|----------------|--------------------------------------------------------------------|---------|---------
+`from_attributes` | `list(string)` | Attribute keys to pull values from, to generate a new span name.  | `[]`    | no
 `separator` | `string` | Separates attribute values in the new span name. | `""` | no

 First, `from_attributes` rules are applied, then [to-attributes][] rules are applied.
@@ -111,10 +104,10 @@ The `to_attributes` block configures how to create attributes from a span name.

 The following attributes are supported:

-Name | Type | Description | Default | Required
---- | ---- | ----------- | ------- | --------
-`rules` | `list(string)` | A list of regex rules to extract attribute values from span name.
| | yes -`break_after_match` | `bool` | Configures if processing of rules should stop after the first match. | `false` | no +Name | Type | Description | Default | Required +--------------------|----------------|----------------------------------------------------------------------|---------|--------- +`rules` | `list(string)` | A list of regex rules to extract attribute values from span name. | | yes +`break_after_match` | `bool` | Configures if processing of rules should stop after the first match. | `false` | no Each rule in the `rules` list is a regex pattern string. 1. The span name is checked against each regex in the list. @@ -135,10 +128,10 @@ The `status` block specifies a status which should be set for this span. The following attributes are supported: -Name | Type | Description | Default | Required ----- | ---- | ----------- | ------- | -------- -`code` | `string` | A status code. | | yes -`description` | `string` | An optional field documenting Error status codes. | `""` | no +Name | Type | Description | Default | Required +--------------|----------|---------------------------------------------------|---------|--------- +`code` | `string` | A status code. | | yes +`description` | `string` | An optional field documenting Error status codes. | `""` | no The supported values for `code` are: * `Ok` @@ -154,12 +147,12 @@ The `include` block provides an option to include data being fed into the The following arguments are supported: -Name | Type | Description | Default | Required ----- | ---- | ----------- | ------- | -------- -`match_type` | `string` | Controls how items to match against are interpreted. | | yes -`services` | `list(string)` | A list of items to match the service name against. | `[]` | no -`span_names` | `list(string)` | A list of items to match the span name against. | `[]` | no -`span_kinds` | `list(string)` | A list of items to match the span kind against. | `[]` | no +Name | Type | Description | Default | Required +-------------|----------------|------------------------------------------------------|---------|--------- +`match_type` | `string` | Controls how items to match against are interpreted. | | yes +`services` | `list(string)` | A list of items to match the service name against. | `[]` | no +`span_names` | `list(string)` | A list of items to match the span name against. | `[]` | no +`span_kinds` | `list(string)` | A list of items to match the span kind against. | `[]` | no `match_type` is required and must be set to either `"regexp"` or `"strict"`. @@ -175,12 +168,12 @@ The `exclude` block provides an option to exclude data from being fed into the The following arguments are supported: -Name | Type | Description | Default | Required ----- | ---- | ----------- | ------- | -------- -`match_type` | `string` | Controls how items to match against are interpreted. | | yes -`services` | `list(string)` | A list of items to match the service name against. | `[]` | no -`span_names` | `list(string)` | A list of items to match the span name against. | `[]` | no -`span_kinds` | `list(string)` | A list of items to match the span kind against. | `[]` | no +Name | Type | Description | Default | Required +-------------|----------------|------------------------------------------------------|---------|--------- +`match_type` | `string` | Controls how items to match against are interpreted. | | yes +`services` | `list(string)` | A list of items to match the service name against. | `[]` | no +`span_names` | `list(string)` | A list of items to match the span name against. 
| `[]` | no +`span_kinds` | `list(string)` | A list of items to match the span kind against. | `[]` | no `match_type` is required and must be set to either `"regexp"` or `"strict"`. @@ -191,30 +184,30 @@ with a non-empty value for a valid configuration. ### regexp block -{{< docs/shared lookup="flow/reference/components/otelcol-filter-regexp-block.md" source="agent" version="" >}} +{{< docs/shared lookup="reference/components/otelcol-filter-regexp-block.md" source="alloy" version="" >}} ### attribute block -{{< docs/shared lookup="flow/reference/components/otelcol-filter-attribute-block.md" source="agent" version="" >}} +{{< docs/shared lookup="reference/components/otelcol-filter-attribute-block.md" source="alloy" version="" >}} ### resource block -{{< docs/shared lookup="flow/reference/components/otelcol-filter-resource-block.md" source="agent" version="" >}} +{{< docs/shared lookup="reference/components/otelcol-filter-resource-block.md" source="alloy" version="" >}} ### library block -{{< docs/shared lookup="flow/reference/components/otelcol-filter-library-block.md" source="agent" version="" >}} +{{< docs/shared lookup="reference/components/otelcol-filter-library-block.md" source="alloy" version="" >}} ### output block -{{< docs/shared lookup="flow/reference/components/output-block-traces.md" source="agent" version="" >}} +{{< docs/shared lookup="reference/components/output-block-traces.md" source="alloy" version="" >}} ## Exported fields The following fields are exported and can be referenced by other components: -Name | Type | Description ----- | ---- | ----------- +Name | Type | Description +--------|--------------------|----------------------------------------------------------------- `input` | `otelcol.Consumer` | A value that other components can use to send telemetry data to. `input` accepts `otelcol.Consumer` OTLP-formatted data for traces telemetry signals. diff --git a/docs/sources/flow/reference/components/otelcol.processor.tail_sampling.md b/docs/sources/reference/components/otelcol.processor.tail_sampling.md similarity index 80% rename from docs/sources/flow/reference/components/otelcol.processor.tail_sampling.md rename to docs/sources/reference/components/otelcol.processor.tail_sampling.md index 32ff9ac4f7..c27fae1098 100644 --- a/docs/sources/flow/reference/components/otelcol.processor.tail_sampling.md +++ b/docs/sources/reference/components/otelcol.processor.tail_sampling.md @@ -4,7 +4,7 @@ aliases: - /docs/grafana-cloud/monitor-infrastructure/agent/flow/reference/components/otelcol.processor.tail_sampling/ - /docs/grafana-cloud/monitor-infrastructure/integrations/agent/flow/reference/components/otelcol.processor.tail_sampling/ - /docs/grafana-cloud/send-data/agent/flow/reference/components/otelcol.processor.tail_sampling/ -canonical: https://grafana.com/docs/agent/latest/flow/reference/components/otelcol.processor.tail_sampling/ +canonical: https://grafana.com/docs/alloy/latest/reference/components/otelcol.processor.tail_sampling/ description: Learn about otelcol.processor.tail_sampling labels: stage: beta @@ -13,7 +13,7 @@ title: otelcol.processor.tail_sampling # otelcol.processor.tail_sampling -{{< docs/shared lookup="flow/stability/beta.md" source="agent" version="" >}} +{{< docs/shared lookup="stability/beta.md" source="alloy" version="" >}} `otelcol.processor.tail_sampling` samples traces based on a set of defined policies. 
All spans for a given trace *must* be received by the same collector @@ -53,11 +53,11 @@ otelcol.processor.tail_sampling "LABEL" { `otelcol.processor.tail_sampling` supports the following arguments: -Name | Type | Description | Default | Required ----- | ---- | ----------- | ------- | -------- +Name | Type | Description | Default | Required +------------------------------|------------|------------------------------------------------------------------------------|---------|--------- `decision_wait` | `duration` | Wait time since the first span of a trace before making a sampling decision. | `"30s"` | no -`num_traces` | `int` | Number of traces kept in memory. | `50000` | no -`expected_new_traces_per_sec` | `int` | Expected number of new traces (helps in allocating data structures). | `0` | no +`num_traces` | `int` | Number of traces kept in memory. | `50000` | no +`expected_new_traces_per_sec` | `int` | Expected number of new traces (helps in allocating data structures). | `0` | no `decision_wait` determines the number of batches to maintain on a channel. Its value must convert to a number of seconds greater than zero. @@ -125,7 +125,7 @@ output | [output] [] | Co [composite]: #composite-block [composite_sub_policy]: #composite_sub_policy-block [output]: #output-block -[otelcol.exporter.otlp]: {{< relref "./otelcol.exporter.otlp.md" >}} +[otelcol.exporter.otlp]: ../otelcol.exporter.otlp/ ### policy block @@ -133,10 +133,10 @@ The `policy` block configures a sampling policy used by the component. At least The following arguments are supported: -Name | Type | Description | Default | Required ----- | ---- | ----------- | ------- | -------- -`name` | `string` | The custom name given to the policy. | | yes -`type` | `string` | The valid policy type for this policy. | | yes +Name | Type | Description | Default | Required +-------|----------|----------------------------------------|---------|--------- +`name` | `string` | The custom name given to the policy. | | yes +`type` | `string` | The valid policy type for this policy. | | yes Each policy results in a decision, and the processor evaluates them to make a final decision: @@ -153,9 +153,9 @@ The `latency` block configures a policy of type `latency`. The policy samples ba The following arguments are supported: -Name | Type | Description | Default | Required ----- | ---- | ----------- | ------- | -------- -`threshold_ms` | `number` | The latency threshold for sampling, in milliseconds. | | yes +Name | Type | Description | Default | Required +---------------|----------|------------------------------------------------------|---------|--------- +`threshold_ms` | `number` | The latency threshold for sampling, in milliseconds. | | yes ### numeric_attribute block @@ -163,11 +163,11 @@ The `numeric_attribute` block configures a policy of type `numeric_attribute`. T The following arguments are supported: -Name | Type | Description | Default | Required ----- | ------- | ----------- | ------- | -------- -`key` | `string` | Tag that the filter is matched against. | | yes -`min_value` | `number` | The minimum value of the attribute to be considered a match. | | yes -`max_value` | `number` | The maximum value of the attribute to be considered a match. | | yes +Name | Type | Description | Default | Required +---------------|----------|----------------------------------------------------------------|---------|--------- +`key` | `string` | Tag that the filter is matched against. 
| | yes +`min_value` | `number` | The minimum value of the attribute to be considered a match. | | yes +`max_value` | `number` | The maximum value of the attribute to be considered a match. | | yes `invert_match` | `bool` | Indicates that values must not match against attribute values. | `false` | no ### probabilistic block @@ -176,10 +176,10 @@ The `probabilistic` block configures a policy of type `probabilistic`. The polic The following arguments are supported: -Name | Type | Description | Default | Required ----- | ---- | ----------- | ------- | -------- -`sampling_percentage` | `number` | The percentage rate at which traces are sampled. | | yes -`hash_salt` | `string` | See below. | | no +Name | Type | Description | Default | Required +----------------------|----------|--------------------------------------------------|---------|--------- +`sampling_percentage` | `number` | The percentage rate at which traces are sampled. | | yes +`hash_salt` | `string` | See below. | | no Use `hash_salt` to configure the hashing salts. This is important in scenarios where multiple layers of collectors have different sampling rates. If multiple collectors use the same salt with different sampling rates, passing one @@ -191,9 +191,9 @@ The `status_code` block configures a policy of type `status_code`. The policy sa The following arguments are supported: -Name | Type | Description | Default | Required ----- | ---- | ----------- | ------- | -------- -`status_codes` | `list(string)` | Holds the configurable settings to create a status code filter sampling policy evaluator. | | yes +Name | Type | Description | Default | Required +---------------|----------------|-------------------------------------------------------------------------------------------|---------|--------- +`status_codes` | `list(string)` | Holds the configurable settings to create a status code filter sampling policy evaluator. | | yes `status_codes` values must be "OK", "ERROR" or "UNSET". @@ -217,9 +217,9 @@ The `rate_limiting` block configures a policy of type `rate_limiting`. The polic The following arguments are supported: -Name | Type | Description | Default | Required ----- | ---- | ----------- | ------- | -------- -`spans_per_second` | `number` | Sets the maximum number of spans that can be processed each second. | | yes +Name | Type | Description | Default | Required +-------------------|----------|---------------------------------------------------------------------|---------|--------- +`spans_per_second` | `number` | Sets the maximum number of spans that can be processed each second. | | yes ### span_count block @@ -227,24 +227,24 @@ The `span_count` block configures a policy of type `span_count`. The policy samp The following arguments are supported: -Name | Type | Description | Default | Required ----- | ---- | ----------- | ------- | -------- -`min_spans` | `number` | Minimum number of spans in a trace. | | yes -`max_spans` | `number` | Maximum number of spans in a trace. | `0` | no +Name | Type | Description | Default | Required +------------|----------|-------------------------------------|---------|--------- +`min_spans` | `number` | Minimum number of spans in a trace. | | yes +`max_spans` | `number` | Maximum number of spans in a trace. | `0` | no Set `max_spans` to `0`, if you do not want to limit the policy samples based on the maximum number of spans in a trace. ### boolean_attribute block -The `boolean_attribute` block configures a policy of type `boolean_attribute`. 
+The `boolean_attribute` block configures a policy of type `boolean_attribute`. The policy samples based on a boolean attribute (resource and record). The following arguments are supported: -Name | Type | Description | Default | Required ----- | ---- | ----------- | ------- | -------- -`key` | `string` | Attribute key to match against. | | yes -`value` | `bool` | The bool value (`true` or `false`) to use when matching against attribute values. | | yes +Name | Type | Description | Default | Required +--------|----------|-----------------------------------------------------------------------------------|---------|--------- +`key` | `string` | Attribute key to match against. | | yes +`value` | `bool` | The bool value (`true` or `false`) to use when matching against attribute values. | | yes ### ottl_condition block @@ -253,11 +253,11 @@ The `ottl_condition` block configures a policy of type `ottl_condition`. The pol The following arguments are supported: -Name | Type | Description | Default | Required ----- | ---- | ----------- | ------- | -------- -`error_mode` | `string` | Error handling if OTTL conditions fail to evaluate. | | yes -`span` | `list(string)` | OTTL conditions for spans. | `[]` | no -`spanevent` | `list(string)` | OTTL conditions for span events. | `[]` | no +Name | Type | Description | Default | Required +-------------|----------------|-----------------------------------------------------|---------|--------- +`error_mode` | `string` | Error handling if OTTL conditions fail to evaluate. | | yes +`span` | `list(string)` | OTTL conditions for spans. | `[]` | no +`spanevent` | `list(string)` | OTTL conditions for span events. | `[]` | no The supported values for `error_mode` are: * `ignore`: Errors cause evaluation to continue to the next statement. @@ -271,10 +271,10 @@ The `trace_state` block configures a policy of type `trace_state`. The policy sa The following arguments are supported: -Name | Type | Description | Default | Required ----- | ---- | ----------- | ------- | -------- -`key` | `string` | Tag that the filter is matched against. | | yes -`values` | `list(string)` | Set of values to use when matching against trace_state values. | | yes +Name | Type | Description | Default | Required +---------|----------------|----------------------------------------------------------------|---------|--------- +`key` | `string` | Tag that the filter is matched against. | | yes +`values` | `list(string)` | Set of values to use when matching against trace_state values. | | yes ### and block @@ -286,10 +286,10 @@ The `and_sub_policy` block configures a sampling policy used by the `and` block. The following arguments are supported: -Name | Type | Description | Default | Required ----- | ---- | ----------- | ------- | -------- -`name` | `string` | The custom name given to the policy. | | yes -`type` | `string` | The valid policy type for this policy. | | yes +Name | Type | Description | Default | Required +-------|----------|----------------------------------------|---------|--------- +`name` | `string` | The custom name given to the policy. | | yes +`type` | `string` | The valid policy type for this policy. | | yes ### composite block @@ -305,21 +305,21 @@ The `composite_sub_policy` block configures a sampling policy used by the `compo The following arguments are supported: -Name | Type | Description | Default | Required ----- | ---- | ----------- | ------- | -------- -`name` | `string` | The custom name given to the policy. 
| | yes -`type` | `string` | The valid policy type for this policy. | | yes +Name | Type | Description | Default | Required +-------|----------|----------------------------------------|---------|--------- +`name` | `string` | The custom name given to the policy. | | yes +`type` | `string` | The valid policy type for this policy. | | yes ### output block -{{< docs/shared lookup="flow/reference/components/output-block.md" source="agent" version="" >}} +{{< docs/shared lookup="reference/components/output-block.md" source="alloy" version="" >}} ## Exported fields The following fields are exported and can be referenced by other components: -Name | Type | Description ----- | ---- | ----------- +Name | Type | Description +--------|--------------------|----------------------------------------------------------------- `input` | `otelcol.Consumer` | A value that other components can use to send telemetry data to. `input` accepts `otelcol.Consumer` data for any telemetry signal (metrics, diff --git a/docs/sources/flow/reference/components/otelcol.processor.transform.md b/docs/sources/reference/components/otelcol.processor.transform.md similarity index 94% rename from docs/sources/flow/reference/components/otelcol.processor.transform.md rename to docs/sources/reference/components/otelcol.processor.transform.md index 65e8bd5b6c..03dd8bfe3d 100644 --- a/docs/sources/flow/reference/components/otelcol.processor.transform.md +++ b/docs/sources/reference/components/otelcol.processor.transform.md @@ -1,10 +1,5 @@ --- -aliases: -- /docs/grafana-cloud/agent/flow/reference/components/otelcol.processor.transform/ -- /docs/grafana-cloud/monitor-infrastructure/agent/flow/reference/components/otelcol.processor.transform/ -- /docs/grafana-cloud/monitor-infrastructure/integrations/agent/flow/reference/components/otelcol.processor.transform/ -- /docs/grafana-cloud/send-data/agent/flow/reference/components/otelcol.processor.transform/ -canonical: https://grafana.com/docs/agent/latest/flow/reference/components/otelcol.processor.transform/ +canonical: https://grafana.com/docs/alloy/latest/reference/components/otelcol.processor.transform/ description: Learn about otelcol.processor.transform labels: stage: experimental @@ -13,7 +8,7 @@ title: otelcol.processor.transform # otelcol.processor.transform -{{< docs/shared lookup="flow/stability/experimental.md" source="agent" version="" >}} +{{< docs/shared lookup="stability/experimental.md" source="alloy" version="" >}} `otelcol.processor.transform` accepts telemetry data from other `otelcol` components and modifies it using the [OpenTelemetry Transformation Language (OTTL)][OTTL]. @@ -49,14 +44,14 @@ There are two ways of inputting strings in River configuration files: * Using backticks ([raw River strings][river-raw-strings]). No characters must be escaped. However, it's not possible to have backticks inside the string. -For example, the OTTL statement `set(description, "Sum") where type == "Sum"` can be written as: +For example, the OTTL statement `set(description, "Sum") where type == "Sum"` can be written as: * A normal River string: `"set(description, \"Sum\") where type == \"Sum\""`. * A raw River string: ``` `set(description, "Sum") where type == "Sum"` ```. Raw strings are generally more convenient for writing OTTL statements. 
-[river-strings]: {{< relref "../../concepts/config-language/expressions/types_and_values.md/#strings" >}} -[river-raw-strings]: {{< relref "../../concepts/config-language/expressions/types_and_values.md/#raw-strings" >}} +[river-strings]: ../../../concepts/config-language/expressions/types_and_values/#strings +[river-raw-strings]: ../../../concepts/config-language/expressions/types_and_values/#raw-strings {{< /admonition >}} {{< admonition type="note" >}} @@ -68,19 +63,19 @@ will be redirected to the upstream repository. You can specify multiple `otelcol.processor.transform` components by giving them different labels. {{< admonition type="warning" >}} -`otelcol.processor.transform` allows you to modify all aspects of your telemetry. Some specific risks are given below, -but this is not an exhaustive list. It is important to understand your data before using this processor. +`otelcol.processor.transform` allows you to modify all aspects of your telemetry. Some specific risks are given below, +but this is not an exhaustive list. It is important to understand your data before using this processor. -- [Unsound Transformations][]: Transformations between metric data types are not defined in the [metrics data model][]. -To use these functions, you must understand the incoming data and know that it can be meaningfully converted +- [Unsound Transformations][]: Transformations between metric data types are not defined in the [metrics data model][]. +To use these functions, you must understand the incoming data and know that it can be meaningfully converted to a new metric data type or can be used to create new metrics. - - Although OTTL allows you to use the `set` function with `metric.data_type`, + - Although OTTL allows you to use the `set` function with `metric.data_type`, its implementation in the transform processor is a [no-op][]. To modify a data type, you must use a specific function such as `convert_gauge_to_sum`. - [Identity Conflict][]: Transformation of metrics can potentially affect a metric's identity, - leading to an Identity Crisis. Be especially cautious when transforming a metric name and when reducing or changing + leading to an Identity Crisis. Be especially cautious when transforming a metric name and when reducing or changing existing attributes. Adding new attributes is safe. -- [Orphaned Telemetry][]: The processor allows you to modify `span_id`, `trace_id`, and `parent_span_id` for traces +- [Orphaned Telemetry][]: The processor allows you to modify `span_id`, `trace_id`, and `parent_span_id` for traces and `span_id`, and `trace_id` logs. Modifying these fields could lead to orphaned spans or logs. [Unsound Transformations]: https://github.com/open-telemetry/opentelemetry-collector/blob/{{< param "OTEL_VERSION" >}}/docs/standard-warnings.md#unsound-transformations @@ -251,7 +246,7 @@ span using the `span` context, it is more efficient to use the `resource` contex ### output block -{{< docs/shared lookup="flow/reference/components/output-block.md" source="agent" version="" >}} +{{< docs/shared lookup="reference/components/output-block.md" source="alloy" version="" >}} ## Exported fields @@ -566,8 +561,8 @@ Each statement is enclosed in backticks instead of quotation marks. This constitutes a [raw string][river-raw-strings], and lets us avoid the need to escape each `"` with a `\"`, and each `\` with a `\\` inside a [normal][river-strings] River string. 
-[river-strings]: {{< relref "../../concepts/config-language/expressions/types_and_values.md/#strings" >}} -[river-raw-strings]: {{< relref "../../concepts/config-language/expressions/types_and_values.md/#raw-strings" >}} +[river-strings]: ../../../concepts/config-language/expressions/types_and_values/#strings +[river-raw-strings]: ../../../concepts/config-language/expressions/types_and_values/#raw-strings [traces protobuf]: https://github.com/open-telemetry/opentelemetry-proto/blob/v1.0.0/opentelemetry/proto/trace/v1/trace.proto [metrics protobuf]: https://github.com/open-telemetry/opentelemetry-proto/blob/v1.0.0/opentelemetry/proto/metrics/v1/metrics.proto @@ -590,6 +585,7 @@ each `"` with a `\"`, and each `\` with a `\\` inside a [normal][river-strings] [OTTL metric context]: https://github.com/open-telemetry/opentelemetry-collector-contrib/blob/{{< param "OTEL_VERSION" >}}/pkg/ottl/contexts/ottlmetric/README.md [OTTL datapoint context]: https://github.com/open-telemetry/opentelemetry-collector-contrib/blob/{{< param "OTEL_VERSION" >}}/pkg/ottl/contexts/ottldatapoint/README.md [OTTL log context]: https://github.com/open-telemetry/opentelemetry-collector-contrib/blob/{{< param "OTEL_VERSION" >}}/pkg/ottl/contexts/ottllog/README.md + ## Compatible components diff --git a/docs/sources/flow/reference/components/otelcol.receiver.jaeger.md b/docs/sources/reference/components/otelcol.receiver.jaeger.md similarity index 92% rename from docs/sources/flow/reference/components/otelcol.receiver.jaeger.md rename to docs/sources/reference/components/otelcol.receiver.jaeger.md index a77bc58c37..0d34dc1ed3 100644 --- a/docs/sources/flow/reference/components/otelcol.receiver.jaeger.md +++ b/docs/sources/reference/components/otelcol.receiver.jaeger.md @@ -1,10 +1,5 @@ --- -aliases: -- /docs/grafana-cloud/agent/flow/reference/components/otelcol.receiver.jaeger/ -- /docs/grafana-cloud/monitor-infrastructure/agent/flow/reference/components/otelcol.receiver.jaeger/ -- /docs/grafana-cloud/monitor-infrastructure/integrations/agent/flow/reference/components/otelcol.receiver.jaeger/ -- /docs/grafana-cloud/send-data/agent/flow/reference/components/otelcol.receiver.jaeger/ -canonical: https://grafana.com/docs/agent/latest/flow/reference/components/otelcol.receiver.jaeger/ +canonical: https://grafana.com/docs/alloy/latest/reference/components/otelcol.receiver.jaeger/ description: Learn about otelcol.receiver.jaeger title: otelcol.receiver.jaeger --- @@ -115,7 +110,7 @@ Name | Type | Description | Default | Required The `tls` block configures TLS settings used for a server. If the `tls` block isn't provided, TLS won't be used for connections to the server. 
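+
+For illustration, a minimal sketch that serves the gRPC protocol over TLS. The certificate paths and the exporter target are placeholders:
+
+```river
+otelcol.receiver.jaeger "default" {
+  protocols {
+    grpc {
+      // Clients must connect over TLS using this server certificate.
+      tls {
+        cert_file = "/path/to/cert.pem"
+        key_file  = "/path/to/key.pem"
+      }
+    }
+  }
+
+  output {
+    traces = [otelcol.exporter.otlp.default.input]
+  }
+}
+```
+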
-{{< docs/shared lookup="flow/reference/components/otelcol-tls-config-block.md" source="agent" version="" >}} +{{< docs/shared lookup="reference/components/otelcol-tls-config-block.md" source="alloy" version="" >}} ### keepalive block @@ -223,11 +218,11 @@ Name | Type | Description | Default | Required ### debug_metrics block -{{< docs/shared lookup="flow/reference/components/otelcol-debug-metrics-block.md" source="agent" version="" >}} +{{< docs/shared lookup="reference/components/otelcol-debug-metrics-block.md" source="alloy" version="" >}} ### output block -{{< docs/shared lookup="flow/reference/components/output-block.md" source="agent" version="" >}} +{{< docs/shared lookup="reference/components/output-block.md" source="alloy" version="" >}} ## Exported fields diff --git a/docs/sources/flow/reference/components/otelcol.receiver.kafka.md b/docs/sources/reference/components/otelcol.receiver.kafka.md similarity index 93% rename from docs/sources/flow/reference/components/otelcol.receiver.kafka.md rename to docs/sources/reference/components/otelcol.receiver.kafka.md index a1bcf950de..312e2fe7ee 100644 --- a/docs/sources/flow/reference/components/otelcol.receiver.kafka.md +++ b/docs/sources/reference/components/otelcol.receiver.kafka.md @@ -1,10 +1,5 @@ --- -aliases: -- /docs/grafana-cloud/agent/flow/reference/components/otelcol.receiver.kafka/ -- /docs/grafana-cloud/monitor-infrastructure/agent/flow/reference/components/otelcol.receiver.kafka/ -- /docs/grafana-cloud/monitor-infrastructure/integrations/agent/flow/reference/components/otelcol.receiver.kafka/ -- /docs/grafana-cloud/send-data/agent/flow/reference/components/otelcol.receiver.kafka/ -canonical: https://grafana.com/docs/agent/latest/flow/reference/components/otelcol.receiver.kafka/ +canonical: https://grafana.com/docs/alloy/latest/reference/components/otelcol.receiver.kafka/ description: Learn about otelcol.receiver.kafka title: otelcol.receiver.kafka --- @@ -169,7 +164,7 @@ The `tls` block configures TLS settings used for connecting to the Kafka brokers. If the `tls` block isn't provided, TLS won't be used for communication. -{{< docs/shared lookup="flow/reference/components/otelcol-tls-config-block.md" source="agent" version="" >}} +{{< docs/shared lookup="reference/components/otelcol-tls-config-block.md" source="alloy" version="" >}} ### kerberos block @@ -279,11 +274,11 @@ Regular expressions are not allowed in the `headers` argument. 
Only exact matchi ### debug_metrics block -{{< docs/shared lookup="flow/reference/components/otelcol-debug-metrics-block.md" source="agent" version="" >}} +{{< docs/shared lookup="reference/components/otelcol-debug-metrics-block.md" source="alloy" version="" >}} ### output block -{{< docs/shared lookup="flow/reference/components/output-block.md" source="agent" version="" >}} +{{< docs/shared lookup="reference/components/output-block.md" source="alloy" version="" >}} ## Exported fields diff --git a/docs/sources/flow/reference/components/otelcol.receiver.loki.md b/docs/sources/reference/components/otelcol.receiver.loki.md similarity index 80% rename from docs/sources/flow/reference/components/otelcol.receiver.loki.md rename to docs/sources/reference/components/otelcol.receiver.loki.md index a658f35a7f..539e42e5cc 100644 --- a/docs/sources/flow/reference/components/otelcol.receiver.loki.md +++ b/docs/sources/reference/components/otelcol.receiver.loki.md @@ -1,10 +1,5 @@ --- -aliases: -- /docs/grafana-cloud/agent/flow/reference/components/otelcol.receiver.loki/ -- /docs/grafana-cloud/monitor-infrastructure/agent/flow/reference/components/otelcol.receiver.loki/ -- /docs/grafana-cloud/monitor-infrastructure/integrations/agent/flow/reference/components/otelcol.receiver.loki/ -- /docs/grafana-cloud/send-data/agent/flow/reference/components/otelcol.receiver.loki/ -canonical: https://grafana.com/docs/agent/latest/flow/reference/components/otelcol.receiver.loki/ +canonical: https://grafana.com/docs/alloy/latest/reference/components/otelcol.receiver.loki/ description: Learn about otelcol.receiver.loki labels: stage: beta @@ -13,7 +8,7 @@ title: otelcol.receiver.loki # otelcol.receiver.loki -{{< docs/shared lookup="flow/stability/beta.md" source="agent" version="" >}} +{{< docs/shared lookup="stability/beta.md" source="alloy" version="" >}} `otelcol.receiver.loki` receives Loki log entries, converts them to the OpenTelemetry logs format, and forwards them to other `otelcol.*` components. @@ -49,7 +44,7 @@ output | [output][] | Configures where to send converted telemetry data. 
| yes ### output block -{{< docs/shared lookup="flow/reference/components/output-block.md" source="agent" version="" >}} +{{< docs/shared lookup="reference/components/output-block.md" source="alloy" version="" >}} ## Exported fields diff --git a/docs/sources/flow/reference/components/otelcol.receiver.opencensus.md b/docs/sources/reference/components/otelcol.receiver.opencensus.md similarity index 89% rename from docs/sources/flow/reference/components/otelcol.receiver.opencensus.md rename to docs/sources/reference/components/otelcol.receiver.opencensus.md index 01db61e67b..bf78f52021 100644 --- a/docs/sources/flow/reference/components/otelcol.receiver.opencensus.md +++ b/docs/sources/reference/components/otelcol.receiver.opencensus.md @@ -1,10 +1,5 @@ --- -aliases: -- /docs/grafana-cloud/agent/flow/reference/components/otelcol.receiver.opencensus/ -- /docs/grafana-cloud/monitor-infrastructure/agent/flow/reference/components/otelcol.receiver.opencensus/ -- /docs/grafana-cloud/monitor-infrastructure/integrations/agent/flow/reference/components/otelcol.receiver.opencensus/ -- /docs/grafana-cloud/send-data/agent/flow/reference/components/otelcol.receiver.opencensus/ -canonical: https://grafana.com/docs/agent/latest/flow/reference/components/otelcol.receiver.opencensus/ +canonical: https://grafana.com/docs/alloy/latest/reference/components/otelcol.receiver.opencensus/ description: Learn about otelcol.receiver.opencensus title: otelcol.receiver.opencensus --- @@ -91,7 +86,7 @@ refers to a `tls` block defined inside a `grpc` block. The `tls` block configures TLS settings used for a server. If the `tls` block isn't provided, TLS won't be used for connections to the server. -{{< docs/shared lookup="flow/reference/components/otelcol-tls-config-block.md" source="agent" version="" >}} +{{< docs/shared lookup="reference/components/otelcol-tls-config-block.md" source="alloy" version="" >}} ### keepalive block @@ -131,11 +126,11 @@ Name | Type | Description | Default | Required ### debug_metrics block -{{< docs/shared lookup="flow/reference/components/otelcol-debug-metrics-block.md" source="agent" version="" >}} +{{< docs/shared lookup="reference/components/otelcol-debug-metrics-block.md" source="alloy" version="" >}} ### output block -{{< docs/shared lookup="flow/reference/components/output-block.md" source="agent" version="" >}} +{{< docs/shared lookup="reference/components/output-block.md" source="alloy" version="" >}} ## Exported fields diff --git a/docs/sources/flow/reference/components/otelcol.receiver.otlp.md b/docs/sources/reference/components/otelcol.receiver.otlp.md similarity index 92% rename from docs/sources/flow/reference/components/otelcol.receiver.otlp.md rename to docs/sources/reference/components/otelcol.receiver.otlp.md index 55bb0db345..251ec9d6f6 100644 --- a/docs/sources/flow/reference/components/otelcol.receiver.otlp.md +++ b/docs/sources/reference/components/otelcol.receiver.otlp.md @@ -1,10 +1,5 @@ --- -aliases: -- /docs/grafana-cloud/agent/flow/reference/components/otelcol.receiver.otlp/ -- /docs/grafana-cloud/monitor-infrastructure/agent/flow/reference/components/otelcol.receiver.otlp/ -- /docs/grafana-cloud/monitor-infrastructure/integrations/agent/flow/reference/components/otelcol.receiver.otlp/ -- /docs/grafana-cloud/send-data/agent/flow/reference/components/otelcol.receiver.otlp/ -canonical: https://grafana.com/docs/agent/latest/flow/reference/components/otelcol.receiver.otlp/ +canonical: https://grafana.com/docs/alloy/latest/reference/components/otelcol.receiver.otlp/ 
description: Learn about otelcol.receiver.otlp title: otelcol.receiver.otlp --- @@ -187,11 +182,11 @@ If `allowed_headers` includes `"*"`, all headers are permitted. ### debug_metrics block -{{< docs/shared lookup="flow/reference/components/otelcol-debug-metrics-block.md" source="agent" version="" >}} +{{< docs/shared lookup="reference/components/otelcol-debug-metrics-block.md" source="alloy" version="" >}} ### output block -{{< docs/shared lookup="flow/reference/components/output-block.md" source="agent" version="" >}} +{{< docs/shared lookup="reference/components/output-block.md" source="alloy" version="" >}} ## Exported fields diff --git a/docs/sources/flow/reference/components/otelcol.receiver.prometheus.md b/docs/sources/reference/components/otelcol.receiver.prometheus.md similarity index 80% rename from docs/sources/flow/reference/components/otelcol.receiver.prometheus.md rename to docs/sources/reference/components/otelcol.receiver.prometheus.md index ce9e9b9f89..79a2dfe11e 100644 --- a/docs/sources/flow/reference/components/otelcol.receiver.prometheus.md +++ b/docs/sources/reference/components/otelcol.receiver.prometheus.md @@ -1,10 +1,5 @@ --- -aliases: -- /docs/grafana-cloud/agent/flow/reference/components/otelcol.receiver.prometheus/ -- /docs/grafana-cloud/monitor-infrastructure/agent/flow/reference/components/otelcol.receiver.prometheus/ -- /docs/grafana-cloud/monitor-infrastructure/integrations/agent/flow/reference/components/otelcol.receiver.prometheus/ -- /docs/grafana-cloud/send-data/agent/flow/reference/components/otelcol.receiver.prometheus/ -canonical: https://grafana.com/docs/agent/latest/flow/reference/components/otelcol.receiver.prometheus/ +canonical: https://grafana.com/docs/alloy/latest/reference/components/otelcol.receiver.prometheus/ description: Learn about otelcol.receiver.prometheus labels: stage: beta @@ -13,7 +8,7 @@ title: otelcol.receiver.prometheus # otelcol.receiver.prometheus -{{< docs/shared lookup="flow/stability/beta.md" source="agent" version="" >}} +{{< docs/shared lookup="stability/beta.md" source="alloy" version="" >}} `otelcol.receiver.prometheus` receives Prometheus metrics, converts them to the OpenTelemetry metrics format, and forwards them to other `otelcol.*` @@ -50,7 +45,7 @@ output | [output][] | Configures where to send received telemetry data. 
| yes ### output block -{{< docs/shared lookup="flow/reference/components/output-block.md" source="agent" version="" >}} +{{< docs/shared lookup="reference/components/output-block.md" source="alloy" version="" >}} ## Exported fields diff --git a/docs/sources/flow/reference/components/otelcol.receiver.vcenter.md b/docs/sources/reference/components/otelcol.receiver.vcenter.md similarity index 92% rename from docs/sources/flow/reference/components/otelcol.receiver.vcenter.md rename to docs/sources/reference/components/otelcol.receiver.vcenter.md index a7f0f70ced..8694e7a85f 100644 --- a/docs/sources/flow/reference/components/otelcol.receiver.vcenter.md +++ b/docs/sources/reference/components/otelcol.receiver.vcenter.md @@ -1,9 +1,5 @@ --- -aliases: -- /docs/grafana-cloud/agent/flow/reference/components/otelcol.receiver.vcenter/ -- /docs/grafana-cloud/monitor-infrastructure/agent/flow/reference/components/otelcol.receiver.vcenter/ -- /docs/grafana-cloud/monitor-infrastructure/integrations/agent/flow/reference/components/otelcol.receiver.vcenter/ -canonical: https://grafana.com/docs/agent/latest/flow/reference/components/otelcol.receiver.vcenter/ +canonical: https://grafana.com/docs/alloy/latest/reference/components/otelcol.receiver.vcenter/ title: otelcol.receiver.vcenter description: Learn about otelcol.receiver.vcenter labels: @@ -12,7 +8,7 @@ labels: # otelcol.receiver.vcenter -{{< docs/shared lookup="flow/stability/experimental.md" source="agent" version="" >}} +{{< docs/shared lookup="stability/experimental.md" source="alloy" version="" >}} `otelcol.receiver.vcenter` accepts metrics from a vCenter or ESXi host running VMware vSphere APIs and @@ -94,7 +90,7 @@ output | [output][] | Configures where to send received telemetry data. | yes The `tls` block configures TLS settings used for a server. If the `tls` block isn't provided, TLS won't be used for connections to the server. 
-{{< docs/shared lookup="flow/reference/components/otelcol-tls-config-block.md" source="agent" version="" >}} +{{< docs/shared lookup="reference/components/otelcol-tls-config-block.md" source="alloy" version="" >}} ### metrics block @@ -172,11 +168,11 @@ Name | Type | Description | Default | Required ### debug_metrics block -{{< docs/shared lookup="flow/reference/components/otelcol-debug-metrics-block.md" source="agent" version="" >}} +{{< docs/shared lookup="reference/components/otelcol-debug-metrics-block.md" source="alloy" version="" >}} ### output block -{{< docs/shared lookup="flow/reference/components/output-block.md" source="agent" version="" >}} +{{< docs/shared lookup="reference/components/output-block.md" source="alloy" version="" >}} ## Exported fields diff --git a/docs/sources/flow/reference/components/otelcol.receiver.zipkin.md b/docs/sources/reference/components/otelcol.receiver.zipkin.md similarity index 84% rename from docs/sources/flow/reference/components/otelcol.receiver.zipkin.md rename to docs/sources/reference/components/otelcol.receiver.zipkin.md index 87ed3b6ced..205d33ab76 100644 --- a/docs/sources/flow/reference/components/otelcol.receiver.zipkin.md +++ b/docs/sources/reference/components/otelcol.receiver.zipkin.md @@ -1,10 +1,5 @@ --- -aliases: -- /docs/grafana-cloud/agent/flow/reference/components/otelcol.receiver.zipkin/ -- /docs/grafana-cloud/monitor-infrastructure/agent/flow/reference/components/otelcol.receiver.zipkin/ -- /docs/grafana-cloud/monitor-infrastructure/integrations/agent/flow/reference/components/otelcol.receiver.zipkin/ -- /docs/grafana-cloud/send-data/agent/flow/reference/components/otelcol.receiver.zipkin/ -canonical: https://grafana.com/docs/agent/latest/flow/reference/components/otelcol.receiver.zipkin/ +canonical: https://grafana.com/docs/alloy/latest/reference/components/otelcol.receiver.zipkin/ description: Learn about otelcol.receiver.zipkin title: otelcol.receiver.zipkin --- @@ -71,7 +66,7 @@ refers to a `tls` block defined inside a `grpc` block. The `tls` block configures TLS settings used for a server. If the `tls` block isn't provided, TLS won't be used for connections to the server. -{{< docs/shared lookup="flow/reference/components/otelcol-tls-config-block.md" source="agent" version="" >}} +{{< docs/shared lookup="reference/components/otelcol-tls-config-block.md" source="alloy" version="" >}} ### cors block @@ -97,11 +92,11 @@ If `allowed_headers` includes `"*"`, all headers are permitted. 
### debug_metrics block -{{< docs/shared lookup="flow/reference/components/otelcol-debug-metrics-block.md" source="agent" version="" >}} +{{< docs/shared lookup="reference/components/otelcol-debug-metrics-block.md" source="alloy" version="" >}} ### output block -{{< docs/shared lookup="flow/reference/components/output-block.md" source="agent" version="" >}} +{{< docs/shared lookup="reference/components/output-block.md" source="alloy" version="" >}} ## Exported fields diff --git a/docs/sources/flow/reference/components/prometheus.exporter.apache.md b/docs/sources/reference/components/prometheus.exporter.apache.md similarity index 81% rename from docs/sources/flow/reference/components/prometheus.exporter.apache.md rename to docs/sources/reference/components/prometheus.exporter.apache.md index 5bbccf271d..4c9b9acc6b 100644 --- a/docs/sources/flow/reference/components/prometheus.exporter.apache.md +++ b/docs/sources/reference/components/prometheus.exporter.apache.md @@ -1,10 +1,5 @@ --- -aliases: -- /docs/grafana-cloud/agent/flow/reference/components/prometheus.exporter.apache/ -- /docs/grafana-cloud/monitor-infrastructure/agent/flow/reference/components/prometheus.exporter.apache/ -- /docs/grafana-cloud/monitor-infrastructure/integrations/agent/flow/reference/components/prometheus.exporter.apache/ -- /docs/grafana-cloud/send-data/agent/flow/reference/components/prometheus.exporter.apache/ -canonical: https://grafana.com/docs/agent/latest/flow/reference/components/prometheus.exporter.apache/ +canonical: https://grafana.com/docs/alloy/latest/reference/components/prometheus.exporter.apache/ description: Learn about prometheus.exporter.apache title: prometheus.exporter.apache --- @@ -34,7 +29,7 @@ All arguments are optional. Omitted fields take their default values. ## Exported fields -{{< docs/shared lookup="flow/reference/components/exporter-component-exports.md" source="agent" version="" >}} +{{< docs/shared lookup="reference/components/exporter-component-exports.md" source="alloy" version="" >}} ## Component health @@ -86,7 +81,7 @@ Replace the following: - `USERNAME`: The username to use for authentication to the remote_write API. - `PASSWORD`: The password to use for authentication to the remote_write API. 
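As a rough sketch of how `prometheus.exporter.apache` fits into a metrics pipeline (the component labels, the local `scrape_uri`, and the remote write URL are illustrative assumptions):

```river
prometheus.exporter.apache "example" {
  // mod_status endpoint of the Apache server to collect from (assumed local).
  scrape_uri = "http://localhost/server-status?auto"
}

prometheus.scrape "demo" {
  targets    = prometheus.exporter.apache.example.targets
  forward_to = [prometheus.remote_write.demo.receiver]
}

prometheus.remote_write "demo" {
  endpoint {
    url = "https://prometheus.example.com/api/v1/write"
  }
}
```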
-[scrape]: {{< relref "./prometheus.scrape.md" >}} +[scrape]: ../prometheus.scrape/ diff --git a/docs/sources/flow/reference/components/prometheus.exporter.azure.md b/docs/sources/reference/components/prometheus.exporter.azure.md similarity index 95% rename from docs/sources/flow/reference/components/prometheus.exporter.azure.md rename to docs/sources/reference/components/prometheus.exporter.azure.md index 3c014f6919..d02385f94d 100644 --- a/docs/sources/flow/reference/components/prometheus.exporter.azure.md +++ b/docs/sources/reference/components/prometheus.exporter.azure.md @@ -1,17 +1,12 @@ --- -aliases: -- /docs/grafana-cloud/agent/flow/reference/components/prometheus.exporter.azure/ -- /docs/grafana-cloud/monitor-infrastructure/agent/flow/reference/components/prometheus.exporter.azure/ -- /docs/grafana-cloud/monitor-infrastructure/integrations/agent/flow/reference/components/prometheus.exporter.azure/ -- /docs/grafana-cloud/send-data/agent/flow/reference/components/prometheus.exporter.azure/ -canonical: https://grafana.com/docs/agent/latest/flow/reference/components/prometheus.exporter.azure/ +canonical: https://grafana.com/docs/alloy/latest/reference/components/prometheus.exporter.azure/ description: Learn about prometheus.exporter.azure title: prometheus.exporter.azure --- # prometheus.exporter.azure -The `prometheus.exporter.azure` component embeds [`azure-metrics-exporter`](https://github.com/webdevops/azure-metrics-exporter) to collect metrics from [Azure Monitor](https://azure.microsoft.com/en-us/products/monitor). +The `prometheus.exporter.azure` component embeds [`azure-metrics-exporter`](https://github.com/webdevops/azure-metrics-exporter) to collect metrics from [Azure Monitor](https://azure.microsoft.com/en-us/products/monitor). The exporter supports all metrics defined by Azure Monitor. You can find the complete list of available metrics in the [Azure Monitor documentation](https://learn.microsoft.com/en-us/azure/azure-monitor/essentials/metrics-supported). Metrics for this integration are exposed with the template `azure_{type}_{metric}_{aggregation}_{unit}` by default. 
As an example, @@ -101,7 +96,7 @@ Valid values for `azure_cloud_environment` are `azurecloud`, `azurechinacloud`, ## Exported fields -{{< docs/shared lookup="flow/reference/components/exporter-component-exports.md" source="agent" version="" >}} +{{< docs/shared lookup="reference/components/exporter-component-exports.md" source="alloy" version="" >}} ## Component health diff --git a/docs/sources/flow/reference/components/prometheus.exporter.blackbox.md b/docs/sources/reference/components/prometheus.exporter.blackbox.md similarity index 90% rename from docs/sources/flow/reference/components/prometheus.exporter.blackbox.md rename to docs/sources/reference/components/prometheus.exporter.blackbox.md index 6fc8021d7b..39d02ef419 100644 --- a/docs/sources/flow/reference/components/prometheus.exporter.blackbox.md +++ b/docs/sources/reference/components/prometheus.exporter.blackbox.md @@ -1,10 +1,5 @@ --- -aliases: -- /docs/grafana-cloud/agent/flow/reference/components/prometheus.exporter.blackbox/ -- /docs/grafana-cloud/monitor-infrastructure/agent/flow/reference/components/prometheus.exporter.blackbox/ -- /docs/grafana-cloud/monitor-infrastructure/integrations/agent/flow/reference/components/prometheus.exporter.blackbox/ -- /docs/grafana-cloud/send-data/agent/flow/reference/components/prometheus.exporter.blackbox/ -canonical: https://grafana.com/docs/agent/latest/flow/reference/components/prometheus.exporter.blackbox/ +canonical: https://grafana.com/docs/alloy/latest/reference/components/prometheus.exporter.blackbox/ description: Learn about prometheus.exporter.blackbox title: prometheus.exporter.blackbox --- @@ -74,7 +69,7 @@ Labels specified in the `labels` argument will not override labels set by `black ## Exported fields -{{< docs/shared lookup="flow/reference/components/exporter-component-exports.md" source="agent" version="" >}} +{{< docs/shared lookup="reference/components/exporter-component-exports.md" source="alloy" version="" >}} ## Component health @@ -194,7 +189,7 @@ Replace the following: - `PASSWORD`: The password to use for authentication to the remote_write API. -[scrape]: {{< relref "./prometheus.scrape.md" >}} +[scrape]: ../prometheus.scrape/ diff --git a/docs/sources/flow/reference/components/prometheus.exporter.cadvisor.md b/docs/sources/reference/components/prometheus.exporter.cadvisor.md similarity index 88% rename from docs/sources/flow/reference/components/prometheus.exporter.cadvisor.md rename to docs/sources/reference/components/prometheus.exporter.cadvisor.md index c40f951d9e..6f4ee0ba04 100644 --- a/docs/sources/flow/reference/components/prometheus.exporter.cadvisor.md +++ b/docs/sources/reference/components/prometheus.exporter.cadvisor.md @@ -1,10 +1,5 @@ --- -aliases: -- /docs/grafana-cloud/agent/flow/reference/components/prometheus.exporter.cadvisor/ -- /docs/grafana-cloud/monitor-infrastructure/agent/flow/reference/components/prometheus.exporter.cadvisor/ -- /docs/grafana-cloud/monitor-infrastructure/integrations/agent/flow/reference/components/prometheus.exporter.cadvisor/ -- /docs/grafana-cloud/send-data/agent/flow/reference/components/prometheus.exporter.cadvisor/ -canonical: https://grafana.com/docs/agent/latest/flow/reference/components/prometheus.exporter.cadvisor/ +canonical: https://grafana.com/docs/alloy/latest/reference/components/prometheus.exporter.cadvisor/ description: Learn about the prometheus.exporter.cadvisor title: prometheus.exporter.cadvisor --- @@ -71,7 +66,7 @@ fully through arguments. 
## Exported fields -{{< docs/shared lookup="flow/reference/components/exporter-component-exports.md" source="agent" version="" >}} +{{< docs/shared lookup="reference/components/exporter-component-exports.md" source="alloy" version="" >}} ## Component health @@ -125,7 +120,7 @@ Replace the following: - `USERNAME`: The username to use for authentication to the remote_write API. - `PASSWORD`: The password to use for authentication to the remote_write API. -[scrape]: {{< relref "./prometheus.scrape.md" >}} +[scrape]: ../prometheus.scrape/ diff --git a/docs/sources/flow/reference/components/prometheus.exporter.cloudwatch.md b/docs/sources/reference/components/prometheus.exporter.cloudwatch.md similarity index 97% rename from docs/sources/flow/reference/components/prometheus.exporter.cloudwatch.md rename to docs/sources/reference/components/prometheus.exporter.cloudwatch.md index 4caae767f3..efcf9913f5 100644 --- a/docs/sources/flow/reference/components/prometheus.exporter.cloudwatch.md +++ b/docs/sources/reference/components/prometheus.exporter.cloudwatch.md @@ -1,10 +1,5 @@ --- -aliases: -- /docs/grafana-cloud/agent/flow/reference/components/prometheus.exporter.cloudwatch/ -- /docs/grafana-cloud/monitor-infrastructure/agent/flow/reference/components/prometheus.exporter.cloudwatch/ -- /docs/grafana-cloud/monitor-infrastructure/integrations/agent/flow/reference/components/prometheus.exporter.cloudwatch/ -- /docs/grafana-cloud/send-data/agent/flow/reference/components/prometheus.exporter.cloudwatch/ -canonical: https://grafana.com/docs/agent/latest/flow/reference/components/prometheus.exporter.cloudwatch/ +canonical: https://grafana.com/docs/alloy/latest/reference/components/prometheus.exporter.cloudwatch/ description: Learn about prometheus.exporter.cloudwatch title: prometheus.exporter.cloudwatch --- @@ -341,7 +336,7 @@ This feature also prevents component scrape timeouts when you gather high volume ## Exported fields -{{< docs/shared lookup="flow/reference/components/exporter-component-exports.md" source="agent" version="" >}} +{{< docs/shared lookup="reference/components/exporter-component-exports.md" source="alloy" version="" >}} ## Component health diff --git a/docs/sources/flow/reference/components/prometheus.exporter.consul.md b/docs/sources/reference/components/prometheus.exporter.consul.md similarity index 89% rename from docs/sources/flow/reference/components/prometheus.exporter.consul.md rename to docs/sources/reference/components/prometheus.exporter.consul.md index a8480208ed..bae1916106 100644 --- a/docs/sources/flow/reference/components/prometheus.exporter.consul.md +++ b/docs/sources/reference/components/prometheus.exporter.consul.md @@ -1,10 +1,5 @@ --- -aliases: -- /docs/grafana-cloud/agent/flow/reference/components/prometheus.exporter.consul/ -- /docs/grafana-cloud/monitor-infrastructure/agent/flow/reference/components/prometheus.exporter.consul/ -- /docs/grafana-cloud/monitor-infrastructure/integrations/agent/flow/reference/components/prometheus.exporter.consul/ -- /docs/grafana-cloud/send-data/agent/flow/reference/components/prometheus.exporter.consul/ -canonical: https://grafana.com/docs/agent/latest/flow/reference/components/prometheus.exporter.consul/ +canonical: https://grafana.com/docs/alloy/latest/reference/components/prometheus.exporter.consul/ description: Learn about prometheus.exporter.consul title: prometheus.exporter.consul --- @@ -44,7 +39,7 @@ All arguments are optional. Omitted fields take their default values. 
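A minimal `prometheus.exporter.consul` block, as a sketch only; the `example` label and the local Consul address are assumptions:

```river
prometheus.exporter.consul "example" {
  // HTTP API address of the Consul server to collect metrics from (assumed).
  server = "http://localhost:8500"
}
```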
## Exported fields -{{< docs/shared lookup="flow/reference/components/exporter-component-exports.md" source="agent" version="" >}} +{{< docs/shared lookup="reference/components/exporter-component-exports.md" source="alloy" version="" >}} ## Component health @@ -96,7 +91,7 @@ Replace the following: - `USERNAME`: The username to use for authentication to the remote_write API. - `PASSWORD`: The password to use for authentication to the remote_write API. -[scrape]: {{< relref "./prometheus.scrape.md" >}} +[scrape]: ../prometheus.scrape/ diff --git a/docs/sources/flow/reference/components/prometheus.exporter.dnsmasq.md b/docs/sources/reference/components/prometheus.exporter.dnsmasq.md similarity index 81% rename from docs/sources/flow/reference/components/prometheus.exporter.dnsmasq.md rename to docs/sources/reference/components/prometheus.exporter.dnsmasq.md index 80fdd881ae..243bc03a15 100644 --- a/docs/sources/flow/reference/components/prometheus.exporter.dnsmasq.md +++ b/docs/sources/reference/components/prometheus.exporter.dnsmasq.md @@ -1,10 +1,5 @@ --- -aliases: -- /docs/grafana-cloud/agent/flow/reference/components/prometheus.exporter.dnsmasq/ -- /docs/grafana-cloud/monitor-infrastructure/agent/flow/reference/components/prometheus.exporter.dnsmasq/ -- /docs/grafana-cloud/monitor-infrastructure/integrations/agent/flow/reference/components/prometheus.exporter.dnsmasq/ -- /docs/grafana-cloud/send-data/agent/flow/reference/components/prometheus.exporter.dnsmasq/ -canonical: https://grafana.com/docs/agent/latest/flow/reference/components/prometheus.exporter.dnsmasq/ +canonical: https://grafana.com/docs/alloy/latest/reference/components/prometheus.exporter.dnsmasq/ description: Learn about prometheus.exporter.dnsmasq title: prometheus.exporter.dnsmasq --- @@ -34,7 +29,7 @@ All arguments are optional. Omitted fields take their default values. ## Exported fields -{{< docs/shared lookup="flow/reference/components/exporter-component-exports.md" source="agent" version="" >}} +{{< docs/shared lookup="reference/components/exporter-component-exports.md" source="alloy" version="" >}} ## Component health @@ -86,7 +81,7 @@ Replace the following: - `USERNAME`: The username to use for authentication to the remote_write API. - `PASSWORD`: The password to use for authentication to the remote_write API. 
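A hedged sketch of `prometheus.exporter.dnsmasq`; the label and address are assumptions:

```river
prometheus.exporter.dnsmasq "example" {
  // Address of the dnsmasq server to query (assumed local resolver).
  address = "localhost:53"
}
```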
-[scrape]: {{< relref "./prometheus.scrape.md" >}} +[scrape]: ../prometheus.scrape/ diff --git a/docs/sources/flow/reference/components/prometheus.exporter.elasticsearch.md b/docs/sources/reference/components/prometheus.exporter.elasticsearch.md similarity index 89% rename from docs/sources/flow/reference/components/prometheus.exporter.elasticsearch.md rename to docs/sources/reference/components/prometheus.exporter.elasticsearch.md index 487ce82eab..147141f227 100644 --- a/docs/sources/flow/reference/components/prometheus.exporter.elasticsearch.md +++ b/docs/sources/reference/components/prometheus.exporter.elasticsearch.md @@ -1,10 +1,5 @@ --- -aliases: -- /docs/grafana-cloud/agent/flow/reference/components/prometheus.exporter.elasticsearch/ -- /docs/grafana-cloud/monitor-infrastructure/agent/flow/reference/components/prometheus.exporter.elasticsearch/ -- /docs/grafana-cloud/monitor-infrastructure/integrations/agent/flow/reference/components/prometheus.exporter.elasticsearch/ -- /docs/grafana-cloud/send-data/agent/flow/reference/components/prometheus.exporter.elasticsearch/ -canonical: https://grafana.com/docs/agent/latest/flow/reference/components/prometheus.exporter.elasticsearch/ +canonical: https://grafana.com/docs/alloy/latest/reference/components/prometheus.exporter.elasticsearch/ description: Learn about prometheus.exporter.elasticsearch title: prometheus.exporter.elasticsearch --- @@ -69,11 +64,11 @@ The following blocks are supported inside the definition of ### basic_auth block -{{< docs/shared lookup="flow/reference/components/basic-auth-block.md" source="agent" version="" >}} +{{< docs/shared lookup="reference/components/basic-auth-block.md" source="alloy" version="" >}} ## Exported fields -{{< docs/shared lookup="flow/reference/components/exporter-component-exports.md" source="agent" version="" >}} +{{< docs/shared lookup="reference/components/exporter-component-exports.md" source="alloy" version="" >}} ## Component health @@ -129,7 +124,7 @@ Replace the following: - `USERNAME`: The username to use for authentication to the remote_write API. - `PASSWORD`: The password to use for authentication to the remote_write API. 
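A sketch of `prometheus.exporter.elasticsearch` using the `basic_auth` block described above; the label, address, and credentials are placeholders, not values from this change:

```river
prometheus.exporter.elasticsearch "example" {
  address = "http://localhost:9200"

  basic_auth {
    username = "elastic"
    password = "secret" // placeholder; load from a secret store in practice
  }
}
```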
-[scrape]: {{< relref "./prometheus.scrape.md" >}} +[scrape]: ../prometheus.scrape/ diff --git a/docs/sources/flow/reference/components/prometheus.exporter.gcp.md b/docs/sources/reference/components/prometheus.exporter.gcp.md similarity index 94% rename from docs/sources/flow/reference/components/prometheus.exporter.gcp.md rename to docs/sources/reference/components/prometheus.exporter.gcp.md index 017542a0a8..213c1b2b59 100644 --- a/docs/sources/flow/reference/components/prometheus.exporter.gcp.md +++ b/docs/sources/reference/components/prometheus.exporter.gcp.md @@ -1,10 +1,5 @@ --- -aliases: -- /docs/grafana-cloud/agent/flow/reference/components/prometheus.exporter.gcp/ -- /docs/grafana-cloud/monitor-infrastructure/agent/flow/reference/components/prometheus.exporter.gcp/ -- /docs/grafana-cloud/monitor-infrastructure/integrations/agent/flow/reference/components/prometheus.exporter.gcp/ -- /docs/grafana-cloud/send-data/agent/flow/reference/components/prometheus.exporter.gcp/ -canonical: https://grafana.com/docs/agent/latest/flow/reference/components/prometheus.exporter.gcp/ +canonical: https://grafana.com/docs/alloy/latest/reference/components/prometheus.exporter.gcp/ description: Learn about prometheus.exporter.gcp title: prometheus.exporter.gcp --- @@ -82,7 +77,7 @@ For `ingest_delay`, you can see the values for this in documented metrics as `Af ## Exported fields -{{< docs/shared lookup="flow/reference/components/exporter-component-exports.md" source="agent" version="" >}} +{{< docs/shared lookup="reference/components/exporter-component-exports.md" source="alloy" version="" >}} ## Component health diff --git a/docs/sources/flow/reference/components/prometheus.exporter.github.md b/docs/sources/reference/components/prometheus.exporter.github.md similarity index 84% rename from docs/sources/flow/reference/components/prometheus.exporter.github.md rename to docs/sources/reference/components/prometheus.exporter.github.md index 10b641a6e6..a803653ccd 100644 --- a/docs/sources/flow/reference/components/prometheus.exporter.github.md +++ b/docs/sources/reference/components/prometheus.exporter.github.md @@ -1,10 +1,5 @@ --- -aliases: -- /docs/grafana-cloud/agent/flow/reference/components/prometheus.exporter.github/ -- /docs/grafana-cloud/monitor-infrastructure/agent/flow/reference/components/prometheus.exporter.github/ -- /docs/grafana-cloud/monitor-infrastructure/integrations/agent/flow/reference/components/prometheus.exporter.github/ -- /docs/grafana-cloud/send-data/agent/flow/reference/components/prometheus.exporter.github/ -canonical: https://grafana.com/docs/agent/latest/flow/reference/components/prometheus.exporter.github/ +canonical: https://grafana.com/docs/alloy/latest/reference/components/prometheus.exporter.github/ description: Learn about prometheus.exporter.github title: prometheus.exporter.github --- @@ -41,7 +36,7 @@ When provided, `api_token_file` takes precedence over `api_token`. ## Exported fields -{{< docs/shared lookup="flow/reference/components/exporter-component-exports.md" source="agent" version="" >}} +{{< docs/shared lookup="reference/components/exporter-component-exports.md" source="alloy" version="" >}} ## Component health @@ -94,7 +89,7 @@ Replace the following: - `USERNAME`: The username to use for authentication to the remote_write API. - `PASSWORD`: The password to use for authentication to the remote_write API. 
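A sketch of `prometheus.exporter.github`, assuming the token is supplied through an environment variable named `GITHUB_TOKEN` (both the variable name and the repository list are illustrative):

```river
prometheus.exporter.github "example" {
  repositories = ["grafana/agent"]
  // Read the API token from the environment rather than hard-coding it.
  api_token    = env("GITHUB_TOKEN")
}
```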
-[scrape]: {{< relref "./prometheus.scrape.md" >}} +[scrape]: ../prometheus.scrape/ diff --git a/docs/sources/flow/reference/components/prometheus.exporter.kafka.md b/docs/sources/reference/components/prometheus.exporter.kafka.md similarity index 92% rename from docs/sources/flow/reference/components/prometheus.exporter.kafka.md rename to docs/sources/reference/components/prometheus.exporter.kafka.md index 4dbd7c4c4c..643505c804 100644 --- a/docs/sources/flow/reference/components/prometheus.exporter.kafka.md +++ b/docs/sources/reference/components/prometheus.exporter.kafka.md @@ -1,10 +1,5 @@ --- -aliases: -- /docs/grafana-cloud/agent/flow/reference/components/prometheus.exporter.kafka/ -- /docs/grafana-cloud/monitor-infrastructure/agent/flow/reference/components/prometheus.exporter.kafka/ -- /docs/grafana-cloud/monitor-infrastructure/integrations/agent/flow/reference/components/prometheus.exporter.kafka/ -- /docs/grafana-cloud/send-data/agent/flow/reference/components/prometheus.exporter.kafka/ -canonical: https://grafana.com/docs/agent/latest/flow/reference/components/prometheus.exporter.kafka/ +canonical: https://grafana.com/docs/alloy/latest/reference/components/prometheus.exporter.kafka/ description: Learn about prometheus.exporter.kafka title: prometheus.exporter.kafka --- @@ -54,7 +49,7 @@ Omitted fields take their default values. ## Exported fields -{{< docs/shared lookup="flow/reference/components/exporter-component-exports.md" source="agent" version="" >}} +{{< docs/shared lookup="reference/components/exporter-component-exports.md" source="alloy" version="" >}} ## Component health @@ -106,7 +101,7 @@ Replace the following: - `USERNAME`: The username to use for authentication to the remote_write API. - `PASSWORD`: The password to use for authentication to the remote_write API. 
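A minimal `prometheus.exporter.kafka` sketch; the single local broker address is an assumption:

```river
prometheus.exporter.kafka "example" {
  // Broker addresses to connect to.
  kafka_uris = ["localhost:9092"]
}
```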
-[scrape]: {{< relref "./prometheus.scrape.md" >}} +[scrape]: ../prometheus.scrape/ diff --git a/docs/sources/flow/reference/components/prometheus.exporter.memcached.md b/docs/sources/reference/components/prometheus.exporter.memcached.md similarity index 79% rename from docs/sources/flow/reference/components/prometheus.exporter.memcached.md rename to docs/sources/reference/components/prometheus.exporter.memcached.md index 8bf7d6e54f..f0c06223bb 100644 --- a/docs/sources/flow/reference/components/prometheus.exporter.memcached.md +++ b/docs/sources/reference/components/prometheus.exporter.memcached.md @@ -1,10 +1,5 @@ --- -aliases: -- /docs/grafana-cloud/agent/flow/reference/components/prometheus.exporter.memcached/ -- /docs/grafana-cloud/monitor-infrastructure/agent/flow/reference/components/prometheus.exporter.memcached/ -- /docs/grafana-cloud/monitor-infrastructure/integrations/agent/flow/reference/components/prometheus.exporter.memcached/ -- /docs/grafana-cloud/send-data/agent/flow/reference/components/prometheus.exporter.memcached/ -canonical: https://grafana.com/docs/agent/latest/flow/reference/components/prometheus.exporter.memcached/ +canonical: https://grafana.com/docs/alloy/latest/reference/components/prometheus.exporter.memcached/ description: Learn about prometheus.exporter.memcached title: prometheus.exporter.memcached --- @@ -42,11 +37,11 @@ The following blocks are supported inside the definition of `prometheus.exporter ### tls_config block -{{< docs/shared lookup="flow/reference/components/tls-config-block.md" source="agent" version="" >}} +{{< docs/shared lookup="reference/components/tls-config-block.md" source="alloy" version="" >}} ## Exported fields -{{< docs/shared lookup="flow/reference/components/exporter-component-exports.md" source="agent" version="" >}} +{{< docs/shared lookup="reference/components/exporter-component-exports.md" source="alloy" version="" >}} ## Component health @@ -98,7 +93,7 @@ Replace the following: - `USERNAME`: The username to use for authentication to the remote_write API. - `PASSWORD`: The password to use for authentication to the remote_write API. 
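A minimal `prometheus.exporter.memcached` sketch; the address and timeout shown are assumptions:

```river
prometheus.exporter.memcached "example" {
  address = "localhost:11211"
  timeout = "5s"
}
```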
-[scrape]: {{< relref "./prometheus.scrape.md" >}} +[scrape]: ../prometheus.scrape/ diff --git a/docs/sources/flow/reference/components/prometheus.exporter.mongodb.md b/docs/sources/reference/components/prometheus.exporter.mongodb.md similarity index 85% rename from docs/sources/flow/reference/components/prometheus.exporter.mongodb.md rename to docs/sources/reference/components/prometheus.exporter.mongodb.md index e6231dad9d..7de3495820 100644 --- a/docs/sources/flow/reference/components/prometheus.exporter.mongodb.md +++ b/docs/sources/reference/components/prometheus.exporter.mongodb.md @@ -1,10 +1,5 @@ --- -aliases: -- /docs/grafana-cloud/agent/flow/reference/components/prometheus.exporter.mongodb/ -- /docs/grafana-cloud/monitor-infrastructure/agent/flow/reference/components/prometheus.exporter.mongodb/ -- /docs/grafana-cloud/monitor-infrastructure/integrations/agent/flow/reference/components/prometheus.exporter.mongodb/ -- /docs/grafana-cloud/send-data/agent/flow/reference/components/prometheus.exporter.mongodb/ -canonical: https://grafana.com/docs/agent/latest/flow/reference/components/prometheus.exporter.mongodb/ +canonical: https://grafana.com/docs/alloy/latest/reference/components/prometheus.exporter.mongodb/ description: Learn about prometheus.exporter.mongodb title: prometheus.exporter.mongodb --- @@ -46,7 +41,7 @@ For `tls_basic_auth_config_path`, check [`tls_config`](https://prometheus.io/doc ## Exported fields -{{< docs/shared lookup="flow/reference/components/exporter-component-exports.md" source="agent" version="" >}} +{{< docs/shared lookup="reference/components/exporter-component-exports.md" source="alloy" version="" >}} ## Component health @@ -87,7 +82,7 @@ prometheus.remote_write "default" { } ``` -[scrape]: {{< relref "./prometheus.scrape.md" >}} +[scrape]: ../prometheus.scrape/ diff --git a/docs/sources/flow/reference/components/prometheus.exporter.mssql.md b/docs/sources/reference/components/prometheus.exporter.mssql.md similarity index 95% rename from docs/sources/flow/reference/components/prometheus.exporter.mssql.md rename to docs/sources/reference/components/prometheus.exporter.mssql.md index ef7e708591..ebaf00f475 100644 --- a/docs/sources/flow/reference/components/prometheus.exporter.mssql.md +++ b/docs/sources/reference/components/prometheus.exporter.mssql.md @@ -1,10 +1,5 @@ --- -aliases: -- /docs/grafana-cloud/agent/flow/reference/components/prometheus.exporter.mssql/ -- /docs/grafana-cloud/monitor-infrastructure/agent/flow/reference/components/prometheus.exporter.mssql/ -- /docs/grafana-cloud/monitor-infrastructure/integrations/agent/flow/reference/components/prometheus.exporter.mssql/ -- /docs/grafana-cloud/send-data/agent/flow/reference/components/prometheus.exporter.mssql/ -canonical: https://grafana.com/docs/agent/latest/flow/reference/components/prometheus.exporter.mssql/ +canonical: https://grafana.com/docs/alloy/latest/reference/components/prometheus.exporter.mssql/ description: Learn about prometheus.exporter.mssql title: prometheus.exporter.mssql --- @@ -75,7 +70,7 @@ fully through arguments. ## Exported fields -{{< docs/shared lookup="flow/reference/components/exporter-component-exports.md" source="agent" version="" >}} +{{< docs/shared lookup="reference/components/exporter-component-exports.md" source="alloy" version="" >}} ## Component health @@ -127,7 +122,7 @@ Replace the following: - `USERNAME`: The username to use for authentication to the remote_write API. - `PASSWORD`: The password to use for authentication to the remote_write API. 
-[scrape]: {{< relref "./prometheus.scrape.md" >}} +[scrape]: ../prometheus.scrape/ ## Custom metrics You can use the optional `query_config` parameter to retrieve custom Prometheus metrics for a MSSQL instance. diff --git a/docs/sources/flow/reference/components/prometheus.exporter.mysql.md b/docs/sources/reference/components/prometheus.exporter.mysql.md similarity index 95% rename from docs/sources/flow/reference/components/prometheus.exporter.mysql.md rename to docs/sources/reference/components/prometheus.exporter.mysql.md index 14df71386a..f062888739 100644 --- a/docs/sources/flow/reference/components/prometheus.exporter.mysql.md +++ b/docs/sources/reference/components/prometheus.exporter.mysql.md @@ -1,10 +1,5 @@ --- -aliases: -- /docs/grafana-cloud/agent/flow/reference/components/prometheus.exporter.mysql/ -- /docs/grafana-cloud/monitor-infrastructure/agent/flow/reference/components/prometheus.exporter.mysql/ -- /docs/grafana-cloud/monitor-infrastructure/integrations/agent/flow/reference/components/prometheus.exporter.mysql/ -- /docs/grafana-cloud/send-data/agent/flow/reference/components/prometheus.exporter.mysql/ -canonical: https://grafana.com/docs/agent/latest/flow/reference/components/prometheus.exporter.mysql/ +canonical: https://grafana.com/docs/alloy/latest/reference/components/prometheus.exporter.mysql/ description: Learn about prometheus.exporter.mysql title: prometheus.exporter.mysql --- @@ -158,7 +153,7 @@ The full list of supported collectors is: ## Exported fields -{{< docs/shared lookup="flow/reference/components/exporter-component-exports.md" source="agent" version="" >}} +{{< docs/shared lookup="reference/components/exporter-component-exports.md" source="alloy" version="" >}} ## Component health @@ -211,7 +206,7 @@ Replace the following: - `USERNAME`: The username to use for authentication to the remote_write API. - `PASSWORD`: The password to use for authentication to the remote_write API. -[scrape]: {{< relref "./prometheus.scrape.md" >}} +[scrape]: ../prometheus.scrape/ diff --git a/docs/sources/flow/reference/components/prometheus.exporter.oracledb.md b/docs/sources/reference/components/prometheus.exporter.oracledb.md similarity index 83% rename from docs/sources/flow/reference/components/prometheus.exporter.oracledb.md rename to docs/sources/reference/components/prometheus.exporter.oracledb.md index a259a5bfae..ccad3484ec 100644 --- a/docs/sources/flow/reference/components/prometheus.exporter.oracledb.md +++ b/docs/sources/reference/components/prometheus.exporter.oracledb.md @@ -1,10 +1,5 @@ --- -aliases: -- /docs/grafana-cloud/agent/flow/reference/components/prometheus.exporter.oracledb/ -- /docs/grafana-cloud/monitor-infrastructure/agent/flow/reference/components/prometheus.exporter.oracledb/ -- /docs/grafana-cloud/monitor-infrastructure/integrations/agent/flow/reference/components/prometheus.exporter.oracledb/ -- /docs/grafana-cloud/send-data/agent/flow/reference/components/prometheus.exporter.oracledb/ -canonical: https://grafana.com/docs/agent/latest/flow/reference/components/prometheus.exporter.oracledb/ +canonical: https://grafana.com/docs/alloy/latest/reference/components/prometheus.exporter.oracledb/ description: Learn about prometheus.exporter.oracledb title: prometheus.exporter.oracledb --- @@ -47,7 +42,7 @@ fully through arguments. 
## Exported fields -{{< docs/shared lookup="flow/reference/components/exporter-component-exports.md" source="agent" version="" >}} +{{< docs/shared lookup="reference/components/exporter-component-exports.md" source="alloy" version="" >}} ## Component health @@ -99,7 +94,7 @@ Replace the following: - `USERNAME`: The username to use for authentication to the remote_write API. - `PASSWORD`: The password to use for authentication to the remote_write API. -[scrape]: {{< relref "./prometheus.scrape.md" >}} +[scrape]: ../prometheus.scrape/ diff --git a/docs/sources/flow/reference/components/prometheus.exporter.postgres.md b/docs/sources/reference/components/prometheus.exporter.postgres.md similarity index 92% rename from docs/sources/flow/reference/components/prometheus.exporter.postgres.md rename to docs/sources/reference/components/prometheus.exporter.postgres.md index d5f6cc78ea..5c12182702 100644 --- a/docs/sources/flow/reference/components/prometheus.exporter.postgres.md +++ b/docs/sources/reference/components/prometheus.exporter.postgres.md @@ -1,10 +1,5 @@ --- -aliases: -- /docs/grafana-cloud/agent/flow/reference/components/prometheus.exporter.postgres/ -- /docs/grafana-cloud/monitor-infrastructure/agent/flow/reference/components/prometheus.exporter.postgres/ -- /docs/grafana-cloud/monitor-infrastructure/integrations/agent/flow/reference/components/prometheus.exporter.postgres/ -- /docs/grafana-cloud/send-data/agent/flow/reference/components/prometheus.exporter.postgres/ -canonical: https://grafana.com/docs/agent/latest/flow/reference/components/prometheus.exporter.postgres/ +canonical: https://grafana.com/docs/alloy/latest/reference/components/prometheus.exporter.postgres/ description: Learn about prometheus.exporter.postgres labels: stage: beta @@ -72,7 +67,7 @@ If `autodiscovery` is disabled, neither `database_allowlist` nor `database_denyl ## Exported fields -{{< docs/shared lookup="flow/reference/components/exporter-component-exports.md" source="agent" version="" >}} +{{< docs/shared lookup="reference/components/exporter-component-exports.md" source="alloy" version="" >}} ## Component health @@ -212,7 +207,7 @@ Replace the following: - `USERNAME`: The username to use for authentication to the remote_write API. - `PASSWORD`: The password to use for authentication to the remote_write API. 
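A minimal `prometheus.exporter.postgres` sketch; every value in the connection string is a placeholder:

```river
prometheus.exporter.postgres "example" {
  // Connection string for the PostgreSQL server to collect from.
  data_source_names = ["postgresql://username:password@localhost:5432/mydb?sslmode=disable"]
}
```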
-[scrape]: {{< relref "./prometheus.scrape.md" >}} +[scrape]: ../prometheus.scrape/ diff --git a/docs/sources/flow/reference/components/prometheus.exporter.process.md b/docs/sources/reference/components/prometheus.exporter.process.md similarity index 90% rename from docs/sources/flow/reference/components/prometheus.exporter.process.md rename to docs/sources/reference/components/prometheus.exporter.process.md index 2ece4bfb96..bc38cae844 100644 --- a/docs/sources/flow/reference/components/prometheus.exporter.process.md +++ b/docs/sources/reference/components/prometheus.exporter.process.md @@ -1,10 +1,5 @@ --- -aliases: -- /docs/grafana-cloud/agent/flow/reference/components/prometheus.exporter.process/ -- /docs/grafana-cloud/monitor-infrastructure/agent/flow/reference/components/prometheus.exporter.process/ -- /docs/grafana-cloud/monitor-infrastructure/integrations/agent/flow/reference/components/prometheus.exporter.process/ -- /docs/grafana-cloud/send-data/agent/flow/reference/components/prometheus.exporter.process/ -canonical: https://grafana.com/docs/agent/latest/flow/reference/components/prometheus.exporter.process/ +canonical: https://grafana.com/docs/alloy/latest/reference/components/prometheus.exporter.process/ description: Learn about prometheus.exporter.process title: prometheus.exporter.process --- @@ -76,7 +71,7 @@ Each regex in `cmdline` must match the corresponding argv for the process to be ## Exported fields -{{< docs/shared lookup="flow/reference/components/exporter-component-exports.md" source="agent" version="" >}} +{{< docs/shared lookup="reference/components/exporter-component-exports.md" source="alloy" version="" >}} ## Component health @@ -132,7 +127,7 @@ Replace the following: - `USERNAME`: The username to use for authentication to the remote_write API. - `PASSWORD`: The password to use for authentication to the remote_write API. 
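A sketch of `prometheus.exporter.process` with a single matcher; the matched process name is an assumption:

```river
prometheus.exporter.process "example" {
  track_children = true

  // Only export metrics for processes whose comm value matches.
  matcher {
    comm = ["alloy"]
  }
}
```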
-[scrape]: {{< relref "./prometheus.scrape.md" >}} +[scrape]: ../prometheus.scrape/ diff --git a/docs/sources/flow/reference/components/prometheus.exporter.redis.md b/docs/sources/reference/components/prometheus.exporter.redis.md similarity index 93% rename from docs/sources/flow/reference/components/prometheus.exporter.redis.md rename to docs/sources/reference/components/prometheus.exporter.redis.md index 93cc839aeb..0f02e4f3d4 100644 --- a/docs/sources/flow/reference/components/prometheus.exporter.redis.md +++ b/docs/sources/reference/components/prometheus.exporter.redis.md @@ -1,10 +1,5 @@ --- -aliases: -- /docs/grafana-cloud/agent/flow/reference/components/prometheus.exporter.redis/ -- /docs/grafana-cloud/monitor-infrastructure/agent/flow/reference/components/prometheus.exporter.redis/ -- /docs/grafana-cloud/monitor-infrastructure/integrations/agent/flow/reference/components/prometheus.exporter.redis/ -- /docs/grafana-cloud/send-data/agent/flow/reference/components/prometheus.exporter.redis/ -canonical: https://grafana.com/docs/agent/latest/flow/reference/components/prometheus.exporter.redis/ +canonical: https://grafana.com/docs/alloy/latest/reference/components/prometheus.exporter.redis/ description: Learn about prometheus.exporter.redis title: prometheus.exporter.redis --- @@ -78,7 +73,7 @@ Note that setting `export_client_port` increases the cardinality of all Redis me ## Exported fields -{{< docs/shared lookup="flow/reference/components/exporter-component-exports.md" source="agent" version="" >}} +{{< docs/shared lookup="reference/components/exporter-component-exports.md" source="alloy" version="" >}} ## Component health @@ -130,7 +125,7 @@ Replace the following: - `USERNAME`: The username to use for authentication to the remote_write API. - `PASSWORD`: The password to use for authentication to the remote_write API. -[scrape]: {{< relref "./prometheus.scrape.md" >}} +[scrape]: ../prometheus.scrape/ diff --git a/docs/sources/flow/reference/components/prometheus.exporter.self.md b/docs/sources/reference/components/prometheus.exporter.self.md similarity index 80% rename from docs/sources/flow/reference/components/prometheus.exporter.self.md rename to docs/sources/reference/components/prometheus.exporter.self.md index 42970e3214..0b700825ed 100644 --- a/docs/sources/flow/reference/components/prometheus.exporter.self.md +++ b/docs/sources/reference/components/prometheus.exporter.self.md @@ -1,9 +1,5 @@ --- -aliases: -- /docs/grafana-cloud/monitor-infrastructure/agent/flow/reference/components/prometheus.exporter.agent/ -- /docs/grafana-cloud/send-data/agent/flow/reference/components/prometheus.exporter.agent/ -- ./prometheus.exporter.agent/ -canonical: https://grafana.com/docs/agent/latest/flow/reference/components/prometheus.exporter.self/ +canonical: https://grafana.com/docs/alloy/latest/reference/components/prometheus.exporter.self/ description: Learn about prometheus.exporter.self title: prometheus.exporter.self --- @@ -25,7 +21,7 @@ prometheus.exporter.self "agent" { ## Exported fields -{{< docs/shared lookup="flow/reference/components/exporter-component-exports.md" source="agent" version="" >}} +{{< docs/shared lookup="reference/components/exporter-component-exports.md" source="alloy" version="" >}} ## Component health @@ -72,8 +68,7 @@ Replace the following: - `USERNAME`: The username to use for authentication to the remote_write API. - `PASSWORD`: The password to use for authentication to the remote_write API. 
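A sketch of scraping `prometheus.exporter.self`, following the usage shown above; the `example` and `demo` labels are illustrative:

```river
prometheus.exporter.self "example" { }

prometheus.scrape "demo" {
  targets    = prometheus.exporter.self.example.targets
  // Assumes a prometheus.remote_write "demo" component is defined elsewhere.
  forward_to = [prometheus.remote_write.demo.receiver]
}
```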
-[scrape]: {{< relref "./prometheus.scrape.md" >}} - +[scrape]: ../prometheus.scrape/ diff --git a/docs/sources/flow/reference/components/prometheus.exporter.snmp.md b/docs/sources/reference/components/prometheus.exporter.snmp.md similarity index 91% rename from docs/sources/flow/reference/components/prometheus.exporter.snmp.md rename to docs/sources/reference/components/prometheus.exporter.snmp.md index 2773809724..d910dd3018 100644 --- a/docs/sources/flow/reference/components/prometheus.exporter.snmp.md +++ b/docs/sources/reference/components/prometheus.exporter.snmp.md @@ -1,10 +1,5 @@ --- -aliases: -- /docs/grafana-cloud/agent/flow/reference/components/prometheus.exporter.snmp/ -- /docs/grafana-cloud/monitor-infrastructure/agent/flow/reference/components/prometheus.exporter.snmp/ -- /docs/grafana-cloud/monitor-infrastructure/integrations/agent/flow/reference/components/prometheus.exporter.snmp/ -- /docs/grafana-cloud/send-data/agent/flow/reference/components/prometheus.exporter.snmp/ -canonical: https://grafana.com/docs/agent/latest/flow/reference/components/prometheus.exporter.snmp/ +canonical: https://grafana.com/docs/alloy/latest/reference/components/prometheus.exporter.snmp/ description: Learn about prometheus.exporter.snmp title: prometheus.exporter.snmp --- @@ -89,7 +84,7 @@ The `walk_param` block may be specified multiple times to define multiple SNMP c ## Exported fields -{{< docs/shared lookup="flow/reference/components/exporter-component-exports.md" source="agent" version="" >}} +{{< docs/shared lookup="reference/components/exporter-component-exports.md" source="alloy" version="" >}} ## Component health @@ -198,7 +193,7 @@ Replace the following: - `USERNAME`: The username to use for authentication to the remote_write API. - `PASSWORD`: The password to use for authentication to the remote_write API. -[scrape]: {{< relref "./prometheus.scrape.md" >}} +[scrape]: ../prometheus.scrape/ diff --git a/docs/sources/flow/reference/components/prometheus.exporter.snowflake.md b/docs/sources/reference/components/prometheus.exporter.snowflake.md similarity index 83% rename from docs/sources/flow/reference/components/prometheus.exporter.snowflake.md rename to docs/sources/reference/components/prometheus.exporter.snowflake.md index c0b0758260..30e676e9f4 100644 --- a/docs/sources/flow/reference/components/prometheus.exporter.snowflake.md +++ b/docs/sources/reference/components/prometheus.exporter.snowflake.md @@ -1,10 +1,5 @@ --- -aliases: -- /docs/grafana-cloud/agent/flow/reference/components/prometheus.exporter.snowflake/ -- /docs/grafana-cloud/monitor-infrastructure/agent/flow/reference/components/prometheus.exporter.snowflake/ -- /docs/grafana-cloud/monitor-infrastructure/integrations/agent/flow/reference/components/prometheus.exporter.snowflake/ -- /docs/grafana-cloud/send-data/agent/flow/reference/components/prometheus.exporter.snowflake/ -canonical: https://grafana.com/docs/agent/latest/flow/reference/components/prometheus.exporter.snowflake/ +canonical: https://grafana.com/docs/alloy/latest/reference/components/prometheus.exporter.snowflake/ description: Learn about prometheus.exporter.snowflake title: prometheus.exporter.snowflake --- @@ -45,7 +40,7 @@ fully through arguments. 
## Exported fields -{{< docs/shared lookup="flow/reference/components/exporter-component-exports.md" source="agent" version="" >}} +{{< docs/shared lookup="reference/components/exporter-component-exports.md" source="alloy" version="" >}} ## Component health @@ -100,7 +95,7 @@ Replace the following: - `USERNAME`: The username to use for authentication to the remote_write API. - `PASSWORD`: The password to use for authentication to the remote_write API. -[scrape]: {{< relref "./prometheus.scrape.md" >}} +[scrape]: ../prometheus.scrape/ diff --git a/docs/sources/flow/reference/components/prometheus.exporter.squid.md b/docs/sources/reference/components/prometheus.exporter.squid.md similarity index 81% rename from docs/sources/flow/reference/components/prometheus.exporter.squid.md rename to docs/sources/reference/components/prometheus.exporter.squid.md index 44df648863..ab85cccb78 100644 --- a/docs/sources/flow/reference/components/prometheus.exporter.squid.md +++ b/docs/sources/reference/components/prometheus.exporter.squid.md @@ -1,10 +1,5 @@ --- -aliases: -- /docs/grafana-cloud/agent/flow/reference/components/prometheus.exporter.squid/ -- /docs/grafana-cloud/monitor-infrastructure/agent/flow/reference/components/prometheus.exporter.squid/ -- /docs/grafana-cloud/monitor-infrastructure/integrations/agent/flow/reference/components/prometheus.exporter.squid/ -- /docs/grafana-cloud/send-data/agent/flow/reference/components/prometheus.exporter.squid/ -canonical: https://grafana.com/docs/agent/latest/flow/reference/components/prometheus.exporter.squid/ +canonical: https://grafana.com/docs/alloy/latest/reference/components/prometheus.exporter.squid/ description: Learn about prometheus.exporter.squid title: prometheus.exporter.squid --- @@ -40,7 +35,7 @@ fully through arguments. ## Exported fields -{{< docs/shared lookup="flow/reference/components/exporter-component-exports.md" source="agent" version="" >}} +{{< docs/shared lookup="reference/components/exporter-component-exports.md" source="alloy" version="" >}} ## Component health @@ -92,7 +87,7 @@ Replace the following: - `USERNAME`: The username to use for authentication to the remote_write API. - `PASSWORD`: The password to use for authentication to the remote_write API. 
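A minimal `prometheus.exporter.squid` sketch; the proxy address is an assumption:

```river
prometheus.exporter.squid "example" {
  // Address of the Squid proxy to collect metrics from.
  address = "localhost:3128"
}
```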
-[scrape]: {{< relref "./prometheus.scrape.md" >}} +[scrape]: ../prometheus.scrape/ diff --git a/docs/sources/flow/reference/components/prometheus.exporter.statsd.md b/docs/sources/reference/components/prometheus.exporter.statsd.md similarity index 90% rename from docs/sources/flow/reference/components/prometheus.exporter.statsd.md rename to docs/sources/reference/components/prometheus.exporter.statsd.md index 40eb9e4eda..799ec989c6 100644 --- a/docs/sources/flow/reference/components/prometheus.exporter.statsd.md +++ b/docs/sources/reference/components/prometheus.exporter.statsd.md @@ -1,10 +1,5 @@ --- -aliases: -- /docs/grafana-cloud/agent/flow/reference/components/prometheus.exporter.statsd/ -- /docs/grafana-cloud/monitor-infrastructure/agent/flow/reference/components/prometheus.exporter.statsd/ -- /docs/grafana-cloud/monitor-infrastructure/integrations/agent/flow/reference/components/prometheus.exporter.statsd/ -- /docs/grafana-cloud/send-data/agent/flow/reference/components/prometheus.exporter.statsd/ -canonical: https://grafana.com/docs/agent/latest/flow/reference/components/prometheus.exporter.statsd/ +canonical: https://grafana.com/docs/alloy/latest/reference/components/prometheus.exporter.statsd/ description: Learn about prometheus.exporter.statsd title: prometheus.exporter.statsd --- @@ -59,7 +54,7 @@ fully through arguments. ## Exported fields -{{< docs/shared lookup="flow/reference/components/exporter-component-exports.md" source="agent" version="" >}} +{{< docs/shared lookup="reference/components/exporter-component-exports.md" source="alloy" version="" >}} ## Component health @@ -125,7 +120,7 @@ Replace the following: - `USERNAME`: The username to use for authentication to the remote_write API. - `PASSWORD`: The password to use for authentication to the remote_write API. -[scrape]: {{< relref "./prometheus.scrape.md" >}} +[scrape]: ../prometheus.scrape/ diff --git a/docs/sources/flow/reference/components/prometheus.exporter.unix.md b/docs/sources/reference/components/prometheus.exporter.unix.md similarity index 98% rename from docs/sources/flow/reference/components/prometheus.exporter.unix.md rename to docs/sources/reference/components/prometheus.exporter.unix.md index 46f4f64e9b..1d322aced9 100644 --- a/docs/sources/flow/reference/components/prometheus.exporter.unix.md +++ b/docs/sources/reference/components/prometheus.exporter.unix.md @@ -1,10 +1,5 @@ --- -aliases: -- /docs/grafana-cloud/agent/flow/reference/components/prometheus.exporter.unix/ -- /docs/grafana-cloud/monitor-infrastructure/agent/flow/reference/components/prometheus.exporter.unix/ -- /docs/grafana-cloud/monitor-infrastructure/integrations/agent/flow/reference/components/prometheus.exporter.unix/ -- /docs/grafana-cloud/send-data/agent/flow/reference/components/prometheus.exporter.unix/ -canonical: https://grafana.com/docs/agent/latest/flow/reference/components/prometheus.exporter.unix/ +canonical: https://grafana.com/docs/alloy/latest/reference/components/prometheus.exporter.unix/ description: Learn about prometheus.exporter.unix title: prometheus.exporter.unix --- @@ -262,7 +257,7 @@ An explicit value in the block takes precedence over the environment variable. 
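A sketch of `prometheus.exporter.unix`; enabling the `systemd` collector on top of the defaults is an assumed preference, not part of this change:

```river
prometheus.exporter.unix "example" {
  // Enable an additional collector beyond the default set.
  enable_collectors = ["systemd"]
}
```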
## Exported fields

-{{< docs/shared lookup="flow/reference/components/exporter-component-exports.md" source="agent" version="" >}}
+{{< docs/shared lookup="reference/components/exporter-component-exports.md" source="alloy" version="" >}}

## Component health

@@ -408,7 +403,7 @@ Replace the following:
- `USERNAME`: The username to use for authentication to the remote_write API.
- `PASSWORD`: The password to use for authentication to the remote_write API.

-[scrape]: {{< relref "./prometheus.scrape.md" >}}
+[scrape]: ../prometheus.scrape/
diff --git a/docs/sources/flow/reference/components/prometheus.exporter.vsphere.md b/docs/sources/reference/components/prometheus.exporter.vsphere.md
similarity index 80%
rename from docs/sources/flow/reference/components/prometheus.exporter.vsphere.md
rename to docs/sources/reference/components/prometheus.exporter.vsphere.md
index 558eff9f90..869cc61a75 100644
--- a/docs/sources/flow/reference/components/prometheus.exporter.vsphere.md
+++ b/docs/sources/reference/components/prometheus.exporter.vsphere.md
@@ -1,10 +1,5 @@
---
-aliases:
-- /docs/grafana-cloud/agent/flow/reference/components/prometheus.exporter.vsphere/
-- /docs/grafana-cloud/monitor-infrastructure/agent/flow/reference/components/prometheus.exporter.vsphere/
-- /docs/grafana-cloud/monitor-infrastructure/integrations/agent/flow/reference/components/prometheus.exporter.vsphere/
-- /docs/grafana-cloud/send-data/agent/flow/reference/components/prometheus.exporter.vsphere/
-canonical: https://grafana.com/docs/agent/latest/flow/reference/components/prometheus.exporter.vsphere/
+canonical: https://grafana.com/docs/alloy/latest/reference/components/prometheus.exporter.vsphere/
title: prometheus.exporter.vsphere
description: Learn about prometheus.exporter.vsphere
---
@@ -13,10 +8,11 @@ description: Learn about prometheus.exporter.vsphere

The `prometheus.exporter.vsphere` component embeds [`vmware_exporter`](https://github.com/grafana/vmware_exporter) to collect vSphere metrics

-> **NOTE**: We recommend to use [otelcol.receiver.vcenter][] instead.
-
-[otelcol.receiver.vcenter]: {{< relref "./otelcol.receiver.vcenter.md" >}}
+{{< admonition type="note" >}}
+We recommend using [otelcol.receiver.vcenter][] instead.

+[otelcol.receiver.vcenter]: ../otelcol.receiver.vcenter/
+{{< /admonition >}}

## Usage

@@ -45,7 +41,7 @@ Omitted fields take their default values.
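A minimal `prometheus.exporter.vsphere` sketch (note the admonition above recommends `otelcol.receiver.vcenter` instead); the URL and credentials are placeholders:

```river
prometheus.exporter.vsphere "example" {
  vsphere_url      = "https://127.0.0.1:8989/sdk"
  vsphere_user     = "user"
  vsphere_password = "pass"
}
```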
## Exported fields -{{< docs/shared lookup="flow/reference/components/exporter-component-exports.md" source="agent" version="" >}} +{{< docs/shared lookup="reference/components/exporter-component-exports.md" source="alloy" version="" >}} ## Component health @@ -88,7 +84,7 @@ prometheus.remote_write "default" { } ``` -[scrape]: {{< relref "./prometheus.scrape.md" >}} +[scrape]: ../prometheus.scrape/ diff --git a/docs/sources/flow/reference/components/prometheus.exporter.windows.md b/docs/sources/reference/components/prometheus.exporter.windows.md similarity index 96% rename from docs/sources/flow/reference/components/prometheus.exporter.windows.md rename to docs/sources/reference/components/prometheus.exporter.windows.md index 85c2948256..65c38a2286 100644 --- a/docs/sources/flow/reference/components/prometheus.exporter.windows.md +++ b/docs/sources/reference/components/prometheus.exporter.windows.md @@ -1,10 +1,5 @@ --- -aliases: -- /docs/grafana-cloud/agent/flow/reference/components/prometheus.exporter.windows/ -- /docs/grafana-cloud/monitor-infrastructure/agent/flow/reference/components/prometheus.exporter.windows/ -- /docs/grafana-cloud/monitor-infrastructure/integrations/agent/flow/reference/components/prometheus.exporter.windows/ -- /docs/grafana-cloud/send-data/agent/flow/reference/components/prometheus.exporter.windows/ -canonical: https://grafana.com/docs/agent/latest/flow/reference/components/prometheus.exporter.windows/ +canonical: https://grafana.com/docs/alloy/latest/reference/components/prometheus.exporter.windows/ description: Learn about prometheus.exporter.windows title: prometheus.exporter.windows --- @@ -202,7 +197,7 @@ When `text_file_directory` is set, only files with the extension `.prom` inside ## Exported fields -{{< docs/shared lookup="flow/reference/components/exporter-component-exports.md" source="agent" version="" >}} +{{< docs/shared lookup="reference/components/exporter-component-exports.md" source="alloy" version="" >}} ## Component health @@ -321,7 +316,7 @@ Replace the following: - `USERNAME`: The username to use for authentication to the remote_write API. - `PASSWORD`: The password to use for authentication to the remote_write API. 
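A sketch of `prometheus.exporter.windows`; the collector list shown is assumed to mirror the documented defaults rather than being prescribed by this change:

```river
prometheus.exporter.windows "example" {
  enabled_collectors = ["cpu", "cs", "logical_disk", "net", "os", "service", "system"]
}
```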
-[scrape]: {{< relref "./prometheus.scrape.md" >}} +[scrape]: ../prometheus.scrape/ diff --git a/docs/sources/flow/reference/components/prometheus.operator.podmonitors.md b/docs/sources/reference/components/prometheus.operator.podmonitors.md similarity index 87% rename from docs/sources/flow/reference/components/prometheus.operator.podmonitors.md rename to docs/sources/reference/components/prometheus.operator.podmonitors.md index 34d73ae784..695a58f004 100644 --- a/docs/sources/flow/reference/components/prometheus.operator.podmonitors.md +++ b/docs/sources/reference/components/prometheus.operator.podmonitors.md @@ -1,10 +1,5 @@ --- -aliases: -- /docs/grafana-cloud/agent/flow/reference/components/prometheus.operator.podmonitors/ -- /docs/grafana-cloud/monitor-infrastructure/agent/flow/reference/components/prometheus.operator.podmonitors/ -- /docs/grafana-cloud/monitor-infrastructure/integrations/agent/flow/reference/components/prometheus.operator.podmonitors/ -- /docs/grafana-cloud/send-data/agent/flow/reference/components/prometheus.operator.podmonitors/ -canonical: https://grafana.com/docs/agent/latest/flow/reference/components/prometheus.operator.podmonitors/ +canonical: https://grafana.com/docs/alloy/latest/reference/components/prometheus.operator.podmonitors/ description: Learn about prometheus.operator.podmonitors labels: stage: beta @@ -13,7 +8,7 @@ title: prometheus.operator.podmonitors # prometheus.operator.podmonitors -{{< docs/shared lookup="flow/stability/beta.md" source="agent" version="" >}} +{{< docs/shared lookup="stability/beta.md" source="alloy" version="" >}} `prometheus.operator.podmonitors` discovers [PodMonitor](https://prometheus-operator.dev/docs/operator/api/#monitoring.coreos.com/v1.PodMonitor) resources in your kubernetes cluster and scrapes the targets they reference. This component performs three main functions: @@ -103,31 +98,31 @@ Name | Type | Description - [`authorization` block][authorization]. - [`oauth2` block][oauth2]. 
-{{< docs/shared lookup="flow/reference/components/http-client-proxy-config-description.md" source="agent" version="" >}} +{{< docs/shared lookup="reference/components/http-client-proxy-config-description.md" source="alloy" version="" >}} ### basic_auth block -{{< docs/shared lookup="flow/reference/components/basic-auth-block.md" source="agent" version="" >}} +{{< docs/shared lookup="reference/components/basic-auth-block.md" source="alloy" version="" >}} ### authorization block -{{< docs/shared lookup="flow/reference/components/authorization-block.md" source="agent" version="" >}} +{{< docs/shared lookup="reference/components/authorization-block.md" source="alloy" version="" >}} ### oauth2 block -{{< docs/shared lookup="flow/reference/components/oauth2-block.md" source="agent" version="" >}} +{{< docs/shared lookup="reference/components/oauth2-block.md" source="alloy" version="" >}} ### tls_config block -{{< docs/shared lookup="flow/reference/components/tls-config-block.md" source="agent" version="" >}} +{{< docs/shared lookup="reference/components/tls-config-block.md" source="alloy" version="" >}} ### rule block -{{< docs/shared lookup="flow/reference/components/rule-block.md" source="agent" version="" >}} +{{< docs/shared lookup="reference/components/rule-block.md" source="alloy" version="" >}} ### scrape block -{{< docs/shared lookup="flow/reference/components/prom-operator-scrape.md" source="agent" version="" >}} +{{< docs/shared lookup="reference/components/prom-operator-scrape.md" source="alloy" version="" >}} ### selector block @@ -191,7 +186,7 @@ fully consistent like hashmod sharding is). If {{< param "PRODUCT_ROOT_NAME" >}} is _not_ running in clustered mode, then the block is a no-op, and `prometheus.operator.podmonitors` scrapes every target it receives in its arguments. 
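As a sketch of the clustering behavior described above, a `prometheus.operator.podmonitors` component can opt in to clustered target distribution like this (the `forward_to` receiver is a placeholder):

```river
prometheus.operator.podmonitors "pods" {
  forward_to = [prometheus.remote_write.default.receiver]

  // Distribute discovered targets across cluster peers instead of
  // scraping every target from every instance.
  clustering {
    enabled = true
  }
}
```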
-[using clustering]: {{< relref "../../concepts/clustering.md" >}} +[using clustering]: ../../../concepts/clustering/ ## Exported fields diff --git a/docs/sources/flow/reference/components/prometheus.operator.probes.md b/docs/sources/reference/components/prometheus.operator.probes.md similarity index 87% rename from docs/sources/flow/reference/components/prometheus.operator.probes.md rename to docs/sources/reference/components/prometheus.operator.probes.md index b51f0eef0b..7347d18379 100644 --- a/docs/sources/flow/reference/components/prometheus.operator.probes.md +++ b/docs/sources/reference/components/prometheus.operator.probes.md @@ -1,10 +1,5 @@ --- -aliases: -- /docs/grafana-cloud/agent/flow/reference/components/prometheus.operator.probes/ -- /docs/grafana-cloud/monitor-infrastructure/agent/flow/reference/components/prometheus.operator.probes/ -- /docs/grafana-cloud/monitor-infrastructure/integrations/agent/flow/reference/components/prometheus.operator.probes/ -- /docs/grafana-cloud/send-data/agent/flow/reference/components/prometheus.operator.probes/ -canonical: https://grafana.com/docs/agent/latest/flow/reference/components/prometheus.operator.probes/ +canonical: https://grafana.com/docs/alloy/latest/reference/components/prometheus.operator.probes/ description: Learn about prometheus.operator.probes labels: stage: beta @@ -13,7 +8,7 @@ title: prometheus.operator.probes # prometheus.operator.probes -{{< docs/shared lookup="flow/stability/beta.md" source="agent" version="" >}} +{{< docs/shared lookup="stability/beta.md" source="alloy" version="" >}} `prometheus.operator.probes` discovers [Probe](https://prometheus-operator.dev/docs/operator/api/#monitoring.coreos.com/v1.Probe) resources in your Kubernetes cluster and scrapes the targets they reference. This component performs three main functions: @@ -105,31 +100,31 @@ Name | Type | Description - [`authorization` block][authorization]. - [`oauth2` block][oauth2]. 
-{{< docs/shared lookup="flow/reference/components/http-client-proxy-config-description.md" source="agent" version="" >}} +{{< docs/shared lookup="reference/components/http-client-proxy-config-description.md" source="alloy" version="" >}} ### basic_auth block -{{< docs/shared lookup="flow/reference/components/basic-auth-block.md" source="agent" version="" >}} +{{< docs/shared lookup="reference/components/basic-auth-block.md" source="alloy" version="" >}} ### authorization block -{{< docs/shared lookup="flow/reference/components/authorization-block.md" source="agent" version="" >}} +{{< docs/shared lookup="reference/components/authorization-block.md" source="alloy" version="" >}} ### oauth2 block -{{< docs/shared lookup="flow/reference/components/oauth2-block.md" source="agent" version="" >}} +{{< docs/shared lookup="reference/components/oauth2-block.md" source="alloy" version="" >}} ### tls_config block -{{< docs/shared lookup="flow/reference/components/tls-config-block.md" source="agent" version="" >}} +{{< docs/shared lookup="reference/components/tls-config-block.md" source="alloy" version="" >}} ### rule block -{{< docs/shared lookup="flow/reference/components/rule-block.md" source="agent" version="" >}} +{{< docs/shared lookup="reference/components/rule-block.md" source="alloy" version="" >}} ### scrape block -{{< docs/shared lookup="flow/reference/components/prom-operator-scrape.md" source="agent" version="" >}} +{{< docs/shared lookup="reference/components/prom-operator-scrape.md" source="alloy" version="" >}} ### selector block @@ -193,7 +188,7 @@ fully consistent like hashmod sharding is). If {{< param "PRODUCT_NAME" >}} is _not_ running in clustered mode, then the block is a no-op, and `prometheus.operator.probes` scrapes every target it receives in its arguments. 
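The `selector` block mentioned above can narrow which Probe resources are discovered. A hedged sketch, assuming the `match_labels` argument from the selector block and a placeholder receiver:

```river
prometheus.operator.probes "probes" {
  forward_to = [prometheus.remote_write.default.receiver]

  // Only discover Probe resources that carry this label.
  selector {
    match_labels = {
      team = "ops"
    }
  }
}
```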
-[clustered mode]: {{< relref "../cli/run.md#clustering-beta" >}} +[clustered mode]: ../../cli/run/#clustering-beta ## Exported fields diff --git a/docs/sources/flow/reference/components/prometheus.operator.servicemonitors.md b/docs/sources/reference/components/prometheus.operator.servicemonitors.md similarity index 87% rename from docs/sources/flow/reference/components/prometheus.operator.servicemonitors.md rename to docs/sources/reference/components/prometheus.operator.servicemonitors.md index b3e89eee32..c62a7906d9 100644 --- a/docs/sources/flow/reference/components/prometheus.operator.servicemonitors.md +++ b/docs/sources/reference/components/prometheus.operator.servicemonitors.md @@ -1,10 +1,5 @@ --- -aliases: -- /docs/grafana-cloud/agent/flow/reference/components/prometheus.operator.servicemonitors/ -- /docs/grafana-cloud/monitor-infrastructure/agent/flow/reference/components/prometheus.operator.servicemonitors/ -- /docs/grafana-cloud/monitor-infrastructure/integrations/agent/flow/reference/components/prometheus.operator.servicemonitors/ -- /docs/grafana-cloud/send-data/agent/flow/reference/components/prometheus.operator.servicemonitors/ -canonical: https://grafana.com/docs/agent/latest/flow/reference/components/prometheus.operator.servicemonitors/ +canonical: https://grafana.com/docs/alloy/latest/reference/components/prometheus.operator.servicemonitors/ description: Learn about prometheus.operator.servicemonitors labels: stage: beta @@ -13,7 +8,7 @@ title: prometheus.operator.servicemonitors # prometheus.operator.servicemonitors -{{< docs/shared lookup="flow/stability/beta.md" source="agent" version="" >}} +{{< docs/shared lookup="stability/beta.md" source="alloy" version="" >}} `prometheus.operator.servicemonitors` discovers [ServiceMonitor](https://prometheus-operator.dev/docs/operator/api/#monitoring.coreos.com/v1.ServiceMonitor) resources in your kubernetes cluster and scrapes the targets they reference. This component performs three main functions: @@ -104,31 +99,31 @@ Name | Type | Description - [`authorization` block][authorization]. - [`oauth2` block][oauth2]. 
-{{< docs/shared lookup="flow/reference/components/http-client-proxy-config-description.md" source="agent" version="" >}} +{{< docs/shared lookup="reference/components/http-client-proxy-config-description.md" source="alloy" version="" >}} ### basic_auth block -{{< docs/shared lookup="flow/reference/components/basic-auth-block.md" source="agent" version="" >}} +{{< docs/shared lookup="reference/components/basic-auth-block.md" source="alloy" version="" >}} ### authorization block -{{< docs/shared lookup="flow/reference/components/authorization-block.md" source="agent" version="" >}} +{{< docs/shared lookup="reference/components/authorization-block.md" source="alloy" version="" >}} ### oauth2 block -{{< docs/shared lookup="flow/reference/components/oauth2-block.md" source="agent" version="" >}} +{{< docs/shared lookup="reference/components/oauth2-block.md" source="alloy" version="" >}} ### tls_config block -{{< docs/shared lookup="flow/reference/components/tls-config-block.md" source="agent" version="" >}} +{{< docs/shared lookup="reference/components/tls-config-block.md" source="alloy" version="" >}} ### rule block -{{< docs/shared lookup="flow/reference/components/rule-block.md" source="agent" version="" >}} +{{< docs/shared lookup="reference/components/rule-block.md" source="alloy" version="" >}} ### scrape block -{{< docs/shared lookup="flow/reference/components/prom-operator-scrape.md" source="agent" version="" >}} +{{< docs/shared lookup="reference/components/prom-operator-scrape.md" source="alloy" version="" >}} ### selector block @@ -192,7 +187,7 @@ fully consistent like hashmod sharding is). If {{< param "PRODUCT_NAME" >}} is _not_ running in clustered mode, then the block is a no-op, and `prometheus.operator.servicemonitors` scrapes every target it receives in its arguments. -[using clustering]: {{< relref "../../concepts/clustering.md" >}} +[using clustering]: ../../../concepts/clustering/ ## Exported fields diff --git a/docs/sources/flow/reference/components/prometheus.receive_http.md b/docs/sources/reference/components/prometheus.receive_http.md similarity index 87% rename from docs/sources/flow/reference/components/prometheus.receive_http.md rename to docs/sources/reference/components/prometheus.receive_http.md index dd78e88ad1..3fa7e1905a 100644 --- a/docs/sources/flow/reference/components/prometheus.receive_http.md +++ b/docs/sources/reference/components/prometheus.receive_http.md @@ -1,10 +1,5 @@ --- -aliases: -- /docs/grafana-cloud/agent/flow/reference/components/prometheus.receive_http/ -- /docs/grafana-cloud/monitor-infrastructure/agent/flow/reference/components/prometheus.receive_http/ -- /docs/grafana-cloud/monitor-infrastructure/integrations/agent/flow/reference/components/prometheus.receive_http/ -- /docs/grafana-cloud/send-data/agent/flow/reference/components/prometheus.receive_http/ -canonical: https://grafana.com/docs/agent/latest/flow/reference/components/prometheus.receive_http/ +canonical: https://grafana.com/docs/alloy/latest/reference/components/prometheus.receive_http/ description: Learn about prometheus.receive_http title: prometheus.receive_http --- @@ -13,9 +8,9 @@ title: prometheus.receive_http `prometheus.receive_http` listens for HTTP requests containing Prometheus metric samples and forwards them to other components capable of receiving metrics. -The HTTP API exposed is compatible with [Prometheus `remote_write` API][prometheus-remote-write-docs]. 
This means that other [`prometheus.remote_write`][prometheus.remote_write] components can be used as a client and send requests to `prometheus.receive_http` which enables using {{< param "PRODUCT_ROOT_NAME" >}} as a proxy for prometheus metrics.
+The HTTP API exposed is compatible with the [Prometheus `remote_write` API][prometheus-remote-write-docs]. This means that other [`prometheus.remote_write`][prometheus.remote_write] components can be used as clients to send requests to `prometheus.receive_http`, which enables using {{< param "PRODUCT_ROOT_NAME" >}} as a proxy for Prometheus metrics.

-[prometheus.remote_write]: {{< relref "./prometheus.remote_write.md" >}}
+[prometheus.remote_write]: ../prometheus.remote_write/
[prometheus-remote-write-docs]: https://prometheus.io/docs/prometheus/2.45/querying/api/#remote-write-receiver

## Usage

@@ -54,7 +49,7 @@ Hierarchy | Name | Description | Requ

### http

-{{< docs/shared lookup="flow/reference/components/loki-server-http.md" source="agent" version="" >}}
+{{< docs/shared lookup="reference/components/loki-server-http.md" source="alloy" version="" >}}

## Exported fields

@@ -86,7 +81,7 @@ This example creates a `prometheus.receive_http` component which starts an HTTP
prometheus.receive_http "api" {
  http {
    listen_address = "0.0.0.0"
-    listen_port = 9999
+    listen_port    = 9999
  }
  forward_to = [prometheus.remote_write.local.receiver]
}
@@ -95,7 +90,7 @@ prometheus.receive_http "api" {
prometheus.remote_write "local" {
  endpoint {
    url = "http://mimir:9009/api/v1/push"
-
+
    basic_auth {
      username = "example-user"
      password = "example-password"
diff --git a/docs/sources/flow/reference/components/prometheus.relabel.md b/docs/sources/reference/components/prometheus.relabel.md
similarity index 92%
rename from docs/sources/flow/reference/components/prometheus.relabel.md
rename to docs/sources/reference/components/prometheus.relabel.md
index 6ff90a88f0..9b471a50e2 100644
--- a/docs/sources/flow/reference/components/prometheus.relabel.md
+++ b/docs/sources/reference/components/prometheus.relabel.md
@@ -1,10 +1,5 @@
---
-aliases:
-- /docs/grafana-cloud/agent/flow/reference/components/prometheus.relabel/
-- /docs/grafana-cloud/monitor-infrastructure/agent/flow/reference/components/prometheus.relabel/
-- /docs/grafana-cloud/monitor-infrastructure/integrations/agent/flow/reference/components/prometheus.relabel/
-- /docs/grafana-cloud/send-data/agent/flow/reference/components/prometheus.relabel/
-canonical: https://grafana.com/docs/agent/latest/flow/reference/components/prometheus.relabel/
+canonical: https://grafana.com/docs/alloy/latest/reference/components/prometheus.relabel/
description: Learn about prometheus.relabel
title: prometheus.relabel
---
@@ -70,7 +65,7 @@ rule | [rule][] | Relabeling rules to apply to received metrics. | no
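To complement the proxy example above, the client side could be another instance whose `prometheus.remote_write` points at the `prometheus.receive_http` server. This is a sketch; the hostname is a placeholder, and the `/api/v1/metrics/write` path is assumed to be the receiver's remote-write endpoint:

```river
// Runs on a second instance and forwards its metrics to the
// prometheus.receive_http "api" component listening on port 9999.
prometheus.remote_write "to_proxy" {
  endpoint {
    url = "http://proxy-host:9999/api/v1/metrics/write"
  }
}
```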
| no ### rule block -{{< docs/shared lookup="flow/reference/components/rule-block.md" source="agent" version="" >}} +{{< docs/shared lookup="reference/components/rule-block.md" source="alloy" version="" >}} ## Exported fields diff --git a/docs/sources/flow/reference/components/prometheus.remote_write.md b/docs/sources/reference/components/prometheus.remote_write.md similarity index 71% rename from docs/sources/flow/reference/components/prometheus.remote_write.md rename to docs/sources/reference/components/prometheus.remote_write.md index 12882a498e..480e6b7120 100644 --- a/docs/sources/flow/reference/components/prometheus.remote_write.md +++ b/docs/sources/reference/components/prometheus.remote_write.md @@ -1,10 +1,5 @@ --- -aliases: -- /docs/grafana-cloud/agent/flow/reference/components/prometheus.remote_write/ -- /docs/grafana-cloud/monitor-infrastructure/agent/flow/reference/components/prometheus.remote_write/ -- /docs/grafana-cloud/monitor-infrastructure/integrations/agent/flow/reference/components/prometheus.remote_write/ -- /docs/grafana-cloud/send-data/agent/flow/reference/components/prometheus.remote_write/ -canonical: https://grafana.com/docs/agent/latest/flow/reference/components/prometheus.remote_write/ +canonical: https://grafana.com/docs/alloy/latest/reference/components/prometheus.remote_write/ description: Learn about prometheus.remote_write title: prometheus.remote_write --- @@ -128,35 +123,35 @@ sent to `prometheus.remote_write` are forwarded to the configured endpoint. If the endpoint doesn't support receiving native histogram samples, pushing metrics fails. -{{< docs/shared lookup="flow/reference/components/http-client-proxy-config-description.md" source="agent" version="" >}} +{{< docs/shared lookup="reference/components/http-client-proxy-config-description.md" source="alloy" version="" >}} ### basic_auth block -{{< docs/shared lookup="flow/reference/components/basic-auth-block.md" source="agent" version="" >}} +{{< docs/shared lookup="reference/components/basic-auth-block.md" source="alloy" version="" >}} ### authorization block -{{< docs/shared lookup="flow/reference/components/authorization-block.md" source="agent" version="" >}} +{{< docs/shared lookup="reference/components/authorization-block.md" source="alloy" version="" >}} ### oauth2 block -{{< docs/shared lookup="flow/reference/components/oauth2-block.md" source="agent" version="" >}} +{{< docs/shared lookup="reference/components/oauth2-block.md" source="alloy" version="" >}} ### sigv4 block -{{< docs/shared lookup="flow/reference/components/sigv4-block.md" source="agent" version="" >}} +{{< docs/shared lookup="reference/components/sigv4-block.md" source="alloy" version="" >}} ### azuread block -{{< docs/shared lookup="flow/reference/components/azuread-block.md" source="agent" version="" >}} +{{< docs/shared lookup="reference/components/azuread-block.md" source="alloy" version="" >}} ### managed_identity block -{{< docs/shared lookup="flow/reference/components/managed_identity-block.md" source="agent" version="" >}} +{{< docs/shared lookup="reference/components/managed_identity-block.md" source="alloy" version="" >}} ### tls_config block -{{< docs/shared lookup="flow/reference/components/tls-config-block.md" source="agent" version="" >}} +{{< docs/shared lookup="reference/components/tls-config-block.md" source="alloy" version="" >}} ### queue_config block @@ -211,7 +206,7 @@ Name | Type | Description | Default | Required ### write_relabel_config block -{{< docs/shared 
lookup="flow/reference/components/write_relabel_config.md" source="agent" version="" >}} +{{< docs/shared lookup="reference/components/write_relabel_config.md" source="alloy" version="" >}} ### wal block @@ -244,7 +239,7 @@ of data in the WAL; samples aren't removed until they are at least as old as `min_keepalive_time`, and samples are forcibly removed if they are older than `max_keepalive_time`. -[run]: {{< relref "../cli/run.md" >}} +[run]: {../../cli/run/ ## Exported fields @@ -375,7 +370,8 @@ prometheus.scrape "demo" { ### Send metrics to a Mimir instance with a tenant specified -You can create a `prometheus.remote_write` component that sends your metrics to a specific tenant within the Mimir instance. This is useful when your Mimir instance is using more than one tenant: +You can create a `prometheus.remote_write` component that sends your metrics to a specific tenant within the Mimir instance. +This is useful when your Mimir instance is using more than one tenant: ```river prometheus.remote_write "staging" { @@ -392,7 +388,8 @@ prometheus.remote_write "staging" { ### Send metrics to a managed service -You can create a `prometheus.remote_write` component that sends your metrics to a managed service, for example, Grafana Cloud. The Prometheus username and the Grafana Cloud API Key are injected in this example through environment variables. +You can create a `prometheus.remote_write` component that sends your metrics to a managed service, for example, Grafana Cloud. +The Prometheus username and the Grafana Cloud API Key are injected in this example through environment variables. ```river prometheus.remote_write "default" { @@ -407,13 +404,110 @@ prometheus.remote_write "default" { ``` ## Technical details -`prometheus.remote_write` uses [snappy](https://en.wikipedia.org/wiki/Snappy_(compression)) for compression. +`prometheus.remote_write` uses [snappy][] for compression. Any labels that start with `__` will be removed before sending to the endpoint. ## Data retention -{{< docs/shared source="agent" lookup="/wal-data-retention.md" version="" >}} +The `prometheus.remote_write` component uses a Write Ahead Log (WAL) to prevent +data loss during network outages. The component buffers the received metrics in +a WAL for each configured endpoint. The queue shards can use the WAL after the +network outage is resolved and flush the buffered metrics to the endpoints. + +The WAL records metrics in 128 MB files called segments. To avoid having a WAL +that grows on-disk indefinitely, the component _truncates_ its segments on a +set interval. + +On each truncation, the WAL deletes references to series that are no longer +present and also _checkpoints_ roughly the oldest two thirds of the segments +(rounded down to the nearest integer) written to it since the last truncation +period. A checkpoint means that the WAL only keeps track of the unique +identifier for each existing metrics series, and can no longer use the samples +for remote writing. If that data has not yet been pushed to the remote +endpoint, it is lost. + +This behavior dictates the data retention for the `prometheus.remote_write` +component. It also means that it's impossible to directly correlate data +retention directly to the data age itself, as the truncation logic works on +_segments_, not the samples themselves. This makes data retention less +predictable when the component receives a non-consistent rate of data. 
+
+The [WAL block][] in Flow mode, or the [metrics config][] in Static mode,
+contains some configurable parameters that can be used to control the tradeoff
+between memory usage, disk usage, and data retention.
+
+The `truncate_frequency` or `wal_truncate_frequency` parameter configures the
+interval at which truncations happen. A lower value leads to reduced memory
+usage, but also provides less resiliency to long outages.
+
+When a WAL clean-up starts, the most recently successfully sent timestamp is
+used to determine how much data is safe to remove from the WAL.
+The `min_keepalive_time` or `min_wal_time` controls the minimum age of samples
+considered for removal. No samples more recent than `min_keepalive_time` are
+removed. The `max_keepalive_time` or `max_wal_time` controls the maximum age of
+samples that can be kept in the WAL. Samples older than
+`max_keepalive_time` are forcibly removed.
+
+### Extended `remote_write` outages
+
+When the remote write endpoint is unreachable over a period of time, the most
+recent successfully sent timestamp is not updated. The
+`min_keepalive_time` and `max_keepalive_time` arguments control the age range
+of data kept in the WAL.
+
+If the remote write outage is longer than the `max_keepalive_time` parameter,
+then the WAL is truncated, and the oldest data is lost.
+
+### Intermittent `remote_write` outages
+
+If the remote write endpoint is intermittently reachable, the most recent
+successfully sent timestamp is updated whenever the connection is successful.
+A successful connection updates the timestamp used in the `min_keepalive_time`
+comparison and triggers a truncation on the next `truncate_frequency`
+interval, which checkpoints two thirds of the segments (rounded down to the
+nearest integer) written since the previous truncation.
+
+### Falling behind
+
+If the queue shards cannot flush data quickly enough to keep
+up-to-date with the most recent data buffered in the WAL, we say that the
+component is 'falling behind'.
+It's not unusual for the component to temporarily fall behind 2 or 3 scrape intervals.
+If the component falls behind more than one third of the data written since the
+last truncate interval, it is possible for the truncate loop to checkpoint data
+before it is pushed to the remote_write endpoint.
+
+### WAL corruption
+
+WAL corruption can occur when Grafana Agent unexpectedly stops while the latest WAL segments
+are still being written to disk. For example, the host computer has a general disk failure
+and crashes before you can stop Grafana Agent and other running services. When you restart Grafana
+Agent, it verifies the WAL, removing any corrupt segments it finds. Sometimes, this repair
+is unsuccessful, and you must manually delete the corrupted WAL to continue.
+
+If the WAL becomes corrupted, Grafana Agent writes error messages such as
+`err="failed to find segment for index"` to the log file.
+
+{{< admonition type="note" >}}
+Deleting a WAL segment or a WAL file permanently deletes the stored WAL data.
+{{< /admonition >}}
+
+To delete the corrupted WAL:
+
+1. [Stop][] Grafana Agent.
+1. Find and delete the contents of the `wal` directory.
+
+   By default the `wal` directory is a subdirectory
+   of the `data-agent` directory located in the Grafana Agent working directory. The WAL data directory
+   may be different from the default depending on the [wal_directory][] setting in your Static configuration
+   file or the path specified by the Flow [command line flag][run] `--storage-path`.
+ + {{< admonition type="note" >}} + There is one `wal` directory per: + + * Metrics instance running in Static mode + * `prometheus.remote_write` component running in Flow mode + {{< /admonition >}} + +1. [Start][Stop] Grafana Agent and verify that the WAL is working correctly. @@ -429,3 +523,10 @@ Refer to the linked documentation for more details. {{< /admonition >}} + +[snappy]: https://en.wikipedia.org/wiki/Snappy_(compression) +[WAL block]: /docs/agent//flow/reference/components/prometheus.remote_write#wal-block +[metrics config]: /docs/agent//static/configuration/metrics-config +[Stop]: /docs/agent//flow/get-started/start-agent +[wal_directory]: /docs/agent//static/configuration/metrics-config +[run]: /docs/agent//flow/reference/cli/run diff --git a/docs/sources/flow/reference/components/prometheus.scrape.md b/docs/sources/reference/components/prometheus.scrape.md similarity index 63% rename from docs/sources/flow/reference/components/prometheus.scrape.md rename to docs/sources/reference/components/prometheus.scrape.md index e329bfe4e5..864c7e7d1e 100644 --- a/docs/sources/flow/reference/components/prometheus.scrape.md +++ b/docs/sources/reference/components/prometheus.scrape.md @@ -1,10 +1,5 @@ --- -aliases: -- /docs/grafana-cloud/agent/flow/reference/components/prometheus.scrape/ -- /docs/grafana-cloud/monitor-infrastructure/agent/flow/reference/components/prometheus.scrape/ -- /docs/grafana-cloud/monitor-infrastructure/integrations/agent/flow/reference/components/prometheus.scrape/ -- /docs/grafana-cloud/send-data/agent/flow/reference/components/prometheus.scrape/ -canonical: https://grafana.com/docs/agent/latest/flow/reference/components/prometheus.scrape/ +canonical: https://grafana.com/docs/alloy/latest/reference/components/prometheus.scrape/ description: Learn about prometheus.scrape title: prometheus.scrape --- @@ -42,36 +37,36 @@ time), the component reports an error. The following arguments are supported: -Name | Type | Description | Default | Required ----- | ---- | ----------- | ------- | -------- -`targets` | `list(map(string))` | List of targets to scrape. | | yes -`forward_to` | `list(MetricsReceiver)` | List of receivers to send scraped metrics to. | | yes -`job_name` | `string` | The value to use for the job label if not already set. | component name | no -`extra_metrics` | `bool` | Whether extra metrics should be generated for scrape targets. | `false` | no -`enable_protobuf_negotiation` | `bool` | Whether to enable protobuf negotiation with the client. | `false` | no -`honor_labels` | `bool` | Indicator whether the scraped metrics should remain unmodified. | `false` | no -`honor_timestamps` | `bool` | Indicator whether the scraped timestamps should be respected. | `true` | no -`track_timestamps_staleness` | `bool` | Indicator whether to track the staleness of the scraped timestamps. | `false` | no -`params` | `map(list(string))` | A set of query parameters with which the target is scraped. | | no -`scrape_classic_histograms` | `bool` | Whether to scrape a classic histogram that is also exposed as a native histogram. | `false` | no -`scrape_interval` | `duration` | How frequently to scrape the targets of this scrape configuration. | `"60s"` | no -`scrape_timeout` | `duration` | The timeout for scraping targets of this configuration. | `"10s"` | no -`metrics_path` | `string` | The HTTP resource path on which to fetch metrics from targets. | `/metrics` | no -`scheme` | `string` | The URL scheme with which to fetch metrics from targets. 
| | no -`body_size_limit` | `int` | An uncompressed response body larger than this many bytes causes the scrape to fail. 0 means no limit. | | no -`sample_limit` | `uint` | More than this many samples post metric-relabeling causes the scrape to fail | | no -`target_limit` | `uint` | More than this many targets after the target relabeling causes the scrapes to fail. | | no -`label_limit` | `uint` | More than this many labels post metric-relabeling causes the scrape to fail. | | no -`label_name_length_limit` | `uint` | More than this label name length post metric-relabeling causes the scrape to fail. | | no -`label_value_length_limit` | `uint` | More than this label value length post metric-relabeling causes the scrape to fail. | | no -`bearer_token_file` | `string` | File containing a bearer token to authenticate with. | | no -`bearer_token` | `secret` | Bearer token to authenticate with. | | no -`enable_http2` | `bool` | Whether HTTP2 is supported for requests. | `true` | no -`follow_redirects` | `bool` | Whether redirects returned by the server should be followed. | `true` | no -`proxy_url` | `string` | HTTP proxy to send requests through. | | no -`no_proxy` | `string` | Comma-separated list of IP addresses, CIDR notations, and domain names to exclude from proxying. | | no -`proxy_from_environment` | `bool` | Use the proxy URL indicated by environment variables. | `false` | no -`proxy_connect_header` | `map(list(secret))` | Specifies headers to send to proxies during CONNECT requests. | | no +Name | Type | Description | Default | Required +------------------------------|-------------------------|--------------------------------------------------------------------------------------------------------|----------------|--------- +`targets` | `list(map(string))` | List of targets to scrape. | | yes +`forward_to` | `list(MetricsReceiver)` | List of receivers to send scraped metrics to. | | yes +`job_name` | `string` | The value to use for the job label if not already set. | component name | no +`extra_metrics` | `bool` | Whether extra metrics should be generated for scrape targets. | `false` | no +`enable_protobuf_negotiation` | `bool` | Whether to enable protobuf negotiation with the client. | `false` | no +`honor_labels` | `bool` | Indicator whether the scraped metrics should remain unmodified. | `false` | no +`honor_timestamps` | `bool` | Indicator whether the scraped timestamps should be respected. | `true` | no +`track_timestamps_staleness` | `bool` | Indicator whether to track the staleness of the scraped timestamps. | `false` | no +`params` | `map(list(string))` | A set of query parameters with which the target is scraped. | | no +`scrape_classic_histograms` | `bool` | Whether to scrape a classic histogram that is also exposed as a native histogram. | `false` | no +`scrape_interval` | `duration` | How frequently to scrape the targets of this scrape configuration. | `"60s"` | no +`scrape_timeout` | `duration` | The timeout for scraping targets of this configuration. | `"10s"` | no +`metrics_path` | `string` | The HTTP resource path on which to fetch metrics from targets. | `/metrics` | no +`scheme` | `string` | The URL scheme with which to fetch metrics from targets. | | no +`body_size_limit` | `int` | An uncompressed response body larger than this many bytes causes the scrape to fail. 0 means no limit. 
| | no +`sample_limit` | `uint` | More than this many samples post metric-relabeling causes the scrape to fail | | no +`target_limit` | `uint` | More than this many targets after the target relabeling causes the scrapes to fail. | | no +`label_limit` | `uint` | More than this many labels post metric-relabeling causes the scrape to fail. | | no +`label_name_length_limit` | `uint` | More than this label name length post metric-relabeling causes the scrape to fail. | | no +`label_value_length_limit` | `uint` | More than this label value length post metric-relabeling causes the scrape to fail. | | no +`bearer_token_file` | `string` | File containing a bearer token to authenticate with. | | no +`bearer_token` | `secret` | Bearer token to authenticate with. | | no +`enable_http2` | `bool` | Whether HTTP2 is supported for requests. | `true` | no +`follow_redirects` | `bool` | Whether redirects returned by the server should be followed. | `true` | no +`proxy_url` | `string` | HTTP proxy to send requests through. | | no +`no_proxy` | `string` | Comma-separated list of IP addresses, CIDR notations, and domain names to exclude from proxying. | | no +`proxy_from_environment` | `bool` | Use the proxy URL indicated by environment variables. | `false` | no +`proxy_connect_header` | `map(list(secret))` | Specifies headers to send to proxies during CONNECT requests. | | no At most, one of the following can be provided: - [`bearer_token` argument](#arguments). @@ -80,7 +75,7 @@ Name | Type | Description | Default | Required - [`authorization` block][authorization]. - [`oauth2` block][oauth2]. -{{< docs/shared lookup="flow/reference/components/http-client-proxy-config-description.md" source="agent" version="" >}} +{{< docs/shared lookup="reference/components/http-client-proxy-config-description.md" source="alloy" version="" >}} `track_timestamps_staleness` controls whether Prometheus tracks [staleness][prom-staleness] of metrics which with an explicit timestamp present in scraped data. * An "explicit timestamp" is an optional timestamp in the [Prometheus metrics exposition format][prom-text-exposition-format]. For example, this sample has a timestamp of `1395066363000`: @@ -100,14 +95,14 @@ Name | Type | Description | Default | Required The following blocks are supported inside the definition of `prometheus.scrape`: -Hierarchy | Block | Description | Required ---------- | ----- | ----------- | -------- -basic_auth | [basic_auth][] | Configure basic_auth for authenticating to targets. | no -authorization | [authorization][] | Configure generic authorization to targets. | no -oauth2 | [oauth2][] | Configure OAuth2 for authenticating to targets. | no -oauth2 > tls_config | [tls_config][] | Configure TLS settings for connecting to targets via OAuth2. | no -tls_config | [tls_config][] | Configure TLS settings for connecting to targets. | no -clustering | [clustering][] | Configure the component for when the Agent is running in clustered mode. | no +Hierarchy | Block | Description | Required +--------------------|-------------------|--------------------------------------------------------------------------|--------- +basic_auth | [basic_auth][] | Configure basic_auth for authenticating to targets. | no +authorization | [authorization][] | Configure generic authorization to targets. | no +oauth2 | [oauth2][] | Configure OAuth2 for authenticating to targets. | no +oauth2 > tls_config | [tls_config][] | Configure TLS settings for connecting to targets via OAuth2. 
| no +tls_config | [tls_config][] | Configure TLS settings for connecting to targets. | no +clustering | [clustering][] | Configure the component for when the Agent is running in clustered mode. | no The `>` symbol indicates deeper levels of nesting. For example, `oauth2 > tls_config` refers to a `tls_config` block defined inside @@ -122,19 +117,19 @@ an `oauth2` block. ### basic_auth block -{{< docs/shared lookup="flow/reference/components/basic-auth-block.md" source="agent" version="" >}} +{{< docs/shared lookup="reference/components/basic-auth-block.md" source="alloy" version="" >}} ### authorization block -{{< docs/shared lookup="flow/reference/components/authorization-block.md" source="agent" version="" >}} +{{< docs/shared lookup="reference/components/authorization-block.md" source="alloy" version="" >}} ### oauth2 block -{{< docs/shared lookup="flow/reference/components/oauth2-block.md" source="agent" version="" >}} +{{< docs/shared lookup="reference/components/oauth2-block.md" source="alloy" version="" >}} ### tls_config block -{{< docs/shared lookup="flow/reference/components/tls-config-block.md" source="agent" version="" >}} +{{< docs/shared lookup="reference/components/tls-config-block.md" source="alloy" version="" >}} ### clustering (beta) @@ -165,7 +160,7 @@ fully consistent like hashmod sharding is). If {{< param "PRODUCT_NAME" >}} is _not_ running in clustered mode, then the block is a no-op and `prometheus.scrape` scrapes every target it receives in its arguments. -[using clustering]: {{< relref "../../concepts/clustering.md" >}} +[using clustering]: ../../../concepts/clustering/ ## Exported fields @@ -259,8 +254,8 @@ To enable scraping of Prometheus' native histograms over gRPC, the scrape the 'classic' histogram equivalent of a native histogram, if it is present. -[in-memory traffic]: {{< relref "../../concepts/component_controller.md#in-memory-traffic" >}} -[run command]: {{< relref "../cli/run.md" >}} +[in-memory traffic]: ../../../concepts/component_controller/#in-memory-traffic +[run command]: ../../cli/run/ ## Example diff --git a/docs/sources/flow/reference/components/pyroscope.ebpf.md b/docs/sources/reference/components/pyroscope.ebpf.md similarity index 95% rename from docs/sources/flow/reference/components/pyroscope.ebpf.md rename to docs/sources/reference/components/pyroscope.ebpf.md index dd13550576..2a793e1140 100644 --- a/docs/sources/flow/reference/components/pyroscope.ebpf.md +++ b/docs/sources/reference/components/pyroscope.ebpf.md @@ -1,10 +1,5 @@ --- -aliases: -- /docs/grafana-cloud/agent/flow/reference/components/pyroscope.ebpf/ -- /docs/grafana-cloud/monitor-infrastructure/agent/flow/reference/components/pyroscope.ebpf/ -- /docs/grafana-cloud/monitor-infrastructure/integrations/agent/flow/reference/components/pyroscope.ebpf/ -- /docs/grafana-cloud/send-data/agent/flow/reference/components/pyroscope.ebpf/ -canonical: https://grafana.com/docs/agent/latest/flow/reference/components/pyroscope.ebpf/ +canonical: https://grafana.com/docs/alloy/latest/reference/components/pyroscope.ebpf/ description: Learn about pyroscope.ebpf labels: stage: beta @@ -13,7 +8,7 @@ title: pyroscope.ebpf # pyroscope.ebpf -{{< docs/shared lookup="flow/stability/beta.md" source="agent" version="" >}} +{{< docs/shared lookup="stability/beta.md" source="alloy" version="" >}} `pyroscope.ebpf` configures an ebpf profiling job for the current host. The collected performance profiles are forwarded to the list of receivers passed in `forward_to`. 
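A minimal end-to-end sketch of wiring `pyroscope.ebpf`, assuming targets come from a `discovery.kubernetes` component and using a placeholder Pyroscope endpoint:

```river
// Discover Kubernetes pods on the local cluster as profiling targets.
discovery.kubernetes "pods" {
  role = "pod"
}

pyroscope.ebpf "instance" {
  targets    = discovery.kubernetes.pods.targets
  forward_to = [pyroscope.write.backend.receiver]
}

pyroscope.write "backend" {
  endpoint {
    url = "http://pyroscope:4040"
  }
}
```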
diff --git a/docs/sources/flow/reference/components/pyroscope.java.md b/docs/sources/reference/components/pyroscope.java.md similarity index 92% rename from docs/sources/flow/reference/components/pyroscope.java.md rename to docs/sources/reference/components/pyroscope.java.md index 3fdc810529..38eade4c77 100644 --- a/docs/sources/flow/reference/components/pyroscope.java.md +++ b/docs/sources/reference/components/pyroscope.java.md @@ -1,17 +1,12 @@ --- -aliases: - - /docs/grafana-cloud/agent/flow/reference/components/pyroscope.java/ - - /docs/grafana-cloud/monitor-infrastructure/agent/flow/reference/components/pyroscope.java/ - - /docs/grafana-cloud/monitor-infrastructure/integrations/agent/flow/reference/components/pyroscope.java/ - - /docs/grafana-cloud/send-data/agent/flow/reference/components/pyroscope.java/ -canonical: https://grafana.com/docs/agent/latest/flow/reference/components/pyroscope.java/ +canonical: https://grafana.com/docs/alloy/latest/reference/components/pyroscope.java/ description: Learn about pyroscope.java title: pyroscope.java --- # pyroscope.java -{{< docs/shared lookup="flow/stability/beta.md" source="agent" version="" >}} +{{< docs/shared lookup="stability/beta.md" source="alloy" version="" >}} `pyroscope.java` continuously profiles Java processes running on the local Linux OS using [async-profiler](https://github.com/async-profiler/async-profiler). diff --git a/docs/sources/flow/reference/components/pyroscope.scrape.md b/docs/sources/reference/components/pyroscope.scrape.md similarity index 95% rename from docs/sources/flow/reference/components/pyroscope.scrape.md rename to docs/sources/reference/components/pyroscope.scrape.md index 813035c8e2..094ec77e26 100644 --- a/docs/sources/flow/reference/components/pyroscope.scrape.md +++ b/docs/sources/reference/components/pyroscope.scrape.md @@ -1,10 +1,5 @@ --- -aliases: -- /docs/grafana-cloud/agent/flow/reference/components/pyroscope.scrape/ -- /docs/grafana-cloud/monitor-infrastructure/agent/flow/reference/components/pyroscope.scrape/ -- /docs/grafana-cloud/monitor-infrastructure/integrations/agent/flow/reference/components/pyroscope.scrape/ -- /docs/grafana-cloud/send-data/agent/flow/reference/components/pyroscope.scrape/ -canonical: https://grafana.com/docs/agent/latest/flow/reference/components/pyroscope.scrape/ +canonical: https://grafana.com/docs/alloy/latest/reference/components/pyroscope.scrape/ description: Learn about pyroscope.scrape labels: stage: beta @@ -13,7 +8,7 @@ title: pyroscope.scrape # pyroscope.scrape -{{< docs/shared lookup="flow/stability/beta.md" source="agent" version="" >}} +{{< docs/shared lookup="stability/beta.md" source="alloy" version="" >}} `pyroscope.scrape` collects [pprof] performance profiles for a given set of HTTP `targets`. @@ -42,7 +37,7 @@ The scraped performance profiles can be forwarded to components such as Multiple `pyroscope.scrape` components can be specified by giving them different labels. -[debug UI]: {{< relref "../../tasks/debug.md" >}} +[debug UI]: ../../../tasks/debug/ ## Usage @@ -95,7 +90,7 @@ Name | Type | Description [arguments]: #arguments -{{< docs/shared lookup="flow/reference/components/http-client-proxy-config-description.md" source="agent" version="" >}} +{{< docs/shared lookup="reference/components/http-client-proxy-config-description.md" source="alloy" version="" >}} #### `job_name` argument @@ -219,19 +214,19 @@ the defaults documented in [profile.mutex][] will be used. 
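As a hedged sketch of overriding those defaults, the `profiling_config` block can enable mutex profiling explicitly. The target address, `service_name` label, and receiver are placeholders:

```river
pyroscope.scrape "local_app" {
  targets    = [{"__address__" = "localhost:8080", "service_name" = "my-app"}]
  forward_to = [pyroscope.write.backend.receiver]

  profiling_config {
    // Explicitly enable mutex profiling for this scrape job.
    profile.mutex {
      enabled = true
    }
  }
}
```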
### basic_auth block -{{< docs/shared lookup="flow/reference/components/basic-auth-block.md" source="agent" version="" >}} +{{< docs/shared lookup="reference/components/basic-auth-block.md" source="alloy" version="" >}} ### authorization block -{{< docs/shared lookup="flow/reference/components/authorization-block.md" source="agent" version="" >}} +{{< docs/shared lookup="reference/components/authorization-block.md" source="alloy" version="" >}} ### oauth2 block -{{< docs/shared lookup="flow/reference/components/oauth2-block.md" source="agent" version="" >}} +{{< docs/shared lookup="reference/components/oauth2-block.md" source="alloy" version="" >}} ### tls_config block -{{< docs/shared lookup="flow/reference/components/tls-config-block.md" source="agent" version="" >}} +{{< docs/shared lookup="reference/components/tls-config-block.md" source="alloy" version="" >}} ### profiling_config block @@ -410,7 +405,7 @@ APIs. If {{< param "PRODUCT_NAME" >}} is _not_ running in clustered mode, this block is a no-op. -[using clustering]: {{< relref "../../concepts/clustering.md" >}} +[using clustering]: ../../../concepts/clustering/ ## Common configuration diff --git a/docs/sources/flow/reference/components/pyroscope.write.md b/docs/sources/reference/components/pyroscope.write.md similarity index 84% rename from docs/sources/flow/reference/components/pyroscope.write.md rename to docs/sources/reference/components/pyroscope.write.md index 403aef0719..d08735e216 100644 --- a/docs/sources/flow/reference/components/pyroscope.write.md +++ b/docs/sources/reference/components/pyroscope.write.md @@ -1,10 +1,5 @@ --- -aliases: -- /docs/grafana-cloud/agent/flow/reference/components/pyroscope.write/ -- /docs/grafana-cloud/monitor-infrastructure/agent/flow/reference/components/pyroscope.write/ -- /docs/grafana-cloud/monitor-infrastructure/integrations/agent/flow/reference/components/pyroscope.write/ -- /docs/grafana-cloud/send-data/agent/flow/reference/components/pyroscope.write/ -canonical: https://grafana.com/docs/agent/latest/flow/reference/components/pyroscope.write/ +canonical: https://grafana.com/docs/alloy/latest/reference/components/pyroscope.write/ description: Learn about pyroscope.write labels: stage: beta @@ -13,7 +8,7 @@ title: pyroscope.write # pyroscope.write -{{< docs/shared lookup="flow/stability/beta.md" source="agent" version="" >}} +{{< docs/shared lookup="stability/beta.md" source="alloy" version="" >}} `pyroscope.write` receives performance profiles from other components and forwards them to a series of user-supplied endpoints using [Pyroscope' Push API](/oss/pyroscope/). @@ -99,26 +94,26 @@ Name | Type | Description - [`authorization` block][authorization]. - [`oauth2` block][oauth2]. -{{< docs/shared lookup="flow/reference/components/http-client-proxy-config-description.md" source="agent" version="" >}} +{{< docs/shared lookup="reference/components/http-client-proxy-config-description.md" source="alloy" version="" >}} When multiple `endpoint` blocks are provided, profiles are concurrently forwarded to all configured locations. 
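A short sketch of that fan-out behavior, with placeholder backend URLs:

```river
// Profiles received by this component are concurrently forwarded
// to every endpoint listed below.
pyroscope.write "backends" {
  endpoint {
    url = "http://pyroscope-primary:4040"
  }

  endpoint {
    url = "http://pyroscope-secondary:4040"
  }
}
```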
### basic_auth block -{{< docs/shared lookup="flow/reference/components/basic-auth-block.md" source="agent" version="" >}} +{{< docs/shared lookup="reference/components/basic-auth-block.md" source="alloy" version="" >}} ### authorization block -{{< docs/shared lookup="flow/reference/components/authorization-block.md" source="agent" version="" >}} +{{< docs/shared lookup="reference/components/authorization-block.md" source="alloy" version="" >}} ### oauth2 block -{{< docs/shared lookup="flow/reference/components/oauth2-block.md" source="agent" version="" >}} +{{< docs/shared lookup="reference/components/oauth2-block.md" source="alloy" version="" >}} ### tls_config block -{{< docs/shared lookup="flow/reference/components/tls-config-block.md" source="agent" version="" >}} +{{< docs/shared lookup="reference/components/tls-config-block.md" source="alloy" version="" >}} ## Exported fields diff --git a/docs/sources/flow/reference/components/remote.http.md b/docs/sources/reference/components/remote.http.md similarity index 54% rename from docs/sources/flow/reference/components/remote.http.md rename to docs/sources/reference/components/remote.http.md index e91fc6c409..a959739954 100644 --- a/docs/sources/flow/reference/components/remote.http.md +++ b/docs/sources/reference/components/remote.http.md @@ -1,10 +1,5 @@ --- -aliases: -- /docs/grafana-cloud/agent/flow/reference/components/remote.http/ -- /docs/grafana-cloud/monitor-infrastructure/agent/flow/reference/components/remote.http/ -- /docs/grafana-cloud/monitor-infrastructure/integrations/agent/flow/reference/components/remote.http/ -- /docs/grafana-cloud/send-data/agent/flow/reference/components/remote.http/ -canonical: https://grafana.com/docs/agent/latest/flow/reference/components/remote.http/ +canonical: https://grafana.com/docs/alloy/latest/reference/components/remote.http/ description: Learn about remote.http title: remote.http --- @@ -32,15 +27,15 @@ remote.http "LABEL" { The following arguments are supported: -Name | Type | Description | Default | Required ----- | ---- | ----------- | ------- | -------- -`url` | `string` | URL to poll. | | yes -`method` | `string` | Define HTTP method for the request | `"GET"` | no -`headers` | `map(string)` | Custom headers for the request. | `{}` | no -`body` | `string` | The request body. | `""` | no -`poll_frequency` | `duration` | Frequency to poll the URL. | `"1m"` | no -`poll_timeout` | `duration` | Timeout when polling the URL. | `"10s"` | no -`is_secret` | `bool` | Whether the response body should be treated as a secret. | false | no +Name | Type | Description | Default | Required +-----------------|---------------|----------------------------------------------------------|---------|--------- +`url` | `string` | URL to poll. | | yes +`method` | `string` | Define HTTP method for the request | `"GET"` | no +`headers` | `map(string)` | Custom headers for the request. | `{}` | no +`body` | `string` | The request body. | `""` | no +`poll_frequency` | `duration` | Frequency to poll the URL. | `"1m"` | no +`poll_timeout` | `duration` | Timeout when polling the URL. | `"10s"` | no +`is_secret` | `bool` | Whether the response body should be treated as a secret. | false | no When `remote.http` performs a poll operation, an HTTP `GET` request is made against the URL specified by the `url` argument. A poll is triggered by the @@ -54,20 +49,20 @@ The poll is successful if the URL returns a `200 OK` response code. All other response codes are treated as errors and mark the component as unhealthy. 
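A minimal polling sketch using the arguments above (the URL, interval, and header are placeholders):

```river
remote.http "app_config" {
  url            = "https://config.example.com/app.json"
  poll_frequency = "5m"
  poll_timeout   = "10s"

  // Optional custom header sent with each poll request.
  headers = {
    "Accept" = "application/json"
  }
}
```

Other components can then reference the polled body through the component's exported `content` field.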
After a successful poll, the response body from the URL is exported. -[secret]: {{< relref "../../concepts/config-language/expressions/types_and_values.md#secrets" >}} +[secret]: ../../../concepts/config-language/expressions/types_and_values/#secrets ## Blocks The following blocks are supported inside the definition of `remote.http`: -Hierarchy | Block | Description | Required ---------- | ----- | ----------- | -------- -client | [client][] | HTTP client settings when connecting to the endpoint. | no -client > basic_auth | [basic_auth][] | Configure basic_auth for authenticating to the endpoint. | no -client > authorization | [authorization][] | Configure generic authorization to the endpoint. | no -client > oauth2 | [oauth2][] | Configure OAuth2 for authenticating to the endpoint. | no -client > oauth2 > tls_config | [tls_config][] | Configure TLS settings for connecting to the endpoint. | no -client > tls_config | [tls_config][] | Configure TLS settings for connecting to the endpoint. | no +Hierarchy | Block | Description | Required +-----------------------------|-------------------|----------------------------------------------------------|--------- +client | [client][] | HTTP client settings when connecting to the endpoint. | no +client > basic_auth | [basic_auth][] | Configure basic_auth for authenticating to the endpoint. | no +client > authorization | [authorization][] | Configure generic authorization to the endpoint. | no +client > oauth2 | [oauth2][] | Configure OAuth2 for authenticating to the endpoint. | no +client > oauth2 > tls_config | [tls_config][] | Configure TLS settings for connecting to the endpoint. | no +client > tls_config | [tls_config][] | Configure TLS settings for connecting to the endpoint. | no The `>` symbol indicates deeper levels of nesting. For example, `client > basic_auth` refers to an `basic_auth` block defined inside a `client` block. @@ -83,34 +78,34 @@ basic_auth` refers to an `basic_auth` block defined inside a `client` block. The `client` block configures settings used to connect to the HTTP server. -{{< docs/shared lookup="flow/reference/components/http-client-config-block.md" source="agent" version="" >}} +{{< docs/shared lookup="reference/components/http-client-config-block.md" source="alloy" version="" >}} ### basic_auth block The `basic_auth` block configures basic authentication to use when polling the configured URL. -{{< docs/shared lookup="flow/reference/components/basic-auth-block.md" source="agent" version="" >}} +{{< docs/shared lookup="reference/components/basic-auth-block.md" source="alloy" version="" >}} ### authorization block The `authorization` block configures custom authorization to use when polling the configured URL. -{{< docs/shared lookup="flow/reference/components/authorization-block.md" source="agent" version="" >}} +{{< docs/shared lookup="reference/components/authorization-block.md" source="alloy" version="" >}} ### oauth2 block The `oauth2` block configures OAuth2 authorization to use when polling the configured URL. -{{< docs/shared lookup="flow/reference/components/oauth2-block.md" source="agent" version="" >}} +{{< docs/shared lookup="reference/components/oauth2-block.md" source="alloy" version="" >}} ### tls_config block The `tls_config` block configures TLS settings for connecting to HTTPS servers. 
-{{< docs/shared lookup="flow/reference/components/tls-config-block.md" source="agent" version="" >}} +{{< docs/shared lookup="reference/components/tls-config-block.md" source="alloy" version="" >}} ## Exported fields diff --git a/docs/sources/flow/reference/components/remote.kubernetes.configmap.md b/docs/sources/reference/components/remote.kubernetes.configmap.md similarity index 87% rename from docs/sources/flow/reference/components/remote.kubernetes.configmap.md rename to docs/sources/reference/components/remote.kubernetes.configmap.md index adbaf214d2..d4c37cbc05 100644 --- a/docs/sources/flow/reference/components/remote.kubernetes.configmap.md +++ b/docs/sources/reference/components/remote.kubernetes.configmap.md @@ -1,8 +1,5 @@ --- -aliases: -- /docs/grafana-cloud/monitor-infrastructure/agent/flow/reference/components/remote.kubernetes.configmap/ -- /docs/grafana-cloud/send-data/agent/flow/reference/components/remote.kubernetes.configmap/ -canonical: https://grafana.com/docs/agent/latest/flow/reference/components/remote.kubernetes.configmap/ +canonical: https://grafana.com/docs/alloy/latest/reference/components/remote.kubernetes.configmap/ description: Learn about remote.kubernetes.configmap title: remote.kubernetes.configmap --- @@ -93,23 +90,23 @@ Name | Type | Description - [`authorization` block][authorization]. - [`oauth2` block][oauth2]. -{{< docs/shared lookup="flow/reference/components/http-client-proxy-config-description.md" source="agent" version="" >}} +{{< docs/shared lookup="reference/components/http-client-proxy-config-description.md" source="alloy" version="" >}} ### basic_auth block -{{< docs/shared lookup="flow/reference/components/basic-auth-block.md" source="agent" version="" >}} +{{< docs/shared lookup="reference/components/basic-auth-block.md" source="alloy" version="" >}} ### authorization block -{{< docs/shared lookup="flow/reference/components/authorization-block.md" source="agent" version="" >}} +{{< docs/shared lookup="reference/components/authorization-block.md" source="alloy" version="" >}} ### oauth2 block -{{< docs/shared lookup="flow/reference/components/oauth2-block.md" source="agent" version="" >}} +{{< docs/shared lookup="reference/components/oauth2-block.md" source="alloy" version="" >}} ### tls_config block -{{< docs/shared lookup="flow/reference/components/tls-config-block.md" source="agent" version="" >}} +{{< docs/shared lookup="reference/components/tls-config-block.md" source="alloy" version="" >}} ## Exported fields diff --git a/docs/sources/flow/reference/components/remote.kubernetes.secret.md b/docs/sources/reference/components/remote.kubernetes.secret.md similarity index 87% rename from docs/sources/flow/reference/components/remote.kubernetes.secret.md rename to docs/sources/reference/components/remote.kubernetes.secret.md index 8e5a7cd966..00ae508a95 100644 --- a/docs/sources/flow/reference/components/remote.kubernetes.secret.md +++ b/docs/sources/reference/components/remote.kubernetes.secret.md @@ -1,8 +1,5 @@ --- -aliases: -- /docs/grafana-cloud/monitor-infrastructure/agent/flow/reference/components/remote.kubernetes.secret/ -- /docs/grafana-cloud/send-data/agent/flow/reference/components/remote.kubernetes.secret/ -canonical: https://grafana.com/docs/agent/latest/flow/reference/components/remote.kubernetes.secret/ +canonical: https://grafana.com/docs/alloy/latest/reference/components/remote.kubernetes.secret/ description: Learn about remote.kubernetes.secret title: remote.kubernetes.secret --- @@ -92,23 +89,23 @@ Name | Type | 
Description - [`authorization` block][authorization]. - [`oauth2` block][oauth2]. -{{< docs/shared lookup="flow/reference/components/http-client-proxy-config-description.md" source="agent" version="" >}} +{{< docs/shared lookup="reference/components/http-client-proxy-config-description.md" source="alloy" version="" >}} ### basic_auth block -{{< docs/shared lookup="flow/reference/components/basic-auth-block.md" source="agent" version="" >}} +{{< docs/shared lookup="reference/components/basic-auth-block.md" source="alloy" version="" >}} ### authorization block -{{< docs/shared lookup="flow/reference/components/authorization-block.md" source="agent" version="" >}} +{{< docs/shared lookup="reference/components/authorization-block.md" source="alloy" version="" >}} ### oauth2 block -{{< docs/shared lookup="flow/reference/components/oauth2-block.md" source="agent" version="" >}} +{{< docs/shared lookup="reference/components/oauth2-block.md" source="alloy" version="" >}} ### tls_config block -{{< docs/shared lookup="flow/reference/components/tls-config-block.md" source="agent" version="" >}} +{{< docs/shared lookup="reference/components/tls-config-block.md" source="alloy" version="" >}} ## Exported fields @@ -131,7 +128,7 @@ nonsensitive(remote.kubernetes.secret.LABEL.data.KEY_NAME) Using `nonsensitive` allows for using the exports of `remote.kubernetes.secret` for attributes in components that do not support secrets. -[nonsensitive]: {{< relref "../stdlib/nonsensitive.md" >}} +[nonsensitive]: ../../stdlib/nonsensitive/ ## Component health diff --git a/docs/sources/flow/reference/components/remote.s3.md b/docs/sources/reference/components/remote.s3.md similarity index 87% rename from docs/sources/flow/reference/components/remote.s3.md rename to docs/sources/reference/components/remote.s3.md index c4ec8e195e..a0ad69a767 100644 --- a/docs/sources/flow/reference/components/remote.s3.md +++ b/docs/sources/reference/components/remote.s3.md @@ -1,10 +1,5 @@ --- -aliases: -- /docs/grafana-cloud/agent/flow/reference/components/remote.s3/ -- /docs/grafana-cloud/monitor-infrastructure/agent/flow/reference/components/remote.s3/ -- /docs/grafana-cloud/monitor-infrastructure/integrations/agent/flow/reference/components/remote.s3/ -- /docs/grafana-cloud/send-data/agent/flow/reference/components/remote.s3/ -canonical: https://grafana.com/docs/agent/latest/flow/reference/components/remote.s3/ +canonical: https://grafana.com/docs/alloy/latest/reference/components/remote.s3/ description: Learn about remote.s3 title: remote.s3 --- @@ -44,7 +39,7 @@ Name | Type | Description | Default | Required > **NOTE**: `path` must include a full path to a file. This does not support reading of directories. 
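+
+For illustration, a minimal `remote.s3` block might look like the following sketch. The label, bucket, and object names are placeholders, not values taken from this document:
+
+```river
+remote.s3 "data" {
+  // Poll the full path to a single object; directories aren't supported.
+  path = "s3://my-example-bucket/config.txt"
+}
+```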
-[secret]: {{< relref "../../concepts/config-language/expressions/types_and_values.md#secrets" >}} +[secret]: ../../../concepts/config-language/expressions/types_and_values/#secrets ## Blocks diff --git a/docs/sources/flow/reference/components/remote.vault.md b/docs/sources/reference/components/remote.vault.md similarity index 82% rename from docs/sources/flow/reference/components/remote.vault.md rename to docs/sources/reference/components/remote.vault.md index a4491bd25c..d8c2516cb1 100644 --- a/docs/sources/flow/reference/components/remote.vault.md +++ b/docs/sources/reference/components/remote.vault.md @@ -1,11 +1,5 @@ --- -aliases: -- /docs/agent/latest/flow/reference/components/remote.vault/ -- /docs/grafana-cloud/agent/flow/reference/components/remote.vault/ -- /docs/grafana-cloud/monitor-infrastructure/agent/flow/reference/components/remote.vault/ -- /docs/grafana-cloud/monitor-infrastructure/integrations/agent/flow/reference/components/remote.vault/ -- /docs/grafana-cloud/send-data/agent/flow/reference/components/remote.vault/ -canonical: https://grafana.com/docs/agent/latest/flow/reference/components/remote.vault/ +canonical: https://grafana.com/docs/alloy/latest/reference/components/remote.vault/ description: Learn about remote.vault title: remote.vault --- @@ -39,12 +33,12 @@ remote.vault "LABEL" { The following arguments are supported: -Name | Type | Description | Default | Required ----- | ---- | ----------- | ------- | -------- -`server` | `string` | The Vault server to connect to. | | yes -`namespace` | `string` | The Vault namespace to connect to (Vault Enterprise only). | | no -`path` | `string` | The path to retrieve a secret from. | | yes -`reread_frequency` | `duration` | Rate to re-read keys. | `"0s"` | no +Name | Type | Description | Default | Required +-------------------|------------|------------------------------------------------------------|---------|--------- +`server` | `string` | The Vault server to connect to. | | yes +`namespace` | `string` | The Vault namespace to connect to (Vault Enterprise only). | | no +`path` | `string` | The path to retrieve a secret from. | | yes +`reread_frequency` | `duration` | Rate to re-read keys. | `"0s"` | no Tokens with a lease will be automatically renewed roughly two-thirds through their lease duration. If the leased token isn't renewable, or renewing the @@ -58,18 +52,18 @@ at a frequency specified by the `reread_frequency` argument. Setting The following blocks are supported inside the definition of `remote.vault`: -Hierarchy | Block | Description | Required ---------- | ----- | ----------- | -------- -client_options | [client_options][] | Options for the Vault client. | no -auth.token | [auth.token][] | Authenticate to Vault with a token. | no -auth.approle | [auth.approle][] | Authenticate to Vault using AppRole. | no -auth.aws | [auth.aws][] | Authenticate to Vault using AWS. | no -auth.azure | [auth.azure][] | Authenticate to Vault using Azure. | no -auth.gcp | [auth.gcp][] | Authenticate to Vault using GCP. | no -auth.kubernetes | [auth.kubernetes][] | Authenticate to Vault using Kubernetes. | no -auth.ldap | [auth.ldap][] | Authenticate to Vault using LDAP. | no -auth.userpass | [auth.userpass][] | Authenticate to Vault using a username and password. | no -auth.custom | [auth.custom][] | Authenticate to Vault with custom authentication. 
| no +Hierarchy | Block | Description | Required +----------------|---------------------|------------------------------------------------------|--------- +client_options | [client_options][] | Options for the Vault client. | no +auth.token | [auth.token][] | Authenticate to Vault with a token. | no +auth.approle | [auth.approle][] | Authenticate to Vault using AppRole. | no +auth.aws | [auth.aws][] | Authenticate to Vault using AWS. | no +auth.azure | [auth.azure][] | Authenticate to Vault using Azure. | no +auth.gcp | [auth.gcp][] | Authenticate to Vault using GCP. | no +auth.kubernetes | [auth.kubernetes][] | Authenticate to Vault using Kubernetes. | no +auth.ldap | [auth.ldap][] | Authenticate to Vault using LDAP. | no +auth.userpass | [auth.userpass][] | Authenticate to Vault using a username and password. | no +auth.custom | [auth.custom][] | Authenticate to Vault with custom authentication. | no Exactly one `auth.*` block **must** be provided, otherwise the component will fail to load. @@ -89,12 +83,12 @@ fail to load. The `client_options` block customizes the connection to vault. -Name | Type | Description | Default | Required ----- | ---- | ----------- | ------- | -------- +Name | Type | Description | Default | Required +-----------------|------------|-------------------------------------------------------|------------|--------- `min_retry_wait` | `duration` | Minimum time to wait before retrying failed requests. | `"1000ms"` | no `max_retry_wait` | `duration` | Maximum time to wait before retrying failed requests. | `"1500ms"` | no -`max_retries` | `int` | Maximum number of times to retry after a 5xx error. | `2` | no -`timeout` | `duration` | Maximum time to wait before a request times out. | `"60s"` | no +`max_retries` | `int` | Maximum number of times to retry after a 5xx error. | `2` | no +`timeout` | `duration` | Maximum time to wait before a request times out. | `"60s"` | no Requests which fail due to server errors (HTTP 5xx error codes) can be retried. The `max_retries` argument specifies how many times to retry failed requests. @@ -284,7 +278,7 @@ nonsensitive(remote.vault.LABEL.data.KEY_NAME) Using `nonsensitive` allows for using the exports of `remote.vault` for attributes in components that do not support secrets. -[nonsensitive]: {{< relref "../stdlib/nonsensitive.md" >}} +[nonsensitive]: ../../stdlib/nonsensitive/ ## Component health diff --git a/docs/sources/reference/config-blocks/_index.md b/docs/sources/reference/config-blocks/_index.md new file mode 100644 index 0000000000..e3ac635534 --- /dev/null +++ b/docs/sources/reference/config-blocks/_index.md @@ -0,0 +1,17 @@ +--- +aliases: +- ./reference/config-blocks/ +canonical: https://grafana.com/docs/alloy/latest/reference/config-blocks/ +description: Learn about configuration blocks +title: Configuration blocks +weight: 200 +--- + +# Configuration blocks + +Configuration blocks are optional top-level blocks that can be used to configure various parts of the {{< param "PRODUCT_NAME" >}} process. +Each configuration block can only be defined once. + +Configuration blocks are _not_ components, so they have no exports. 
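+
+For illustration, a top-level configuration block such as `logging` looks like the following sketch. The argument values shown are assumptions for the example, not documented defaults:
+
+```river
+// A top-level configuration block: defined once, outside of any component.
+logging {
+  level  = "info"
+  format = "logfmt"
+}
+```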
+
+{{< section >}}
diff --git a/docs/sources/flow/reference/config-blocks/argument.md b/docs/sources/reference/config-blocks/argument.md
similarity index 71%
rename from docs/sources/flow/reference/config-blocks/argument.md
rename to docs/sources/reference/config-blocks/argument.md
index 758ec1d5ee..ff265c9e31 100644
--- a/docs/sources/flow/reference/config-blocks/argument.md
+++ b/docs/sources/reference/config-blocks/argument.md
@@ -1,10 +1,7 @@
---
aliases:
-- /docs/grafana-cloud/agent/flow/reference/config-blocks/argument/
-- /docs/grafana-cloud/monitor-infrastructure/agent/flow/reference/config-blocks/argument/
-- /docs/grafana-cloud/monitor-infrastructure/integrations/agent/flow/reference/config-blocks/argument/
-- /docs/grafana-cloud/send-data/agent/flow/reference/config-blocks/argument/
-canonical: https://grafana.com/docs/agent/latest/flow/reference/config-blocks/argument/
+- ./reference/config-blocks/argument/
+canonical: https://grafana.com/docs/alloy/latest/reference/config-blocks/argument/
description: Learn about the argument configuration block
menuTitle: argument
title: argument block
@@ -21,7 +18,7 @@ The `argument` block may only be specified inside the definition of [a `declare`
In [classic modules][], the `argument` block is valid as a top-level block in a classic module. Classic modules are deprecated and scheduled to be removed in the release after v0.40.
-[classic modules]: https://grafana.com/docs/agent//flow/concepts/modules/#classic-modules-deprecated
+[classic modules]: ../../../concepts/modules/#classic-modules-deprecated
{{< /admonition >}}
## Example
@@ -81,9 +78,5 @@ declare "self_collect" {
}
```
-{{% docs/reference %}}
-[custom component]: "/docs/agent/ -> /docs/agent//flow/concepts/custom_components"
-[custom component]: "/docs/grafana-cloud/ -> /docs/grafana-cloud/send-data/agent/flow/concepts/custom_components"
-[declare]: "/docs/agent/ -> /docs/agent//flow/reference/config-blocks/declare"
-[declare]: "/docs/grafana-cloud/ -> /docs/grafana-cloud/send-data/agent/flow/reference/config-blocks/declare"
-{{% /docs/reference %}}
+[custom component]: ../../../concepts/custom_components/
+[declare]: ../declare/
diff --git a/docs/sources/flow/reference/config-blocks/declare.md b/docs/sources/reference/config-blocks/declare.md
similarity index 56%
rename from docs/sources/flow/reference/config-blocks/declare.md
rename to docs/sources/reference/config-blocks/declare.md
index f4f6f455a4..d8f8179eee 100644
--- a/docs/sources/flow/reference/config-blocks/declare.md
+++ b/docs/sources/reference/config-blocks/declare.md
@@ -1,10 +1,7 @@
---
aliases:
-- /docs/grafana-cloud/agent/flow/reference/config-blocks/declare/
-- /docs/grafana-cloud/monitor-infrastructure/agent/flow/reference/config-blocks/declare/
-- /docs/grafana-cloud/monitor-infrastructure/integrations/agent/flow/reference/config-blocks/declare/
-- /docs/grafana-cloud/send-data/agent/flow/reference/config-blocks/declare/
-canonical: https://grafana.com/docs/agent/latest/flow/reference/config-blocks/declare/
+- ./reference/config-blocks/declare/
+canonical: https://grafana.com/docs/alloy/latest/reference/config-blocks/declare/
description: Learn about the declare configuration block
menuTitle: declare
title: declare block
@@ -73,13 +70,8 @@ prometheus.remote_write "example" {
}
```
-{{% docs/reference %}}
-[argument]: "/docs/agent/ -> /docs/agent//flow/reference/config-blocks/argument"
-[argument]:"/docs/grafana-cloud/ ->
/docs/grafana-cloud/send-data/agent/flow/reference/config-blocks/argument" -[export]: "/docs/agent/ -> /docs/agent//flow/reference/config-blocks/export" -[export]:"/docs/grafana-cloud/ -> /docs/grafana-cloud/send-data/agent/flow/reference/config-blocks/export" -[declare]: "/docs/agent/ -> /docs/agent//flow/reference/config-blocks/declare" -[declare]: "/docs/grafana-cloud/ -> /docs/grafana-cloud/send-data/agent/flow/reference/config-blocks/declare" -[import]: "/docs/agent/ -> /docs/agent//flow/concepts/modules#importing-modules" -[import]:"/docs/grafana-cloud/ -> /docs/grafana-cloud/send-data/agent/flow/concepts/modules#importing-modules" -{{% /docs/reference %}} +[argument]: ../argument/ +[export]: ../export/ +[declare]: ../declare/ +[import]: ../../../concepts/modules/#importing-modules +[custom component]: ../../../concepts/custom_components/ diff --git a/docs/sources/flow/reference/config-blocks/export.md b/docs/sources/reference/config-blocks/export.md similarity index 57% rename from docs/sources/flow/reference/config-blocks/export.md rename to docs/sources/reference/config-blocks/export.md index 0b119e4b63..4b28a6497d 100644 --- a/docs/sources/flow/reference/config-blocks/export.md +++ b/docs/sources/reference/config-blocks/export.md @@ -1,10 +1,7 @@ --- aliases: -- /docs/grafana-cloud/agent/flow/reference/config-blocks/export/ -- /docs/grafana-cloud/monitor-infrastructure/agent/flow/reference/config-blocks/export/ -- /docs/grafana-cloud/monitor-infrastructure/integrations/agent/flow/reference/config-blocks/export/ -- /docs/grafana-cloud/send-data/agent/flow/reference/config-blocks/export/ -canonical: https://grafana.com/docs/agent/latest/flow/reference/config-blocks/export/ +- ./reference/config-blocks/export/ +canonical: https://grafana.com/docs/alloy/latest/reference/config-blocks/export/ description: Learn about the export configuration block menuTitle: export title: export block @@ -18,9 +15,10 @@ title: export block The `export` block may only be specified inside the definition of [a `declare` block][declare]. {{< admonition type="note" >}} -In [classic modules][], the `export` block is valid as a top-level block in a classic module. Classic modules are deprecated and scheduled to be removed in the release after v0.40. +In [classic modules][], the `export` block is valid as a top-level block in a classic module. +Classic modules are deprecated and scheduled to be removed in the release after v0.40. 
-[classic modules]: https://grafana.com/docs/agent//flow/concepts/modules/#classic-modules-deprecated +[classic modules]: ../../../concepts/modules/#classic-modules-deprecated {{< /admonition >}} ## Example @@ -69,9 +67,5 @@ declare "pods_and_nodes" { } ``` -{{% docs/reference %}} -[custom component]: "/docs/agent/ -> /docs/agent//flow/concepts/custom_components" -[custom component]: "/docs/grafana-cloud/ -> /docs/grafana-cloud/send-data/agent/flow/concepts/custom_components" -[declare]: "/docs/agent/ -> /docs/agent//flow/reference/config-blocks/declare" -[declare]: "/docs/grafana-cloud/ -> /docs/grafana-cloud/send-data/agent/flow/reference/config-blocks/declare" -{{% /docs/reference %}} +[custom component]: ../../../concepts/custom_components/ +[declare]: ../declare/ diff --git a/docs/sources/flow/reference/config-blocks/http.md b/docs/sources/reference/config-blocks/http.md similarity index 94% rename from docs/sources/flow/reference/config-blocks/http.md rename to docs/sources/reference/config-blocks/http.md index 03a52010a8..c0718a760f 100644 --- a/docs/sources/flow/reference/config-blocks/http.md +++ b/docs/sources/reference/config-blocks/http.md @@ -1,10 +1,7 @@ --- aliases: -- /docs/grafana-cloud/agent/flow/reference/config-blocks/http/ -- /docs/grafana-cloud/monitor-infrastructure/agent/flow/reference/config-blocks/http/ -- /docs/grafana-cloud/monitor-infrastructure/integrations/agent/flow/reference/config-blocks/http/ -- /docs/grafana-cloud/send-data/agent/flow/reference/config-blocks/http/ -canonical: https://grafana.com/docs/agent/latest/flow/reference/config-blocks/http/ +- ./reference/config-blocks/http/ +canonical: https://grafana.com/docs/alloy/latest/reference/config-blocks/http/ description: Learn about the http configuration block menuTitle: http title: http block @@ -41,11 +38,6 @@ tls > windows_certificate_filter | [windows_certificate_filter][] | Con tls > windows_certificate_filter > client | [client][] | Configure client certificates for Windows certificate filter. | no tls > windows_certificate_filter > server | [server][] | Configure server certificates for Windows certificate filter. | no -[tls]: #tls-block -[windows_certificate_filter]: #windows-certificate-filter-block -[server]: #server-block -[client]: #client-block - ### tls block The `tls` block configures TLS settings for the HTTP server. @@ -71,9 +63,7 @@ Name | Type | Description `min_version` | `string` | Oldest TLS version to accept from clients. | `""` | no `max_version` | `string` | Newest TLS version to accept from clients. | `""` | no -When the `tls` block is specified, arguments for the TLS certificate (using -`cert_pem` or `cert_file`) and for the TLS key (using `key_pem` or `key_file`) -are required. +When the `tls` block is specified, arguments for the TLS certificate (using `cert_pem` or `cert_file`) and for the TLS key (using `key_pem` or `key_file`) are required. The following pairs of arguments are mutually exclusive, and only one may be configured at a time: @@ -120,9 +110,9 @@ The set of cipher suites specified may be from the following: | `TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305_SHA256` | no | | `TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305_SHA256` | no | -The `curve_preferences` argument determines the set of elliptic curves to -prefer during a handshake in preference order. If not provided, a default list -is used. 
The set of elliptic curves specified may be from the following: +The `curve_preferences` argument determines the set of elliptic curves to prefer during a handshake in preference order. +If not provided, a default list is used. +The set of elliptic curves specified may be from the following: | Curve | Allowed in `boringcrypto` builds | | ----------- | -------------------------------- | @@ -186,3 +176,8 @@ Name | Type | Description `issuer_common_names` | `list(string)` | Issuer common names to check against. | | no `subject_regex` | `string` | Regular expression to match Subject name. | `""` | no `template_id` | `string` | Client Template ID to match in ASN1 format, for example, "1.2.3". | `""` | no + +[tls]: #tls-block +[windows_certificate_filter]: #windows-certificate-filter-block +[server]: #server-block +[client]: #client-block diff --git a/docs/sources/flow/reference/config-blocks/import.file.md b/docs/sources/reference/config-blocks/import.file.md similarity index 63% rename from docs/sources/flow/reference/config-blocks/import.file.md rename to docs/sources/reference/config-blocks/import.file.md index 8958c00c5e..60a6cee183 100644 --- a/docs/sources/flow/reference/config-blocks/import.file.md +++ b/docs/sources/reference/config-blocks/import.file.md @@ -1,10 +1,7 @@ --- aliases: -- /docs/grafana-cloud/agent/flow/reference/config-blocks/import.file/ -- /docs/grafana-cloud/monitor-infrastructure/agent/flow/reference/config-blocks/import.file/ -- /docs/grafana-cloud/monitor-infrastructure/integrations/agent/flow/reference/config-blocks/import.file/ -- /docs/grafana-cloud/send-data/agent/flow/reference/config-blocks/import.file/ -canonical: https://grafana.com/docs/agent/latest/flow/reference/config-blocks/import.file/ +- ./reference/config-blocks/import.file/ +canonical: https://grafana.com/docs/alloy/latest/reference/config-blocks/import.file/ description: Learn about the import.file configuration block labels: stage: beta @@ -13,7 +10,7 @@ title: import.file # import.file -{{< docs/shared lookup="flow/stability/beta.md" source="agent" version="" >}} +{{< docs/shared lookup="stability/beta.md" source="alloy" version="" >}} The `import.file` block imports custom components from a file and exposes them to the importer. `import.file` blocks must be given a label that determines the namespace where custom components are exposed. @@ -36,7 +33,7 @@ Name | Type | Description `detector` | `string` | Which file change detector to use (fsnotify, poll). | `"fsnotify"` | no `poll_frequency` | `duration` | How often to poll for file changes. 
| `"1m"` | no

-{{< docs/shared lookup="flow/reference/components/local-file-arguments-text.md" source="agent" version="" >}}
+{{< docs/shared lookup="reference/components/local-file-arguments-text.md" source="alloy" version="" >}}

## Example

@@ -67,8 +64,3 @@ math.add "default" {
}
```
{{< /collapse >}}
-
-{{% docs/reference %}}
-[module]: "/docs/agent/ -> /docs/agent//flow/concepts/modules"
-[module]:"/docs/grafana-cloud/ -> /docs/grafana-cloud/send-data/agent/flow/concepts/modules"
-{{% /docs/reference %}}
diff --git a/docs/sources/flow/reference/config-blocks/import.git.md b/docs/sources/reference/config-blocks/import.git.md
similarity index 71%
rename from docs/sources/flow/reference/config-blocks/import.git.md
rename to docs/sources/reference/config-blocks/import.git.md
index b08852ff29..f7b7f724b1 100644
--- a/docs/sources/flow/reference/config-blocks/import.git.md
+++ b/docs/sources/reference/config-blocks/import.git.md
@@ -1,10 +1,7 @@
---
aliases:
-- /docs/grafana-cloud/agent/flow/reference/config-blocks/import.git/
-- /docs/grafana-cloud/monitor-infrastructure/agent/flow/reference/config-blocks/import.git/
-- /docs/grafana-cloud/monitor-infrastructure/integrations/agent/flow/reference/config-blocks/import.git/
-- /docs/grafana-cloud/send-data/agent/flow/reference/config-blocks/import.git/
-canonical: https://grafana.com/docs/agent/latest/flow/reference/config-blocks/import.git/
+- ./reference/config-blocks/import.git/
+canonical: https://grafana.com/docs/alloy/latest/reference/config-blocks/import.git/
description: Learn about the import.git configuration block
labels:
  stage: beta
@@ -13,7 +10,7 @@ title: import.git

# import.git

-{{< docs/shared lookup="flow/stability/beta.md" source="agent" version="" >}}
+{{< docs/shared lookup="stability/beta.md" source="alloy" version="" >}}

The `import.git` block imports custom components from a Git repository and exposes them to the importer.
`import.git` blocks must be given a label that determines the namespace where custom components are exposed.

@@ -38,18 +35,13 @@ Name | Type | Description
`path` | `string` | The path in the repository where the module is stored. | | yes
`pull_frequency` | `duration` | The frequency to pull the repository for updates. | `"60s"` | no

-The `repository` attribute must be set to a repository address that would be
-recognized by Git with a `git clone REPOSITORY_ADDRESS` command, such as
-`https://github.com/grafana/agent.git`.
-
-You must set the `repository` attribute to a repository address that Git would recognize
-with a `git clone REPOSITORY_ADDRESS` command, such as `https://github.com/grafana/agent.git`.
+You must set the `repository` attribute to a repository address that Git would recognize with a `git clone REPOSITORY_ADDRESS` command, such as `https://github.com/grafana/alloy.git`.

-When provided, the `revision` attribute must be set to a valid branch, tag, or
-commit SHA within the repository.
+When provided, the `revision` attribute must be set to a valid branch, tag, or commit SHA within the repository.

-You must set the `path` attribute to a path accessible from the repository's root,
-such as `FILE_NAME.river` or `FOLDER_NAME/FILE_NAME.river`.
+You must set the `path` attribute to a path accessible from the repository's root, such as `FILE_NAME.river` or `FOLDER_NAME/FILE_NAME.river`.
If `pull_frequency` isn't `"0s"`, the Git repository is pulled for updates at the frequency specified. If it's set to `"0s"`, the Git repository is pulled once on init. @@ -69,7 +61,7 @@ ssh_key | [ssh_key][] | Configure an SSH Key for authenticating to the rep ### basic_auth block -{{< docs/shared lookup="flow/reference/components/basic-auth-block.md" source="agent" version="" >}} +{{< docs/shared lookup="reference/components/basic-auth-block.md" source="alloy" version="" >}} ### ssh_key block @@ -99,8 +91,3 @@ math.add "default" { [basic_auth]: #basic_auth-block [ssh_key]: #ssh_key-block - -{{% docs/reference %}} -[module]: "/docs/agent/ -> /docs/agent//flow/concepts/modules" -[module]:"/docs/grafana-cloud/ -> /docs/grafana-cloud/send-data/agent/flow/concepts/modules" -{{% /docs/reference %}} diff --git a/docs/sources/flow/reference/config-blocks/import.http.md b/docs/sources/reference/config-blocks/import.http.md similarity index 72% rename from docs/sources/flow/reference/config-blocks/import.http.md rename to docs/sources/reference/config-blocks/import.http.md index c04ae1711c..c788166f81 100644 --- a/docs/sources/flow/reference/config-blocks/import.http.md +++ b/docs/sources/reference/config-blocks/import.http.md @@ -1,10 +1,7 @@ --- aliases: -- /docs/grafana-cloud/agent/flow/reference/config-blocks/import.http/ -- /docs/grafana-cloud/monitor-infrastructure/agent/flow/reference/config-blocks/import.http/ -- /docs/grafana-cloud/monitor-infrastructure/integrations/agent/flow/reference/config-blocks/import.http/ -- /docs/grafana-cloud/send-data/agent/flow/reference/config-blocks/import.http/ -canonical: https://grafana.com/docs/agent/latest/flow/reference/config-blocks/import.http/ +- ./reference/config-blocks/import.http/ +canonical: https://grafana.com/docs/alloy/latest/reference/config-blocks/import.http/ description: Learn about the import.http configuration block labels: stage: beta @@ -13,7 +10,7 @@ title: import.http # import.http -{{< docs/shared lookup="flow/stability/beta.md" source="agent" version="" >}} +{{< docs/shared lookup="stability/beta.md" source="alloy" version="" >}} `import.http` retrieves a module from an HTTP server. 
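+
+For illustration, a minimal `import.http` block might look like the following sketch. The label and URL are placeholders, and the assumption that a `url` argument alone is sufficient is not confirmed by this page:
+
+```river
+import.http "math" {
+  // URL of an HTTP endpoint that serves the module source.
+  url = "https://example.com/modules/math.river"
+}
+```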
diff --git a/docs/sources/flow/reference/config-blocks/import.string.md b/docs/sources/reference/config-blocks/import.string.md similarity index 65% rename from docs/sources/flow/reference/config-blocks/import.string.md rename to docs/sources/reference/config-blocks/import.string.md index b5ee71c4c6..8259a11b3c 100644 --- a/docs/sources/flow/reference/config-blocks/import.string.md +++ b/docs/sources/reference/config-blocks/import.string.md @@ -1,10 +1,7 @@ --- aliases: -- /docs/grafana-cloud/agent/flow/reference/config-blocks/import.string/ -- /docs/grafana-cloud/monitor-infrastructure/agent/flow/reference/config-blocks/import.string/ -- /docs/grafana-cloud/monitor-infrastructure/integrations/agent/flow/reference/config-blocks/import.string/ -- /docs/grafana-cloud/send-data/agent/flow/reference/config-blocks/import.string/ -canonical: https://grafana.com/docs/agent/latest/flow/reference/config-blocks/import.string/ +- ./reference/config-blocks/import.string/ +canonical: https://grafana.com/docs/alloy/latest/reference/config-blocks/import.string/ description: Learn about the import.string configuration block labels: stage: beta @@ -13,7 +10,7 @@ title: import.string # import.string -{{< docs/shared lookup="flow/stability/beta.md" source="agent" version="" >}} +{{< docs/shared lookup="stability/beta.md" source="alloy" version="" >}} The `import.string` block imports custom components from a string and exposes them to the importer. `import.string` blocks must be given a label that determines the namespace where custom components are exposed. @@ -59,8 +56,3 @@ math.add "default" { b = 45 } ``` - -{{% docs/reference %}} -[module]: "/docs/agent/ -> /docs/agent//flow/concepts/modules" -[module]:"/docs/grafana-cloud/ -> /docs/grafana-cloud/send-data/agent/flow/concepts/modules" -{{% /docs/reference %}} diff --git a/docs/sources/flow/reference/config-blocks/logging.md b/docs/sources/reference/config-blocks/logging.md similarity index 85% rename from docs/sources/flow/reference/config-blocks/logging.md rename to docs/sources/reference/config-blocks/logging.md index 23f3e84e90..090cd64ecc 100644 --- a/docs/sources/flow/reference/config-blocks/logging.md +++ b/docs/sources/reference/config-blocks/logging.md @@ -1,10 +1,7 @@ --- aliases: -- /docs/grafana-cloud/agent/flow/reference/config-blocks/logging/ -- /docs/grafana-cloud/monitor-infrastructure/agent/flow/reference/config-blocks/logging/ -- /docs/grafana-cloud/monitor-infrastructure/integrations/agent/flow/reference/config-blocks/logging/ -- /docs/grafana-cloud/send-data/agent/flow/reference/config-blocks/logging/ -canonical: https://grafana.com/docs/agent/latest/flow/reference/config-blocks/logging/ +- ./reference/config-blocks/logging/ +canonical: https://grafana.com/docs/alloy/latest/reference/config-blocks/logging/ description: Learn about the logging configuration block menuTitle: logging title: logging block @@ -50,15 +47,11 @@ The following strings are recognized as valid log line formats: * `"logfmt"`: Write logs as [logfmt][] lines. * `"json"`: Write logs as JSON objects. -[logfmt]: https://brandur.org/logfmt - ### Log receivers The `write_to` argument allows {{< param "PRODUCT_NAME" >}} to tee its log entries to one or more `loki.*` component log receivers in addition to the default [location][]. This, for example can be the export of a `loki.write` component to ship log entries directly to Loki, or a `loki.relabel` component to add a certain label first. 
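+
+For illustration, the following sketch tees log entries to a `loki.write` component in addition to the default location. The endpoint URL is a placeholder, and the `level` and `format` values are assumptions for the example:
+
+```river
+logging {
+  level    = "info"
+  format   = "logfmt"
+  // Also send internal log entries to the loki.write component below.
+  write_to = [loki.write.default.receiver]
+}
+
+loki.write "default" {
+  endpoint {
+    url = "https://loki.example.com/loki/api/v1/push"
+  }
+}
+```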
-[location]: #log-location
-
## Log location

{{< param "PRODUCT_NAME" >}} writes all logs to `stderr`.
@@ -70,3 +63,6 @@ When running {{< param "PRODUCT_NAME" >}} as a container, view logs written to `
When running {{< param "PRODUCT_NAME" >}} as a Windows service, logs are instead written as event logs. You can view the logs through Event Viewer.
In other cases, redirect `stderr` of the {{< param "PRODUCT_NAME" >}} process to a file for logs to persist on disk.
+
+[logfmt]: https://brandur.org/logfmt
+[location]: #log-location
diff --git a/docs/sources/flow/reference/config-blocks/remotecfg.md b/docs/sources/reference/config-blocks/remotecfg.md
similarity index 71%
rename from docs/sources/flow/reference/config-blocks/remotecfg.md
rename to docs/sources/reference/config-blocks/remotecfg.md
index a175c9e169..233b350903 100644
--- a/docs/sources/flow/reference/config-blocks/remotecfg.md
+++ b/docs/sources/reference/config-blocks/remotecfg.md
@@ -1,10 +1,7 @@
---
aliases:
-- /docs/grafana-cloud/agent/flow/reference/config-blocks/remotecfg/
-- /docs/grafana-cloud/monitor-infrastructure/agent/flow/reference/config-blocks/remotecfg/
-- /docs/grafana-cloud/monitor-infrastructure/integrations/agent/flow/reference/config-blocks/remotecfg/
-- /docs/grafana-cloud/send-data/agent/flow/reference/config-blocks/remotecfg/
-canonical: remotecfgs://grafana.com/docs/agent/latest/flow/reference/config-blocks/remotecfg/
+- ./reference/config-blocks/remotecfg/
+canonical: https://grafana.com/docs/alloy/latest/reference/config-blocks/remotecfg/
description: Learn about the remotecfg configuration block
menuTitle: remotecfg
title: remotecfg block
@@ -18,8 +15,7 @@ title: remotecfg block
The [API definition][] for managing and fetching configuration that the `remotecfg` block uses is available under the Apache 2.0 license.
> **BETA**: The `remotecfg` enables [beta][] functionality.
-> Beta features are subject to breaking changes, and may be replaced with
-> equivalent functionality that cover the same use case.
+> Beta features are subject to breaking changes, and may be replaced with equivalent functionality that covers the same use case.
## Example
@@ -50,9 +46,7 @@ Name | Type | Description
If the `url` is not set, then the service block is a no-op.
-If not set, the self-reported `id` that the Agent uses is a randomly generated,
-anonymous unique ID (UUID) that is stored as an `agent_seed.json` file in the
-Agent's storage path so that it can persist across restarts.
+If not set, the self-reported `id` that {{< param "PRODUCT_NAME" >}} uses is a randomly generated, anonymous unique ID (UUID) that is stored as an `agent_seed.json` file in {{< param "PRODUCT_NAME" >}}'s storage path so that it can persist across restarts.
The `id` and `metadata` fields are used in the periodic request sent to the remote endpoint so that the API can decide what configuration to serve.
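+
+For illustration, a sketch of a `remotecfg` block follows. The URL and metadata values are placeholders, and the `poll_frequency` argument is assumed from the periodic-request behavior described above:
+
+```river
+remotecfg {
+  url            = "https://config.example.com/api"
+  // Self-reported identity and attributes sent with each poll.
+  id             = constants.hostname
+  metadata       = {"environment" = "production"}
+  poll_frequency = "60s"
+}
+```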
@@ -74,22 +68,22 @@ For example, `oauth2 > tls_config` refers to a `tls_config` block defined inside

### basic_auth block

-{{< docs/shared lookup="flow/reference/components/basic-auth-block.md" source="agent" version="" >}}
+{{< docs/shared lookup="reference/components/basic-auth-block.md" source="alloy" version="" >}}

### authorization block

-{{< docs/shared lookup="flow/reference/components/authorization-block.md" source="agent" version="" >}}
+{{< docs/shared lookup="reference/components/authorization-block.md" source="alloy" version="" >}}

### oauth2 block

-{{< docs/shared lookup="flow/reference/components/oauth2-block.md" source="agent" version="" >}}
+{{< docs/shared lookup="reference/components/oauth2-block.md" source="alloy" version="" >}}

### tls_config block

-{{< docs/shared lookup="flow/reference/components/tls-config-block.md" source="agent" version="" >}}
+{{< docs/shared lookup="reference/components/tls-config-block.md" source="alloy" version="" >}}

[API definition]: https://github.com/grafana/agent-remote-config
-[beta]: https://grafana.com/docs/agent//stability/#beta
+[beta]: ../../../stability/#beta
[basic_auth]: #basic_auth-block
[authorization]: #authorization-block
[oauth2]: #oauth2-block
diff --git a/docs/sources/flow/reference/config-blocks/tracing.md b/docs/sources/reference/config-blocks/tracing.md
similarity index 60%
rename from docs/sources/flow/reference/config-blocks/tracing.md
rename to docs/sources/reference/config-blocks/tracing.md
index 860c8e4c79..19bdcc28cc 100644
--- a/docs/sources/flow/reference/config-blocks/tracing.md
+++ b/docs/sources/reference/config-blocks/tracing.md
@@ -1,10 +1,7 @@
---
aliases:
-- /docs/grafana-cloud/agent/flow/reference/config-blocks/tracing/
-- /docs/grafana-cloud/monitor-infrastructure/agent/flow/reference/config-blocks/tracing/
-- /docs/grafana-cloud/monitor-infrastructure/integrations/agent/flow/reference/config-blocks/tracing/
-- /docs/grafana-cloud/send-data/agent/flow/reference/config-blocks/tracing/
-canonical: https://grafana.com/docs/agent/latest/flow/reference/config-blocks/tracing/
+- ./reference/config-blocks/tracing/
+canonical: https://grafana.com/docs/alloy/latest/reference/config-blocks/tracing/
description: Learn about the tracing configuration block
menuTitle: tracing
title: tracing block
@@ -50,13 +47,12 @@ processing. The elements in the array
can be any `otelcol` component that accept traces, including processors and
exporters. When `write_to` is set to an empty array `[]`, all traces are dropped.
-> **NOTE**: Any traces generated before the `tracing` block has been evaluated,
-> such as at the early start of the process' lifetime, are dropped.
+{{< admonition type="note" >}}
+Any traces generated before the `tracing` block has been evaluated, such as at the early start of the process' lifetime, are dropped.
+{{< /admonition >}}

-The `sampling_fraction` argument controls what percentage of generated traces
-should be sent to the consumers specified by `write_to`. When set to `1` or
-greater, 100% of traces are kept. When set to `0` or lower, 0% of traces are
-kept.
+The `sampling_fraction` argument controls what percentage of generated traces should be sent to the consumers specified by `write_to`.
+When set to `1` or greater, 100% of traces are kept. When set to `0` or lower, 0% of traces are kept.

## Blocks

@@ -67,25 +63,18 @@ Hierarchy | Block | Description
sampler | [sampler][] | Define custom sampling on top of the base sampling fraction.
| no
sampler > jaeger_remote | [jaeger_remote][] | Retrieve sampling information via a Jaeger remote sampler. | no

-The `>` symbol indicates deeper levels of nesting. For example, `sampler >
-jaeger_remote` refers to a `jaeger_remote` block defined inside an `sampler`
-block.
-
-[sampler]: #sampler-block
-[jaeger_remote]: #jaeger_remote-block
+The `>` symbol indicates deeper levels of nesting. For example, `sampler > jaeger_remote` refers to a `jaeger_remote` block defined inside a `sampler` block.

### sampler block

-The `sampler` block contains a definition of a custom sampler to use. The
-`sampler` block supports no arguments and is controlled fully through inner
-blocks.
+The `sampler` block contains a definition of a custom sampler to use.
+The `sampler` block supports no arguments and is controlled fully through inner blocks.

-It is invalid to define more than one sampler to use in the `sampler` block.
+It's invalid to define more than one sampler to use in the `sampler` block.

### jaeger_remote block

-The `jaeger_remote` block configures the retrieval of sampling information
-through a remote server that exposes Jaeger sampling strategies.
+The `jaeger_remote` block configures the retrieval of sampling information through a remote server that exposes Jaeger sampling strategies.

@@ -93,24 +82,23 @@ Name | Type | Description
-------------------|------------|------------------------------------------------------------|------------------------------------|---------
`max_operations` | `number` | Limit number of operations which can have custom sampling. | `256` | no
`refresh_interval` | `duration` | Frequency to poll the URL for new sampling strategies. | `"1m"` | no

-The remote sampling strategies are retrieved from the URL specified by the
-`url` argument, and polled for updates on a timer. The frequency for how oftenName | Type | Description | Default | Required
----- | ---- | ----------- | ------- | --------
-`names` | `list(string)` | DNS names to look up. | | yes
-`port` | `number` | Port to use for collecting metrics. Not used for SRV records. | `0` | no
-`refresh_interval` | `duration` | How often to query DNS for updates. | `"30s"` | no
-`type` | `string` | Type of DNS record to query. Must be one of SRV, A, AAAA, or MX. | `"SRV"` | no
-polling occurs is controlled by the `refresh_interval` argument.
-
-Requests to the remote sampling strategies server are made through an HTTP
-`GET` request to the configured `url` argument. A `service=grafana-agent` query
-parameter is always added to the URL to allow the server to respond with
-service-specific strategies. The HTTP response body is read as JSON matching
-the schema specified by Jaeger's [`strategies.json` file][Jaeger sampling
-strategies].
-
-The `max_operations` limits the amount of custom span names that can have
-custom sampling rules. If the remote sampling strategy exceeds the limit,
-sampling decisions fall back to the default sampler.
+The remote sampling strategies are retrieved from the URL specified by the `url` argument, and polled for updates on a timer.
+The frequency for how often polling occurs is controlled by the `refresh_interval` argument.
+
+Requests to the remote sampling strategies server are made through an HTTP `GET` request to the configured `url` argument.
+A `service=grafana-agent` query parameter is always added to the URL to allow the server to respond with service-specific strategies.
+The HTTP response body is read as JSON matching the schema specified by Jaeger's [`strategies.json` file][Jaeger sampling strategies].
+
+The `max_operations` limits the amount of custom span names that can have custom sampling rules.
+If the remote sampling strategy exceeds the limit, sampling decisions fall back to the default sampler.

[Jaeger sampling strategies]: https://www.jaegertracing.io/docs/1.22/sampling/#collector-sampling-configuration
+[sampler]: #sampler-block
+[jaeger_remote]: #jaeger_remote-block
diff --git a/docs/sources/reference/stdlib/_index.md b/docs/sources/reference/stdlib/_index.md
new file mode 100644
index 0000000000..489dfadadb
--- /dev/null
+++ b/docs/sources/reference/stdlib/_index.md
@@ -0,0 +1,19 @@
+---
+aliases:
+- ./reference/stdlib/
+canonical: https://grafana.com/docs/alloy/latest/reference/stdlib/
+description: The standard library is a list of functions used in expressions when assigning values to attributes
+title: Standard library
+weight: 400
+---
+
+# Standard library
+
+The standard library is a list of functions you can use in expressions when assigning values to attributes.
+
+All standard library functions are [pure functions][].
+The functions always return the same output if given the same input.
+
+{{< section >}}
+
+[pure functions]: https://en.wikipedia.org/wiki/Pure_function
diff --git a/docs/sources/reference/stdlib/coalesce.md b/docs/sources/reference/stdlib/coalesce.md
new file mode 100644
index 0000000000..a089644c90
--- /dev/null
+++ b/docs/sources/reference/stdlib/coalesce.md
@@ -0,0 +1,24 @@
+---
+aliases:
+- ./reference/stdlib/coalesce/
+canonical: https://grafana.com/docs/alloy/latest/reference/stdlib/coalesce/
+description: Learn about coalesce
+title: coalesce
+---
+
+# coalesce
+
+`coalesce` takes any number of arguments and returns the first one that isn't null, an empty string, empty list, or an empty object.
+It's useful for obtaining a default value, such as if an environment variable isn't defined.
+If no argument is non-empty or non-zero, the last argument is returned.
+
+## Examples
+
+```
+> coalesce("a", "b")
+a
+> coalesce("", "b")
+b
+> coalesce(env("DOES_NOT_EXIST"), "c")
+c
+```
diff --git a/docs/sources/reference/stdlib/concat.md b/docs/sources/reference/stdlib/concat.md
new file mode 100644
index 0000000000..c233fbaa2c
--- /dev/null
+++ b/docs/sources/reference/stdlib/concat.md
@@ -0,0 +1,29 @@
+---
+aliases:
+- ./reference/stdlib/concat/
+canonical: https://grafana.com/docs/alloy/latest/reference/stdlib/concat/
+description: Learn about concat
+title: concat
+---
+
+# concat
+
+The `concat` function concatenates one or more lists of values into a single list.
+Each argument to `concat` must be a list value.
+Elements within the list can be any type.
+
+## Examples
+
+```
+> concat([])
+[]
+
+> concat([1, 2], [3, 4])
+[1, 2, 3, 4]
+
+> concat([1, 2], [], [true, null])
+[1, 2, true, null]
+
+> concat([[1, 2], [3, 4]], [[5, 6]])
+[[1, 2], [3, 4], [5, 6]]
+```
diff --git a/docs/sources/reference/stdlib/constants.md b/docs/sources/reference/stdlib/constants.md
new file mode 100644
index 0000000000..3e9e394af2
--- /dev/null
+++ b/docs/sources/reference/stdlib/constants.md
@@ -0,0 +1,28 @@
+---
+aliases:
+- ./reference/stdlib/constants/
+canonical: https://grafana.com/docs/alloy/latest/reference/stdlib/constants/
+description: Learn about constants
+title: constants
+---
+
+# constants
+
+The `constants` object exposes a list of constant values about the system {{< param "PRODUCT_NAME" >}} is running on:
+
+* `constants.hostname`: The hostname of the machine {{< param "PRODUCT_NAME" >}} is running on.
+* `constants.os`: The operating system {{< param "PRODUCT_NAME" >}} is running on.
+* `constants.arch`: The architecture of the system {{< param "PRODUCT_NAME" >}} is running on.
+
+## Examples
+
+```
+> constants.hostname
+"my-hostname"
+
+> constants.os
+"linux"
+
+> constants.arch
+"amd64"
+```
diff --git a/docs/sources/reference/stdlib/env.md b/docs/sources/reference/stdlib/env.md
new file mode 100644
index 0000000000..84a68ed3b9
--- /dev/null
+++ b/docs/sources/reference/stdlib/env.md
@@ -0,0 +1,22 @@
+---
+aliases:
+- ./reference/stdlib/env/
+canonical: https://grafana.com/docs/alloy/latest/reference/stdlib/env/
+description: Learn about env
+title: env
+---
+
+# env
+
+The `env` function gets the value of an environment variable from the system {{< param "PRODUCT_NAME" >}} is running on.
+If the environment variable does not exist, `env` returns an empty string.
+
+## Examples
+
+```
+> env("HOME")
+"/home/grafana-agent"
+
+> env("DOES_NOT_EXIST")
+""
+```
diff --git a/docs/sources/flow/reference/stdlib/format.md b/docs/sources/reference/stdlib/format.md
similarity index 66%
rename from docs/sources/flow/reference/stdlib/format.md
rename to docs/sources/reference/stdlib/format.md
index be5d9cd754..4f5d227cd8 100644
--- a/docs/sources/flow/reference/stdlib/format.md
+++ b/docs/sources/reference/stdlib/format.md
@@ -1,20 +1,15 @@
---
aliases:
-- ../../configuration-language/standard-library/format/
-- /docs/grafana-cloud/agent/flow/reference/stdlib/format/
-- /docs/grafana-cloud/monitor-infrastructure/agent/flow/reference/stdlib/format/
-- /docs/grafana-cloud/monitor-infrastructure/integrations/agent/flow/reference/stdlib/format/
-- /docs/grafana-cloud/send-data/agent/flow/reference/stdlib/format/
-canonical: https://grafana.com/docs/agent/latest/flow/reference/stdlib/format/
+- ./reference/stdlib/format/
+canonical: https://grafana.com/docs/alloy/latest/reference/stdlib/format/
description: Learn about format
title: format
---

# format

-The `format` function produces a string by formatting a number of other values according
-to a specification string. It is similar to the `printf` function in C, and
-other similar functions in other programming languages.
+The `format` function produces a string by formatting a number of other values according to a specification string.
+It's similar to the `printf` function in C, and other similar functions in other programming languages.

```river
format(spec, values...)
```
@@ -33,21 +28,16 @@ The `format` function is most useful when you use more complex format specificat

## Specification Syntax

-The specification is a string that includes formatting verbs that are introduced
-with the `%` character.
The function call must then have one additional argument
-for each verb sequence in the specification. The verbs are matched with
-consecutive arguments and formatted as directed, as long as each given argument
-is convertible to the type required by the format verb.
+The specification is a string that includes formatting verbs that are introduced with the `%` character.
+The function call must then have one additional argument for each verb sequence in the specification.
+The verbs are matched with consecutive arguments and formatted as directed, as long as each given argument is convertible to the type required by the format verb.

By default, `%` sequences consume successive arguments starting with the first.
-Introducing a `[n]` sequence immediately before the verb letter, where `n` is a
-decimal integer, explicitly chooses a particular value argument by its
-one-based index. Subsequent calls without an explicit index will then proceed
-with `n`+1, `n`+2, etc.
+Introducing a `[n]` sequence immediately before the verb letter, where `n` is a decimal integer, explicitly chooses a particular value argument by its one-based index.
+Subsequent calls without an explicit index will then proceed with `n`+1, `n`+2, etc.

-The function produces an error if the format string requests an impossible
-conversion or accesses more arguments than are given. An error is also produced
-for an unsupported format verb.
+The function produces an error if the format string requests an impossible conversion or accesses more arguments than are given.
+An error is also produced for an unsupported format verb.

### Verbs

diff --git a/docs/sources/reference/stdlib/join.md b/docs/sources/reference/stdlib/join.md
new file mode 100644
index 0000000000..fdec1cbf0e
--- /dev/null
+++ b/docs/sources/reference/stdlib/join.md
@@ -0,0 +1,26 @@
+---
+aliases:
+- ./reference/stdlib/join/
+canonical: https://grafana.com/docs/alloy/latest/reference/stdlib/join/
+description: Learn about join
+title: join
+---
+
+# join
+
+`join` concatenates all items in an array into a string, using a character as a separator.
+
+```river
+join(list, separator)
+```
+
+## Examples
+
+```river
+> join(["foo", "bar", "baz"], "-")
+"foo-bar-baz"
+> join(["foo", "bar", "baz"], ", ")
+"foo, bar, baz"
+> join(["foo"], ", ")
+"foo"
+```
diff --git a/docs/sources/reference/stdlib/json_decode.md b/docs/sources/reference/stdlib/json_decode.md
new file mode 100644
index 0000000000..6bc68ca250
--- /dev/null
+++ b/docs/sources/reference/stdlib/json_decode.md
@@ -0,0 +1,41 @@
+---
+aliases:
+- ./reference/stdlib/json_decode/
+canonical: https://grafana.com/docs/alloy/latest/reference/stdlib/json_decode/
+description: Learn about json_decode
+title: json_decode
+---
+
+# json_decode
+
+The `json_decode` function decodes a string representing JSON into a River value.
+`json_decode` fails if the string argument provided can't be parsed as JSON.
+
+A common use case of `json_decode` is to decode the output of a [`local.file`][] component to a River value.
+
+> Remember to escape double quotes when passing JSON string literals to `json_decode`.
+>
+> For example, the JSON value `{"key": "value"}` is properly represented by the string `"{\"key\": \"value\"}"`.
+
+## Examples
+
+```
+> json_decode("15")
+15
+
+> json_decode("[1, 2, 3]")
+[1, 2, 3]
+
+> json_decode("null")
+null
+
+> json_decode("{\"key\": \"value\"}")
+{
+  key = "value",
+}
+
+> json_decode(local.file.some_file.content)
+"Hello, world!"
+```
+
+[`local.file`]: ../../components/local.file/
diff --git a/docs/sources/reference/stdlib/json_path.md b/docs/sources/reference/stdlib/json_path.md
new file mode 100644
index 0000000000..dda12738a0
--- /dev/null
+++ b/docs/sources/reference/stdlib/json_path.md
@@ -0,0 +1,43 @@
+---
+aliases:
+- ./reference/stdlib/json_path/
+canonical: https://grafana.com/docs/alloy/latest/reference/stdlib/json_path/
+description: Learn about json_path
+title: json_path
+---
+
+# json_path
+
+The `json_path` function looks up values using [jsonpath][] syntax.
+
+The function expects two strings. The first string is the JSON string used to look up values. The second string is the JSONPath expression.
+
+`json_path` always returns a list of values. If the JSONPath expression doesn't match any values, an empty list is returned.
+
+A common use case of `json_path` is to decode and filter the output of a [`local.file`][] or [`remote.http`][] component to a River value.
+
+> Remember to escape double quotes when passing JSON string literals to `json_path`.
+>
+> For example, the JSON value `{"key": "value"}` is properly represented by the string `"{\"key\": \"value\"}"`.
+
+## Examples
+
+```
+> json_path("{\"key\": \"value\"}", ".key")
+["value"]
+
+> json_path("[{\"name\": \"Department\",\"value\": \"IT\"},{\"name\":\"TestStatus\",\"value\":\"Pending\"}]", "[?(@.name == \"Department\")].value")
+["IT"]
+
+> json_path("{\"key\": \"value\"}", ".nonexists")
+[]
+
+> json_path("{\"key\": \"value\"}", ".key")[0]
+value
+
+```
+
+[jsonpath]: https://goessner.net/articles/JsonPath/
+[`local.file`]: ../../components/local.file/
+[`remote.http`]: ../../components/remote.http/
diff --git a/docs/sources/reference/stdlib/nonsensitive.md b/docs/sources/reference/stdlib/nonsensitive.md
new file mode 100644
index 0000000000..bacea7271e
--- /dev/null
+++ b/docs/sources/reference/stdlib/nonsensitive.md
@@ -0,0 +1,30 @@
+---
+aliases:
+- ./reference/stdlib/nonsensitive/
+canonical: https://grafana.com/docs/alloy/latest/reference/stdlib/nonsensitive/
+description: Learn about nonsensitive
+title: nonsensitive
+---
+
+# nonsensitive
+
+`nonsensitive` converts a [secret][] value back into a string.
+
+{{< admonition type="warning" >}}
+Only use `nonsensitive` when you are positive that the value converted back to a string isn't a sensitive value.
+
+Strings resulting from calls to `nonsensitive` are displayed in plain text in the UI and internal API calls.
+{{< /admonition >}}
+
+## Examples
+
+```
+// Assuming `sensitive_value` is a secret:
+
+> sensitive_value
+(secret)
+> nonsensitive(sensitive_value)
+"Hello, world!"
+```
+
+[secret]: ../../../concepts/config-language/expressions/types_and_values/#secrets
diff --git a/docs/sources/reference/stdlib/replace.md b/docs/sources/reference/stdlib/replace.md
new file mode 100644
index 0000000000..89722e6364
--- /dev/null
+++ b/docs/sources/reference/stdlib/replace.md
@@ -0,0 +1,22 @@
+---
+aliases:
+- ./reference/stdlib/replace/
+canonical: https://grafana.com/docs/alloy/latest/reference/stdlib/replace/
+description: Learn about replace
+title: replace
+---
+
+# replace
+
+`replace` searches a string for a substring, and replaces each occurrence of the substring with a replacement string.
+
+```river
+replace(string, substring, replacement)
+```
+
+## Examples
+
+```river
+> replace("1 + 2 + 3", "+", "-")
+"1 - 2 - 3"
+```
diff --git a/docs/sources/reference/stdlib/split.md b/docs/sources/reference/stdlib/split.md
new file mode 100644
index 0000000000..c033739f90
--- /dev/null
+++ b/docs/sources/reference/stdlib/split.md
@@ -0,0 +1,28 @@
+---
+aliases:
+- ./reference/stdlib/split/
+canonical: https://grafana.com/docs/alloy/latest/reference/stdlib/split/
+description: Learn about split
+title: split
+---
+
+# split
+
+`split` produces a list by dividing a string at all occurrences of a separator.
+
+```river
+split(string, separator)
+```
+
+## Examples
+
+```river
+> split("foo,bar,baz", ",")
+["foo", "bar", "baz"]
+
+> split("foo", ",")
+["foo"]
+
+> split("", ",")
+[""]
+```
diff --git a/docs/sources/reference/stdlib/to_lower.md b/docs/sources/reference/stdlib/to_lower.md
new file mode 100644
index 0000000000..27af0825fb
--- /dev/null
+++ b/docs/sources/reference/stdlib/to_lower.md
@@ -0,0 +1,18 @@
+---
+aliases:
+- ./reference/stdlib/to_lower/
+canonical: https://grafana.com/docs/alloy/latest/reference/stdlib/to_lower/
+description: Learn about to_lower
+title: to_lower
+---
+
+# to_lower
+
+`to_lower` converts all uppercase letters in a string to lowercase.
+
+## Examples
+
+```river
+> to_lower("HELLO")
+"hello"
+```
diff --git a/docs/sources/reference/stdlib/to_upper.md b/docs/sources/reference/stdlib/to_upper.md
new file mode 100644
index 0000000000..ee8c1509bf
--- /dev/null
+++ b/docs/sources/reference/stdlib/to_upper.md
@@ -0,0 +1,18 @@
+---
+aliases:
+- ./reference/stdlib/to_upper/
+canonical: https://grafana.com/docs/alloy/latest/reference/stdlib/to_upper/
+description: Learn about to_upper
+title: to_upper
+---
+
+# to_upper
+
+`to_upper` converts all lowercase letters in a string to uppercase.
+
+## Examples
+
+```river
+> to_upper("hello")
+"HELLO"
+```
diff --git a/docs/sources/reference/stdlib/trim.md b/docs/sources/reference/stdlib/trim.md
new file mode 100644
index 0000000000..5f904df6a4
--- /dev/null
+++ b/docs/sources/reference/stdlib/trim.md
@@ -0,0 +1,28 @@
+---
+aliases:
+- ./reference/stdlib/trim/
+canonical: https://grafana.com/docs/alloy/latest/reference/stdlib/trim/
+description: Learn about trim
+title: trim
+---
+
+# trim
+
+`trim` removes the specified set of characters from the start and end of a string.
+
+```river
+trim(string, str_character_set)
+```
+
+## Examples
+
+```river
+> trim("?!hello?!", "!?")
+"hello"
+
+> trim("foobar", "far")
+"oob"
+
+> trim(" hello! world.! ", "! ")
+"hello! world."
+```
diff --git a/docs/sources/reference/stdlib/trim_prefix.md b/docs/sources/reference/stdlib/trim_prefix.md
new file mode 100644
index 0000000000..6bb900f2dd
--- /dev/null
+++ b/docs/sources/reference/stdlib/trim_prefix.md
@@ -0,0 +1,19 @@
+---
+aliases:
+- ./reference/stdlib/trim_prefix/
+canonical: https://grafana.com/docs/alloy/latest/reference/stdlib/trim_prefix/
+description: Learn about trim_prefix
+title: trim_prefix
+---
+
+# trim_prefix
+
+`trim_prefix` removes the prefix from the start of a string.
+If the string doesn't start with the prefix, the string is returned unchanged.
+ +## Examples + +```river +> trim_prefix("helloworld", "hello") +"world" +``` diff --git a/docs/sources/reference/stdlib/trim_space.md b/docs/sources/reference/stdlib/trim_space.md new file mode 100644 index 0000000000..06646e56af --- /dev/null +++ b/docs/sources/reference/stdlib/trim_space.md @@ -0,0 +1,18 @@ +--- +aliases: +- ./reference/stdlib/trim_space/ +canonical: https://grafana.com/docs/alloy/latest/reference/stdlib/trim_space/ +description: Learn about trim_space +title: trim_space +--- + +# trim_space + +`trim_space` removes any whitespace characters from the start and end of a string. + +## Examples + +```river +> trim_space(" hello\n\n") +"hello" +``` diff --git a/docs/sources/reference/stdlib/trim_suffix.md b/docs/sources/reference/stdlib/trim_suffix.md new file mode 100644 index 0000000000..d13c596230 --- /dev/null +++ b/docs/sources/reference/stdlib/trim_suffix.md @@ -0,0 +1,18 @@ +--- +aliases: +- ./reference/stdlib/trim_suffix/ +canonical: https://grafana.com/docs/alloy/latest/reference/stdlib/trim_suffix/ +description: Learn about trim_suffix +title: trim_suffix +--- + +# trim_suffix + +`trim_suffix` removes the suffix from the end of a string. + +## Examples + +```river +> trim_suffix("helloworld", "world") +"hello" +``` diff --git a/docs/sources/release-notes.md b/docs/sources/release-notes.md new file mode 100644 index 0000000000..a665a5010c --- /dev/null +++ b/docs/sources/release-notes.md @@ -0,0 +1,15 @@ +--- +canonical: https://grafana.com/docs/alloy/latest/release-notes/ +description: Release notes for Grafana Alloy +menuTitle: Release notes +title: Release notes for Grafana Alloy +weight: 999 +--- + +# Release notes for {{% param "PRODUCT_NAME" %}} + +The release notes provide information about deprecations and breaking changes in {{< param "PRODUCT_NAME" >}}. + +For a complete list of changes to {{< param "PRODUCT_ROOT_NAME" >}}, with links to pull requests and related issues when available, refer to the [Changelog][]. + +[Changelog]: https://github.com/grafana/alloy/blob/main/CHANGELOG.md \ No newline at end of file diff --git a/docs/sources/shared/deploy-agent.md b/docs/sources/shared/deploy-agent.md deleted file mode 100644 index 1799ea1745..0000000000 --- a/docs/sources/shared/deploy-agent.md +++ /dev/null @@ -1,126 +0,0 @@ ---- -aliases: -- /docs/agent/shared/deploy-agent/ -- /docs/grafana-cloud/agent/shared/deploy-agent/ -- /docs/grafana-cloud/monitor-infrastructure/agent/shared/deploy-agent/ -- /docs/grafana-cloud/monitor-infrastructure/integrations/agent/shared/deploy-agent/ -- /docs/grafana-cloud/send-data/agent/shared/deploy-agent/ -canonical: https://grafana.com/docs/agent/latest/shared/deploy-agent/ -description: Shared content, deployment topologies for Grafana Agent -headless: true -title: Deploy Grafana Agent ---- - -# Deploy Grafana Agent - -Grafana Agent is a flexible, vendor-neutral telemetry collector. This -flexibility means that Grafana Agent doesn’t enforce a specific deployment topology -but can work in multiple scenarios. - -This page lists common topologies used for deployments of Grafana Agent, when -to consider using each topology, issues you may run into, and scaling -considerations. - -## As a centralized collection service -Deploying Grafana Agent as a centralized service is recommended for -collecting application telemetry. This topology allows you to use a smaller number of agents to -coordinate service discovery, collection, and remote writing. 
- -![centralized-collection](/media/docs/agent/agent-topologies/centralized-collection.png) - -Using this topology requires deploying the Agent on separate infrastructure, -and making sure that agents can discover and reach these applications over the -network. The main predictor for the size of the agent is the number of active -metrics series it is scraping; a rule of thumb is approximately 10 KB of memory for each -series. We recommend you start looking towards horizontal scaling around the 1 million -active series mark. - -### Using Kubernetes StatefulSets -Deploying Grafana Agent as a StatefulSet is the recommended option for metrics -collection. -The persistent pod identifiers make it possible to consistently match volumes -with pods so that you can use them for the WAL directory. - -You can also use a Kubernetes deployment in cases where persistent storage is not required, such as a traces-only pipeline. - -### Pros -* Straightforward scaling using [clustering][] or [hashmod sharding][] -* Minimizes the “noisy neighbor” effect -* Easy to meta-monitor - -### Cons -* Requires running on separate infrastructure - -### Use for -* Scalable telemetry collection - -### Don’t use for -* Host-level metrics and logs - -## As a host daemon -Deploying one Grafana Agent per machine is required for collecting -machine-level metrics and logs, such as node_exporter hardware and network -metrics or journald system logs. - -![daemonset](/media/docs/agent/agent-topologies/daemonset.png) - -Each Grafana Agent requires you to open an outgoing connection for each remote endpoint -it’s shipping data to. This can lead to NAT port exhaustion on the egress -infrastructure. Each egress IP can support up to (65535 - 1024 = 64511) -outgoing connections on different ports. So, if all agents are shipping metrics -and log data, an egress IP can support up to 32,255 agents. - -### Using Kubernetes DaemonSets -The simplest use case of the host daemon topology is a Kubernetes DaemonSet, -and it is required for node-level observability (for example cAdvisor metrics) and -collecting pod logs. - -### Pros -* Doesn’t require running on separate infrastructure -* Typically leads to smaller-sized agents -* Lower network latency to instrumented applications - -### Cons -* Requires planning a process for provisioning Grafana Agent on new machines, as well as keeping configuration up to date to avoid configuration drift -* Not possible to scale agents independently when using Kubernetes DaemonSets -* Scaling the topology can strain external APIs (like service discovery) and network infrastructure (like firewalls, proxy servers, and egress points) - -### Use for -* Collecting machine-level metrics and logs (for example, node_exporter hardware metrics, Kubernetes pod logs) - -### Don’t use for -* Scenarios where Grafana Agent grows so large it can become a noisy neighbor -* Collecting an unpredictable amount of telemetry - -## As a container sidecar -Deploying Grafana Agent as a container sidecar is only recommended for -short-lived applications or specialized agent deployments. - -![daemonset](/media/docs/agent/agent-topologies/sidecar.png) - -### Using Kubernetes pod sidecars -In a Kubernetes environment, the sidecar model consists of deploying Grafana Agent -as an extra container on the pod. The pod’s controller, network configuration, -enabled capabilities, and available resources are shared between the actual -application and the sidecar agent. 
-
-### Pros
-* Doesn’t require running on separate infrastructure
-* Straightforward networking with partner applications
-
-### Cons
-* Doesn’t scale separately
-* Makes resource consumption harder to monitor and predict
-* Agents do not have a life cycle of their own, making it harder to reason about things like recovering from network outages
-
-### Use for
-* Serverless services
-* Job/batch applications that work with a push model
-* Air-gapped applications that can’t be otherwise reached over the network
-
-### Don’t use for
-* Long-lived applications
-* Scenarios where the agent size grows so large it can become a noisy neighbor
-
-[hashmod sharding]: {{< relref "../static/operation-guide/_index.md" >}}
-[clustering]: {{< relref "../flow/concepts/clustering.md" >}}
diff --git a/docs/sources/shared/deploy-alloy.md b/docs/sources/shared/deploy-alloy.md
new file mode 100644
index 0000000000..6c86f737ba
--- /dev/null
+++ b/docs/sources/shared/deploy-alloy.md
@@ -0,0 +1,123 @@
+---
+canonical: https://grafana.com/docs/alloy/latest/shared/deploy-alloy/
+description: Shared content, deployment topologies for Grafana Alloy
+headless: true
+title: Deploy Grafana Alloy
+---
+
+# Deploy {{% param "PRODUCT_NAME" %}}
+
+{{< param "PRODUCT_NAME" >}} is a flexible, vendor-neutral telemetry collector.
+This flexibility means that {{< param "PRODUCT_NAME" >}} doesn’t enforce a specific deployment topology but can work in multiple scenarios.
+
+This page lists common topologies used for deployments of {{% param "PRODUCT_NAME" %}}, when to consider using each topology, issues you may run into, and scaling considerations.
+
+## As a centralized collection service
+
+Deploying {{< param "PRODUCT_NAME" >}} as a centralized service is recommended for collecting application telemetry.
+This topology allows you to use a smaller number of {{< param "PRODUCT_NAME" >}} instances to coordinate service discovery, collection, and remote writing.
+
+![centralized-collection](/media/docs/agent/agent-topologies/centralized-collection.png)
+
+Using this topology requires deploying {{< param "PRODUCT_NAME" >}} on separate infrastructure, and making sure that each instance can discover and reach these applications over the network.
+The main predictor for the size of an instance is the number of active metrics series it is scraping; a rule of thumb is approximately 10 KB of memory for each series.
+We recommend you start looking towards horizontal scaling around the 1 million active series mark.
+
+### Using Kubernetes StatefulSets
+
+Deploying {{< param "PRODUCT_NAME" >}} as a StatefulSet is the recommended option for metrics collection.
+The persistent Pod identifiers make it possible to consistently match volumes with Pods so that you can use them for the WAL directory.
+
+You can also use a Kubernetes Deployment in cases where persistent storage isn't required, such as a traces-only pipeline.
+
+### Pros
+
+* Straightforward scaling using [clustering][] or [hashmod sharding][]
+* Minimizes the “noisy neighbor” effect
+* Easy to meta-monitor
+
+### Cons
+
+* Requires running on separate infrastructure
+
+### Use for
+
+* Scalable telemetry collection
+
+### Don’t use for
+
+* Host-level metrics and logs
+
+## As a host daemon
+
+Deploying one {{< param "PRODUCT_NAME" >}} instance per machine is required for collecting machine-level metrics and logs, such as node_exporter hardware and network metrics or journald system logs.
+
+![daemonset](/media/docs/agent/agent-topologies/daemonset.png)
+
+Each {{< param "PRODUCT_NAME" >}} instance requires you to open an outgoing connection for each remote endpoint it’s shipping data to.
+This can lead to NAT port exhaustion on the egress infrastructure.
+Each egress IP can support up to (65535 - 1024 = 64511) outgoing connections on different ports.
+So, if all {{< param "PRODUCT_NAME" >}} instances are shipping metrics and log data, an egress IP can support up to 32,255 instances.
+
+### Using Kubernetes DaemonSets
+
+The simplest use case of the host daemon topology is a Kubernetes DaemonSet, and it's required for node-level observability (for example, cAdvisor metrics) and collecting Pod logs.
+
+### Pros
+
+* Doesn’t require running on separate infrastructure
+* Typically leads to smaller-sized instances
+* Lower network latency to instrumented applications
+
+### Cons
+
+* Requires planning a process for provisioning {{< param "PRODUCT_NAME" >}} on new machines, as well as keeping configuration up to date to avoid configuration drift
+* Not possible to scale instances independently when using Kubernetes DaemonSets
+* Scaling the topology can strain external APIs (like service discovery) and network infrastructure (like firewalls, proxy servers, and egress points)
+
+### Use for
+
+* Collecting machine-level metrics and logs (for example, node_exporter hardware metrics, Kubernetes Pod logs)
+
+### Don’t use for
+
+* Scenarios where {{< param "PRODUCT_NAME" >}} grows so large it can become a noisy neighbor
+* Collecting an unpredictable amount of telemetry
+
+## As a container sidecar
+
+Deploying {{< param "PRODUCT_NAME" >}} as a container sidecar is only recommended for short-lived applications or specialized deployments.
+
+![daemonset](/media/docs/agent/agent-topologies/sidecar.png)
+
+### Using Kubernetes Pod sidecars
+
+In a Kubernetes environment, the sidecar model consists of deploying {{< param "PRODUCT_NAME" >}} as an extra container on the Pod.
+The Pod’s controller, network configuration, enabled capabilities, and available resources are shared between the actual application and the sidecar.
+
+### Pros
+
+* Doesn’t require running on separate infrastructure
+* Straightforward networking with partner applications
+
+### Cons
+
+* Doesn’t scale separately
+* Makes resource consumption harder to monitor and predict
+* {{< param "PRODUCT_NAME" >}} instances don't have a life cycle of their own, making it harder to reason about things like recovering from network outages
+
+### Use for
+
+* Serverless services
+* Job/batch applications that work with a push model
+* Air-gapped applications that can’t be otherwise reached over the network
+
+### Don’t use for
+
+* Long-lived applications
+* Scenarios where {{< param "PRODUCT_NAME" >}} grows so large it can become a noisy neighbor
+
+
+[hashmod sharding]: https://grafana.com/docs/agent/latest/static/operation-guide/
+
+[clustering]: ../../concepts/clustering/
diff --git a/docs/sources/shared/flow/reference/components/azuread-block.md b/docs/sources/shared/flow/reference/components/azuread-block.md
deleted file mode 100644
index 07d9743851..0000000000
--- a/docs/sources/shared/flow/reference/components/azuread-block.md
+++ /dev/null
@@ -1,20 +0,0 @@
----
-aliases:
-- /docs/agent/shared/flow/reference/components/azuread-block/
-- /docs/grafana-cloud/agent/shared/flow/reference/components/azuread-block/
-- /docs/grafana-cloud/monitor-infrastructure/agent/shared/flow/reference/components/azuread-block/
-- /docs/grafana-cloud/monitor-infrastructure/integrations/agent/shared/flow/reference/components/azuread-block/
-- /docs/grafana-cloud/send-data/agent/shared/flow/reference/components/azuread-block/
-canonical: https://grafana.com/docs/agent/latest/shared/flow/reference/components/azuread-block/
-description: Shared content, azuread block
-headless: true
----
-
-Name | Type | Description | Default | Required
---------|----------|------------------|-----------------|---------
-`cloud` | `string` | The Azure Cloud. | `"AzurePublic"` | no
-
-The supported values for `cloud` are:
-* `"AzurePublic"`
-* `"AzureChina"`
-* `"AzureGovernment"`
diff --git a/docs/sources/shared/flow/reference/components/exporter-component-exports.md b/docs/sources/shared/flow/reference/components/exporter-component-exports.md
deleted file mode 100644
index f1a8ca440c..0000000000
--- a/docs/sources/shared/flow/reference/components/exporter-component-exports.md
+++ /dev/null
@@ -1,24 +0,0 @@
----
-aliases:
-- /docs/agent/shared/flow/reference/components/exporter-component-exports/
-- /docs/grafana-cloud/agent/shared/flow/reference/components/exporter-component-exports/
-- /docs/grafana-cloud/monitor-infrastructure/agent/shared/flow/reference/components/exporter-component-exports/
-- /docs/grafana-cloud/monitor-infrastructure/integrations/agent/shared/flow/reference/components/exporter-component-exports/
-- /docs/grafana-cloud/send-data/agent/shared/flow/reference/components/exporter-component-exports/
-canonical: https://grafana.com/docs/agent/latest/shared/flow/reference/components/exporter-component-exports/
-description: Shared content, exporter component exports
-headless: true
----
-
-The following fields are exported and can be referenced by other components.
-
-Name | Type | Description
-----------|---------------------|----------------------------------------------------------
-`targets` | `list(map(string))` | The targets that can be used to collect exporter metrics.
-
-For example, the `targets` can either be passed to a `discovery.relabel` component to rewrite the targets' label sets or to a `prometheus.scrape` component that collects the exposed metrics.
- -The exported targets use the configured [in-memory traffic][] address specified by the [run command][]. - -[in-memory traffic]: {{< relref "../../../../flow/concepts/component_controller.md#in-memory-traffic" >}} -[run command]: {{< relref "../../../../flow/reference/cli/run.md" >}} diff --git a/docs/sources/shared/flow/reference/components/http-client-proxy-config-description.md b/docs/sources/shared/flow/reference/components/http-client-proxy-config-description.md deleted file mode 100644 index 700b0dd2cc..0000000000 --- a/docs/sources/shared/flow/reference/components/http-client-proxy-config-description.md +++ /dev/null @@ -1,23 +0,0 @@ ---- -aliases: -- /docs/agent/shared/flow/reference/components/http-client-proxy-config-description-args/ -- /docs/grafana-cloud/agent/shared/flow/reference/components/http-client-proxy-config-description-args/ -- /docs/grafana-cloud/monitor-infrastructure/agent/shared/flow/reference/components/http-client-proxy-config-description-args/ -- /docs/grafana-cloud/monitor-infrastructure/integrations/agent/shared/flow/reference/components/http-client-proxy-config-description-args/ -- /docs/grafana-cloud/send-data/agent/shared/flow/reference/components/http-client-proxy-config-description-args/ -canonical: https://grafana.com/docs/agent/latest/shared/flow/reference/components/http-client-proxy-config-description-args/ -description: Shared content, http client config description -headless: true ---- - -`no_proxy` can contain IPs, CIDR notations, and domain names. IP and domain -names can contain port numbers. `proxy_url` must be configured if `no_proxy` -is configured. - -`proxy_from_environment` uses the environment variables HTTP_PROXY, HTTPS_PROXY -and NO_PROXY (or the lowercase versions thereof). Requests use the proxy from -the environment variable matching their scheme, unless excluded by NO_PROXY. -`proxy_url` and `no_proxy` must not be configured if `proxy_from_environment` -is configured. - -`proxy_connect_header` should only be configured if `proxy_url` or `proxy_from_environment` are configured. \ No newline at end of file diff --git a/docs/sources/shared/flow/reference/components/otelcol-compression-field.md b/docs/sources/shared/flow/reference/components/otelcol-compression-field.md deleted file mode 100644 index 394cf1077c..0000000000 --- a/docs/sources/shared/flow/reference/components/otelcol-compression-field.md +++ /dev/null @@ -1,22 +0,0 @@ ---- -aliases: -- /docs/agent/shared/flow/reference/components/otelcol-compression-field/ -- /docs/grafana-cloud/agent/shared/flow/reference/components/otelcol-compression-field/ -- /docs/grafana-cloud/monitor-infrastructure/agent/shared/flow/reference/components/otelcol-compression-field/ -- /docs/grafana-cloud/monitor-infrastructure/integrations/agent/shared/flow/reference/components/otelcol-compression-field/ -- /docs/grafana-cloud/send-data/agent/shared/flow/reference/components/otelcol-compression-field/ -canonical: https://grafana.com/docs/agent/latest/shared/flow/reference/components/otelcol-compression-field/ -description: Shared content, otelcol compression field -headless: true ---- - -By default, requests are compressed with gzip. -The `compression` argument controls which compression mechanism to use. Supported strings are: - -* `"gzip"` -* `"zlib"` -* `"deflate"` -* `"snappy"` -* `"zstd"` - -If `compression` is set to `"none"` or an empty string `""`, no compression is used. 
diff --git a/docs/sources/shared/flow/stability/beta.md b/docs/sources/shared/flow/stability/beta.md deleted file mode 100644 index c337059f00..0000000000 --- a/docs/sources/shared/flow/stability/beta.md +++ /dev/null @@ -1,17 +0,0 @@ ---- -aliases: -- /docs/agent/shared/flow/stability/beta/ -- /docs/grafana-cloud/agent/shared/flow/stability/beta/ -- /docs/grafana-cloud/monitor-infrastructure/agent/shared/flow/stability/beta/ -- /docs/grafana-cloud/monitor-infrastructure/integrations/agent/shared/flow/stability/beta/ -- /docs/grafana-cloud/send-data/agent/shared/flow/stability/beta/ -canonical: https://grafana.com/docs/agent/latest/shared/flow/stability/beta/ -description: Shared content, beta -headless: true ---- - -> **BETA**: This is a [beta][] component. Beta components are subject to breaking -> changes, and may be replaced with equivalent functionality that cover the -> same use case. - -[beta]: {{< relref "../../../stability.md#beta" >}} diff --git a/docs/sources/shared/flow/stability/experimental.md b/docs/sources/shared/flow/stability/experimental.md deleted file mode 100644 index 95d0136400..0000000000 --- a/docs/sources/shared/flow/stability/experimental.md +++ /dev/null @@ -1,17 +0,0 @@ ---- -aliases: -- /docs/agent/shared/flow/stability/experimental/ -- /docs/grafana-cloud/agent/shared/flow/stability/experimental/ -- /docs/grafana-cloud/monitor-infrastructure/agent/shared/flow/stability/experimental/ -- /docs/grafana-cloud/monitor-infrastructure/integrations/agent/shared/flow/stability/experimental/ -- /docs/grafana-cloud/send-data/agent/shared/flow/stability/experimental/ -canonical: https://grafana.com/docs/agent/latest/shared/flow/stability/experimental/ -description: Shared content, experimental -headless: true ---- - -> **EXPERIMENTAL**: This is an [experimental][] component. Experimental -> components are subject to frequent breaking changes, and may be removed with -> no equivalent replacement. 
- -[experimental]: {{< relref "../../../stability.md#experimental" >}} diff --git a/docs/sources/shared/index.md b/docs/sources/shared/index.md index 8b1094f12e..c061db6fd4 100644 --- a/docs/sources/shared/index.md +++ b/docs/sources/shared/index.md @@ -1,8 +1,5 @@ --- -aliases: -- /docs/grafana-cloud/monitor-infrastructure/agent/shared/ -- /docs/grafana-cloud/send-data/agent/shared/ -canonical: https://grafana.com/docs/agent/latest/shared/ +canonical: https://grafana.com/docs/alloy/latest/shared/ description: Shared content headless: true --- diff --git a/docs/sources/shared/flow/reference/components/authorization-block.md b/docs/sources/shared/reference/components/authorization-block.md similarity index 53% rename from docs/sources/shared/flow/reference/components/authorization-block.md rename to docs/sources/shared/reference/components/authorization-block.md index 11a74326f9..2c1f8a4354 100644 --- a/docs/sources/shared/flow/reference/components/authorization-block.md +++ b/docs/sources/shared/reference/components/authorization-block.md @@ -1,11 +1,5 @@ --- -aliases: -- /docs/agent/shared/flow/reference/components/authorization-block/ -- /docs/grafana-cloud/agent/shared/flow/reference/components/authorization-block/ -- /docs/grafana-cloud/monitor-infrastructure/agent/shared/flow/reference/components/authorization-block/ -- /docs/grafana-cloud/monitor-infrastructure/integrations/agent/shared/flow/reference/components/authorization-block/ -- /docs/grafana-cloud/send-data/agent/shared/flow/reference/components/authorization-block/ -canonical: https://grafana.com/docs/agent/latest/shared/flow/reference/components/authorization-block/ +canonical: https://grafana.com/docs/alloy/latest/shared/reference/components/authorization-block/ description: Shared content, authorization block headless: true --- diff --git a/docs/sources/shared/reference/components/azuread-block.md b/docs/sources/shared/reference/components/azuread-block.md new file mode 100644 index 0000000000..461402a5c9 --- /dev/null +++ b/docs/sources/shared/reference/components/azuread-block.md @@ -0,0 +1,14 @@ +--- +canonical: https://grafana.com/docs/alloy/latest/shared/reference/components/azuread-block/ +description: Shared content, azuread block +headless: true +--- + +Name | Type | Description | Default | Required +--------|----------|------------------|-----------------|--------- +`cloud` | `string` | The Azure Cloud. 
| `"AzurePublic"` | no + +The supported values for `cloud` are: +* `"AzurePublic"` +* `"AzureChina"` +* `"AzureGovernment"` diff --git a/docs/sources/shared/flow/reference/components/basic-auth-block.md b/docs/sources/shared/reference/components/basic-auth-block.md similarity index 52% rename from docs/sources/shared/flow/reference/components/basic-auth-block.md rename to docs/sources/shared/reference/components/basic-auth-block.md index 62f7e0a25d..8ff77ae4e6 100644 --- a/docs/sources/shared/flow/reference/components/basic-auth-block.md +++ b/docs/sources/shared/reference/components/basic-auth-block.md @@ -1,11 +1,5 @@ --- -aliases: -- /docs/agent/shared/flow/reference/components/basic-auth-block/ -- /docs/grafana-cloud/agent/shared/flow/reference/components/basic-auth-block/ -- /docs/grafana-cloud/monitor-infrastructure/agent/shared/flow/reference/components/basic-auth-block/ -- /docs/grafana-cloud/monitor-infrastructure/integrations/agent/shared/flow/reference/components/basic-auth-block/ -- /docs/grafana-cloud/send-data/agent/shared/flow/reference/components/basic-auth-block/ -canonical: https://grafana.com/docs/agent/latest/shared/flow/reference/components/basic-auth-block/ +canonical: https://grafana.com/docs/alloy/latest/shared/reference/components/basic-auth-block/ description: Shared content, basic auth block headless: true --- diff --git a/docs/sources/shared/reference/components/exporter-component-exports.md b/docs/sources/shared/reference/components/exporter-component-exports.md new file mode 100644 index 0000000000..56867bfd0b --- /dev/null +++ b/docs/sources/shared/reference/components/exporter-component-exports.md @@ -0,0 +1,18 @@ +--- +canonical: https://grafana.com/docs/alloy/latest/shared/reference/components/exporter-component-exports/ +description: Shared content, exporter component exports +headless: true +--- + +The following fields are exported and can be referenced by other components. + +Name | Type | Description +----------|---------------------|---------------------------------------------------------- +`targets` | `list(map(string))` | The targets that can be used to collect exporter metrics. + +For example, the `targets` can either be passed to a `discovery.relabel` component to rewrite the targets' label sets or to a `prometheus.scrape` component that collects the exposed metrics. + +The exported targets use the configured [in-memory traffic][] address specified by the [run command][]. 
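+
+For illustration, assuming a `prometheus.exporter.unix` component (the component choice and labels are placeholders; any exporter works the same way), the exported `targets` could be wired into a scrape component as follows:
+
+```river
+prometheus.exporter.unix "example" { }
+
+prometheus.scrape "example" {
+  // Collect the metrics exposed by the exporter above.
+  // The remote_write component referenced here is a placeholder.
+  targets    = prometheus.exporter.unix.example.targets
+  forward_to = [prometheus.remote_write.default.receiver]
+}
+```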
+ +[in-memory traffic]: ../../../concepts/component_controller/#in-memory-traffic +[run command]: ../../../reference/cli/run/ diff --git a/docs/sources/shared/flow/reference/components/extract-field-block.md b/docs/sources/shared/reference/components/extract-field-block.md similarity index 78% rename from docs/sources/shared/flow/reference/components/extract-field-block.md rename to docs/sources/shared/reference/components/extract-field-block.md index 207f2bc605..5946439435 100644 --- a/docs/sources/shared/flow/reference/components/extract-field-block.md +++ b/docs/sources/shared/reference/components/extract-field-block.md @@ -1,11 +1,5 @@ --- -aliases: -- /docs/agent/shared/flow/reference/components/extract-field-block/ -- /docs/grafana-cloud/agent/shared/flow/reference/components/extract-field-block/ -- /docs/grafana-cloud/monitor-infrastructure/agent/shared/flow/reference/components/extract-field-block/ -- /docs/grafana-cloud/monitor-infrastructure/integrations/agent/shared/flow/reference/components/extract-field-block/ -- /docs/grafana-cloud/send-data/agent/shared/flow/reference/components/extract-field-block/ -canonical: https://grafana.com/docs/agent/latest/shared/flow/reference/components/extract-field-block/ +canonical: https://grafana.com/docs/alloy/latest/shared/reference/components/extract-field-block/ description: Shared content, extract field block headless: true --- diff --git a/docs/sources/shared/flow/reference/components/field-filter-block.md b/docs/sources/shared/reference/components/field-filter-block.md similarity index 58% rename from docs/sources/shared/flow/reference/components/field-filter-block.md rename to docs/sources/shared/reference/components/field-filter-block.md index 266af75980..f9d862a871 100644 --- a/docs/sources/shared/flow/reference/components/field-filter-block.md +++ b/docs/sources/shared/reference/components/field-filter-block.md @@ -1,12 +1,5 @@ --- -aliases: -- /docs/agent/shared/flow/reference/components/filter-field-block/ -- /docs/grafana-cloud/agent/shared/flow/reference/components/filter-field-block/ -- /docs/grafana-cloud/monitor-infrastructure/agent/shared/flow/reference/components/field-filter-block/ -- /docs/grafana-cloud/monitor-infrastructure/agent/shared/flow/reference/components/filter-field-block/ -- /docs/grafana-cloud/monitor-infrastructure/integrations/agent/shared/flow/reference/components/filter-field-block/ -- /docs/grafana-cloud/send-data/agent/shared/flow/reference/components/field-filter-block/ -canonical: https://grafana.com/docs/agent/latest/shared/flow/reference/components/filter-field-block/ +canonical: https://grafana.com/docs/alloy/latest/shared/reference/components/filter-field-block/ description: Shared content, filter field block headless: true --- diff --git a/docs/sources/shared/flow/reference/components/http-client-config-block.md b/docs/sources/shared/reference/components/http-client-config-block.md similarity index 67% rename from docs/sources/shared/flow/reference/components/http-client-config-block.md rename to docs/sources/shared/reference/components/http-client-config-block.md index a115d031b2..8a41b288d1 100644 --- a/docs/sources/shared/flow/reference/components/http-client-config-block.md +++ b/docs/sources/shared/reference/components/http-client-config-block.md @@ -1,11 +1,5 @@ --- -aliases: -- /docs/agent/shared/flow/reference/components/http-client-config-block/ -- /docs/grafana-cloud/agent/shared/flow/reference/components/http-client-config-block/ -- 
/docs/grafana-cloud/monitor-infrastructure/agent/shared/flow/reference/components/http-client-config-block/ -- /docs/grafana-cloud/monitor-infrastructure/integrations/agent/shared/flow/reference/components/http-client-config-block/ -- /docs/grafana-cloud/send-data/agent/shared/flow/reference/components/http-client-config-block/ -canonical: https://grafana.com/docs/agent/latest/shared/flow/reference/components/http-client-config-block/ +canonical: https://grafana.com/docs/alloy/latest/shared/reference/components/http-client-config-block/ description: Shared content, http client config block headless: true --- @@ -23,4 +17,4 @@ Name | Type | Description `bearer_token`, `bearer_token_file`, `basic_auth`, `authorization`, and `oauth2` are mutually exclusive, and only one can be provided inside of a `http_client_config` block. -{{< docs/shared lookup="flow/reference/components/http-client-proxy-config-description.md" source="agent" version="" >}} \ No newline at end of file +{{< docs/shared lookup="reference/components/http-client-proxy-config-description.md" source="alloy" version="" >}} diff --git a/docs/sources/shared/reference/components/http-client-proxy-config-description.md b/docs/sources/shared/reference/components/http-client-proxy-config-description.md new file mode 100644 index 0000000000..5da832a3a6 --- /dev/null +++ b/docs/sources/shared/reference/components/http-client-proxy-config-description.md @@ -0,0 +1,14 @@ +--- +canonical: https://grafana.com/docs/alloy/latest/shared/reference/components/http-client-proxy-config-description-args/ +description: Shared content, http client config description +headless: true +--- + +`no_proxy` can contain IPs, CIDR notations, and domain names. IP and domain names can contain port numbers. +`proxy_url` must be configured if `no_proxy` is configured. + +`proxy_from_environment` uses the environment variables HTTP_PROXY, HTTPS_PROXY and NO_PROXY (or the lowercase versions thereof). +Requests use the proxy from the environment variable matching their scheme, unless excluded by NO_PROXY. +`proxy_url` and `no_proxy` must not be configured if `proxy_from_environment` is configured. + +`proxy_connect_header` should only be configured if `proxy_url` or `proxy_from_environment` are configured. 
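+
+For illustration, a minimal sketch of these arguments in an HTTP client configuration (the component choice, URLs, and hosts are placeholders):
+
+```river
+prometheus.remote_write "default" {
+  endpoint {
+    url = "https://prometheus.example.com/api/v1/write"
+
+    // Route requests through an explicit proxy, except for hosts matched by no_proxy.
+    proxy_url = "http://proxy.internal.example:3128"
+    no_proxy  = "127.0.0.1,.internal.example"
+  }
+}
+```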
diff --git a/docs/sources/shared/flow/reference/components/local-file-arguments-text.md b/docs/sources/shared/reference/components/local-file-arguments-text.md similarity index 64% rename from docs/sources/shared/flow/reference/components/local-file-arguments-text.md rename to docs/sources/shared/reference/components/local-file-arguments-text.md index 4b83c2291c..ccae890132 100644 --- a/docs/sources/shared/flow/reference/components/local-file-arguments-text.md +++ b/docs/sources/shared/reference/components/local-file-arguments-text.md @@ -1,11 +1,5 @@ --- -aliases: -- /docs/agent/shared/flow/reference/components/local-file-arguments-text/ -- /docs/grafana-cloud/agent/shared/flow/reference/components/local-file-arguments-text/ -- /docs/grafana-cloud/monitor-infrastructure/agent/shared/flow/reference/components/local-file-arguments-text/ -- /docs/grafana-cloud/monitor-infrastructure/integrations/agent/shared/flow/reference/components/local-file-arguments-text/ -- /docs/grafana-cloud/send-data/agent/shared/flow/reference/components/local-file-arguments-text/ -canonical: https://grafana.com/docs/agent/latest/shared/flow/reference/components/local-file-arguments-text/ +canonical: https://grafana.com/docs/alloy/latest/shared/reference/components/local-file-arguments-text/ description: Shared content, local file arguments text headless: true --- diff --git a/docs/sources/shared/flow/reference/components/loki-server-grpc.md b/docs/sources/shared/reference/components/loki-server-grpc.md similarity index 80% rename from docs/sources/shared/flow/reference/components/loki-server-grpc.md rename to docs/sources/shared/reference/components/loki-server-grpc.md index ffb0081ec3..9cfca25b99 100644 --- a/docs/sources/shared/flow/reference/components/loki-server-grpc.md +++ b/docs/sources/shared/reference/components/loki-server-grpc.md @@ -1,11 +1,5 @@ --- -aliases: -- /docs/agent/shared/flow/reference/components/loki-server-grpc/ -- /docs/grafana-cloud/agent/shared/flow/reference/components/loki-server-grpc/ -- /docs/grafana-cloud/monitor-infrastructure/agent/shared/flow/reference/components/loki-server-grpc/ -- /docs/grafana-cloud/monitor-infrastructure/integrations/agent/shared/flow/reference/components/loki-server-grpc/ -- /docs/grafana-cloud/send-data/agent/shared/flow/reference/components/loki-server-grpc/ -canonical: https://grafana.com/docs/agent/latest/shared/flow/reference/components/loki-server-grpc/ +canonical: https://grafana.com/docs/alloy/latest/shared/reference/components/loki-server-grpc/ description: Shared content, loki server grpc headless: true --- diff --git a/docs/sources/shared/flow/reference/components/loki-server-http.md b/docs/sources/shared/reference/components/loki-server-http.md similarity index 73% rename from docs/sources/shared/flow/reference/components/loki-server-http.md rename to docs/sources/shared/reference/components/loki-server-http.md index a418dbd892..e0510dcd70 100644 --- a/docs/sources/shared/flow/reference/components/loki-server-http.md +++ b/docs/sources/shared/reference/components/loki-server-http.md @@ -1,11 +1,5 @@ --- -aliases: -- /docs/agent/shared/flow/reference/components/loki-server-http/ -- /docs/grafana-cloud/agent/shared/flow/reference/components/loki-server-http/ -- /docs/grafana-cloud/monitor-infrastructure/agent/shared/flow/reference/components/loki-server-http/ -- /docs/grafana-cloud/monitor-infrastructure/integrations/agent/shared/flow/reference/components/loki-server-http/ -- 
/docs/grafana-cloud/send-data/agent/shared/flow/reference/components/loki-server-http/ -canonical: https://grafana.com/docs/agent/latest/shared/flow/reference/components/loki-server-http/ +canonical: https://grafana.com/docs/alloy/latest/shared/reference/components/loki-server-http/ description: Shared content, loki server http headless: true --- diff --git a/docs/sources/shared/flow/reference/components/managed_identity-block.md b/docs/sources/shared/reference/components/managed_identity-block.md similarity index 55% rename from docs/sources/shared/flow/reference/components/managed_identity-block.md rename to docs/sources/shared/reference/components/managed_identity-block.md index 2e51a03050..fe255aa3de 100644 --- a/docs/sources/shared/flow/reference/components/managed_identity-block.md +++ b/docs/sources/shared/reference/components/managed_identity-block.md @@ -1,11 +1,5 @@ --- -aliases: -- /docs/agent/shared/flow/reference/components/managed_identity-block/ -- /docs/grafana-cloud/agent/shared/flow/reference/components/managed_identity-block/ -- /docs/grafana-cloud/monitor-infrastructure/agent/shared/flow/reference/components/managed_identity-block/ -- /docs/grafana-cloud/monitor-infrastructure/integrations/agent/shared/flow/reference/components/managed_identity-block/ -- /docs/grafana-cloud/send-data/agent/shared/flow/reference/components/managed_identity-block/ -canonical: https://grafana.com/docs/agent/latest/shared/flow/reference/components/managed_identity-block/ +canonical: https://grafana.com/docs/alloy/latest/shared/reference/components/managed_identity-block/ description: Shared content, managed_identity block headless: true --- diff --git a/docs/sources/shared/flow/reference/components/match-properties-block.md b/docs/sources/shared/reference/components/match-properties-block.md similarity index 71% rename from docs/sources/shared/flow/reference/components/match-properties-block.md rename to docs/sources/shared/reference/components/match-properties-block.md index 863f4e406b..0b16d7042e 100644 --- a/docs/sources/shared/flow/reference/components/match-properties-block.md +++ b/docs/sources/shared/reference/components/match-properties-block.md @@ -1,11 +1,5 @@ --- -aliases: -- /docs/agent/shared/flow/reference/components/match-properties-block/ -- /docs/grafana-cloud/agent/shared/flow/reference/components/match-properties-block/ -- /docs/grafana-cloud/monitor-infrastructure/agent/shared/flow/reference/components/match-properties-block/ -- /docs/grafana-cloud/monitor-infrastructure/integrations/agent/shared/flow/reference/components/match-properties-block/ -- /docs/grafana-cloud/send-data/agent/shared/flow/reference/components/match-properties-block/ -canonical: https://grafana.com/docs/agent/latest/shared/flow/reference/components/match-properties-block/ +canonical: https://grafana.com/docs/alloy/latest/shared/reference/components/match-properties-block/ description: Shared content, match properties block headless: true --- diff --git a/docs/sources/shared/flow/reference/components/oauth2-block.md b/docs/sources/shared/reference/components/oauth2-block.md similarity index 72% rename from docs/sources/shared/flow/reference/components/oauth2-block.md rename to docs/sources/shared/reference/components/oauth2-block.md index bba91c84a7..75515bbecf 100644 --- a/docs/sources/shared/flow/reference/components/oauth2-block.md +++ b/docs/sources/shared/reference/components/oauth2-block.md @@ -1,11 +1,5 @@ --- -aliases: -- /docs/agent/shared/flow/reference/components/oauth2-block/ -- 
/docs/grafana-cloud/agent/shared/flow/reference/components/oauth2-block/ -- /docs/grafana-cloud/monitor-infrastructure/agent/shared/flow/reference/components/oauth2-block/ -- /docs/grafana-cloud/monitor-infrastructure/integrations/agent/shared/flow/reference/components/oauth2-block/ -- /docs/grafana-cloud/send-data/agent/shared/flow/reference/components/oauth2-block/ -canonical: https://grafana.com/docs/agent/latest/shared/flow/reference/components/oauth2-block/ +canonical: https://grafana.com/docs/alloy/latest/shared/reference/components/oauth2-block/ description: Shared content, oauth2 block headless: true --- @@ -27,4 +21,4 @@ Name | Type | Description The `oauth2` block may also contain a separate `tls_config` sub-block. -{{< docs/shared lookup="flow/reference/components/http-client-proxy-config-description.md" source="agent" version="" >}} \ No newline at end of file +{{< docs/shared lookup="reference/components/http-client-proxy-config-description.md" source="alloy" version="" >}} diff --git a/docs/sources/shared/reference/components/otelcol-compression-field.md b/docs/sources/shared/reference/components/otelcol-compression-field.md new file mode 100644 index 0000000000..2ae80b4387 --- /dev/null +++ b/docs/sources/shared/reference/components/otelcol-compression-field.md @@ -0,0 +1,16 @@ +--- +canonical: https://grafana.com/docs/alloy/latest/shared/reference/components/otelcol-compression-field/ +description: Shared content, otelcol compression field +headless: true +--- + +By default, requests are compressed with gzip. +The `compression` argument controls which compression mechanism to use. Supported strings are: + +* `"gzip"` +* `"zlib"` +* `"deflate"` +* `"snappy"` +* `"zstd"` + +If `compression` is set to `"none"` or an empty string `""`, no compression is used. 
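+
+For example, a sketch of selecting a different codec in an `otelcol.exporter.otlp` client (the endpoint is a placeholder):
+
+```river
+otelcol.exporter.otlp "default" {
+  client {
+    endpoint = "collector.example.com:4317"
+
+    // Override the default gzip compression with zstd.
+    compression = "zstd"
+  }
+}
+```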
diff --git a/docs/sources/shared/flow/reference/components/otelcol-debug-metrics-block.md b/docs/sources/shared/reference/components/otelcol-debug-metrics-block.md similarity index 59% rename from docs/sources/shared/flow/reference/components/otelcol-debug-metrics-block.md rename to docs/sources/shared/reference/components/otelcol-debug-metrics-block.md index 2997d8c140..704c6e2776 100644 --- a/docs/sources/shared/flow/reference/components/otelcol-debug-metrics-block.md +++ b/docs/sources/shared/reference/components/otelcol-debug-metrics-block.md @@ -1,11 +1,5 @@ --- -aliases: -- /docs/agent/shared/flow/reference/components/otelcol-debug-metrics-block/ -- /docs/grafana-cloud/agent/shared/flow/reference/components/otelcol-debug-metrics-block/ -- /docs/grafana-cloud/monitor-infrastructure/agent/shared/flow/reference/components/otelcol-debug-metrics-block/ -- /docs/grafana-cloud/monitor-infrastructure/integrations/agent/shared/flow/reference/components/otelcol-debug-metrics-block/ -- /docs/grafana-cloud/send-data/agent/shared/flow/reference/components/otelcol-debug-metrics-block/ -canonical: https://grafana.com/docs/agent/latest/shared/flow/reference/components/otelcol-debug-metrics-block/ +canonical: https://grafana.com/docs/alloy/latest/shared/reference/components/otelcol-debug-metrics-block/ description: Shared content, otelcol debug metrics block headless: true --- diff --git a/docs/sources/shared/flow/reference/components/otelcol-filter-attribute-block.md b/docs/sources/shared/reference/components/otelcol-filter-attribute-block.md similarity index 59% rename from docs/sources/shared/flow/reference/components/otelcol-filter-attribute-block.md rename to docs/sources/shared/reference/components/otelcol-filter-attribute-block.md index b4226ada23..c939fe0188 100644 --- a/docs/sources/shared/flow/reference/components/otelcol-filter-attribute-block.md +++ b/docs/sources/shared/reference/components/otelcol-filter-attribute-block.md @@ -1,10 +1,4 @@ --- -aliases: -- /docs/agent/shared/flow/reference/components/otelcol-filter-attribute-block/ -- /docs/grafana-cloud/agent/shared/flow/reference/components/otelcol-filter-attribute-block/ -- /docs/grafana-cloud/monitor-infrastructure/agent/shared/flow/reference/components/otelcol-filter-attribute-block/ -- /docs/grafana-cloud/monitor-infrastructure/integrations/agent/shared/flow/reference/components/otelcol-filter-attribute-block/ -- /docs/grafana-cloud/send-data/agent/shared/flow/reference/components/otelcol-filter-attribute-block/ description: Shared content, otelcol filter attribute block headless: true --- diff --git a/docs/sources/shared/flow/reference/components/otelcol-filter-library-block.md b/docs/sources/shared/reference/components/otelcol-filter-library-block.md similarity index 59% rename from docs/sources/shared/flow/reference/components/otelcol-filter-library-block.md rename to docs/sources/shared/reference/components/otelcol-filter-library-block.md index b2d4b5ddac..2467e466b1 100644 --- a/docs/sources/shared/flow/reference/components/otelcol-filter-library-block.md +++ b/docs/sources/shared/reference/components/otelcol-filter-library-block.md @@ -1,10 +1,4 @@ --- -aliases: -- /docs/agent/shared/flow/reference/components/otelcol-filter-library-block/ -- /docs/grafana-cloud/agent/shared/flow/reference/components/otelcol-filter-library-block/ -- /docs/grafana-cloud/monitor-infrastructure/agent/shared/flow/reference/components/otelcol-filter-library-block/ -- 
/docs/grafana-cloud/monitor-infrastructure/integrations/agent/shared/flow/reference/components/otelcol-filter-library-block/ -- /docs/grafana-cloud/send-data/agent/shared/flow/reference/components/otelcol-filter-library-block/ description: Shared content, otelcol filter library block headless: true --- diff --git a/docs/sources/shared/flow/reference/components/otelcol-filter-log-severity-block.md b/docs/sources/shared/reference/components/otelcol-filter-log-severity-block.md similarity index 72% rename from docs/sources/shared/flow/reference/components/otelcol-filter-log-severity-block.md rename to docs/sources/shared/reference/components/otelcol-filter-log-severity-block.md index 35633a4d6b..c0a5b02daf 100644 --- a/docs/sources/shared/flow/reference/components/otelcol-filter-log-severity-block.md +++ b/docs/sources/shared/reference/components/otelcol-filter-log-severity-block.md @@ -1,10 +1,4 @@ --- -aliases: -- /docs/agent/shared/flow/reference/components/otelcol-filter-log-severity-block/ -- /docs/grafana-cloud/agent/shared/flow/reference/components/otelcol-filter-log-severity-block/ -- /docs/grafana-cloud/monitor-infrastructure/agent/shared/flow/reference/components/otelcol-filter-log-severity-block/ -- /docs/grafana-cloud/monitor-infrastructure/integrations/agent/shared/flow/reference/components/otelcol-filter-log-severity-block/ -- /docs/grafana-cloud/send-data/agent/shared/flow/reference/components/otelcol-filter-log-severity-block/ description: Shared content, otelcol filter log severity block headless: true --- diff --git a/docs/sources/shared/flow/reference/components/otelcol-filter-regexp-block.md b/docs/sources/shared/reference/components/otelcol-filter-regexp-block.md similarity index 64% rename from docs/sources/shared/flow/reference/components/otelcol-filter-regexp-block.md rename to docs/sources/shared/reference/components/otelcol-filter-regexp-block.md index d265dffc60..706de5f28d 100644 --- a/docs/sources/shared/flow/reference/components/otelcol-filter-regexp-block.md +++ b/docs/sources/shared/reference/components/otelcol-filter-regexp-block.md @@ -1,10 +1,4 @@ --- -aliases: -- /docs/agent/shared/flow/reference/components/otelcol-filter-regexp-block/ -- /docs/grafana-cloud/agent/shared/flow/reference/components/otelcol-filter-regexp-block/ -- /docs/grafana-cloud/monitor-infrastructure/agent/shared/flow/reference/components/otelcol-filter-regexp-block/ -- /docs/grafana-cloud/monitor-infrastructure/integrations/agent/shared/flow/reference/components/otelcol-filter-regexp-block/ -- /docs/grafana-cloud/send-data/agent/shared/flow/reference/components/otelcol-filter-regexp-block/ description: Shared content, otelcol filter regexp block headless: true --- diff --git a/docs/sources/shared/flow/reference/components/otelcol-filter-resource-block.md b/docs/sources/shared/reference/components/otelcol-filter-resource-block.md similarity index 58% rename from docs/sources/shared/flow/reference/components/otelcol-filter-resource-block.md rename to docs/sources/shared/reference/components/otelcol-filter-resource-block.md index 446bdac82a..ec15864e79 100644 --- a/docs/sources/shared/flow/reference/components/otelcol-filter-resource-block.md +++ b/docs/sources/shared/reference/components/otelcol-filter-resource-block.md @@ -1,10 +1,4 @@ --- -aliases: -- /docs/agent/shared/flow/reference/components/otelcol-filter-resource-block/ -- /docs/grafana-cloud/agent/shared/flow/reference/components/otelcol-filter-resource-block/ -- 
/docs/grafana-cloud/monitor-infrastructure/agent/shared/flow/reference/components/otelcol-filter-resource-block/ -- /docs/grafana-cloud/monitor-infrastructure/integrations/agent/shared/flow/reference/components/otelcol-filter-resource-block/ -- /docs/grafana-cloud/send-data/agent/shared/flow/reference/components/otelcol-filter-resource-block/ description: Shared content, otelcol filter resource block headless: true --- diff --git a/docs/sources/shared/flow/reference/components/otelcol-grpc-authority.md b/docs/sources/shared/reference/components/otelcol-grpc-authority.md similarity index 59% rename from docs/sources/shared/flow/reference/components/otelcol-grpc-authority.md rename to docs/sources/shared/reference/components/otelcol-grpc-authority.md index 15642a0b91..a3905820ad 100644 --- a/docs/sources/shared/flow/reference/components/otelcol-grpc-authority.md +++ b/docs/sources/shared/reference/components/otelcol-grpc-authority.md @@ -1,10 +1,4 @@ --- -aliases: -- /docs/agent/shared/flow/reference/components/otelcol-grpc-authority/ -- /docs/grafana-cloud/agent/shared/flow/reference/components/otelcol-grpc-authority/ -- /docs/grafana-cloud/monitor-infrastructure/agent/shared/flow/reference/components/otelcol-grpc-authority/ -- /docs/grafana-cloud/monitor-infrastructure/integrations/agent/shared/flow/reference/components/otelcol-grpc-authority/ -- /docs/grafana-cloud/send-data/agent/shared/flow/reference/components/otelcol-grpc-authority/ description: Shared content, otelcol grpc authority headless: true --- diff --git a/docs/sources/shared/flow/reference/components/otelcol-grpc-balancer-name.md b/docs/sources/shared/reference/components/otelcol-grpc-balancer-name.md similarity index 60% rename from docs/sources/shared/flow/reference/components/otelcol-grpc-balancer-name.md rename to docs/sources/shared/reference/components/otelcol-grpc-balancer-name.md index 4c30602ad4..bcff954555 100644 --- a/docs/sources/shared/flow/reference/components/otelcol-grpc-balancer-name.md +++ b/docs/sources/shared/reference/components/otelcol-grpc-balancer-name.md @@ -1,10 +1,4 @@ --- -aliases: -- /docs/agent/shared/flow/reference/components/otelcol-grpc-balancer-name/ -- /docs/grafana-cloud/agent/shared/flow/reference/components/otelcol-grpc-balancer-name/ -- /docs/grafana-cloud/monitor-infrastructure/agent/shared/flow/reference/components/otelcol-grpc-balancer-name/ -- /docs/grafana-cloud/monitor-infrastructure/integrations/agent/shared/flow/reference/components/otelcol-grpc-balancer-name/ -- /docs/grafana-cloud/send-data/agent/shared/flow/reference/components/otelcol-grpc-balancer-name/ description: Shared content, otelcol grpc balancer name headless: true --- diff --git a/docs/sources/shared/flow/reference/components/otelcol-queue-block.md b/docs/sources/shared/reference/components/otelcol-queue-block.md similarity index 72% rename from docs/sources/shared/flow/reference/components/otelcol-queue-block.md rename to docs/sources/shared/reference/components/otelcol-queue-block.md index 09bbf8205a..a7fbde5804 100644 --- a/docs/sources/shared/flow/reference/components/otelcol-queue-block.md +++ b/docs/sources/shared/reference/components/otelcol-queue-block.md @@ -1,11 +1,5 @@ --- -aliases: -- /docs/agent/shared/flow/reference/components/otelcol-queue-block/ -- /docs/grafana-cloud/agent/shared/flow/reference/components/otelcol-queue-block/ -- /docs/grafana-cloud/monitor-infrastructure/agent/shared/flow/reference/components/otelcol-queue-block/ -- 
/docs/grafana-cloud/monitor-infrastructure/integrations/agent/shared/flow/reference/components/otelcol-queue-block/ -- /docs/grafana-cloud/send-data/agent/shared/flow/reference/components/otelcol-queue-block/ -canonical: https://grafana.com/docs/agent/latest/shared/flow/reference/components/otelcol-queue-block/ +canonical: https://grafana.com/docs/alloy/latest/shared/reference/components/otelcol-queue-block/ description: Shared content, otelcol queue block headless: true --- diff --git a/docs/sources/shared/flow/reference/components/otelcol-retry-block.md b/docs/sources/shared/reference/components/otelcol-retry-block.md similarity index 77% rename from docs/sources/shared/flow/reference/components/otelcol-retry-block.md rename to docs/sources/shared/reference/components/otelcol-retry-block.md index 546947f12c..95900714c3 100644 --- a/docs/sources/shared/flow/reference/components/otelcol-retry-block.md +++ b/docs/sources/shared/reference/components/otelcol-retry-block.md @@ -1,11 +1,5 @@ --- -aliases: -- /docs/agent/shared/flow/reference/components/otelcol-retry-block/ -- /docs/grafana-cloud/agent/shared/flow/reference/components/otelcol-retry-block/ -- /docs/grafana-cloud/monitor-infrastructure/agent/shared/flow/reference/components/otelcol-retry-block/ -- /docs/grafana-cloud/monitor-infrastructure/integrations/agent/shared/flow/reference/components/otelcol-retry-block/ -- /docs/grafana-cloud/send-data/agent/shared/flow/reference/components/otelcol-retry-block/ -canonical: https://grafana.com/docs/agent/latest/shared/flow/reference/components/otelcol-retry-block/ +canonical: https://grafana.com/docs/alloy/latest/shared/reference/components/otelcol-retry-block/ description: Shared content, otelcol retry block headless: true --- diff --git a/docs/sources/shared/flow/reference/components/otelcol-tls-config-block.md b/docs/sources/shared/reference/components/otelcol-tls-config-block.md similarity index 77% rename from docs/sources/shared/flow/reference/components/otelcol-tls-config-block.md rename to docs/sources/shared/reference/components/otelcol-tls-config-block.md index caf4d45001..a1d0086043 100644 --- a/docs/sources/shared/flow/reference/components/otelcol-tls-config-block.md +++ b/docs/sources/shared/reference/components/otelcol-tls-config-block.md @@ -1,11 +1,5 @@ --- -aliases: -- /docs/agent/shared/flow/reference/components/otelcol-tls-config-block/ -- /docs/grafana-cloud/agent/shared/flow/reference/components/otelcol-tls-config-block/ -- /docs/grafana-cloud/monitor-infrastructure/agent/shared/flow/reference/components/otelcol-tls-config-block/ -- /docs/grafana-cloud/monitor-infrastructure/integrations/agent/shared/flow/reference/components/otelcol-tls-config-block/ -- /docs/grafana-cloud/send-data/agent/shared/flow/reference/components/otelcol-tls-config-block/ -canonical: https://grafana.com/docs/agent/latest/shared/flow/reference/components/otelcol-tls-config-block/ +canonical: https://grafana.com/docs/alloy/latest/shared/reference/components/otelcol-tls-config-block/ description: Shared content, otelcol tls config block headless: true --- diff --git a/docs/sources/shared/flow/reference/components/output-block-logs.md b/docs/sources/shared/reference/components/output-block-logs.md similarity index 55% rename from docs/sources/shared/flow/reference/components/output-block-logs.md rename to docs/sources/shared/reference/components/output-block-logs.md index fbdc891362..f340cc42dc 100644 --- a/docs/sources/shared/flow/reference/components/output-block-logs.md +++ 
b/docs/sources/shared/reference/components/output-block-logs.md @@ -1,11 +1,5 @@ --- -aliases: -- ../../otelcol/output-block-logs/ -- /docs/grafana-cloud/agent/shared/flow/reference/components/output-block-logs/ -- /docs/grafana-cloud/monitor-infrastructure/agent/shared/flow/reference/components/output-block-logs/ -- /docs/grafana-cloud/monitor-infrastructure/integrations/agent/shared/flow/reference/components/output-block-logs/ -- /docs/grafana-cloud/send-data/agent/shared/flow/reference/components/output-block-logs/ -canonical: https://grafana.com/docs/agent/latest/shared/flow/reference/components/output-block-logs/ +canonical: https://grafana.com/docs/alloy/latest/shared/reference/components/output-block-logs/ description: Shared content, output block logs headless: true --- diff --git a/docs/sources/shared/flow/reference/components/output-block-metrics.md b/docs/sources/shared/reference/components/output-block-metrics.md similarity index 56% rename from docs/sources/shared/flow/reference/components/output-block-metrics.md rename to docs/sources/shared/reference/components/output-block-metrics.md index 25818897ca..5b0d883427 100644 --- a/docs/sources/shared/flow/reference/components/output-block-metrics.md +++ b/docs/sources/shared/reference/components/output-block-metrics.md @@ -1,10 +1,4 @@ --- -aliases: -- ../../otelcol/output-block-metrics/ -- /docs/grafana-cloud/agent/shared/flow/reference/components/output-block-metrics/ -- /docs/grafana-cloud/monitor-infrastructure/agent/shared/flow/reference/components/output-block-metrics/ -- /docs/grafana-cloud/monitor-infrastructure/integrations/agent/shared/flow/reference/components/output-block-metrics/ -- /docs/grafana-cloud/send-data/agent/shared/flow/reference/components/output-block-metrics/ description: Shared content, output block metrics headless: true --- diff --git a/docs/sources/shared/flow/reference/components/output-block-traces.md b/docs/sources/shared/reference/components/output-block-traces.md similarity index 55% rename from docs/sources/shared/flow/reference/components/output-block-traces.md rename to docs/sources/shared/reference/components/output-block-traces.md index 124f42115d..bb39d85a07 100644 --- a/docs/sources/shared/flow/reference/components/output-block-traces.md +++ b/docs/sources/shared/reference/components/output-block-traces.md @@ -1,11 +1,5 @@ --- -aliases: -- ../../otelcol/output-block-traces/ -- /docs/grafana-cloud/agent/shared/flow/reference/components/output-block-traces/ -- /docs/grafana-cloud/monitor-infrastructure/agent/shared/flow/reference/components/output-block-traces/ -- /docs/grafana-cloud/monitor-infrastructure/integrations/agent/shared/flow/reference/components/output-block-traces/ -- /docs/grafana-cloud/send-data/agent/shared/flow/reference/components/output-block-traces/ -canonical: https://grafana.com/docs/agent/latest/shared/flow/reference/components/output-block-traces/ +canonical: https://grafana.com/docs/alloy/latest/shared/reference/components/output-block-traces/ description: Shared content, output block traces headless: true --- diff --git a/docs/sources/shared/flow/reference/components/output-block.md b/docs/sources/shared/reference/components/output-block.md similarity index 63% rename from docs/sources/shared/flow/reference/components/output-block.md rename to docs/sources/shared/reference/components/output-block.md index c3ad30e782..4d0d196a09 100644 --- a/docs/sources/shared/flow/reference/components/output-block.md +++ b/docs/sources/shared/reference/components/output-block.md 
@@ -1,11 +1,5 @@ --- -aliases: -- ../../otelcol/output-block/ -- /docs/grafana-cloud/agent/shared/flow/reference/components/output-block/ -- /docs/grafana-cloud/monitor-infrastructure/agent/shared/flow/reference/components/output-block/ -- /docs/grafana-cloud/monitor-infrastructure/integrations/agent/shared/flow/reference/components/output-block/ -- /docs/grafana-cloud/send-data/agent/shared/flow/reference/components/output-block/ -canonical: https://grafana.com/docs/agent/latest/shared/flow/reference/components/output-block/ +canonical: https://grafana.com/docs/alloy/latest/shared/reference/components/output-block/ description: Shared content, output block headless: true --- diff --git a/docs/sources/shared/flow/reference/components/prom-operator-scrape.md b/docs/sources/shared/reference/components/prom-operator-scrape.md similarity index 68% rename from docs/sources/shared/flow/reference/components/prom-operator-scrape.md rename to docs/sources/shared/reference/components/prom-operator-scrape.md index 156198d17b..03f939b061 100644 --- a/docs/sources/shared/flow/reference/components/prom-operator-scrape.md +++ b/docs/sources/shared/reference/components/prom-operator-scrape.md @@ -1,9 +1,5 @@ --- -aliases: -- /docs/agent/shared/flow/reference/components/prom-operator-scrape/ -- /docs/grafana-cloud/monitor-infrastructure/agent/shared/flow/reference/components/prom-operator-scrape/ -- /docs/grafana-cloud/send-data/agent/shared/flow/reference/components/prom-operator-scrape/ -canonical: https://grafana.com/docs/agent/latest/shared/flow/reference/components/prom-operator-scrape/ +canonical: https://grafana.com/docs/alloy/latest/shared/reference/components/prom-operator-scrape/ description: Shared content, prom operator scrape headless: true --- diff --git a/docs/sources/shared/flow/reference/components/rule-block-logs.md b/docs/sources/shared/reference/components/rule-block-logs.md similarity index 87% rename from docs/sources/shared/flow/reference/components/rule-block-logs.md rename to docs/sources/shared/reference/components/rule-block-logs.md index 3db6449ed1..6fdc772d61 100644 --- a/docs/sources/shared/flow/reference/components/rule-block-logs.md +++ b/docs/sources/shared/reference/components/rule-block-logs.md @@ -1,11 +1,5 @@ --- -aliases: -- /docs/agent/shared/flow/reference/components/rule-block/ -- /docs/grafana-cloud/agent/shared/flow/reference/components/rule-block-logs/ -- /docs/grafana-cloud/monitor-infrastructure/agent/shared/flow/reference/components/rule-block-logs/ -- /docs/grafana-cloud/monitor-infrastructure/integrations/agent/shared/flow/reference/components/rule-block-logs/ -- /docs/grafana-cloud/send-data/agent/shared/flow/reference/components/rule-block-logs/ -canonical: https://grafana.com/docs/agent/latest/shared/flow/reference/components/rule-block-logs/ +canonical: https://grafana.com/docs/alloy/latest/shared/reference/components/rule-block-logs/ description: Shared content, rule block logs headless: true --- diff --git a/docs/sources/shared/flow/reference/components/rule-block.md b/docs/sources/shared/reference/components/rule-block.md similarity index 87% rename from docs/sources/shared/flow/reference/components/rule-block.md rename to docs/sources/shared/reference/components/rule-block.md index 614b062b0e..e59d5047d4 100644 --- a/docs/sources/shared/flow/reference/components/rule-block.md +++ b/docs/sources/shared/reference/components/rule-block.md @@ -1,11 +1,5 @@ --- -aliases: -- /docs/agent/shared/flow/reference/components/rule-block/ -- 
/docs/grafana-cloud/agent/shared/flow/reference/components/rule-block/ -- /docs/grafana-cloud/monitor-infrastructure/agent/shared/flow/reference/components/rule-block/ -- /docs/grafana-cloud/monitor-infrastructure/integrations/agent/shared/flow/reference/components/rule-block/ -- /docs/grafana-cloud/send-data/agent/shared/flow/reference/components/rule-block/ -canonical: https://grafana.com/docs/agent/latest/shared/flow/reference/components/rule-block/ +canonical: https://grafana.com/docs/alloy/latest/shared/reference/components/rule-block/ description: Shared content, rule block headless: true --- diff --git a/docs/sources/shared/flow/reference/components/sigv4-block.md b/docs/sources/shared/reference/components/sigv4-block.md similarity index 65% rename from docs/sources/shared/flow/reference/components/sigv4-block.md rename to docs/sources/shared/reference/components/sigv4-block.md index 54598570ad..7c14c3d614 100644 --- a/docs/sources/shared/flow/reference/components/sigv4-block.md +++ b/docs/sources/shared/reference/components/sigv4-block.md @@ -1,11 +1,5 @@ --- -aliases: -- /docs/agent/shared/flow/reference/components/sigv4-block/ -- /docs/grafana-cloud/agent/shared/flow/reference/components/sigv4-block/ -- /docs/grafana-cloud/monitor-infrastructure/agent/shared/flow/reference/components/sigv4-block/ -- /docs/grafana-cloud/monitor-infrastructure/integrations/agent/shared/flow/reference/components/sigv4-block/ -- /docs/grafana-cloud/send-data/agent/shared/flow/reference/components/sigv4-block/ -canonical: https://grafana.com/docs/agent/latest/shared/flow/reference/components/sigv4-block/ +canonical: https://grafana.com/docs/alloy/latest/shared/reference/components/sigv4-block/ description: Shared content, sigv4 block headless: true --- diff --git a/docs/sources/shared/flow/reference/components/tls-config-block.md b/docs/sources/shared/reference/components/tls-config-block.md similarity index 77% rename from docs/sources/shared/flow/reference/components/tls-config-block.md rename to docs/sources/shared/reference/components/tls-config-block.md index 1b92e91d0b..7c4b45145c 100644 --- a/docs/sources/shared/flow/reference/components/tls-config-block.md +++ b/docs/sources/shared/reference/components/tls-config-block.md @@ -1,11 +1,5 @@ --- -aliases: -- /docs/agent/shared/flow/reference/components/tls-config-block/ -- /docs/grafana-cloud/agent/shared/flow/reference/components/tls-config-block/ -- /docs/grafana-cloud/monitor-infrastructure/agent/shared/flow/reference/components/tls-config-block/ -- /docs/grafana-cloud/monitor-infrastructure/integrations/agent/shared/flow/reference/components/tls-config-block/ -- /docs/grafana-cloud/send-data/agent/shared/flow/reference/components/tls-config-block/ -canonical: https://grafana.com/docs/agent/latest/shared/flow/reference/components/tls-config-block/ +canonical: https://grafana.com/docs/alloy/latest/shared/reference/components/tls-config-block/ description: Shared content, tls config block headless: true --- diff --git a/docs/sources/shared/flow/reference/components/write_relabel_config.md b/docs/sources/shared/reference/components/write_relabel_config.md similarity index 84% rename from docs/sources/shared/flow/reference/components/write_relabel_config.md rename to docs/sources/shared/reference/components/write_relabel_config.md index db06408464..dfa6084a7d 100644 --- a/docs/sources/shared/flow/reference/components/write_relabel_config.md +++ b/docs/sources/shared/reference/components/write_relabel_config.md @@ -1,11 +1,5 @@ --- -aliases: -- 
/docs/agent/shared/flow/reference/components/write-relabel-config-block/
-- /docs/grafana-cloud/agent/shared/flow/reference/components/write-relabel-config-block/
-- /docs/grafana-cloud/monitor-infrastructure/agent/shared/flow/reference/components/write-relabel-config-block/
-- /docs/grafana-cloud/monitor-infrastructure/integrations/agent/shared/flow/reference/components/write-relabel-config-block/
-- /docs/grafana-cloud/send-data/agent/shared/flow/reference/components/write-relabel-config-block/
-canonical: https://grafana.com/docs/agent/latest/shared/flow/reference/components/write-relabel-config-block/
+canonical: https://grafana.com/docs/alloy/latest/shared/reference/components/write-relabel-config-block/
 description: Shared content, write_relabel_config block
 headless: true
 ---
@@ -13,10 +7,8 @@ headless: true
 
-The `write_relabel_config` block contains the definition of any relabeling
-rules that can be applied to an input metric.
-If more than one `write_relabel_config` block is defined, the transformations
-are applied in top-down order.
+The `write_relabel_config` block contains the definition of any relabeling rules that can be applied to an input metric.
+If more than one `write_relabel_config` block is defined, the transformations are applied in top-down order.
 
 The following arguments can be used to configure a `write_relabel_config`.
 All arguments are optional. Omitted fields take their default values.
diff --git a/docs/sources/shared/stability/beta.md b/docs/sources/shared/stability/beta.md
new file mode 100644
index 0000000000..0935e5b70d
--- /dev/null
+++ b/docs/sources/shared/stability/beta.md
@@ -0,0 +1,11 @@
+---
+canonical: https://grafana.com/docs/alloy/latest/shared/stability/beta/
+description: Shared content, beta
+headless: true
+---
+
+> **BETA**: This is a [beta][] component. Beta components are subject to breaking
+> changes, and may be replaced with equivalent functionality that covers the
+> same use case.
+
+[beta]: ../../../stability/#beta
diff --git a/docs/sources/shared/stability/experimental.md b/docs/sources/shared/stability/experimental.md
new file mode 100644
index 0000000000..6028ba3cdd
--- /dev/null
+++ b/docs/sources/shared/stability/experimental.md
@@ -0,0 +1,11 @@
+---
+canonical: https://grafana.com/docs/alloy/latest/shared/stability/experimental/
+description: Shared content, experimental
+headless: true
+---
+
+> **EXPERIMENTAL**: This is an [experimental][] component. Experimental
+> components are subject to frequent breaking changes, and may be removed with
+> no equivalent replacement.
+
+[experimental]: ../../../stability/#experimental
diff --git a/docs/sources/shared/wal-data-retention.md b/docs/sources/shared/wal-data-retention.md
deleted file mode 100644
index e7fa388718..0000000000
--- a/docs/sources/shared/wal-data-retention.md
+++ /dev/null
@@ -1,116 +0,0 @@
----
-aliases:
-- /docs/agent/shared/wal-data-retention/
-- /docs/grafana-cloud/agent/shared/wal-data-retention/
-- /docs/grafana-cloud/monitor-infrastructure/agent/shared/wal-data-retention/
-- /docs/grafana-cloud/monitor-infrastructure/integrations/agent/shared/wal-data-retention/
-- /docs/grafana-cloud/send-data/agent/shared/wal-data-retention/
-canonical: https://grafana.com/docs/agent/latest/shared/wal-data-retention/
-description: Shared content, information about data retention in the WAL
-headless: true
----
-
-The `prometheus.remote_write` component uses a Write Ahead Log (WAL) to prevent
-data loss during network outages. 
The component buffers the received metrics in
-a WAL for each configured endpoint. The queue shards can use the WAL after the
-network outage is resolved and flush the buffered metrics to the endpoints.
-
-The WAL records metrics in 128 MB files called segments. To avoid having a WAL
-that grows on-disk indefinitely, the component _truncates_ its segments on a
-set interval.
-
-On each truncation, the WAL deletes references to series that are no longer
-present and also _checkpoints_ roughly the oldest two thirds of the segments
-(rounded down to the nearest integer) written to it since the last truncation
-period. A checkpoint means that the WAL only keeps track of the unique
-identifier for each existing metrics series, and can no longer use the samples
-for remote writing. If that data has not yet been pushed to the remote
-endpoint, it is lost.
-
-This behavior dictates the data retention for the `prometheus.remote_write`
-component. It also means that it's impossible to directly correlate data
-retention to the data age itself, as the truncation logic works on
-_segments_, not the samples themselves. This makes data retention less
-predictable when the component receives a non-consistent rate of data.
-
-The [WAL block][] in Flow mode, or the [metrics config][] in Static mode,
-contains some configurable parameters that can be used to control the tradeoff
-between memory usage, disk usage, and data retention.
-
-The `truncate_frequency` or `wal_truncate_frequency` parameter configures the
-interval at which truncations happen. A lower value leads to reduced memory
-usage, but also provides less resiliency to long outages.
-
-When a WAL clean-up starts, the most recently successfully sent timestamp is
-used to determine how much data is safe to remove from the WAL.
-The `min_keepalive_time` or `min_wal_time` controls the minimum age of samples
-considered for removal. No samples more recent than `min_keepalive_time` are
-removed. The `max_keepalive_time` or `max_wal_time` controls the maximum age of
-samples that can be kept in the WAL. Samples older than
-`max_keepalive_time` are forcibly removed.
-
-### Extended `remote_write` outages
-When the remote write endpoint is unreachable over a period of time, the most
-recent successfully sent timestamp is not updated. The
-`min_keepalive_time` and `max_keepalive_time` arguments control the age range
-of data kept in the WAL.
-
-If the remote write outage is longer than the `max_keepalive_time` parameter,
-then the WAL is truncated, and the oldest data is lost.
-
-### Intermittent `remote_write` outages
-If the remote write endpoint is intermittently reachable, the most recent
-successfully sent timestamp is updated whenever the connection is successful.
-A successful connection updates the series' comparison with
-`min_keepalive_time` and triggers a truncation on the next `truncate_frequency`
-interval which checkpoints two thirds of the segments (rounded down to the
-nearest integer) written since the previous truncation.
-
-### Falling behind
-If the queue shards cannot flush data quickly enough to keep
-up-to-date with the most recent data buffered in the WAL, we say that the
-component is 'falling behind'.
-It's not unusual for the component to temporarily fall behind 2 or 3 scrape intervals.
-If the component falls behind more than one third of the data written since the
-last truncate interval, it is possible for the truncate loop to checkpoint data
-before it's pushed to the remote_write endpoint. 
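As a rough, hypothetical illustration (not part of the original shared doc), a Static mode instance could tune this tradeoff as follows. The sketch assumes `wal_truncate_frequency`, `min_wal_time`, and `max_wal_time` are set per metrics instance, and the values shown are placeholders:

```yaml
metrics:
  wal_directory: /tmp/agent/wal
  configs:
    - name: default
      # Hypothetical values: truncating more often caps disk and memory usage,
      # at the cost of less resiliency to long remote_write outages.
      wal_truncate_frequency: 60m
      # Keep samples newer than 5m; force-remove samples older than 4h.
      min_wal_time: 5m
      max_wal_time: 4h
      remote_write:
        - url: http://localhost:9009/api/prom/push
```

A longer `max_wal_time` tolerates longer outages, but lets the WAL grow larger on disk.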
- -### WAL corruption - -WAL corruption can occur when Grafana Agent unexpectedly stops while the latest WAL segments -are still being written to disk. For example, the host computer has a general disk failure -and crashes before you can stop Grafana Agent and other running services. When you restart Grafana -Agent, it verifies the WAL, removing any corrupt segments it finds. Sometimes, this repair -is unsuccessful, and you must manually delete the corrupted WAL to continue. - -If the WAL becomes corrupted, Grafana Agent writes error messages such as -`err="failed to find segment for index"` to the log file. - -{{< admonition type="note" >}} -Deleting a WAL segment or a WAL file permanently deletes the stored WAL data. -{{< /admonition >}} - -To delete the corrupted WAL: - -1. [Stop][] Grafana Agent. -1. Find and delete the contents of the `wal` directory. - - By default the `wal` directory is a subdirectory - of the `data-agent` directory located in the Grafana Agent working directory. The WAL data directory - may be different than the default depending on the [wal_directory][] setting in your Static configuration - file or the path specified by the Flow [command line flag][run] `--storage-path`. - - {{< admonition type="note" >}} - There is one `wal` directory per: - - * Metrics instance running in Static mode - * `prometheus.remote_write` component running in Flow mode - {{< /admonition >}} - -1. [Start][Stop] Grafana Agent and verify that the WAL is working correctly. - -[WAL block]: /docs/agent//flow/reference/components/prometheus.remote_write#wal-block -[metrics config]: /docs/agent//static/configuration/metrics-config -[Stop]: /docs/agent//flow/get-started/start-agent -[wal_directory]: /docs/agent//static/configuration/metrics-config -[run]: /docs/agent//flow/reference/cli/run diff --git a/docs/sources/stability.md b/docs/sources/stability.md index c21d549aeb..2cdb1d0087 100644 --- a/docs/sources/stability.md +++ b/docs/sources/stability.md @@ -1,28 +1,22 @@ --- -aliases: -- /docs/grafana-cloud/monitor-infrastructure/agent/stability/ -- /docs/grafana-cloud/send-data/agent/stability/ -canonical: https://grafana.com/docs/agent/latest/stability/ -description: Grafana Agent features fall into one of three stability categories, experimental, - beta, or stable +canonical: https://grafana.com/docs/alloy/latest/stability/ +description: Grafana Alloy features fall into one of three stability categories, experimental, beta, or stable title: Stability weight: 600 --- # Stability -Stability of functionality usually refers to the stability of a _use case,_ -such as collecting and forwarding OpenTelemetry metrics. +Stability of functionality usually refers to the stability of a _use case,_ such as collecting and forwarding OpenTelemetry metrics. -Features within the Grafana Agent project will fall into one of three stability -categories: +Features within the {{< param "PRODUCT_NAME" >}} project will fall into one of three stability categories: * **Experimental**: A new use case is being explored. * **Beta**: Functionality covering a use case is being matured. * **Stable**: Functionality covering a use case is believed to be stable. -The default stability is stable; features will be explicitly marked as -experimental or beta if they are not stable. +The default stability is stable. +Features are explicitly marked as experimental or beta if they aren't stable. ## Experimental @@ -37,22 +31,18 @@ Unless removed, experimental features eventually graduate to beta. 
## Beta
 
-The **beta** stability category is used to denote a feature which is being
-matured.
+The **beta** stability category is used to denote a feature that is being matured.
 
 * Beta features are subject to occasional breaking changes.
-* Beta features can be replaced by equivalent functionality that covers the
-  same use case.
+* Beta features can be replaced by equivalent functionality that covers the same use case.
 * Beta features can be used without enabling feature flags.
 
-Unless replaced with equivalent functionality, beta features eventually
-graduate to stable.
+Unless replaced with equivalent functionality, beta features eventually graduate to stable.
 
 ## Stable
 
 The **stable** stability category is used to denote a feature as stable.
 
 * Breaking changes to stable features are rare, and will be well-documented.
-* If new functionality is introduced to replace existing stable functionality,
-  deprecation and removal timeline will be well-documented.
+* If new functionality is introduced to replace existing stable functionality, the deprecation and removal timeline will be well-documented.
 * Stable features can be used without enabling feature flags.
diff --git a/docs/sources/static/_index.md b/docs/sources/static/_index.md
deleted file mode 100644
index 4ce1f42036..0000000000
--- a/docs/sources/static/_index.md
+++ /dev/null
@@ -1,97 +0,0 @@
----
-aliases:
-- /docs/grafana-cloud/monitor-infrastructure/agent/static/
-- /docs/grafana-cloud/send-data/agent/static/
-canonical: https://grafana.com/docs/agent/latest/static/
-description: Learn about Grafana Agent in static mode
-title: Static mode
-weight: 200
----
-
-# Static mode
-
-Static mode is the original mode of Grafana Agent.
-Static mode is composed of different _subsystems_:
-
-* The _metrics subsystem_ wraps around Prometheus for collecting Prometheus
-  metrics and forwarding them over the Prometheus `remote_write` protocol.
-
-* The _logs subsystem_ wraps around Grafana Promtail for collecting logs and
-  forwarding them to Grafana Loki.
-
-* The _traces subsystem_ wraps around OpenTelemetry Collector for collecting
-  traces and forwarding them to Grafana Tempo or any OpenTelemetry-compatible
-  endpoint.
-
-Static mode is [configured][configure] with a YAML file.
-
-Static mode works with:
-
-- Grafana Cloud
-- Grafana Enterprise Stack
-- OSS deployments of Grafana Loki, Grafana Mimir, Grafana Tempo, and Prometheus
-
-This topic helps you to think about what you're trying to accomplish and how to
-use Grafana Agent to meet your goals.
-
-You can [set up][] and [configure][] Grafana Agent in static mode manually, or
-you can follow the common workflows described in this topic.
-
-## Topics
-
-### Static mode Grafana Agent for Grafana Cloud integrations
-
-There are different ways for you to set up Grafana Agent to scrape
-data—through Grafana's integration platform or directly. Select a guide
-to get started:
-
-| Topic | Description |
-|---|---|
-| [Get started with monitoring using an integration](/docs/grafana-cloud/data-configuration/get-started-integration/) | Walk through installing a Linux integration using Grafana Agent in the Grafana Cloud interface. |
-| [Install and manage integrations](/docs/grafana-cloud/data-configuration/integrations/install-and-manage-integrations/) | View general steps for using Grafana Cloud integrations to install Grafana Agent to collect data. See [supported integrations](/docs/grafana-cloud/data-configuration/integrations/integration-reference/). |
-| [Ship your metrics to Grafana Cloud without an integration](/docs/grafana-cloud/data-configuration/metrics/agent-config-exporter/) | If you want to ship your Prometheus metrics to Grafana Cloud but there isn’t an integration available, you can use a Prometheus exporter and deploy Grafana Agent to scrape your local machine or service. | -| [Change your metrics scrape interval](/docs/grafana-cloud/billing-and-usage/control-prometheus-metrics-usage/changing-scrape-interval/) | Learn about reducing your total data points per minute (DPM) by adjusting your scrape interval. | - -### Static mode Grafana Agent for Kubernetes Monitoring - -Grafana Kubernetes Monitoring provides a simplified approach to monitoring your Kubernetes fleet by deploying Grafana Agent with useful defaults for collecting metrics. Select a guide to get started monitoring Kubernetes: - -| Topic | Description | -|---|---| -| [Configure Kubernetes Monitoring using Agent](/docs/grafana-cloud/kubernetes-monitoring/configuration/) | Use the Kubernetes Monitoring solution to set up monitoring of your Kubernetes cluster and to install preconfigured dashboards and alerts. | -| [Ship Kubernetes traces using Grafana Agent directly](/docs/grafana-cloud/kubernetes-monitoring/other-methods/k8s-agent-traces/) | Deploy Grafana Agent into your Kubernetes cluster as a deployment and configure it to collect traces for your Kubernetes workloads. | - -### Use Grafana Agent directly to scrape telemetry data - -Grafana Cloud integration workflows and the Kubernetes Monitoring solution are the easiest ways to get started collecting telemetry data, but sometimes you might want to use a manual approach to set your configuration options. - -| Topic | Description | -|---|---| -| [Install or uninstall Grafana Agent][install] | Install or uninstall Grafana Agent. | -| [Troubleshoot Cloud Integrations installation on Linux](/docs/grafana-cloud/monitor-infrastructure/integrations/install-troubleshoot-linux/) | Troubleshoot common errors when executing the Grafana Agent installation script on Linux. | -| [Troubleshoot Cloud Integrations installation on Mac](/docs/grafana-cloud/monitor-infrastructure/integrations/install-troubleshoot-mac/) | Troubleshoot common errors when executing the Grafana Agent installation script on Mac. | -| [Troubleshoot Cloud Integrations installation on Windows](/docs/grafana-cloud/monitor-infrastructure/integrations/install-troubleshooting-windows/) | Troubleshoot common errors when executing the Grafana Agent installation script on Windows. | - -### Use Grafana Agent to send logs to Grafana Loki - -Logs are included when you [set up a Cloud integration](/docs/grafana-cloud/data-configuration/integrations/install-and-manage-integrations) but you can take a more hands-on approach with the following guide. - -| Topic | Description | -|---|---| -| [Collect logs with Grafana Agent](/docs/grafana-cloud/data-configuration/logs/collect-logs-with-agent/) | Install Grafana Agent to collect logs for use with Grafana Loki, included with your [Grafana Cloud account](/docs/grafana-cloud/account-management/cloud-portal/). | - -### Use Grafana Agent to send traces to Grafana Tempo - -| Topic | Description | -|---|---| -| [Set up and use tracing](/docs/grafana-cloud/data-configuration/traces/set-up-and-use-tempo/) | Install Grafana Agent to collect traces for use with Grafana Tempo, included with your [Grafana Cloud account](/docs/grafana-cloud/account-management/cloud-portal/). 
|
-| [Use Grafana Agent as a tracing pipeline](/docs/tempo/latest/configuration/grafana-agent/) | Grafana Agent can be configured to run a set of tracing pipelines to collect data from your applications and write it to Grafana Tempo. Pipelines are built using OpenTelemetry, and consist of receivers, processors, and exporters. |
-
-{{% docs/reference %}}
-[set up]: "/docs/agent/ -> /docs/agent//static/set-up"
-[set up]: "/docs/grafana-cloud/ -> ./set-up"
-[configure]: "/docs/agent/ -> /docs/agent//static/configuration"
-[configure]: "/docs/grafana-cloud/ -> ./configuration"
-[install]: "/docs/agent/ -> /docs/agent//static/set-up/install"
-[install]: "/docs/grafana-cloud/ -> ./set-up/install"
-{{% /docs/reference %}}
diff --git a/docs/sources/static/api/_index.md b/docs/sources/static/api/_index.md
deleted file mode 100644
index 1f6715e9d7..0000000000
--- a/docs/sources/static/api/_index.md
+++ /dev/null
@@ -1,539 +0,0 @@
----
-aliases:
-- ../api/
-- /docs/grafana-cloud/monitor-infrastructure/agent/static/api/
-- /docs/grafana-cloud/send-data/agent/static/api/
-canonical: https://grafana.com/docs/agent/latest/static/api/
-description: Learn about the Grafana Agent static mode API
-menuTitle: Static mode API
-title: Static mode APIs (Stable)
-weight: 400
----
-
-# Static mode APIs (Stable)
-
-The API for static mode is divided into several parts:
-
-- [Config Management API](#config-management-api-beta)
-- [Agent API](#agent-api)
-- [Integrations API](#integrations-api-experimental)
-- [Ready/Healthy API](#ready--health-api)
-
-API endpoints are stable unless otherwise noted.
-
-## Config management API (Beta)
-
-Grafana Agent exposes a configuration management REST API for managing instance configurations when it's running in [scraping service mode][scrape].
-
-{{< admonition type="note" >}}
-The scraping service mode is a requirement for the configuration management
-API; however, this isn't a prerequisite for the Agent API or Ready/Healthy API.
-{{< /admonition >}}
-
-The following endpoints are exposed:
-
-- List configs: [`GET /agent/api/v1/configs`](#list-configs)
-- Get config: [`GET /agent/api/v1/configs/{name}`](#get-config)
-- Update config: [`PUT /agent/api/v1/config/{name}`](#update-config)
-- Delete config: [`DELETE /agent/api/v1/config/{name}`](#delete-config)
-
-{{< admonition type="note" >}}
-If you are running Grafana Agent in a Docker container and you want to expose the API outside the Docker container, you must change the default HTTP listen address from `127.0.0.1:12345` to a valid network interface address.
-You can change the HTTP listen address with the command-line flag: `-server.http.address=0.0.0.0:12345`.
-For more information, refer to the [Server](https://grafana.com/docs/agent/latest/static/configuration/flags/#server) command-line flag documentation.
-
-You must also publish the port in Docker. Refer to [Published ports](https://docs.docker.com/network/#published-ports) in the Docker documentation for more information.
-{{< /admonition >}}
-
-### API response
-
-All Config Management API endpoints will return responses in the following
-form, unless an internal service error prevents the server from responding
-properly:
-
-```
-{
-  "status": "success" | "error",
-  "data": {}
-}
-```
-
-Status will be either `success` or `error`. All 2xx responses will be
-accompanied by a `success` value for the status field. 4xx and 5xx
-responses will provide a value of `error`. All requests may potentially
-return 500 on an internal error. 
Other non-500 responses will be documented
-per API.
-
-The data field may or may not be present, depending on the endpoint. It
-provides extra information for the query. The documentation for each endpoint
-will describe the full response provided.
-
-### List configs
-
-```
-GET /agent/api/v1/configs
-```
-
-List configs returns a list of the named configurations currently known by the
-underlying KV store.
-
-Status code: 200 on success.
-Response:
-
-```
-{
-  "status": "success",
-  "data": {
-    "configs": [
-      // list of config names:
-      "a",
-      "b",
-      "c",
-      // ...
-    ]
-  }
-}
-```
-
-### Get config
-
-```
-GET /agent/api/v1/configs/{name}
-```
-
-Get config returns a single configuration by name. The configuration must
-exist or an error will be returned. URL-encoded names will be retrieved in decoded
-form. For example, `hello%2Fworld` will represent the config named `hello/world`.
-
-Status code: 200 on success, 400 on invalid config name.
-Response on success:
-
-```
-{
-  "status": "success",
-  "data": {
-    "value": "/* YAML configuration */"
-  }
-}
-```
-
-### Update config
-
-```
-PUT /agent/api/v1/config/{name}
-POST /agent/api/v1/config/{name}
-```
-
-Update config updates or adds a new configuration by name. If a configuration
-with the same name already exists, then it will be completely overwritten.
-
-URL-encoded names are stored in decoded form. For example, `hello%2Fworld` will
-represent the config named `hello/world`.
-
-The request body passed to this endpoint must match the format of [metrics_instance_config][metrics]
-defined in the Configuration Reference. The name field of the configuration is
-ignored and the name in the URL takes precedence. The request body must be
-formatted as YAML.
-
-{{< admonition type="warning" >}}
-By default, all instance configuration files that read
-credentials from a file on disk will be rejected. This prevents malicious users
-from reading the contents of arbitrary files as passwords and sending their
-contents to fake remote_write endpoints. To change the behavior, set
-`dangerous_allow_reading_files` to true in the `scraping_service` block.
-{{< /admonition >}}
-
-Status code: 201 with a new config, 200 on updated config.
-Response on success:
-
-```
-{
-  "status": "success"
-}
-```
-
-### Delete config
-
-```
-DELETE /agent/api/v1/config/{name}
-```
-
-Delete config attempts to delete a configuration by name. The named
-configuration must exist; deleting a nonexistent config will result in an
-error.
-
-URL-encoded names will be interpreted in decoded form. For example, `hello%2Fworld`
-will represent the config named `hello/world`.
-
-Status code: 200 on success, 400 on invalid config name.
-Response on success:
-
-```
-{
-  "status": "success"
-}
-```
-
-## Agent API
-
-### List current running instances of metrics subsystem
-
-```
-GET /agent/api/v1/metrics/instances
-```
-
-{{< admonition type="note" >}}
-The deprecated alias is `/agent/api/v1/instances`
-{{< /admonition >}}
-
-Status code: 200 on success.
-Response on success:
-
-```
-{
-  "status": "success",
-  "data": [
-    <string>
-  ]
-}
-```
-
-### List current scrape targets of metrics subsystem
-
-```
-GET /agent/api/v1/metrics/targets
-```
-
-{{< admonition type="note" >}}
-The deprecated alias is `/agent/api/v1/targets`
-{{< /admonition >}}
-
-This endpoint collects all metrics subsystem targets known to the Agent across all
-running instances. Only targets being scraped from the local Agent will be returned. If
-running in scraping service mode, this endpoint must be invoked in all Agents
-separately to get the combined set of targets across the whole Agent cluster.
-
-The `labels` field shows the labels that will be added to metrics from the
-target, while the `discovered_labels` field shows all labels found during
-service discovery.
-
-Status code: 200 on success.
-Response on success:
-
-```
-{
-  "status": "success",
-  "data": [
-    {
-      "instance": <string>,
-      "target_group": <string>,
-      "endpoint": <string>,
-      "state": <string>,
-      "discovered_labels": {
-        "__address__": "<address>",
-        ...
-      },
-      "labels": {
-        "label_a": "value_a",
-        ...
-      },
-      "last_scrape": <string>,
-      "scrape_duration_ms": <int>,
-      "scrape_error": <string>
-    },
-    ...
-  ]
-}
-```
-
-### Accept remote_write requests
-
-```
-POST /agent/api/v1/metrics/instance/{instance}/write
-```
-
-This endpoint accepts Prometheus-compatible remote_write POST requests, and
-appends their contents into an instance's WAL.
-
-Replace `{instance}` with the name of the metrics instance from your config
-file. For example, this block defines the "dev" and "prod" instances:
-
-```yaml
-metrics:
-  configs:
-  - name: dev # /agent/api/v1/metrics/instance/dev/write
-    ...
-  - name: prod # /agent/api/v1/metrics/instance/prod/write
-    ...
-```
-
-Status code: 204 on success, 400 for bad requests related to the provided
-instance or POST payload format and content, 500 for cases where appending
-to the WAL failed.
-
-### List current running instances of logs subsystem
-
-```
-GET /agent/api/v1/logs/instances
-```
-
-Status code: 200 on success.
-Response on success:
-
-```
-{
-  "status": "success",
-  "data": [
-    <string>
-  ]
-}
-```
-
-### List current scrape targets of logs subsystem
-
-```
-GET /agent/api/v1/logs/targets
-```
-
-This endpoint collects all logs subsystem targets known to the Agent across
-all running instances. Only targets being scraped from Promtail will be returned.
-
-The `labels` field shows the labels that will be added to metrics from the
-target, while the `discovered_labels` field shows all labels found during
-service discovery.
-
-Status code: 200 on success.
-Response on success:
-
-```
-{
-  "status": "success",
-  "data": [
-    {
-      "instance": "default",
-      "target_group": "varlogs",
-      "type": "File",
-      "labels": {
-        "job": "varlogs"
-      },
-      "discovered_labels": {
-        "__address__": "localhost",
-        "__path__": "/var/log/*log",
-        "job": "varlogs"
-      },
-      "ready": true,
-      "details": {
-        "/var/log/alternatives.log": 13386,
-        "/var/log/apport.log": 0,
-        "/var/log/auth.log": 37009,
-        "/var/log/bootstrap.log": 107347,
-        "/var/log/dpkg.log": 374420,
-        "/var/log/faillog": 0,
-        "/var/log/fontconfig.log": 11629,
-        "/var/log/gpu-manager.log": 1541,
-        "/var/log/kern.log": 782582,
-        "/var/log/lastlog": 0,
-        "/var/log/syslog": 788450
-      }
-    }
-  ]
-}
-```
-
-### Reload configuration file (beta)
-
-This endpoint is currently in beta and may have issues. Please open any issues
-you encounter.
-
-```
-GET /-/reload
-POST /-/reload
-```
-
-This endpoint will re-read the configuration file from disk and refresh the
-entire state of the Agent to reflect the new file on disk:
-
-- HTTP Server
-- Prometheus metrics subsystem
-- Loki logs subsystem
-- Tempo traces subsystem
-- Integrations
-
-Valid configurations will be applied to each of the subsystems listed above, and
-`/-/reload` will return with a status code of 200 once all subsystems have been
-updated. Malformed configuration files (invalid YAML, failed validation checks)
-will be immediately rejected with a status code of 400.
-
-Well-formed configuration files can still be invalid for various reasons, such
-as not having permissions to read the WAL directory. Issues such as these will
-cause per-subsystem problems while reloading the configuration, and will leave
-that subsystem in an undefined state. Specific errors encountered during reload
-will be logged, and should be fixed before calling `/-/reload` again.
-
-Status code: 200 on success, 400 otherwise.
-
-### Show configuration file
-
-```
-GET /-/config
-```
-
-This endpoint prints out the currently loaded configuration the Agent is using. 
-The returned YAML has defaults applied, and only shows changes to the state that
-validated successfully, so the results will not identically match the
-configuration file on disk.
-
-Status code: 200 on success.
-
-### Generate support bundle
-```
-GET /-/support?duration=N
-```
-
-This endpoint returns a 'support bundle', a zip file that contains information
-about a running agent, and can be used as a baseline of information when trying
-to debug an issue.
-
-The duration parameter is optional, must be less than or equal to the
-configured HTTP server write timeout, and if not provided, defaults to it.
-The endpoint is only exposed on the agent's HTTP server listen address, which
-defaults to `localhost:12345`.
-
-The support bundle contains all information in plain text, so that it can be
-inspected before sharing, to verify that no sensitive information has leaked.
-
-In addition, you can inspect the [supportbundle package](https://github.com/grafana/agent/tree/main/internal/static/supportbundle)
-to verify the code that is being used to generate these bundles.
-
-A support bundle contains the following data:
-* `agent-config.yaml` contains the current agent configuration (when the `-config.enable-read-api` flag is passed).
-* `agent-logs.txt` contains the agent logs during the bundle generation.
-* `agent-metadata.yaml` contains the agent's build version, operating system, architecture, uptime, plus a string payload defining which extra agent features have been enabled via command-line flags.
-* `agent-metrics-instances.json` and `agent-metrics-targets.json` contain the active metric subsystem instances and the discovered scrape targets for each one.
-* `agent-logs-instances.json` and `agent-logs-targets.json` contain the active logs subsystem instances and the discovered log targets for each one.
-* `agent-metrics.txt` contains a snapshot of the agent's internal metrics.
-* The `pprof/` directory contains Go runtime profiling data (CPU, heap, goroutine, mutex, block profiles) as exported by the pprof package.
-
-## Integrations API (Experimental)
-
-> **WARNING**: This API is currently only available when the experimental
-> [integrations revamp][integrations]
-> is enabled. Both the revamp and this API are subject to change while they
-> are still experimental.
-
-### Integrations SD API
-
-```
-GET /agent/api/v1/metrics/integrations/sd
-```
-
-This endpoint returns all running metrics-based integrations. It conforms to
-the Prometheus [http_sd_config API](https://prometheus.io/docs/prometheus/2.45/configuration/configuration/#http_sd_config).
-Targets include integrations regardless of autoscrape being enabled; this
-allows for manually configuring scrape jobs to collect metrics from an
-integration running on an external agent.
-
-The following labels will be present on all returned targets:
-
-- `instance`: The unique instance ID of the running integration.
-- `job`: `integrations/<__meta_agent_integration_name>`
-- `agent_hostname`: `hostname:port` of the agent running the integration.
-- `__meta_agent_integration_name`: The name of the integration.
-- `__meta_agent_integration_instance`: The unique instance ID for the running integration.
-- `__meta_agent_integration_autoscrape`: `1` if autoscrape is enabled for this integration, `0` otherwise.
-
-To reduce the load on the agent's HTTP server, the following query parameters
-may also be provided to the URL:
-
-- `integrations`: Comma-delimited list of integrations to return. For example, `agent,node_exporter`.
-- `instance`: Return all integrations matching a specific value for instance.
-
-Status code: 200 if successful.
-Response on success:
-
-```
-[
-  {
-    "targets": [ "<host>", ... ],
-    "labels": {
-      "<labelname>": "<labelvalue>", ...
-    }
-  },
-  ...
-]
-```
-
-### Integrations autoscrape targets
-
-```
-GET /agent/api/v1/metrics/integrations/targets
-```
-
-This endpoint returns all integrations for which autoscrape is enabled. The
-response is identical to [`/agent/api/v1/metrics/targets`](#list-current-scrape-targets-of-metrics-subsystem).
-
-Status code: 200 on success.
-Response on success:
-
-```
-{
-  "status": "success",
-  "data": [
-    {
-      "instance": <string>,
-      "target_group": <string>,
-      "endpoint": <string>,
-      "state": <string>,
-      "discovered_labels": {
-        "__address__": "<address>",
-        ...
-      },
-      "labels": {
-        "label_a": "value_a",
-        ...
-      },
-      "last_scrape": <string>,
-      "scrape_duration_ms": <int>,
-      "scrape_error": <string>
-    },
-    ...
-  ]
-}
-```
-
-## Ready / health API
-
-### Readiness check
-
-```
-GET /-/ready
-```
-
-Status code: 200 if ready.
-
-Response:
-```
-Agent is Ready.
-```
-
-### Healthiness check
-
-```
-GET /-/healthy
-```
-
-Status code: 200 if healthy.
-
-Response:
-```
-Agent is Healthy.
-```
-
-{{% docs/reference %}}
-[scrape]: "/docs/agent/ -> /docs/agent//static/configuration/scraping-service"
-[scrape]: "/docs/grafana-cloud/ -> ../configuration/scraping-service"
-[metrics]: "/docs/agent/ -> /docs/agent//static/configuration/metrics-config"
-[metrics]: "/docs/grafana-cloud/ -> ../configuration/metrics-config"
-[integrations]: "/docs/agent/ -> /docs/agent//static/configuration/integrations/integrations-next"
-[integrations]: "/docs/grafana-cloud/ -> ../configuration/integrations/integrations-next"
-{{% /docs/reference %}}
diff --git a/docs/sources/static/configuration/_index.md b/docs/sources/static/configuration/_index.md
deleted file mode 100644
index fa1a195bd6..0000000000
--- a/docs/sources/static/configuration/_index.md
+++ /dev/null
@@ -1,159 +0,0 @@
----
-aliases:
-- ../configuration/
-- /docs/grafana-cloud/monitor-infrastructure/agent/static/configuration/
-- /docs/grafana-cloud/send-data/agent/static/configuration/
-canonical: https://grafana.com/docs/agent/latest/static/configuration/
-description: Learn how to configure Grafana Agent in static mode
-title: Configure static mode
-weight: 300
----
-
-# Configure static mode
-
-The configuration of static mode is split across two places:
-
-* A YAML file
-* [Command-line flags][flags]
-
-The YAML file is used to configure settings which are dynamic and can be
-changed at runtime. The command-line flags then configure things which cannot
-change at runtime, such as the listen port for the HTTP server.
-
-This file describes the YAML configuration, which is usually in a file named `config.yaml`.
-
-- [server_config][server]
-- [metrics_config][metrics]
-- [logs_config][logs]
-- [traces_config][traces]
-- [integrations_config][integrations]
-
-The configuration of Grafana Agent is "stable," but subject to breaking changes
-as individual features change. Breaking changes to configuration will be
-well-documented.
-
-## Updating configuration
-
-The configuration file can be reloaded at runtime using the `/-/reload` API
-endpoint or sending a SIGHUP signal to the process.
-
-## Variable substitution
-
-You can use environment variables in the configuration file to set values that
-need to be configurable during deployment. To enable this functionality, you
-must pass `-config.expand-env` as a command-line flag to the Agent.
-
-To refer to an environment variable in the config file, use:
-
-```
-${VAR}
-```
-
-Where VAR is the name of the environment variable.
-
-Each variable reference is replaced at startup by the value of the environment
-variable. The replacement is case-sensitive and occurs before the YAML file is
-parsed. References to undefined variables are replaced by empty strings unless
-you specify a default value or custom error text.
-
-To specify a default value, use:
-
-```
-${VAR:-default_value}
-```
-
-Where default_value is the value to use if the environment variable is
-undefined. The full list of supported syntax can be found at Drone's
-[envsubst repository](https://github.com/drone/envsubst).
-
-### Regex capture group references
-
-When using `-config.expand-env`, `VAR` must be an alphanumeric string with at
-least one non-digit character. If `VAR` is a number, the expander will assume
-you're trying to use a regex capture group reference, and will coerce the result
-to be one.
-
-This means references in your config file like `${1}` will remain
-untouched, but edge cases like `${1:-default}` will also be coerced to `${1}`,
-which may be slightly unexpected.
-
-## Reloading (beta)
-
-The configuration file can be reloaded at runtime. Read the [API documentation][api] for more information.
-
-This functionality is in beta, and may have issues. Please open GitHub issues
-for any problems you encounter.
-
-## File format
-
-To specify which configuration file to load, pass the `-config.file` flag at
-the command line. The file is written in the [YAML
-format](https://en.wikipedia.org/wiki/YAML), defined by the scheme below.
-Brackets indicate that a parameter is optional. For non-list parameters the
-value is set to the specified default.
-
-Generic placeholders are defined as follows:
-
-- `<boolean>`: a boolean that can take the values `true` or `false`
-- `<int>`: any integer matching the regular expression `[1-9]+[0-9]*`
-- `<duration>`: a duration matching the regular expression `[0-9]+(ns|us|µs|ms|[smh])`
-- `<labelname>`: a string matching the regular expression `[a-zA-Z_][a-zA-Z0-9_]*`
-- `<labelvalue>`: a string of unicode characters
-- `<filename>`: a valid path relative to current working directory or an
-  absolute path.
-- `<host>`: a valid string consisting of a hostname or IP followed by an optional port number
-- `<string>`: a regular string
-- `<secret>`: a regular string that is a secret, such as a password
-
-Supported contents and default values of `config.yaml`:
-
-```yaml
-# Configures the server of the Agent used to enable self-scraping.
-[server: <server_config>]
-
-# Configures metric collection.
-# In previous versions of the agent, this field was called "prometheus".
-[metrics: <metrics_config>]
-
-# Configures log collection.
-# In previous versions of the agent, this field was called "loki".
-[logs: <logs_config>]
-
-# Configures trace collection.
-# In previous versions of the agent, this field was called "tempo".
-[traces: <traces_config>]
-
-# Configures integrations for the Agent.
-[integrations: <integrations_config>]
-```
-
-## Remote Configuration (Experimental)
-
-An experimental feature for fetching remote configuration files over HTTP/S can be
-enabled by passing the `-enable-features=remote-configs` flag at the command line.
-With this feature enabled, you may pass an HTTP/S URL to the `-config.file` flag.
-
-The following flags will configure basic auth for requests made to HTTP/S remote config URLs:
-- `-config.url.basic-auth-user <user>`: the basic auth username
-- `-config.url.basic-auth-password-file <file>`: path to a file containing the basic auth password
-
-{{< admonition type="note" >}}
-This experimental feature is subject to change in future releases. 
-{{< /admonition >}}
-
-{{% docs/reference %}}
-[flags]: "/docs/agent/ -> /docs/agent//static/configuration/flags"
-[flags]: "/docs/grafana-cloud/ -> ./flags"
-[server]: "/docs/agent/ -> /docs/agent//static/configuration/server-config"
-[server]: "/docs/grafana-cloud/ -> ./server-config"
-[metrics]: "/docs/agent/ -> /docs/agent//static/configuration/metrics-config"
-[metrics]: "/docs/grafana-cloud/ -> ./metrics-config"
-[logs]: "/docs/agent/ -> /docs/agent//static/configuration/logs-config"
-[logs]: "/docs/grafana-cloud/ -> ./logs-config"
-[traces]: "/docs/agent/ -> /docs/agent//static/configuration/traces-config"
-[traces]: "/docs/grafana-cloud/ -> ./traces-config"
-[integrations]: "/docs/agent/ -> /docs/agent//static/configuration/integrations"
-[integrations]: "/docs/grafana-cloud/ -> ./integrations"
-[api]: "/docs/agent/ -> /docs/agent//static/api#reload-configuration-file-beta"
-[api]: "/docs/grafana-cloud/ -> ../api#reload-configuration-file-beta"
-{{% /docs/reference %}}
diff --git a/docs/sources/static/configuration/agent-management.md b/docs/sources/static/configuration/agent-management.md
deleted file mode 100644
index af327bb17b..0000000000
--- a/docs/sources/static/configuration/agent-management.md
+++ /dev/null
@@ -1,171 +0,0 @@
----
-aliases:
-- /docs/grafana-cloud/monitor-infrastructure/agent/static/configuration/agent-management/
-- /docs/grafana-cloud/send-data/agent/static/configuration/agent-management/
-canonical: https://grafana.com/docs/agent/latest/static/configuration/agent-management/
-description: Learn about Agent Management
-menuTitle: Agent Management
-title: Agent Management - Experimental
-weight: 700
----
-
-# Agent Management - Experimental
-
-**Agent Management is under active development. Backwards incompatible changes to its API are to be expected. Feedback is much appreciated. This is a feature that MAY NOT make it to production.**
-
-Agent Management enables centralized management of fleets of Grafana Agents.
-
-In this mode, Grafana Agent polls and dynamically reloads its configuration from a remote API server.
-
-Remote Configurations are composed of a base configuration and a set of snippets. Snippets are applied conditionally via label matching.
-
-## Configuration
-
-Agent Management can be used by passing the flag `-enable-features=agent-management`. When enabled, the file referred to by `-config.file` will be loaded as an agent management configuration file.
-
-Agent Management configuration files are YAML documents which conform to the following schema:
-
-```yaml
-# Agent Management configuration.
-agent_management:
-  # Host of the API server to connect to.
-  host: <host>
-
-  # Protocol to use when connecting to the API server (http|https).
-  protocol: <string>
-
-  # The polling interval for fetching the configuration.
-  polling_interval: <duration>
-
-  # Sets the `Authorization` header on every request with the
-  # configured username and password.
-  basic_auth:
-    [ username: <string> ]
-    [ password_file: <string> ]
-
-  # Optional proxy URL.
-  [ proxy_url: <string> ]
-
-  # Comma-separated string that can contain IPs, CIDR notation, domain names
-  # that should be excluded from proxying. IP and domain names can
-  # contain port numbers.
-  [ no_proxy: <string> ]
-
-  # Use proxy URL indicated by environment variables (HTTP_PROXY, https_proxy, HTTPs_PROXY, https_proxy, and no_proxy)
-  [ proxy_from_environment: <boolean> | default: false ]
-
-  # Specifies headers to send to proxies during CONNECT requests.
-  [ proxy_connect_header:
-    [ <string>: [<secret>, ...] ] ]
-
-  # Fields specific to remote configuration. 
-  remote_configuration:
-    # A path to a directory where the remote configuration will be cached. The directory must be writeable.
-    cache_location: <string>
-
-    # The namespace to use.
-    namespace: <string>
-
-    # Set of self-identifying labels used for snippet selection.
-    labels:
-      [ <labelname>: <labelvalue> ... ]
-
-    # Whether to use labels from the label management service. If enabled, labels from the API supersede the ones configured in the agent. The agent_id field must be defined.
-    label_management_enabled: <boolean> | default = false
-
-    # A unique ID for the agent, which is used to identify the agent.
-    agent_id: <string>
-
-    # Whether to accept HTTP 304 Not Modified responses from the API server. If enabled, the agent will use the cached configuration if the API server responds with HTTP 304 Not Modified. You can set this argument to `false` for debugging or testing.
-    accept_http_not_modified: <boolean> | default = true
-```
-
-## API
-
-Grafana Agents with Agent Management enabled continuously poll the API server for an up-to-date configuration. The API server is expected to implement a `GET /agent-management/api/agent/v2/namespace/:namespace/remote_config` HTTP endpoint returning a successful response with the following body format:
-
-```yaml
-# The base configuration for the Agent.
-base_config: |
-  <grafana_agent_config>
-
-# A set of snippets to be conditionally merged into the base configuration.
-snippets:
-  [ <snippet_name>: <snippet_content> ... ]
-```
-
-### grafana_agent_config
-
-This is a standard Grafana Agent [static mode configuration](/docs/agent/latest/static/configuration/). Typically used to configure the server, remote_writes, and other global configuration.
-
-### snippet_content
-
-The snippet content is a YAML document which conforms to the following schema:
-
-```yaml
-# Config provides the actual snippet configuration.
-config: |
-  [metrics_scrape_configs]:
-  - [<scrape_config> ... ]
-  [logs_scrape_configs]:
-  - [<promtail.scrape_config> ... ]
-  [integration_configs]:
-  [<integrations_config> ... ]
-# Selector is a set of labels used to decide which snippets to apply to the final configuration.
-selector:
-  [ <labelname>: <labelvalue> ... ]
-```
-
-> **Note:** More information on the following types can be found in their respective documentation pages:
->
-> * [`scrape_config`](https://prometheus.io/docs/prometheus/2.45/configuration/configuration/#scrape_config)
-> * [`promtail.scrape_config`](/docs/loki/latest/clients/promtail/configuration/#scrape_configs)
-> * [`integrations_config`](/docs/agent/latest/static/configuration/integrations)
-
-> **Note:** Snippet selection is currently done in the API server. This behavior is subject to change in the future.
-
-### Example response body
-
-```yaml
-base_config: |
-  server:
-    log_level: info
-  metrics:
-    global:
-      remote_write:
-      - basic_auth:
-          password_file: key.txt
-          username: 123
-        url: https://myserver.com/api/prom/push
-  logs:
-    positions_directory: /var/lib/grafana-agent
-    global:
-      clients:
-      - basic_auth:
-          password_file: key.txt
-          username: 456
-        url: https://myserver.com/loki/api/v1/push
-snippets:
-  snip1:
-    config: |
-      metrics_scrape_configs:
-      - job_name: 'prometheus'
-        scrape_interval: 60s
-        static_configs:
-        - targets: ['localhost:9090']
-      logs_scrape_configs:
-      - job_name: 'loki'
-        static_configs:
-        - targets: ['localhost:3100']
-      integration_configs:
-        node_exporter:
-          enabled: true
-    selector:
-      os: linux
-      app: app1
-```
-
-> **Note:** Base configurations and snippets can contain Go's [text/template](https://pkg.go.dev/text/template) actions. If you need to preserve the literal value of a template action, you can escape it using backticks. 
For example:
-
-```
-{{ `{{ .template_var }}` }}
-```
diff --git a/docs/sources/static/configuration/create-config-file.md b/docs/sources/static/configuration/create-config-file.md
deleted file mode 100644
index e4d77a3386..0000000000
--- a/docs/sources/static/configuration/create-config-file.md
+++ /dev/null
@@ -1,192 +0,0 @@
----
-aliases:
-- ../../configuration/create-config-file/
-- ../../set-up/create-config-file/
-- /docs/grafana-cloud/monitor-infrastructure/agent/static/configuration/create-config-file/
-- /docs/grafana-cloud/send-data/agent/static/configuration/create-config-file/
-canonical: https://grafana.com/docs/agent/latest/static/configuration/create-config-file/
-description: Learn how to create a configuration file
-title: Create a configuration file
-weight: 50
---
-
-# Create a configuration file
-
-The Grafana Agent supports configuring multiple independent "subsystems." Each
-subsystem helps you collect data for a specific type of telemetry.
-
-- The **Metrics** subsystem allows you to collect metrics to send to Prometheus.
-- The **Logs** subsystem allows you to collect logs to send to Grafana Loki.
-- The **Traces** subsystem allows you to collect spans to send to Grafana Tempo.
-- The **Integrations** subsystem allows you to collect metrics for common
-  applications, such as MySQL.
-
-Integrations are recommended for first-time users of observability platforms,
-especially newcomers to Prometheus. Users with more experience with Prometheus
-or users that already have an existing Prometheus config file can configure
-the Prometheus subsystem manually.
-
-## Integrations
-
-_Integrations_ are individual features that collect metrics for you. For
-example, the `agent` integration collects metrics from that running instance of
-the Grafana Agent. The `node_exporter` integration will collect metrics from the
-Linux machine that the Grafana Agent is running on.
-
-```yaml
-metrics:
-  wal_directory: /tmp/wal
-  global:
-    remote_write:
-      - url: http://localhost:9009/api/prom/push
-
-integrations:
-  agent:
-    enabled: true
-```
-
-In this example, we first must configure the `wal_directory` which is used to
-store metrics in a Write-Ahead Log (WAL). The WAL is required and ensures that samples
-will be redelivered in case of failure (e.g., network issues, machine reboot). We
-also configure `remote_write`, which is where all metrics should be sent by
-default.
-
-Then, the individual `integrations` are configured. In this example, just the
-`agent` integration is enabled. Finally, `remote_write` is configured
-with a location to send metrics. You will have to replace this URL with the
-appropriate URL for your `remote_write` system (such as a Grafana Cloud Hosted
-Prometheus instance).
-
-When the Agent is run with this file, it will collect metrics from itself and
-send those metrics to the default `remote_write` endpoint. All metrics from
-integrations will have an `instance` label matching the hostname of the machine
-the Grafana Agent is running on. This label helps to uniquely identify the
-source of metrics if you are running multiple Grafana Agents across multiple
-machines.
-
-Full configuration options can be found in the [configuration reference][configure].
-
-## Prometheus config/migrating from Prometheus
-
-The Prometheus subsystem config is useful for those migrating from Prometheus
-and those who want to scrape metrics from something that currently does not have
-an associated integration. 
- -To migrate from an existing Prometheus config, use this Agent config as a -template and copy and paste subsections from your existing Prometheus config -into it: - -```yaml -metrics: - global: - # PASTE PROMETHEUS global SECTION HERE - configs: - - name: agent - scrape_configs: - # PASTE scrape_configs SECTION HERE - remote_write: - # PASTE remote_write SECTION HERE -``` - -For example, this configuration file configures the Grafana Agent to -scrape itself without using the integration: - -```yaml -server: - log_level: info - -metrics: - global: - scrape_interval: 1m - configs: - - name: agent - scrape_configs: - - job_name: agent - static_configs: - - targets: ['127.0.0.1:12345'] - remote_write: - - url: http://localhost:9009/api/prom/push -``` - -Like with integrations, full configuration options can be found in the -[configuration][configure]. - -## Loki Config/Migrating from Promtail - -The Loki Config allows for collecting logs to send to a Loki API. Users that are -familiar with Promtail will notice that the Loki config for the Agent matches -their existing Promtail config with the following exceptions: - -- The deprecated field `client` is not present -- The `server` field is not present - -To migrate from an existing Promtail config, make sure you are using `clients` -instead of `client` and remove the `server` block if present. Then paste your -Promtail config into the Agent config file inside of a `logs` section: - -```yaml -logs: - configs: - - name: default - # PASTE YOUR PROMTAIL CONFIG INSIDE OF HERE -``` - -### Full config example - -Here is an example full config file, using integrations, Prometheus, Loki, and -Tempo: - -```yaml -server: - log_level: info - -metrics: - global: - scrape_interval: 1m - remote_write: - - url: http://localhost:9009/api/prom/push - configs: - - name: default - scrape_configs: - - job_name: agent - static_configs: - - targets: ['127.0.0.1:12345'] - -logs: - configs: - - name: default - positions: - filename: /tmp/positions.yaml - scrape_configs: - - job_name: varlogs - static_configs: - - targets: [localhost] - labels: - job: varlogs - __path__: /var/log/*log - clients: - - url: http://localhost:3100/loki/api/v1/push - -traces: - configs: - - name: default - receivers: - jaeger: - protocols: - grpc: # listens on the default jaeger grpc port: 14250 - remote_write: - - endpoint: localhost:55680 - insecure: true # only add this if TLS is not required - batch: - timeout: 5s - send_batch_size: 100 - -integrations: - node_exporter: - enabled: true -``` - -{{% docs/reference %}} -[configure]: "/docs/agent/ -> /docs/agent//static/configuration" -[configure]: "/docs/grafana-cloud/ -> /docs/grafana-cloud/send-data/agent/static/configuration" -{{% /docs/reference %}} diff --git a/docs/sources/static/configuration/flags.md b/docs/sources/static/configuration/flags.md deleted file mode 100644 index 42dc3fb12c..0000000000 --- a/docs/sources/static/configuration/flags.md +++ /dev/null @@ -1,158 +0,0 @@ ---- -aliases: -- ../../configuration/flags/ -- /docs/grafana-cloud/monitor-infrastructure/agent/static/configuration/flags/ -- /docs/grafana-cloud/send-data/agent/static/configuration/flags/ -canonical: https://grafana.com/docs/agent/latest/static/configuration/flags/ -description: Learn about command-line flags -title: Command-line flags -weight: 100 ---- - -# Command-line flags - -Command-line flags are used to configure settings of Grafana Agent which cannot -be updated at runtime. 
- -All flags may be prefixed with either one hyphen or two (i.e., both -`-config.file` and `--config.file` are valid). - -> Note: There may be flags returned by `-help` which are not listed here; this -> document only lists flags that do not have an equivalent in the YAML file. - -## Basic - -* `-version`: Print out version information -* `-help`: Print out help - -## Experimental feature flags - -Grafana Agent has some experimental features that must be enabled through -the `-enable-features` flag. This flag takes a comma-delimited list of feature -names to enable. - -Valid feature names are: - -* `remote-configs`: Enable [retrieving][retrieving] config files over HTTP/HTTPS -* `integrations-next`: Enable [revamp][revamp] of the integrations subsystem -* `extra-scrape-metrics`: When enabled, additional time series are exposed for each metrics instance scrape. See [Extra scrape metrics](https://prometheus.io/docs/prometheus/2.45/feature_flags/#extra-scrape-metrics). -* `agent-management`: Enable support for [agent management][management]. - -## Report information usage - -By default, Grafana Agent sends anonymous but uniquely identifiable usage information -from your running Grafana Agent instance to Grafana Labs. -These statistics are sent to `stats.grafana.org`. - -Statistics help us better understand how Grafana Agent is used. -This helps us prioritize features and documentation. - -The usage information includes the following details: -* A randomly generated, anonymous unique ID (UUID). -* Timestamp of when the UUID was first generated. -* Timestamp of when the report was created (by default, every 4h). -* Version of the running Grafana Agent. -* Operating system Grafana Agent is running on. -* System architecture Grafana Agent is running on. -* List of enabled feature flags. -* List of enabled integrations. - -This list may change over time. All newly reported data will also be documented in the CHANGELOG. - -To disable reporting, use the `-disable-reporting` flag. - -## Support bundles -Grafana Agent can export 'support bundles' from the `/-/support` -endpoint. Support bundles are zip files containing commonly used information -that provides a baseline for debugging issues with the Agent. - -Support bundles contain all information in plain text, so that they can be -inspected before sharing to verify that no sensitive information has leaked. - -Support bundles contain the following data: -* `agent-config.yaml` contains the current agent configuration (when the `-config.enable-read-api` flag is passed). -* `agent-logs.txt` contains the agent logs during the bundle generation. -* `agent-metadata.yaml` contains the agent's build version, operating system, architecture, uptime, plus a string payload defining which extra agent features have been enabled via command-line flags. -* `agent-metrics-instances.json` and `agent-metrics-targets.json` contain the active metric subsystem instances and the discovered scrape targets for each one. -* `agent-logs-instances.json` and `agent-logs-targets.json` contain the active logs subsystem instances and the discovered log targets for each one. -* `agent-metrics.txt` contains a snapshot of the agent's internal metrics. -* The `pprof/` directory contains Go runtime profiling data (CPU, heap, goroutine, mutex, block profiles) as exported by the pprof package.
- -To disable the endpoint that exports these support bundles, pass the -`-disable-support-bundle` command-line flag. - -## Configuration file - -* `-config.file`: Path to the configuration file to load. May be an HTTP(S) URL when the `remote-configs` feature is enabled. -* `-config.file.type`: Type of file which `-config.file` refers to (default `yaml`). Valid values are `yaml` and `dynamic`. -* `-config.expand-env`: Expand environment variables in the loaded configuration file -* `-config.enable-read-api`: Enables the `/-/config` and `/agent/api/v1/configs/{name}` API endpoints to print YAML configuration - -### Remote Configuration - -These flags require the `remote-configs` feature to be enabled: - -* `-config.url.basic-auth-user`: Basic Authentication username to use when fetching the remote configuration file -* `-config.url.basic-auth-password-file`: File containing a Basic Authentication password to use when fetching the remote configuration file - -## Server - -* `-server.register-instrumentation`: Expose the `/metrics` and `/debug/pprof/` instrumentation handlers over HTTP (default true) -* `-server.graceful-shutdown-timeout`: Timeout for a graceful server shutdown -* `-server.log.source-ips.enabled`: Whether to log IP addresses of incoming requests -* `-server.log.source-ips.header`: Header field to extract incoming request IPs from (defaults to Forwarded, X-Real-IP, X-Forwarded-For) -* `-server.log.source-ips.regex`: Regex to extract the IP out of the read header, using the first capture group as the IP address -* `-server.http.network`: HTTP server listen network (default `tcp`) -* `-server.http.address`: HTTP server listen host:port (default `127.0.0.1:12345`) -* `-server.http.enable-tls`: Enable TLS for the HTTP server -* `-server.http.conn-limit`: Maximum number of simultaneous HTTP connections -* `-server.http.idle-timeout`: HTTP server idle timeout -* `-server.http.read-timeout`: HTTP server read timeout -* `-server.http.write-timeout`: HTTP server write timeout -* `-server.http.in-memory-addr`: Internal address used for the agent to make - in-memory HTTP connections to itself (default `agent.internal:12345`). The - port number specified here is virtual and does not open a real network port.
-* `-server.grpc.network`: gRPC server listen network (default `tcp`) -* `-server.grpc.address`: gRPC server listen host:port (default `127.0.0.1:12346`) -* `-server.grpc.enable-tls`: Enable TLS for the gRPC server -* `-server.grpc.conn-limit`: Maximum number of simultaneous gRPC connections -* `-server.grpc.keepalive.max-connection-age`: Maximum age for any gRPC connection for a graceful shutdown -* `-server.grpc.keepalive.max-connection-age-grace`: Grace period to forcibly close connections after a graceful shutdown starts -* `-server.grpc.keepalive.max-connection-idle`: Time to wait before closing idle gRPC connections -* `-server.grpc.keepalive.min-time-between-pings`: Maximum frequency that clients may send pings at -* `-server.grpc.keepalive.ping-without-stream-allowed`: Allow clients to send pings without having a gRPC stream -* `-server.grpc.keepalive.time`: Frequency to send keepalive pings from the server -* `-server.grpc.keepalive.timeout`: How long to wait for a keepalive pong before closing the connection -* `-server.grpc.max-concurrent-streams`: Maximum number of concurrent gRPC streams (0 = unlimited) -* `-server.grpc.max-recv-msg-size-bytes`: Maximum size in bytes for received gRPC messages -* `-server.grpc.max-send-msg-size-bytes`: Maximum size in bytes for sent gRPC messages -* `-server.grpc.in-memory-addr`: Internal address used for the agent to make - in-memory gRPC connections to itself (default `agent.internal:12346`). The - port number specified here is virtual and does not open a real network port. - -### TLS Support - -TLS support can be enabled with the `-server.http.enable-tls` and -`-server.grpc.enable-tls` flags for the HTTP and gRPC servers respectively. - -`server.http_tls_config` and `integrations.http_tls_config` must be set in the -YAML configuration when the `-server.http.enable-tls` flag is used. - -`server.grpc_tls_config` must be set in the YAML configuration when the -`-server.grpc.enable-tls` flag is used.
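As a minimal sketch, the corresponding YAML might look like the following. The field names shown are an assumption based on the standard Prometheus-style TLS server configuration, and the certificate and key paths are placeholders:

```yaml
server:
  # Used together with the -server.http.enable-tls flag.
  http_tls_config:
    cert_file: /etc/agent/agent.crt
    key_file: /etc/agent/agent.key
  # Used together with the -server.grpc.enable-tls flag.
  grpc_tls_config:
    cert_file: /etc/agent/agent.crt
    key_file: /etc/agent/agent.key
```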
- -## Metrics - -* `-metrics.wal-directory`: Directory to store the metrics Write-Ahead Log in - -{{% docs/reference %}} -[retrieving]: "/docs/agent/ -> /docs/agent//static/configuration#remote-configuration-experimental" -[retrieving]: "/docs/grafana-cloud/ -> /docs/grafana-cloud/send-data/agent/static/configuration#remote-configuration-experimental" - -[revamp]: "/docs/agent/ -> /docs/agent//static/configuration/integrations/integrations-next/" -[revamp]: "/docs/grafana-cloud/ -> /docs/grafana-cloud/send-data/agent/static/configuration/integrations/integrations-next" - -[management]: "/docs/agent/ -> /docs/agent//static/configuration/agent-management" -[management]: "/docs/grafana-cloud/ -> /docs/grafana-cloud/send-data/agent/static/configuration/agent-management" -{{% /docs/reference %}} diff --git a/docs/sources/static/configuration/integrations/_index.md b/docs/sources/static/configuration/integrations/_index.md deleted file mode 100644 index f0053c2749..0000000000 --- a/docs/sources/static/configuration/integrations/_index.md +++ /dev/null @@ -1,153 +0,0 @@ ---- -aliases: -- ../../configuration/integrations/ -- /docs/grafana-cloud/monitor-infrastructure/agent/static/configuration/integrations/ -- /docs/grafana-cloud/send-data/agent/static/configuration/integrations/ -canonical: https://grafana.com/docs/agent/latest/static/configuration/integrations/ -description: Learn about integrations_config -title: integrations_config -weight: 500 ---- - -# integrations_config - -The `integrations_config` block configures how the Agent runs integrations that -scrape and send metrics without needing to run specific Prometheus exporters or -manually write `scrape_configs`: - -```yaml -# Controls the Agent integration -agent: - # Enables the Agent integration, allowing the Agent to automatically - # collect and send metrics about itself. - [enabled: | default = false] - - # Sets an explicit value for the instance label when the integration is - # self-scraped. Overrides inferred values. - # - # The default value for this integration is inferred from the agent hostname - # and HTTP listen port, delimited by a colon. - [instance: ] - - # Automatically collect metrics from this integration. If disabled, - # the agent integration will be run but not scraped and thus not - # remote_written. Metrics for the integration will be exposed at - # /integrations/agent/metrics and can be scraped by an external process. - [scrape_integration: | default = ] - - # How often should the metrics be collected? Defaults to - # prometheus.global.scrape_interval. - [scrape_interval: | default = ] - - # The timeout before considering the scrape a failure. Defaults to - # prometheus.global.scrape_timeout. - [scrape_timeout: | default = ] - - # How frequent to truncate the WAL for this integration. - [wal_truncate_frequency: | default = "60m"] - - # Allows for relabeling labels on the target. - relabel_configs: - [- ... ] - - # Relabel metrics coming from the integration, allowing to drop series - # from the integration that you don't care about. - metric_relabel_configs: - [ - ... ] - -# Client TLS Configuration -# Client Cert/Key Values need to be defined if the server is requesting a certificate -# (Client Auth Type = RequireAndVerifyClientCert || RequireAnyClientCert). 
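# A sketch of what this block might look like (an assumption for illustration:
# the block accepts standard Prometheus-style TLS fields, and the paths below
# are placeholders):
#   http_tls_config:
#     ca_file: /etc/agent/ca.crt
#     cert_file: /etc/agent/client.crt
#     key_file: /etc/agent/client.key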
-http_tls_config: - -# Controls the apache_http integration -apache_http: - -# Controls the node_exporter integration -node_exporter: - -# Controls the process_exporter integration -process_exporter: - -# Controls the mysqld_exporter integration -mysqld_exporter: - -# Controls the oracledb integration -oracledb: - -# Controls the redis_exporter integration -redis_exporter: - -# Controls the dnsmasq_exporter integration -dnsmasq_exporter: - -# Controls the elasticsearch_exporter integration -elasticsearch_exporter: - -# Controls the memcached_exporter integration -memcached_exporter: - -# Controls the mssql integration -mssql: - -# Controls the postgres_exporter integration -postgres_exporter: - -# Controls the snmp_exporter integration -snmp_exporter: - -# Controls the snowflake integration -snowflake: - -# Controls the statsd_exporter integration -statsd_exporter: - -# Controls the consul_exporter integration -consul_exporter: - -# Controls the windows_exporter integration -windows_exporter: - -# Controls the kafka_exporter integration -kafka_exporter: - -# Controls the mongodb_exporter integration -mongodb_exporter: - -# Controls the github_exporter integration -github_exporter: - -# Controls the blackbox_exporter integration -blackbox: - -# Controls the CloudWatch exporter integration -cloudwatch_exporter: - -# Controls the azure_exporter integration -azure_exporter: - -# Controls the gcp_exporter integration -gcp_exporter: - -# Controls the squid integration -squid: - -# Automatically collect metrics from enabled integrations. If disabled, -# integrations will be run but not scraped and thus not remote_written. Metrics -# for integrations will be exposed at /integrations//metrics -# and can be scraped by an external process. -[scrape_integrations: | default = true] - -# Extra labels to add to all samples coming from integrations. -labels: - { : } - -# The period to wait before restarting an integration that exits with an -# error. -[integration_restart_backoff: | default = "5s"] - -# A list of remote_write targets. Defaults to global_config.remote_write. -# If provided, overrides the global defaults. -prometheus_remote_write: - - [] -``` diff --git a/docs/sources/static/configuration/integrations/apache-exporter-config.md b/docs/sources/static/configuration/integrations/apache-exporter-config.md deleted file mode 100644 index 3edce2f275..0000000000 --- a/docs/sources/static/configuration/integrations/apache-exporter-config.md +++ /dev/null @@ -1,73 +0,0 @@ ---- -aliases: -- ../../../configuration/integrations/apache-exporter-config/ -- /docs/grafana-cloud/monitor-infrastructure/agent/static/configuration/integrations/apache-exporter-config/ -- /docs/grafana-cloud/send-data/agent/static/configuration/integrations/apache-exporter-config/ -canonical: https://grafana.com/docs/agent/latest/static/configuration/integrations/apache-exporter-config/ -description: Learn about apache_http_config -title: apache_http_config ---- - -# apache_http_config - -The `apache_http_config` block configures the `apache_http` integration, -which is an embedded version of -[`apache_exporter`](https://github.com/Lusitaniae/apache_exporter). This allows the collection of Apache [mod_status](https://httpd.apache.org/docs/current/mod/mod_status.html) statistics via HTTP. - -Full reference of options: - -```yaml - # Enables the apache_http integration, allowing the Agent to automatically - # collect metrics for the specified apache http servers. 
- [enabled: | default = false] - - # Sets an explicit value for the instance label when the integration is - # self-scraped. Overrides inferred values. - # - # The default value for this integration is inferred from the hostname portion - # of api_url. - [instance: ] - - # Automatically collect metrics from this integration. If disabled, - # the apache_http integration will be run but not scraped and thus not - # remote-written. Metrics for the integration will be exposed at - # /integrations/apache_http/metrics and can be scraped by an external - # process. - [scrape_integration: | default = ] - - # How often should the metrics be collected? Defaults to - # prometheus.global.scrape_interval. - [scrape_interval: | default = ] - - # The timeout before considering the scrape a failure. Defaults to - # prometheus.global.scrape_timeout. - [scrape_timeout: | default = ] - - # Allows for relabeling labels on the target. - relabel_configs: - [- ... ] - - # Relabel metrics coming from the integration, allowing to drop series - # from the integration that you don't care about. - metric_relabel_configs: - [ - ... ] - - # How frequent to truncate the WAL for this integration. - [wal_truncate_frequency: | default = "60m"] - - # - # Exporter-specific configuration options - # - - # URI to apache stub status page. - # If your server-status page is secured by http auth, add the credentials to the scrape URL following this example: - # http://user:password@localhost/server-status?auto . - [scrape_uri: | default = "http://localhost/server-status?auto"] - - # Override for HTTP Host header; empty string for no override. - [host_override: | default = ""] - - # Ignore server certificate if using https. - [insecure: | default = false] - -``` diff --git a/docs/sources/static/configuration/integrations/azure-exporter-config.md b/docs/sources/static/configuration/integrations/azure-exporter-config.md deleted file mode 100644 index d2aa146dfc..0000000000 --- a/docs/sources/static/configuration/integrations/azure-exporter-config.md +++ /dev/null @@ -1,299 +0,0 @@ ---- -aliases: -- ../../../configuration/integrations/azure-exporter-config/ -- /docs/grafana-cloud/monitor-infrastructure/agent/static/configuration/integrations/azure-exporter-config/ -- /docs/grafana-cloud/send-data/agent/static/configuration/integrations/azure-exporter-config/ -canonical: https://grafana.com/docs/agent/latest/static/configuration/integrations/azure-exporter-config/ -description: Learn about azure_exporter_config -title: azure_exporter_config ---- - -# azure_exporter_config - -## Overview -The `azure_exporter_config` block configures the `azure_exporter` integration, an embedded version of -[`azure-metrics-exporter`](https://github.com/webdevops/azure-metrics-exporter), used to -collect metrics from [Azure Monitor](https://azure.microsoft.com/en-us/products/monitor). - -The exporter offers the following two options for gathering metrics. - -1. (Default) Use an [Azure Resource Graph](https://azure.microsoft.com/en-us/get-started/azure-portal/resource-graph/#overview) query to identify resources for gathering metrics. - 1. This query will make one API call per resource identified. - 1. Subscriptions with a reasonable amount of resources can hit the [12000 requests per hour rate limit](https://learn.microsoft.com/en-us/azure/azure-resource-manager/management/request-limits-and-throttling#subscription-and-tenant-limits) Azure enforces. -1. Set the regions to gather metrics from and get metrics for all resources across those regions. - 1. 
This option will make one API call per subscription, dramatically reducing the number of API calls. - 1. This approach does not work with all resource types, and Azure does not document which resource types do or do not work. - 1. A resource type that is not supported produces errors that look like `Resource type: microsoft.containerservice/managedclusters not enabled for Cross Resource metrics`. - 1. If you encounter one of these errors, you must use the default Azure Resource Graph based option to gather metrics. - -## List of Supported Services and Metrics -The exporter supports all metrics defined by Azure Monitor. The complete list of available metrics can be found in the [Azure Monitor documentation](https://learn.microsoft.com/en-us/azure/azure-monitor/essentials/metrics-supported). -Metrics for this integration are exposed with the template `azure_{type}_{metric}_{aggregation}_{unit}`. As an example, -the Egress metric for BlobService would be exported as `azure_microsoft_storage_storageaccounts_blobservices_egress_total_bytes`. - -## Authentication - -The agent must be running in an environment with access to Azure. The exporter uses the Azure SDK for Go and supports [the SDK's default authentication methods](https://learn.microsoft.com/en-us/azure/developer/go/azure-sdk-authentication?tabs=bash#2-authenticate-with-azure). - -The account used by Grafana Agent needs: -* [Read access to the resources that will be queried by Resource Graph](https://learn.microsoft.com/en-us/azure/governance/resource-graph/overview#permissions-in-azure-resource-graph) -* Permissions to call the [Microsoft.Insights Metrics API](https://learn.microsoft.com/en-us/rest/api/monitor/metrics/list), which should be the `Microsoft.Insights/Metrics/Read` permission - -## Configuration - -### Config Reference - -```yaml - # - # Common Integration Settings - # - - # Enables the azure_exporter integration, allowing the Agent to automatically collect or expose Azure metrics - [enabled: | default = false] - - # Sets an explicit value for the instance label when the integration is self-scraped. Default will be - # based on the subscriptions and ResourceType being monitored - [instance: ] - - # Automatically collect metrics from this integration. If disabled, the exporter integration will be run but not - # scraped and thus not remote-written. Metrics for the integration will be exposed at - # /integrations/azure_exporter/metrics and can be scraped by an external process. - [scrape_integration: | default = ] - - # How often should the metrics be collected? Defaults to - # prometheus.global.scrape_interval. - [scrape_interval: | default = ] - - # The timeout before considering the scrape a failure. Defaults to - # prometheus.global.scrape_timeout. - [scrape_timeout: | default = ] - - # Allows for relabeling labels on the target. - relabel_configs: - [- ... ] - - # Relabel metrics coming from the integration, allowing to drop series - # from the integration that you don't care about. - metric_relabel_configs: - [ - ... ] - - # How frequent to truncate the WAL for this integration. - [wal_truncate_frequency: | default = "60m"] - - # - # Exporter specific configuration - # - - # Required: The Azure subscription(s) to scrape metrics from - subscriptions: - [ - ... 
] - - # Required: The Azure Resource Type to scrape metrics for - # Valid values can be found as the heading names on this page https://learn.microsoft.com/en-us/azure/azure-monitor/essentials/metrics-supported - # Ex: Microsoft.Cache/redis - [resource_type: ] - - # Required: The metrics to scrape from resources - # Valid values can be found in the `Metric` column for the`resource_type` https://learn.microsoft.com/en-us/azure/azure-monitor/essentials/metrics-supported - # Example: - # resource_type: Microsoft.Cache/redis - # metrics: - # - allcachehits - metrics: - [ - ... ] - - # Optional: The [kusto query](https://learn.microsoft.com/en-us/azure/data-explorer/kusto/query/) filter to apply when searching for resources - # This value will be embedded in to a template query of the form `Resources | where type =~ "" | project id, tags` - # Can't be used if `regions` is set. - [resource_graph_query_filter: ] - - # Optional: The list of regions for gathering metrics. Enables gathering metrics for all resources in the subscription. - # The list of available `regions` to your subscription can be found by running the Azure CLI command `az account list-locations --query '[].name'`. - # Can't be used if `resource_graph_query_filter` is set. - regions: - [ - ... ] - - # Optional: Aggregation to apply for the metrics produced. Valid values are minimum, maximum, average, total, and count - # If no aggregation is specified the value for `Aggregation Type` on the `Metric` is used from https://learn.microsoft.com/en-us/azure/azure-monitor/essentials/metrics-supported - metric_aggregations: - [ - ... ] - - # Optional: An [ISO8601 Duration](https://en.wikipedia.org/wiki/ISO_8601#Durations) used when querying the metric value - [timespan: | default = "PT1M"] - - # Optional: Used to include `Dimensions` available to a `Metric` definition https://learn.microsoft.com/en-us/azure/azure-monitor/essentials/metrics-supported - # These will appear as labels on the metrics, - # If a single dimension is requested it will have the name `dimension` - # If multiple dimensions are requested they will have the name `dimension` - # Example: - # resource_type: Microsoft.Cache/redis - # metrics: - # - allcachehits - # included_dimensions: - # - ShardId - # - Port - # - Primary - included_dimensions: - [ - ... ] - - # Optional: A list of resource tags to include on the final metrics - # These are added as labels with the name `tag_` - included_resource_tags: - [ - ... 
] - - # Optional: used for ResourceTypes which have multiple levels of metrics - # Example: the resource_type Microsoft.Storage/storageAccounts has metrics for - # Microsoft.Storage/storageAccounts (generic metrics which apply to all storage accounts) - # Microsoft.Storage/storageAccounts/blobServices (generic metrics + metrics which only apply to blob stores) - # Microsoft.Storage/storageAccounts/fileServices (generic metrics + metrics which only apply to file stores) - # Microsoft.Storage/storageAccounts/queueServices (generic metrics + metrics which only apply to queue stores) - # Microsoft.Storage/storageAccounts/tableServices (generic metrics + metrics which only apply to table stores) - # If you want blob store metrics you will need to set - # resource_type: Microsoft.Storage/storageAccounts - # metric_namespace = Microsoft.Storage/storageAccounts/blobServices - [metric_namespace: ] - - # Optional: Which Azure cloud environment to connect to: azurecloud, azurechinacloud, azuregovernmentcloud, or azureprivatecloud - [azure_cloud_environment: | default = "azurecloud"] - - # Optional: Validation is disabled by default to reduce the number of Azure exporter instances required when a `resource_type` has metrics with varying dimensions. - # Choosing to enable `validate_dimensions` will require one exporter instance per metric + dimension combination, which can be very tedious to maintain. - [validate_dimensions: | default = false] -``` - -### Examples - -#### Azure Kubernetes Service Node Metrics -```yaml - azure_exporter: - enabled: true - scrape_interval: 60s - subscriptions: - - - resource_type: microsoft.containerservice/managedclusters - metrics: - - node_cpu_usage_millicores - - node_cpu_usage_percentage - - node_disk_usage_bytes - - node_disk_usage_percentage - - node_memory_rss_bytes - - node_memory_rss_percentage - - node_memory_working_set_bytes - - node_memory_working_set_percentage - - node_network_in_bytes - - node_network_out_bytes - included_resource_tags: - - environment - included_dimensions: - - node - - nodepool - - device -``` - -#### Blob Storage Metrics -```yaml - azure_exporter: - enabled: true - scrape_interval: 60s - subscriptions: - - - resource_type: Microsoft.Storage/storageAccounts - metric_namespace: Microsoft.Storage/storageAccounts/blobServices - regions: - - westeurope - metrics: - - Availability - - BlobCapacity - - BlobCount - - ContainerCount - - Egress - - IndexCapacity - - Ingress - - SuccessE2ELatency - - SuccessServerLatency - - Transactions - included_dimensions: - - ApiName - - TransactionType - timespan: PT1H -``` - -### Multiple Azure Services in a single config - -The Azure Metrics API has rather strict limitations on the number of parameters which can be supplied. Due to this, you cannot -gather metrics from multiple `resource_types` in the same `azure_exporter` instance. If you need metrics from multiple resources, -you can enable `integrations-next` or configure the Agent to expose the exporter through the `azure_exporter` config and scrape it with `scrape_configs` in the metrics subsystem. The following example configuration combines the two examples above in a single Agent configuration. - -> **Note**: This is not a complete configuration; blocks have been removed for simplicity.
- -```yaml -integrations: - azure_exporter: - enabled: true - scrape_integration: false - azure_cloud_environment: azurecloud - -metrics: - configs: - - name: integrations - scrape_configs: - - job_name: azure-blob-storage - scrape_interval: 1m - scrape_timeout: 50s - static_configs: - - targets: ["localhost:12345"] - metrics_path: /integrations/azure_exporter/metrics - params: - subscriptions: - - 179c4f30-ebd8-489e-92bc-fb64588dadb3 - resource_type: ["Microsoft.Storage/storageAccounts"] - regions: - - westeurope - metric_namespace: ["Microsoft.Storage/storageAccounts/blobServices"] - metrics: - - Availability - - BlobCapacity - - BlobCount - - ContainerCount - - Egress - - IndexCapacity - - Ingress - - SuccessE2ELatency - - SuccessServerLatency - - Transactions - included_dimensions: - - ApiName - - TransactionType - timespan: ["PT1H"] - - job_name: azure-kubernetes-node - scrape_interval: 1m - scrape_timeout: 50s - static_configs: - - targets: ["localhost:12345"] - metrics_path: /integrations/azure_exporter/metrics - params: - subscriptions: - - 179c4f30-ebd8-489e-92bc-fb64588dadb3 - resource_type: ["microsoft.containerservice/managedclusters"] - resource_graph_query_filter: [" where location == 'westeurope'"] - metrics: - - node_cpu_usage_millicores - - node_cpu_usage_percentage - - node_disk_usage_bytes - - node_disk_usage_percentage - - node_memory_rss_bytes - - node_memory_rss_percentage - - node_memory_working_set_bytes - - node_memory_working_set_percentage - - node_network_in_bytes - - node_network_out_bytes - included_resource_tags: - - environment - included_dimensions: - - node - - nodepool - - device -``` - -In this example, all `azure_exporter`-specific configuration settings have been moved to the `scrape_config`. This method supports all available configuration options except `azure_cloud_environment`, which must be configured on the `azure_exporter`. For this method, if a field supports a singular value like `resource_graph_query_filter`, you -must put it into an array, for example, `resource_graph_query_filter: ["where location == 'westeurope'"]`. diff --git a/docs/sources/static/configuration/integrations/blackbox-config.md b/docs/sources/static/configuration/integrations/blackbox-config.md deleted file mode 100644 index 77a592ddb0..0000000000 --- a/docs/sources/static/configuration/integrations/blackbox-config.md +++ /dev/null @@ -1,120 +0,0 @@ ---- -aliases: -- ../../../configuration/integrations/blackbox-config/ -- /docs/grafana-cloud/monitor-infrastructure/agent/static/configuration/integrations/blackbox-config/ -- /docs/grafana-cloud/send-data/agent/static/configuration/integrations/blackbox-config/ -canonical: https://grafana.com/docs/agent/latest/static/configuration/integrations/blackbox-config/ -description: Learn about blackbox_config -title: blackbox_config ---- - -# blackbox_config - -The `blackbox_config` block configures the `blackbox_exporter` -integration, which is an embedded version of -[`blackbox_exporter`](https://github.com/prometheus/blackbox_exporter). This allows -for the collection of blackbox metrics (probes), exposing them as Prometheus metrics.
- -## Quick configuration example - -To get started, define Blackbox targets in Grafana Agent's integration block: - -```yaml -metrics: - wal_directory: /tmp/wal -integrations: - blackbox: - enabled: true - blackbox_targets: - - name: example - address: http://example.com - module: http_2xx - blackbox_config: - modules: - http_2xx: - prober: http - timeout: 5s - http: - method: POST - headers: - Content-Type: application/json - body: '{}' - preferred_ip_protocol: "ip4" -``` - -Full reference of options: - -```yaml - # Enables the blackbox_exporter integration, allowing the Agent to automatically - # collect probe metrics from the configured blackbox targets - [enabled: | default = false] - - # Sets an explicit value for the instance label when the integration is - # self-scraped. Overrides inferred values. - # - # The default value for this integration is inferred from the agent hostname - # and HTTP listen port, delimited by a colon. - [instance: ] - - # Automatically collect metrics from this integration. If disabled, - # the blackbox_exporter integration will be run but not scraped and thus not - # remote-written. Metrics for the integration will be exposed at - # /integrations/blackbox/metrics and can be scraped by an external - # process. - [scrape_integration: | default = ] - - # How often should the metrics be collected? Defaults to - # prometheus.global.scrape_interval. - [scrape_interval: | default = ] - - # The timeout before considering the scrape a failure. Defaults to - # prometheus.global.scrape_timeout. - [scrape_timeout: | default = ] - - # Allows for relabeling labels on the target. - relabel_configs: - [- ... ] - - # Relabel metrics coming from the integration, allowing to drop series - # from the integration that you don't care about. - metric_relabel_configs: - [ - ... ] - - # How frequent to truncate the WAL for this integration. - [wal_truncate_frequency: | default = "60m"] - - # - # Exporter-specific configuration options - # - - # blackbox configuration file with custom modules. - # This field takes precedence over the config defined in the blackbox_config block. - # See https://github.com/prometheus/blackbox_exporter/blob/master/example.yml for more details on how to generate a custom blackbox.yml file. - [config_file: | default = ""] - - # Embedded blackbox configuration. You can specify your modules here instead of an external config file. - # Either config_file or blackbox_config must be specified. - # See https://github.com/prometheus/blackbox_exporter/blob/master/CONFIGURATION.md for more details on how to specify your blackbox modules. - blackbox_config: - [- ... ] - - # List of targets to probe - blackbox_targets: - [- ... ] - - # Option to configure blackbox_exporter. - # Represents the offset to subtract from timeout in seconds when probing targets.
- [probe_timeout_offset: | default = 0.5] -``` -## blackbox_target config - -```yaml - # Name of a blackbox_target - [name: ] - - # The address of the target to probe - [address: ] - - # Blackbox module to use to probe - [module: | default = ""] -``` diff --git a/docs/sources/static/configuration/integrations/cadvisor-config.md b/docs/sources/static/configuration/integrations/cadvisor-config.md deleted file mode 100644 index a4a33b4df2..0000000000 --- a/docs/sources/static/configuration/integrations/cadvisor-config.md +++ /dev/null @@ -1,121 +0,0 @@ ---- -aliases: -- ../../../configuration/integrations/cadvisor-config/ -- /docs/grafana-cloud/monitor-infrastructure/agent/static/configuration/integrations/cadvisor-config/ -- /docs/grafana-cloud/send-data/agent/static/configuration/integrations/cadvisor-config/ -canonical: https://grafana.com/docs/agent/latest/static/configuration/integrations/cadvisor-config/ -description: Learn about cadvisor_config -title: cadvisor_config ---- - -# cadvisor_config - -The `cadvisor_config` block configures the `cadvisor` integration, -which is an embedded version of -[`cadvisor`](https://github.com/google/cadvisor). This allows for the collection of container utilization metrics. - -The cAdvisor integration requires some broad privileged permissions to the host. Without these permissions, the metrics will not be accessible. This means that the agent must *also* have those elevated permissions. - -A good example of the required file and system permissions can be found in the `docker run` command published in the [cAdvisor docs](https://github.com/google/cadvisor#quick-start-running-cadvisor-in-a-docker-container). - -Full reference of options: - -```yaml - # Enables the cadvisor integration, allowing the Agent to automatically - # collect container metrics via cAdvisor. - [enabled: | default = false] - - # Sets an explicit value for the instance label when the integration is - # self-scraped. Overrides inferred values. - [instance: | default = ] - - # Automatically collect metrics from this integration. If disabled, - # the cadvisor integration will be run but not scraped and thus not - # remote-written. Metrics for the integration will be exposed at - # /integrations/cadvisor/metrics and can be scraped by an external - # process. - [scrape_integration: | default = ] - - # How often should the metrics be collected? Defaults to - # prometheus.global.scrape_interval. - [scrape_interval: | default = ] - - # The timeout before considering the scrape a failure. Defaults to - # prometheus.global.scrape_timeout. - [scrape_timeout: | default = ] - - # Allows for relabeling labels on the target. - relabel_configs: - [- ... ] - - # Relabel metrics coming from the integration, allowing to drop series - # from the integration that you don't care about. - metric_relabel_configs: - [ - ... ] - - # How frequent to truncate the WAL for this integration. - [wal_truncate_frequency: | default = "60m"] - - # - # cAdvisor-specific configuration options - # - - # Convert container labels and environment variables into labels on Prometheus metrics for each container. If false, then the only metrics exported are container name, first alias, and image name. `.` aren't valid in Prometheus label names, so if there are any in the container label, they will be transformed to `_` when converted to the Prometheus label. - [store_container_labels: | default = true] - - # List of container labels to be converted to labels on Prometheus metrics for each container.
store_container_labels must be set to false for this to take effect. This must match the format of the container label, not the converted Prometheus label (`.` are converted to `_` in the Prometheus label). - allowlisted_container_labels: - [ - ] - - # List of environment variable keys, matched with a specified prefix, that need to be collected for containers. Only the containerd and docker runtimes are supported for now. - env_metadata_allowlist: - [ - ] - - # List of cgroup path prefixes that need to be collected even when docker_only is specified. - raw_cgroup_prefix_allowlist: - [ - ] - - # Path to a JSON file containing configuration of perf events to measure. An empty value disables perf event measuring. - [perf_events_config: ] - - # Interval for updating resctrl mon groups. A zero value disables updating mon groups. - [resctrl_interval: | default = 0] - - # List of `metrics` to be disabled. If set, overrides the default disabled metrics. - disabled_metrics: - [ - ] - - # List of `metrics` to be enabled. If set, overrides disabled_metrics - enabled_metrics: - [ - ] - - # Length of time to keep data stored in memory - [storage_duration: | default = "2m"] - - # Containerd endpoint - [containerd: | default = "/run/containerd/containerd.sock"] - - # Containerd namespace - [containerd_namespace: | default = "k8s.io"] - - # Docker endpoint - [docker: | default = "unix:///var/run/docker.sock"] - - # Use TLS to connect to docker - [docker_tls: | default = false] - - # Path to client certificate for TLS connection to docker - [docker_tls_cert: | default = "cert.pem"] - - # Path to private key for TLS connection to docker - [docker_tls_key: | default = "key.pem"] - - # Path to a trusted CA for TLS connection to docker - [docker_tls_ca: | default = "ca.pem"] - - # Only report docker containers in addition to root stats - [docker_only: | default = false] - - # Disable collecting root Cgroup stats - [disable_root_cgroup_stats: | default = false] -``` diff --git a/docs/sources/static/configuration/integrations/cloudwatch-exporter-config.md b/docs/sources/static/configuration/integrations/cloudwatch-exporter-config.md deleted file mode 100644 index 6495625b76..0000000000 --- a/docs/sources/static/configuration/integrations/cloudwatch-exporter-config.md +++ /dev/null @@ -1,468 +0,0 @@ ---- -aliases: -- ../../../configuration/integrations/cloudwatch-exporter-config/ -- /docs/grafana-cloud/monitor-infrastructure/agent/static/configuration/integrations/cloudwatch-exporter-config/ -- /docs/grafana-cloud/send-data/agent/static/configuration/integrations/cloudwatch-exporter-config/ -canonical: https://grafana.com/docs/agent/latest/static/configuration/integrations/cloudwatch-exporter-config/ -description: Learn about cloudwatch_exporter_config -title: cloudwatch_exporter_config ---- - -# cloudwatch_exporter_config - -## Overview - -The `cloudwatch_exporter_config` block configures the `cloudwatch_exporter` integration, which is an embedded version of -[`YACE`](https://github.com/nerdswords/yet-another-cloudwatch-exporter/). Use the `cloudwatch_exporter` to collect [AWS CloudWatch](https://docs.aws.amazon.com/AmazonCloudWatch/latest/monitoring/WhatIsCloudWatch.html) metrics. - -This integration lets you scrape CloudWatch metrics in a set of configurations that we will call *jobs*. There are -two kinds of jobs: [`discovery`](#discovery_job) and [`static`](#static_job). - -## Authentication - -The agent must be running in an environment with access to AWS.
The exporter uses the [AWS SDK for Go](https://aws.github.io/aws-sdk-go-v2/docs/getting-started/) and -provides authentication via [AWS's default credential chain](https://aws.github.io/aws-sdk-go-v2/docs/configuring-sdk/#specifying-credentials). Regardless of the method used to acquire the credentials, -some permissions are needed for the exporter to work. -``` -"tag:GetResources", -"cloudwatch:GetMetricData", -"cloudwatch:GetMetricStatistics", -"cloudwatch:ListMetrics" -``` - -The following IAM permissions are required for the [Transit Gateway](https://aws.amazon.com/transit-gateway/) attachment (tgwa) metrics to work. -``` -"ec2:DescribeTags", -"ec2:DescribeInstances", -"ec2:DescribeRegions", -"ec2:DescribeTransitGateway*" -``` - -The following IAM permission is required to discover tagged [API Gateway](https://aws.amazon.com/es/api-gateway/) REST APIs: -``` -"apigateway:GET" -``` - -The following IAM permissions are required to discover tagged [Database Migration Service](https://aws.amazon.com/dms/) (DMS) replication instances and tasks: -``` -"dms:DescribeReplicationInstances", -"dms:DescribeReplicationTasks" -``` - -To use all of the integration features, use the following AWS IAM Policy: - -```json -{ - "Version": "2012-10-17", - "Statement": [ - { - "Sid": "Stmt1674249227793", - "Action": [ - "tag:GetResources", - "cloudwatch:GetMetricData", - "cloudwatch:GetMetricStatistics", - "cloudwatch:ListMetrics", - "ec2:DescribeTags", - "ec2:DescribeInstances", - "ec2:DescribeRegions", - "ec2:DescribeTransitGateway*", - "apigateway:GET", - "dms:DescribeReplicationInstances", - "dms:DescribeReplicationTasks" - ], - "Effect": "Allow", - "Resource": "*" - } - ] -} -``` - -## Configuration options - -Configuration reference: - -```yaml - # - # Common Integration Settings - # - - # Enables the cloudwatch_exporter integration, allowing the Agent to automatically - # collect CloudWatch metrics as configured. - [enabled: | default = false] - - # Sets an explicit value for the instance label when the integration is - # self-scraped. Overrides inferred values. - # - # The default value for this integration is a hash of the whole integration configuration. - [instance: ] - - # Automatically collect metrics from this integration. If disabled, - # the cloudwatch_exporter integration is run but not scraped and thus not - # remote-written. Metrics for the integration are exposed at - # /integrations/cloudwatch_exporter/metrics and can be scraped by an external - # process. - [scrape_integration: | default = ] - - # How often should the metrics be collected. Defaults to - # prometheus.global.scrape_interval. - [scrape_interval: | default = ] - - # The timeout before considering the scrape a failure. Defaults to - # prometheus.global.scrape_timeout. - [scrape_timeout: | default = ] - - # Allows for relabeling labels on the target. - relabel_configs: - [- ... ] - - # Relabel metrics coming from the integration, allowing to drop series - # from the integration that you don't care about. - metric_relabel_configs: - [ - ... ] - - # How frequent to truncate the WAL for this integration. - [wal_truncate_frequency: | default = "60m"] - - # - # Exporter-specific configuration options - # - - # Required: AWS region to use when calling STS (https://docs.aws.amazon.com/STS/latest/APIReference/welcome.html) for retrieving - # account information. - # Ex: us-east-2 - sts_region: - - # Optional: Disable use of FIPS endpoints. Set 'true' when running outside of USA regions. 
- [fips_disabled: | default = false] - - - # Instead of retrieving metrics on request, decoupled scraping retrieves the - # metrics on a schedule and returns the cached metrics. - decoupled_scraping: - # Enable decoupled scraping. - [enabled: | default = false ] - - # How often to scrape for CloudWatch metrics. - [scrape_interval: | default = "5m"] - - discovery: - - # Optional: List of tags (value) per service (key) to export in all metrics. For example, defining ["name", "type"] under - # AWS/EC2 will export the name and type tags and their values as labels in all metrics. Affects all discovery jobs. - # Ex: - # exported_tags: - # AWS/EC2: - # - name - exported_tags: - { : [ ] } - - # List of discovery jobs - jobs: [ ] - - # List of static jobs - static: [ ] - - # Optional: Enable debug logging on CloudWatch exporter internals. - [debug: | default = false] -``` - -### discovery_job - -A discovery job allows you to define just the AWS service to scrape, and the metrics under that service/namespace to retrieve. -The agent will find AWS resources in the specified service for which to scrape these metrics, label them appropriately, and -export them to Prometheus. For example, if we wanted to scrape CPU utilization and network traffic metrics from all AWS -EC2 instances: - -```yaml -sts_region: us-east-2 -discovery: - jobs: - - type: AWS/EC2 - regions: - - us-east-2 - nil_to_zero: true - metrics: - - name: CPUUtilization - period: 5m - statistics: - - Average - nil_to_zero: true - - name: NetworkPacketsIn - period: 5m - statistics: - - Average - nil_to_zero: true -``` - -Configuration reference: - -```yaml - # Required: List of AWS regions. - regions: [ ] - - # Optional: List of IAM roles to assume. Defaults to the AWS role of the credentials configured in the environment. - roles: [ ] - - # Required: CloudWatch service alias ("alb", "ec2", etc.) or namespace name ("AWS/EC2", "AWS/S3", etc.). See the section below for all - # supported values. - type: - - # Optional: List of `Key/Value` pairs to use for tag filtering (all must match). Value can be a regex. - search_tags: [ ] - - # Optional: Custom tags to be added as a list of `Key/Value` pairs. When exported to Prometheus format, the label name follows - # the following format: `custom_tag_{Key}`. - custom_tags: [ ] - - # Optional: List of metric dimensions to query. Before querying metric values, the total list of metrics will be filtered to only those that contain exactly this list of dimensions. An empty or undefined list results in all dimension combinations being included. - dimension_name_requirements: [ ] - - # Optional: Flag that controls if `NaN` metric values are converted to 0. Default `true`. This can be overridden in the config of each metric. - nil_to_zero: - - # Required: List of metric definitions to scrape. - metrics: [ ] -``` - -### static_job - -A static job allows you to scrape an individual CloudWatch metric. For that, metrics need to be fully qualified, specifying the following: -1. `namespace`: For example `AWS/EC2`, `AWS/EBS`, or `CoolApp` if it were a custom metric. -2. `dimensions`: CloudWatch identifies a metric by a set of dimensions. For example, all `AWS/EC2` metrics are identified by the `InstanceId` dimension. -3. `metrics`: Metric name and statistics.
- -For example, if you want to scrape the same metrics as in the discovery example, but for a specific AWS EC2 instance: - -```yaml -sts_region: us-east-2 -static: - - name: single_ec2_instance - regions: - - us-east-2 - namespace: AWS/EC2 - dimensions: - - name: InstanceId - value: i-0e43cee369aa44b52 - nil_to_zero: true - metrics: - - name: CPUUtilization - period: 5m - statistics: - - Average - nil_to_zero: true - - name: NetworkPacketsIn - period: 5m - statistics: - - Average - nil_to_zero: true -``` - -All dimensions need to be specified when scraping single metrics like the example above. For example, `AWS/Logs` metrics -require `Resource`, `Service`, `Class`, and `Type` dimensions to be specified. The same applies to CloudWatch custom metrics: -all dimensions attached to a metric when saved in CloudWatch are required. - -Configuration reference: - -```yaml - # Required: List of AWS regions. - regions: [ ] - - # Optional: List of IAM roles to assume. Defaults to the AWS role of the credentials configured in the environment. - roles: [ ] - - # Required: Identifier of the static scraping job. When exported to Prometheus format, corresponds to the `name` label. - name: - - # Required: CloudWatch namespace - namespace: - - # Required: CloudWatch metric dimensions as a list of Name/Value pairs. Must uniquely define a single metric. - dimensions: [ ] - - # Optional: Custom tags to be added as a list of Key/Value pairs. When exported, the label name follows the following format: - # `custom_tag_{Key}`. - custom_tags: [ ] - - # Optional: Flag that controls if `NaN` metric values are converted to 0. Default `true`. This can be overridden in the config of each metric. - nil_to_zero: - - # Required: List of metric definitions to scrape. - metrics: [ ] -``` - -### aws_role - -Represents an [AWS IAM Role](https://docs.aws.amazon.com/IAM/latest/UserGuide/id_roles.html). Can be set when configuring a job. If omitted, -the AWS role that the credentials configured in the environment possess will be used. - -This is useful when scraping metrics from different AWS accounts with a single pair of credentials. In this case, a different role -is configured for the agent to assume prior to calling AWS APIs; therefore, the credentials configured in the system need -permission to assume the target role. See [this documentation](https://docs.aws.amazon.com/IAM/latest/UserGuide/id_roles_use_permissions-to-switch.html) on how to configure this. - -```yaml - # Required: AWS IAM Role ARN the exporter should assume to perform AWS API calls. - role_arn: - - # Optional: External ID used when calling STS AssumeRole API. See https://docs.aws.amazon.com/IAM/latest/UserGuide/id_roles_create_for-user_externalid.html for details. - external_id: -``` - -### aws_dimension - -Represents an [AWS CloudWatch Dimension](https://docs.aws.amazon.com/AmazonCloudWatch/latest/monitoring/cloudwatch_concepts.html#Dimension). - -```yaml - name: - value: -``` - -### aws_tag - -Represents an [AWS Tag](https://docs.aws.amazon.com/general/latest/gr/aws_tagging.html). - -```yaml - key: - value: -``` - -### metric - -Represents an AWS metric to scrape, in the context of a job. AWS does not keep a documentation page listing all available metrics. -Follow [this guide](https://docs.aws.amazon.com/AmazonCloudWatch/latest/monitoring/viewing_metrics_with_cloudwatch.html) on how to explore metrics and easily -pick the ones you need. - -```yaml - # Required: CloudWatch metric name. - name: - - # Required: List of statistic types, e.g.
"Minimum", "Maximum", etc. - statistics: [ ] - - # Optional: See the `period and length` section below. - period: [ | default = 5m ] - - # Optional: See the `period and length` section below. - length: [ | default = calculated based on `period` ] - - # Optional: Flag that controls if `NaN` metric values are converted to 0. - # When not set, the value defaults to the setting in the parent static or discovery block (`true` if not set in the parent block). - nil_to_zero: -``` - -### Period and length - -`period` controls the width of the time bucket used for aggregating metrics collected from CloudWatch. -`length` controls how far back in time CloudWatch metrics are considered during each agent scrape. -If both settings are configured, the time parameters when calling CloudWatch APIs work as follows: - -![](https://grafana.com/media/docs/agent/cloudwatch-period-and-length-time-model-2.png) - -As noted above, if there is a different `period` or `length` across multiple metrics under the same static or discovery job, -the minimum of all periods, and maximum of all lengths is configured. - -On the other hand, if `length` is not configured, both period and length settings are calculated based on -the required `period` configuration attribute. - -If all metrics within a job (discovery or static) have the same `period` value configured, CloudWatch APIs will be -requested for metrics from the scrape time, to `period`s seconds in the past. -The values of these metrics are exported to Prometheus. - -![](https://grafana.com/media/docs/agent/cloudwatch-single-period-time-model.png) - -On the other hand, if metrics with different `period`s are configured under an individual job, this works differently. -First, two variables are calculated aggregating all periods: `length`, taking the maximum value of all periods, and -the new `period` value, taking the minimum of all periods. Then, CloudWatch APIs will be requested for metrics from -`now - length` to `now`, aggregating each in samples for `period` seconds. For each metric, the most recent sample -is exported to CloudWatch. - -![](https://grafana.com/media/docs/agent/cloudwatch-multiple-period-time-model.png) - -## Supported services in discovery jobs - -The following is a list of AWS services that are supported in `cloudwatch_exporter` discovery jobs. When configuring a -discovery job, the `type` field of each `discovery_job` must match either the desired job namespace or alias. 
- -- Namespace: `CWAgent` or Alias: `cwagent` -- Namespace: `AWS/Usage` or Alias: `usage` -- Namespace: `AWS/CertificateManager` or Alias: `acm` -- Namespace: `AWS/ACMPrivateCA` or Alias: `acm-pca` -- Namespace: `AmazonMWAA` or Alias: `airflow` -- Namespace: `AWS/MWAA` or Alias: `mwaa` -- Namespace: `AWS/ApplicationELB` or Alias: `alb` -- Namespace: `AWS/AppStream` or Alias: `appstream` -- Namespace: `AWS/Backup` or Alias: `backup` -- Namespace: `AWS/ApiGateway` or Alias: `apigateway` -- Namespace: `AWS/AmazonMQ` or Alias: `mq` -- Namespace: `AWS/AppSync` or Alias: `appsync` -- Namespace: `AWS/Athena` or Alias: `athena` -- Namespace: `AWS/AutoScaling` or Alias: `asg` -- Namespace: `AWS/ElasticBeanstalk` or Alias: `beanstalk` -- Namespace: `AWS/Billing` or Alias: `billing` -- Namespace: `AWS/Cassandra` or Alias: `cassandra` -- Namespace: `AWS/CloudFront` or Alias: `cloudfront` -- Namespace: `AWS/Cognito` or Alias: `cognito-idp` -- Namespace: `AWS/DMS` or Alias: `dms` -- Namespace: `AWS/DDoSProtection` or Alias: `shield` -- Namespace: `AWS/DocDB` or Alias: `docdb` -- Namespace: `AWS/DX` or Alias: `dx` -- Namespace: `AWS/DynamoDB` or Alias: `dynamodb` -- Namespace: `AWS/EBS` or Alias: `ebs` -- Namespace: `AWS/ElastiCache` or Alias: `ec` -- Namespace: `AWS/MemoryDB` or Alias: `memorydb` -- Namespace: `AWS/EC2` or Alias: `ec2` -- Namespace: `AWS/EC2Spot` or Alias: `ec2Spot` -- Namespace: `AWS/ECS` or Alias: `ecs-svc` -- Namespace: `ECS/ContainerInsights` or Alias: `ecs-containerinsights` -- Namespace: `AWS/EFS` or Alias: `efs` -- Namespace: `AWS/ELB` or Alias: `elb` -- Namespace: `AWS/ElasticMapReduce` or Alias: `emr` -- Namespace: `AWS/EMRServerless` or Alias: `emr-serverless` -- Namespace: `AWS/ES` or Alias: `es` -- Namespace: `AWS/Firehose` or Alias: `firehose` -- Namespace: `AWS/FSx` or Alias: `fsx` -- Namespace: `AWS/GameLift` or Alias: `gamelift` -- Namespace: `AWS/GlobalAccelerator` or Alias: `ga` -- Namespace: `Glue` or Alias: `glue` -- Namespace: `AWS/IoT` or Alias: `iot` -- Namespace: `AWS/Kafka` or Alias: `kafka` -- Namespace: `AWS/KafkaConnect` or Alias: `kafkaconnect` -- Namespace: `AWS/Kinesis` or Alias: `kinesis` -- Namespace: `AWS/KinesisAnalytics` or Alias: `kinesis-analytics` -- Namespace: `AWS/Lambda` or Alias: `lambda` -- Namespace: `AWS/MediaConnect` or Alias: `mediaconnect` -- Namespace: `AWS/MediaConvert` or Alias: `mediaconvert` -- Namespace: `AWS/MediaLive` or Alias: `medialive` -- Namespace: `AWS/MediaTailor` or Alias: `mediatailor` -- Namespace: `AWS/Neptune` or Alias: `neptune` -- Namespace: `AWS/NetworkFirewall` or Alias: `nfw` -- Namespace: `AWS/NATGateway` or Alias: `ngw` -- Namespace: `AWS/NetworkELB` or Alias: `nlb` -- Namespace: `AWS/PrivateLinkEndpoints` or Alias: `vpc-endpoint` -- Namespace: `AWS/PrivateLinkServices` or Alias: `vpc-endpoint-service` -- Namespace: `AWS/Prometheus` or Alias: `amp` -- Namespace: `AWS/QLDB` or Alias: `qldb` -- Namespace: `AWS/RDS` or Alias: `rds` -- Namespace: `AWS/Redshift` or Alias: `redshift` -- Namespace: `AWS/Route53Resolver` or Alias: `route53-resolver` -- Namespace: `AWS/Route53` or Alias: `route53` -- Namespace: `AWS/S3` or Alias: `s3` -- Namespace: `AWS/SES` or Alias: `ses` -- Namespace: `AWS/States` or Alias: `sfn` -- Namespace: `AWS/SNS` or Alias: `sns` -- Namespace: `AWS/SQS` or Alias: `sqs` -- Namespace: `AWS/StorageGateway` or Alias: `storagegateway` -- Namespace: `AWS/TransitGateway` or Alias: `tgw` -- Namespace: `AWS/TrustedAdvisor` or Alias: `trustedadvisor` -- Namespace: `AWS/VPN` or Alias: `vpn` -- Namespace: 
`AWS/ClientVPN` or Alias: `clientvpn` -- Namespace: `AWS/WAFV2` or Alias: `wafv2` -- Namespace: `AWS/WorkSpaces` or Alias: `workspaces` -- Namespace: `AWS/AOSS` or Alias: `aoss` -- Namespace: `AWS/SageMaker` or Alias: `sagemaker` -- Namespace: `/aws/sagemaker/Endpoints` or Alias: `sagemaker-endpoints` -- Namespace: `/aws/sagemaker/TrainingJobs` or Alias: `sagemaker-training` -- Namespace: `/aws/sagemaker/ProcessingJobs` or Alias: `sagemaker-processing` -- Namespace: `/aws/sagemaker/TransformJobs` or Alias: `sagemaker-transform` -- Namespace: `/aws/sagemaker/InferenceRecommendationsJobs` or Alias: `sagemaker-inf-rec` -- Namespace: `AWS/Sagemaker/ModelBuildingPipeline` or Alias: `sagemaker-model-building-pipeline` - diff --git a/docs/sources/static/configuration/integrations/consul-exporter-config.md b/docs/sources/static/configuration/integrations/consul-exporter-config.md deleted file mode 100644 index 469afc264f..0000000000 --- a/docs/sources/static/configuration/integrations/consul-exporter-config.md +++ /dev/null @@ -1,107 +0,0 @@ ---- -aliases: -- ../../../configuration/integrations/consul-exporter-config/ -- /docs/grafana-cloud/monitor-infrastructure/agent/static/configuration/integrations/consul-exporter-config/ -- /docs/grafana-cloud/send-data/agent/static/configuration/integrations/consul-exporter-config/ -canonical: https://grafana.com/docs/agent/latest/static/configuration/integrations/consul-exporter-config/ -description: Learn about consul_exporter_config -title: consul_exporter_config ---- - -# consul_exporter_config - -The `consul_exporter_config` block configures the `consul_exporter` -integration, which is an embedded version of -[`consul_exporter`](https://github.com/prometheus/consul_exporter). This allows -for the collection of consul metrics and exposing them as Prometheus metrics. - -Full reference of options: - -```yaml - # Enables the consul_exporter integration, allowing the Agent to automatically - # collect system metrics from the configured consul server address - [enabled: | default = false] - - # Sets an explicit value for the instance label when the integration is - # self-scraped. Overrides inferred values. - # - # The default value for this integration is inferred from the hostname portion - # of the server URL. - [instance: ] - - # Automatically collect metrics from this integration. If disabled, - # the consul_exporter integration will be run but not scraped and thus not - # remote-written. Metrics for the integration will be exposed at - # /integrations/consul_exporter/metrics and can be scraped by an external - # process. - [scrape_integration: | default = ] - - # How often should the metrics be collected? Defaults to - # prometheus.global.scrape_interval. - [scrape_interval: | default = ] - - # The timeout before considering the scrape a failure. Defaults to - # prometheus.global.scrape_timeout. - [scrape_timeout: | default = ] - - # Allows for relabeling labels on the target. - relabel_configs: - [- ... ] - - # Relabel metrics coming from the integration, allowing to drop series - # from the integration that you don't care about. - metric_relabel_configs: - [ - ... ] - - # How frequent to truncate the WAL for this integration. - [wal_truncate_frequency: | default = "60m"] - - # - # Exporter-specific configuration options - # - - # Prefix from which to expose key/value pairs. - [kv_prefix: | default = ""] - - # Regex that determines which keys to expose. - [kv_filter: | default = ".*"] - - # Generate a health summary for each service instance. 
Needs n+1 queries to - # collect all information. - [generate_health_summary: | default = true] - - # HTTP API address of a Consul server or agent. Prefix with https:// to - # connect using HTTPS. - [server: | default = "http://localhost:8500"] - - # Disable TLS host verification. - [insecure_skip_verify: | default = false] - - # File path to a PEM-encoded certificate authority used to validate the - # authenticity of a server certificate. - [ca_file: | default = ""] - - # File path to a PEM-encoded certificate used with the private key to verify - # the exporter's authenticity. - [cert_file: | default = ""] - - # File path to a PEM-encoded private key used with the certificate to verify - # the exporter's authenticity. - [key_file: | default = ""] - - # When provided, this overrides the hostname for the TLS certificate. It can - # be used to ensure that the certificate name matches the hostname we declare. - [server_name: | default = ""] - - # Timeout on HTTP requests to the Consul API. - [timeout: | default = "500ms"] - - # Limit the maximum number of concurrent requests to consul. 0 means no limit. - [concurrent_request_limit: | default = 0] - - # Allows any Consul server (non-leader) to service a read. - [allow_stale: | default = true] - - # Forces the read to be fully consistent. - [require_consistent: | default = false] -``` diff --git a/docs/sources/static/configuration/integrations/dnsmasq-exporter-config.md b/docs/sources/static/configuration/integrations/dnsmasq-exporter-config.md deleted file mode 100644 index fe38a827bf..0000000000 --- a/docs/sources/static/configuration/integrations/dnsmasq-exporter-config.md +++ /dev/null @@ -1,87 +0,0 @@ ---- -aliases: -- ../../../configuration/integrations/dnsmasq-exporter-config/ -- /docs/grafana-cloud/monitor-infrastructure/agent/static/configuration/integrations/dnsmasq-exporter-config/ -- /docs/grafana-cloud/send-data/agent/static/configuration/integrations/dnsmasq-exporter-config/ -canonical: https://grafana.com/docs/agent/latest/static/configuration/integrations/dnsmasq-exporter-config/ -description: Learn about dnsmasq_exporter_config -title: dnsmasq_exporter_config ---- - -# dnsmasq_exporter_config - -The `dnsmasq_exporter_config` block configures the `dnsmasq_exporter` integration, -which is an embedded version of -[`dnsmasq_exporter`](https://github.com/google/dnsmasq_exporter). This allows for -the collection of metrics from dnsmasq servers. - -Note that currently, an Agent can only collect metrics from a single dnsmasq -server. If you want to collect metrics from multiple servers, you can run -multiple Agents and add labels using `relabel_configs` to differentiate between -the servers: - -```yaml -dnsmasq_exporter: - enabled: true - dnsmasq_address: dnsmasq-a:53 - relabel_configs: - - source_labels: [__address__] - target_label: instance - replacement: dnsmasq-a -``` - -Full reference of options: - -```yaml - # Enables the dnsmasq_exporter integration, allowing the Agent to automatically - # collect system metrics from the configured dnsmasq server address - [enabled: | default = false] - - # Sets an explicit value for the instance label when the integration is - # self-scraped. Overrides inferred values. - # - # The default value for this integration is inferred from the dnsmasq_address - # value. - [instance: ] - - # Automatically collect metrics from this integration. If disabled, - # the dnsmasq_exporter integration will be run but not scraped and thus not - # remote-written. 
Metrics for the integration will be exposed at
-  # /integrations/dnsmasq_exporter/metrics and can be scraped by an external
-  # process.
-  [scrape_integration: | default = ]
-
-  # How often should the metrics be collected? Defaults to
-  # prometheus.global.scrape_interval.
-  [scrape_interval: | default = ]
-
-  # The timeout before considering the scrape a failure. Defaults to
-  # prometheus.global.scrape_timeout.
-  [scrape_timeout: | default = ]
-
-  # Allows for relabeling labels on the target.
-  relabel_configs:
-  [- ... ]
-
-  # Relabel metrics coming from the integration, allowing to drop series
-  # from the integration that you don't care about.
-  metric_relabel_configs:
-  [ - ... ]
-
-  # How frequent to truncate the WAL for this integration.
-  [wal_truncate_frequency: | default = "60m"]
-
-  #
-  # Exporter-specific configuration options
-  #
-
-  # Address of the dnsmasq server in host:port form.
-  [dnsmasq_address: | default = "localhost:53"]
-
-  # Path to the dnsmasq leases file. If this file doesn't exist, scraping
-  # dnsmasq will fail with a warning log message.
-  [leases_path: | default = "/var/lib/misc/dnsmasq.leases"]
-
-  # Expose dnsmasq leases as metrics (high cardinality).
-  [expose_leases: | default = false]
-```
diff --git a/docs/sources/static/configuration/integrations/elasticsearch-exporter-config.md b/docs/sources/static/configuration/integrations/elasticsearch-exporter-config.md
deleted file mode 100644
index 9e0f3ee0f8..0000000000
--- a/docs/sources/static/configuration/integrations/elasticsearch-exporter-config.md
+++ /dev/null
@@ -1,127 +0,0 @@
----
-aliases:
-- ../../../configuration/integrations/elasticsearch-exporter-config/
-- /docs/grafana-cloud/monitor-infrastructure/agent/static/configuration/integrations/elasticsearch-exporter-config/
-- /docs/grafana-cloud/send-data/agent/static/configuration/integrations/elasticsearch-exporter-config/
-canonical: https://grafana.com/docs/agent/latest/static/configuration/integrations/elasticsearch-exporter-config/
-description: Learn about elasticsearch_exporter_config
-title: elasticsearch_exporter_config
----
-
-# elasticsearch_exporter_config
-
-The `elasticsearch_exporter_config` block configures the `elasticsearch_exporter` integration,
-which is an embedded version of
-[`elasticsearch_exporter`](https://github.com/prometheus-community/elasticsearch_exporter). This allows for
-the collection of metrics from Elasticsearch servers.
-
-Note that currently, an Agent can only collect metrics from a single Elasticsearch server.
-However, the exporter can collect metrics from all nodes of the cluster through that single configured server.
-
-We strongly recommend that you configure a separate user for the Agent, and give it only the strictly mandatory
-security privileges necessary for monitoring your node, as per the [official documentation](https://github.com/prometheus-community/elasticsearch_exporter#elasticsearch-7x-security-privileges).
-
-Full reference of options:
-
-```yaml
-  # Enables the elasticsearch_exporter integration, allowing the Agent to automatically
-  # collect metrics from the configured Elasticsearch server address
-  [enabled: | default = false]
-
-  # Sets an explicit value for the instance label when the integration is
-  # self-scraped. Overrides inferred values.
-  #
-  # The default value for this integration is inferred from the hostname portion
-  # of address.
-  [instance: ]
-
-  # Automatically collect metrics from this integration.
If disabled, - # the elasticsearch_exporter integration will be run but not scraped and thus not - # remote-written. Metrics for the integration will be exposed at - # /integrations/elasticsearch_exporter/metrics and can be scraped by an external - # process. - [scrape_integration: | default = ] - - # How often should the metrics be collected? Defaults to - # prometheus.global.scrape_interval. - [scrape_interval: | default = ] - - # The timeout before considering the scrape a failure. Defaults to - # prometheus.global.scrape_timeout. - [scrape_timeout: | default = ] - - # Allows for relabeling labels on the target. - relabel_configs: - [- ... ] - - # Relabel metrics coming from the integration, allowing to drop series - # from the integration that you don't care about. - metric_relabel_configs: - [ - ... ] - - # How frequent to truncate the WAL for this integration. - [wal_truncate_frequency: | default = "60m"] - - # - # Exporter-specific configuration options - # - - # HTTP API address of an Elasticsearch node. - [ address: | default = "http://localhost:9200" ] - - # Timeout for trying to get stats from Elasticsearch. - [ timeout: | default = "5s" ] - - # Export stats for all nodes in the cluster. If used, this flag will override the flag `node`. - [ all: ] - - # Node's name of which metrics should be exposed. - [ node: ] - - # Export stats for indices in the cluster. - [ indices: ] - - # Export stats for settings of all indices of the cluster. - [ indices_settings: ] - - # Export stats for cluster settings. - [ cluster_settings: ] - - # Export stats for shards in the cluster (implies indices). - [ shards: ] - - # Export stats for the cluster snapshots. - [ snapshots: ] - - # Cluster info update interval for the cluster label. - [ clusterinfo_interval: | default = "5m" ] - - # Path to PEM file that contains trusted Certificate Authorities for the Elasticsearch connection. - [ ca: ] - - # Path to PEM file that contains the private key for client auth when connecting to Elasticsearch. - [ client_private_key: ] - - # Path to PEM file that contains the corresponding cert for the private key to connect to Elasticsearch. - [ client_cert: ] - - # Skip SSL verification when connecting to Elasticsearch. - [ ssl_skip_verify: ] - - # Include informational aliases metrics. - [ aliases: ] - - # Export stats for Data Streams. - [ data_stream: ] - - # Export stats for SLM (Snapshot Lifecycle Management). - [ slm: ] - - # Sets the `Authorization` header on every ES probe with the - # configured username and password. - # password and password_file are mutually exclusive. 
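-  #
-  # A minimal sketch of this block, for example (the username and file path
-  # here are illustrative, not defaults):
-  #
-  #   basic_auth:
-  #     username: agent_monitor
-  #     password_file: /var/run/secrets/es_password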
- basic_auth: - [ username: ] - [ password: ] - [ password_file: ] -``` diff --git a/docs/sources/static/configuration/integrations/gcp-exporter-config.md b/docs/sources/static/configuration/integrations/gcp-exporter-config.md deleted file mode 100644 index 56ef46aa93..0000000000 --- a/docs/sources/static/configuration/integrations/gcp-exporter-config.md +++ /dev/null @@ -1,189 +0,0 @@ ---- -aliases: -- ../../../configuration/integrations/gcp-exporter-config/ -- /docs/grafana-cloud/monitor-infrastructure/agent/static/configuration/integrations/gcp-exporter-config/ -- /docs/grafana-cloud/send-data/agent/static/configuration/integrations/gcp-exporter-config/ -canonical: https://grafana.com/docs/agent/latest/static/configuration/integrations/gcp-exporter-config/ -description: Learn about gcp_exporter_config -title: gcp_exporter_config ---- - -# gcp_exporter_config - -## Overview -The `gcp_exporter_config` block configures the `gcp_exporter` integration, which is an embedded version of -[`stackdriver_exporter`](https://github.com/prometheus-community/stackdriver_exporter). This allows for the collection of -metrics data from [GCP Cloud Monitoring (formerly stackdriver)](https://cloud.google.com/monitoring/docs). The exporter supports all metrics available via [GCP's monitoring API](https://cloud.google.com/monitoring/api/metrics_gcp). - -Metric names follow the template `stackdriver___`. - -The following example shows a load balancing metric: - -![gcp-exporter-config-metric-example](https://grafana.com/media/docs/agent/gcp-exporter-config-metric-example.png) - -The following list shows its attributes: \ -monitored_resource = `https_lb_rule`\ -metric_type_prefix = `loadbalancing.googleapis.com/`\ -metric_type = `https/backend_latencies` - -These attributes result in a final metric name of: -`stackdriver_https_lb_rule_loadbalancing_googleapis_com_https_backend_latencies` - -## Authentication - -Grafana Agent must be running in an environment with access to the GCP project it is scraping. The exporter -uses the Google Golang Client Library, which offers a variety of ways to [provide credentials](https://developers.google.com/identity/protocols/application-default-credentials). Choose the option that works best for you. - -After deciding how Agent will obtain credentials, ensure the account is set up with the IAM role `roles/monitoring.viewer`. -Since the exporter gathers all of its data from [GCP monitoring APIs](https://cloud.google.com/monitoring/api/v3), this is the only permission needed. - -## Configuration reference - -```yaml - # - # Common Integration Settings - # - - # Enables the gcp_exporter integration, allowing Agent to automatically collect metrics or expose gcp metrics. - [enabled: | default = false] - - # Sets an explicit value for the instance label when the integration is self-scraped. Default is - # based on subscriptions and ResourceType being monitored. - [instance: ] - - # Automatically collect metrics from this integration. If disabled, the exporter integration is run but not - # scraped and thus not remote-written. Metrics for the integration are exposed at - # /integrations/gcp_exporter/metrics and can be scraped by an external process. - [scrape_integration: | default = ] - - # How often should the metrics be collected? Defaults to - # prometheus.global.scrape_interval. - [scrape_interval: | default = ] - - # The timeout before considering the scrape a failure. Defaults to - # prometheus.global.scrape_timeout. 
- [scrape_timeout: | default = ] - - # Allows for relabeling labels on the target. - relabel_configs: - [- ... ] - - # Relabel metrics coming from the integration, allowing series that you don't care about to be dropped - # from the integration. - metric_relabel_configs: - [ - ... ] - - # How frequently to truncate the WAL for this integration. - [wal_truncate_frequency: | default = "60m"] - - # - # Exporter-specific configuration - # - - # Required: Configure the GCP Project(s) to scrape for metrics. - project_ids: - [ - ... ] - - # Required: One or more values from the supported GCP Metrics(https://cloud.google.com/monitoring/api/metrics_gcp). - # These can be as targeted or loose as needed. - # Using pubsub metrics (https://cloud.google.com/monitoring/api/metrics_gcp#gcp-pubsub) as an example - # all metrics. - # - pubsub.googleapis.com/ - # all snapshot specific metrics - # - pubsub.googleapis.com/snapshot - # all snapshot specific metrics and a few subscription metrics - # - pubsub.googleapis.com/snapshot - # - pubsub.googleapis.com/subscription/num_undelivered_messages - # - pubsub.googleapis.com/subscription/oldest_unacked_message_age - metrics_prefixes: - [ - ... ] - - # Optional: Used to further refine the resources you would like to collect metrics from. - # The structure for these filters is :. - # The `targeted_metric_prefix` is used to ensure the filter is only applied to the metric_prefix(es) where it makes sense. - # It does not explicitly have to match a value from `metric_prefixes` but the `targeted_metric_prefix` must be at least a - # prefix to one or more `metric_prefixes`. - # Example: - # metrics_prefixes = pubsub.googleapis.com/snapshot, pubsub.googleapis.com/subscription/num_undelivered_messages - # targeted_metric_prefix options would be: - # pubsub.googleapis.com (apply to all defined prefixes) - # pubsub.googleapis.com/snapshot (apply to only snapshot metrics) - # pubsub.googleapis.com/subscription (apply to only subscription metrics) - # pubsub.googleapis.com/subscription/num_undelivered_messages (apply to only the specific subscription metric) - # The `filter_query` is applied to a final metrics API query when querying for metric data - # You can read more about the metric API filter options in GCPs documentation https://cloud.google.com/monitoring/api/v3/filters. - # The final query sent to the metrics API already includes filters for project and metric type. Each applicable `filter_query` - # is appended to the query with an AND. - extra_filters: - [ - ... ] - - # Optional: The time range used when querying for metrics. - # Most of the time the default works perfectly fine. Most documented metrics include a comments of the form - # `Sampled every X seconds. After sampling, data is not visible for up to Y seconds.` - # As long as your `request_interval` is >= `Y` you should have no issues. - # Consider using `ingest_delay` if you would like this to be done programmatically or are gathering slower moving metrics. - [request_interval: | default = "5m"] - - # Optional: When enabled this automatically adjusts the time range used when querying for metrics backwards based on - # the metadata GCP has published for how long the data can take to be ingested. You can see the values for this in - # documented metrics as `After sampling, data is not visible for up to Y seconds.` - # Since GCPs ingestion delay is an "at worst," this is off by default to ensure data is gathered as soon as it's available. 
- [ingest_delay: | default = false] - - # Optional: When enabled this offsets the time range used when querying for metrics by a set amount. - [request_offset: | default = "0s"] - - # Optional: When enabled drops metrics from attached projects and only fetches metrics from the explicitly configured `project_ids`. - [drop_delegated_projects: | default = false] - - # Optional: Sets a timeout on the client used to make API calls to GCP. A single scrape can initiate numerous calls to - # GCP, so be mindful if you choose to override this value. - [gcp_client_timeout: | default = "15s"] -``` - -## Configuration Examples - -The following examples show working configurations. See the [Configuration Reference](#configuration-reference) for a full -overview of the configuration options and what they do. - -### Multiple prefixes -```yaml - gcp_exporter: - enabled: true - project_ids: - - - metrics_prefixes: - - run.googleapis.com/ - - cloudfunctions.googleapis.com/ - - compute.googleapis.com/nat - - logging.googleapis.com/billing - - logging.googleapis.com/exports - - serviceruntime.googleapis.com/quota/ - - storage.googleapis.com/ - - pubsub.googleapis.com/subscription -``` - -### Load balancing with a filter -```yaml - gcp_exporter: - enabled: true - project_ids: - - - metrics_prefixes: - - loadbalancing.googleapis.com - extra_filters: - - loadbalancing.googleapis.com:resource.labels.backend_target_name="sample-value" -``` - -### Subset of load balancing metrics with a filter -```yaml - gcp_exporter: - enabled: true - project_ids: - - - metrics_prefixes: - - loadbalancing.googleapis.com/https/request_bytes_count - - loadbalancing.googleapis.com/https/total_latencies - extra_filters: - - loadbalancing.googleapis.com:resource.labels.backend_target_name="sample-value" -``` diff --git a/docs/sources/static/configuration/integrations/github-exporter-config.md b/docs/sources/static/configuration/integrations/github-exporter-config.md deleted file mode 100644 index c1bbbfe0d0..0000000000 --- a/docs/sources/static/configuration/integrations/github-exporter-config.md +++ /dev/null @@ -1,89 +0,0 @@ ---- -aliases: -- ../../../configuration/integrations/github-exporter-config/ -- /docs/grafana-cloud/monitor-infrastructure/agent/static/configuration/integrations/github-exporter-config/ -- /docs/grafana-cloud/send-data/agent/static/configuration/integrations/github-exporter-config/ -canonical: https://grafana.com/docs/agent/latest/static/configuration/integrations/github-exporter-config/ -description: Learn about github_exporter_config -title: github_exporter_config ---- - -# github_exporter_config - -The `github_exporter_config` block configures the `github_exporter` integration, -which is an embedded version of -[`github_exporter`](https://github.com/githubexporter/github-exporter). This allows for the collection of metrics from the GitHub api. - -We strongly recommend that you configure a separate authentication token for the Agent, and give it only the strictly mandatory -security privileges necessary for monitoring your repositories, as per the [official documentation](https://docs.github.com/en/rest/reference/permissions-required-for-github-apps). -We also recommend that you use `api_token_file` parameter, to avoid setting the authentication token directly on the Agent config file. - -Full reference of options: - -```yaml - # Enables the github_exporter integration, allowing the Agent to automatically - # collect metrics for the specified GitHub objects. 
- [enabled: | default = false] - - # Sets an explicit value for the instance label when the integration is - # self-scraped. Overrides inferred values. - # - # The default value for this integration is inferred from the hostname portion - # of api_url. - [instance: ] - - # Automatically collect metrics from this integration. If disabled, - # the github_exporter integration will be run but not scraped and thus not - # remote-written. Metrics for the integration will be exposed at - # /integrations/github_exporter/metrics and can be scraped by an external - # process. - [scrape_integration: | default = ] - - # How often should the metrics be collected? Defaults to - # prometheus.global.scrape_interval. - [scrape_interval: | default = ] - - # The timeout before considering the scrape a failure. Defaults to - # prometheus.global.scrape_timeout. - [scrape_timeout: | default = ] - - # Allows for relabeling labels on the target. - relabel_configs: - [- ... ] - - # Relabel metrics coming from the integration, allowing to drop series - # from the integration that you don't care about. - metric_relabel_configs: - [ - ... ] - - # How frequent to truncate the WAL for this integration. - [wal_truncate_frequency: | default = "60m"] - - # - # Exporter-specific configuration options - # - - # The full URI of the GitHub API. - [api_url: | default = "https://api.github.com"] - - # A list of GitHub repositories for which to collect metrics. - repositories: - [ - ] - - # A list of GitHub organizations for which to collect metrics. - organizations: - [ - ] - - # A list of GitHub users for which to collect metrics. - users: - [ - ] - - # A GitHub authentication token that allows the API to be queried more often. - # Optional, but recommended. - [api_token: ] - - # A path to a file containing a GitHub authentication token that allows the - # API to be queried more often. If supplied, this supersedes `api_token` - # Optional, but recommended. - [api_token_file: ] -``` diff --git a/docs/sources/static/configuration/integrations/integrations-next/_index.md b/docs/sources/static/configuration/integrations/integrations-next/_index.md deleted file mode 100644 index cfa54bfb9b..0000000000 --- a/docs/sources/static/configuration/integrations/integrations-next/_index.md +++ /dev/null @@ -1,217 +0,0 @@ ---- -aliases: -- ../../../configuration/integrations/integrations-next/ -- /docs/grafana-cloud/monitor-infrastructure/agent/static/configuration/integrations/integrations-next/ -- /docs/grafana-cloud/send-data/agent/static/configuration/integrations/integrations-next/ -canonical: https://grafana.com/docs/agent/latest/static/configuration/integrations/integrations-next/ -description: Learn about integrations next -menuTitle: Integrations next -title: Integrations next (Experimental) -weight: 100 ---- - -# Integrations next (Experimental) - -Release v0.22.0 of Grafana Agent includes experimental support for a revamped -integrations subsystem. The integrations subsystem is the second oldest part of -Grafana Agent, and has started to feel out of place as we built out the -project. - -The revamped integrations subsystem can be enabled by passing -`integrations-next` to the `-enable-features` command line flag. As an -experimental feature, there are no stability guarantees, and it may receive a -higher frequency of breaking changes than normal. - -The revamped integrations subsystem has the following benefits over the -original subsystem: - -* Integrations can opt in to supporting multiple instances. 
For example, you - may now run any number of `redis_exporter` integrations, where before you - could only have one per agent. Integrations such as `node_exporter` still - only support a single instance, as it wouldn't make sense to have multiple - instances of those. - -* Autoscrape (previously called "self-scraping"), when enabled, now supports - sending metrics for an integration directly to a running metrics instance. - This allows you configuring an integration to send to a specific Prometheus - remote_write endpoint. - -* A new service discovery HTTP API is included. This can be used with - Prometheus' [http_sd_config][http_sd_config]. The API returns extra labels - for integrations that previously were only available when autoscraping, such - as `agent_hostname`. - -* Integrations that aren't Prometheus exporters may now be added, such as - integrations that generate logs or traces. - -* Autoscrape, when enabled, now works completely in-memory without using the - network. - -[http_sd_config]: https://prometheus.io/docs/prometheus/2.45/configuration/configuration/#http_sd_config - -## Config changes - -The revamp contains a number of breaking changes to the config. The schema of the -`integrations` key in the config file is now the following: - -```yaml -integrations: - # Controls settings for integrations that generate metrics. - metrics: - # Controls default settings for autoscrape. Individual instances of - # integrations inherit the defaults and may override them. - autoscrape: - # Enables autoscrape of integrations. - [enable: | default = true] - - # Specifies the metrics instance name to send metrics to. Instance - # names are located at metrics.configs[].name from the top-level config. - # The instance must exist. - # - # As it is common to use the name "default" for your primary instance, - # we assume the same here. - [metrics_instance: | default = "default"] - - # Autoscrape interval and timeout. Defaults are inherited from the global - # section of the top-level metrics config. - [scrape_interval: | default = ] - [scrape_timeout: | default = ] - - # Configs for integrations which do not support multiple instances. - [agent: ] - [cadvisor: ] - [node_exporter: ] - [process: ] - [statsd: ] - [windows: ] - [eventhandler: ] - [snmp: ] - [blackbox: ] - - # Configs for integrations that do support multiple instances. Note that - # these must be arrays. - consul_configs: - [- ...] - - dnsmasq_configs: - [- ...] - - elasticsearch_configs: - [- ...] - - github_configs: - [- ...] - - kafka_configs: - [- ...] - - memcached_configs: - [- ...] - - mongodb_configs: - [- ...] - - mssql_configs: - [- ...] - - mysql_configs: - [- ...] - - oracledb_configs: - [ - ...] - - postgres_configs: - [- ...] - - redis_configs: - [- ...] - - snowflake_configs: - [- ...] - - app_agent_receiver_configs: - [- ] - - apache_http_configs: - [- ] - - squid_configs: - [- ...] - - vsphere_configs: - [- ] - - gcp_configs: - [- ] - - azure_configs: - [- ] - - cloudwatch_configs: - [- ] -``` - -Note that most integrations are no longer configured with the `_exporter` name. -`node_exporter` is the only integration with `_exporter` name due to its -popularity in the Prometheus ecosystem. - -## Integrations changes - -Integrations no longer support an `enabled` field; they are enabled by being -defined in the YAML. To disable an integration, comment it out or remove it. 
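-
-For example, a minimal config under the revamped subsystem enables two Redis
-instances simply by listing them. This is a sketch, not a drop-in config: the
-addresses and the `default` metrics instance are illustrative, and the field
-names follow the `redis_exporter_config` reference.
-
-```yaml
-metrics:
-  wal_directory: /tmp/wal
-  configs:
-    - name: default
-
-integrations:
-  redis_configs:
-    - redis_addr: redis-1:6379
-      autoscrape:
-        metrics_instance: default
-    - redis_addr: redis-2:6379
-```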
- -Metrics-based integrations now use this common set of options: - -```yaml -# Provide an explicit value to uniquely identify this instance of the -# integration. If not provided, a reasonable default will be inferred based -# on the integration. -# -# The value here must be unique across all instances of the same integration. -[instance: ] - -# Override autoscrape defaults for this integration. -autoscrape: - # Enables autoscrape of integrations. - [enable: | default = ] - - # Specifies the metrics instance name to send metrics to. - [metrics_instance: | default = ] - - # Relabel the autoscrape job. - relabel_configs: - [- ... ] - - # Relabel metrics coming from the integration. - metric_relabel_configs: - [ - ... ] - - # Autoscrape interval and timeout. - [scrape_interval: | default = ] - [scrape_timeout: | default = ] - -# An optional extra set of labels to add to metrics from the integration target. These -# labels are only exposed via the integration service discovery HTTP API and -# added when autoscrape is used. They will not be found directly on the metrics -# page for an integration. -extra_labels: - [ : ... ] -``` - -The old set of common options have been removed and do not work when the revamp -is being used: - -```yaml -# OLD SCHEMA: NO LONGER SUPPORTED - -[enabled: | default = false] -[instance: ] -[scrape_integration: | default = ] -[scrape_interval: | default = ] -[scrape_timeout: | default = ] -[wal_truncate_frequency: | default = "60m"] -relabel_configs: - [- ...] -metric_relabel_configs: - [ - ...] -``` diff --git a/docs/sources/static/configuration/integrations/integrations-next/app-agent-receiver-config.md b/docs/sources/static/configuration/integrations/integrations-next/app-agent-receiver-config.md deleted file mode 100644 index 7fe049a493..0000000000 --- a/docs/sources/static/configuration/integrations/integrations-next/app-agent-receiver-config.md +++ /dev/null @@ -1,124 +0,0 @@ ---- -aliases: -- ../../../../configuration/integrations/integrations-next/app-agent-receiver-config/ -- /docs/grafana-cloud/monitor-infrastructure/agent/static/configuration/integrations/integrations-next/app-agent-receiver-config/ -- /docs/grafana-cloud/send-data/agent/static/configuration/integrations/integrations-next/app-agent-receiver-config/ -canonical: https://grafana.com/docs/agent/latest/static/configuration/integrations/integrations-next/app-agent-receiver-config/ -description: Learn about app_agent_receiver_config next -title: app_agent_receiver_config next ---- - -# app_agent_receiver_config next - -The `app_agent_receiver_config` block configures the `app_agent_receiver` -integration. This integration exposes a http endpoint that can receive telemetry -from the [Grafana Faro Web SDK](https://github.com/grafana/faro-web-sdk) -and forward it to logs, traces or metrics backends. - -These are the options you have for configuring the app_agent_receiver integration. - -```yaml - autoscrape: - # Enables autoscrape of integrations. - [enable: | default = true] - - # Specifies the metrics instance name to send metrics to. Instance - # names are located at metrics.configs[].name from the top-level config. - # The instance must exist. - # - # As it is common to use the name "default" for your primary instance, - # we assume the same here. - [metrics_instance: | default = "default"] - - # Autoscrape interval and timeout. Defaults are inherited from the global - # section of the top-level metrics config. 
-    [scrape_interval: | default = ]
-    [scrape_timeout: | default = ]
-
-  # Integration instance name
-  [instance: ]
-
-  # Traces instance to send traces to. This assumes that you have a traces
-  # config with such an instance defined
-  [traces_instance: | default = ""]
-
-  # Logs instance to send logs and exceptions to. This assumes that you have a logs
-  # config with the instance defined
-  [logs_instance: | default = ""]
-
-  # Server config refers to the HTTP endpoint that the integration will expose
-  # to receive data from.
-  server:
-    [host: | default = "127.0.0.1"]
-    [port: | default = 12347]
-
-  # Domains from which the agent sends data. For example "https://myapp.com"
-  cors_allowed_origins:
-    [- ]
-
-  # Configure rate limiting. The HTTP server of the app agent receiver implements
-  # a token bucket rate limiting algorithm in which the maximum RPS
-  # as well as the burstiness (peaks of RPS) can be configured
-  rate_limiting:
-    [enabled: | default = false]
-    [rps: | default = 100]
-    [burstiness: | default = 50]
-
-  # If configured, incoming requests will be required to specify this key in the
-  # "x-api-key" header
-  [api_key: ]
-
-  # Max allowed payload size in bytes for the JSON payload. Internally, the
-  # Content-Length header is used to make this check
-  [max_allowed_payload_size: | default = 0]
-
-  # Labels to set for the log entry.
-  # If a value is specified, it will be used.
-  # If the value is empty and the key exists in the data, its value will be used.
-  logs_labels:
-    [- : ]
-
-  # Timeout duration when sending an entry to Loki
-  [logs_send_timeout: | default = 2s]
-
-  # Sourcemap configuration for enabling stack trace transformation to original source locations
-  [sourcemaps: ]
-```
-
-## sourcemap_config
-
-```yaml
-# Whether agent should attempt to download compiled sources and source maps
-[download: | default = false]
-
-# List of HTTP origins to download sourcemaps for
-[download_origins: [] | default = ["*"]]
-
-# Timeout for downloading compiled sources and sourcemaps
-[download_timeout: | default = "1s"]
-
-# Sourcemap locations on filesystem. Takes precedence over downloading if both methods are enabled
-filesystem:
-  [- ]
-```
-
-## sourcemap_file_location
-
-```yaml
-# Source URL prefix. If a minified source URL matches this prefix,
-# a filepath is constructed by removing the prefix, prepending path below and appending ".map".
-#
-# Example:
-#
-# minified_path_prefix = "https://my-app.dev/static/"
-# path = "/var/app/static/"
-#
-# Then given source url "https://my-app.dev/static/foo.js"
-# it will look for sourcemap at "/var/app/static/foo.js.map"
-
-minified_path_prefix:
-
-# Directory on file system that contains source maps.
-# See above for more detailed explanation.
-# It is parsed as a Go template. You can use "{{.Release }}" which will be replaced with
-# the app.release meta property.
-path: -``` diff --git a/docs/sources/static/configuration/integrations/integrations-next/blackbox-config.md b/docs/sources/static/configuration/integrations/integrations-next/blackbox-config.md deleted file mode 100644 index fa99cf452f..0000000000 --- a/docs/sources/static/configuration/integrations/integrations-next/blackbox-config.md +++ /dev/null @@ -1,108 +0,0 @@ ---- -aliases: -- ../../../../configuration/integrations/integrations-next/blackbox-config/ -- /docs/grafana-cloud/monitor-infrastructure/agent/static/configuration/integrations/integrations-next/blackbox-config/ -- /docs/grafana-cloud/send-data/agent/static/configuration/integrations/integrations-next/blackbox-config/ -canonical: https://grafana.com/docs/agent/latest/static/configuration/integrations/integrations-next/blackbox-config/ -description: Learn about blackbox_config next -title: blackbox_config next ---- - -# blackbox_config next - -The `blackbox_config` block configures the `blackbox_exporter` -integration, which is an embedded version of -[`blackbox_exporter`](https://github.com/prometheus/blackbox_exporter). This allows -for the collection of blackbox metrics (probes) and exposing them as Prometheus metrics. - -## Quick configuration example - -To get started, define Blackbox targets in Grafana Agent's integration block: - -```yaml -metrics: - wal_directory: /tmp/wal - configs: - - name: default -integrations: - blackbox: - blackbox_targets: - - name: example - address: http://example.com - module: http_2xx - blackbox_config: - modules: - http_2xx: - prober: http - timeout: 5s - http: - method: POST - headers: - Content-Type: application/json - body: '{}' - preferred_ip_protocol: "ip4" -``` - -Full reference of options: - -```yaml - # Provide an explicit value to uniquely identify this instance of the - # integration. If not provided, a reasonable default will be inferred based - # on the integration. - # - # The value here must be unique across all instances of the same integration. - [instance: ] - - # Override autoscrape defaults for this integration. - autoscrape: - # Enables autoscrape of integrations. - [enable: | default = ] - - # Specifies the metrics instance name to send metrics to. - [metrics_instance: | default = ] - - # Autoscrape interval and timeout. - [scrape_interval: | default = ] - [scrape_timeout: | default = ] - - # An optional extra set of labels to add to metrics from the integration target. These - # labels are only exposed via the integration service discovery HTTP API and - # added when autoscrape is used. They will not be found directly on the metrics - # page for an integration. - extra_labels: - [ : ... ] - - # - # Exporter-specific configuration options - # - - # blackbox configuration file with custom modules. - # This field has precedence to the config defined in the blackbox_config block. - # See https://github.com/prometheus/blackbox_exporter/blob/master/example.yml for more details how to generate custom blackbox.yml file. - [config_file: | default = ""] - - # Embedded blackbox configuration. You can specify your modules here instead of an external config file. - # See https://github.com/prometheus/blackbox_exporter/blob/master/CONFIGURATION.md for more details how to specify your blackbox modules. - blackbox_config: - [- ... ] - - # List of targets to probe - blackbox_targets: - [- ... ] - - # Option to configure blackbox_exporter. - # Represents the offset to subtract from timeout in seconds when probing targets. 
- [probe_timeout_offset: | default = 0.5] -``` -## blackbox_target config - -```yaml - # Name of a blackbox_target - [name: ] - - # The address of the target to probe - [address: ] - - # Blackbox module to use to probe - [module: | default = ""] -``` diff --git a/docs/sources/static/configuration/integrations/integrations-next/eventhandler-config.md b/docs/sources/static/configuration/integrations/integrations-next/eventhandler-config.md deleted file mode 100644 index 0008f8c29d..0000000000 --- a/docs/sources/static/configuration/integrations/integrations-next/eventhandler-config.md +++ /dev/null @@ -1,251 +0,0 @@ ---- -aliases: -- ../../../../configuration/integrations/integrations-next/eventhandler-config/ -- /docs/grafana-cloud/monitor-infrastructure/agent/static/configuration/integrations/integrations-next/eventhandler-config/ -- /docs/grafana-cloud/send-data/agent/static/configuration/integrations/integrations-next/eventhandler-config/ -canonical: https://grafana.com/docs/agent/latest/static/configuration/integrations/integrations-next/eventhandler-config/ -description: Learn about eventhandler_config next -title: eventhandler_config next ---- - -# eventhandler_config next - -`eventhandler_config` configures the Kubernetes eventhandler integration. This -integration watches -[Event](https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.19/#event-v1-core) -resources in a Kubernetes cluster and forwards them as log entries to a Loki -sink. This integration depends on the experimental `integrations-next` feature -being enabled. - -On restart, the integration will look for a cache file (configured using -`cache_path`) that stores the last shipped event. This file is optional, and if -present, will be used to avoid double-shipping events if Agent or the -integration restarts. Kubernetes expires events after 60 minutes, so events -older than 60 minutes ago will never be shipped. - -To use the cache feature and maintain state in a Kubernetes environment, a -[StatefulSet](https://kubernetes.io/docs/concepts/workloads/controllers/statefulset/) -must be used. Sample manifests are provided at the bottom of this doc. Please -adjust these according to your deployment preferences. You can also use a -Deployment, however the presence of the cache file will not be guaranteed and -the integration may ship duplicate entries in the event of a restart. Loki does -not yet support entry deduplication for the A->B->A case, so further -deduplication can only take place at the Grafana / front-end layer (Grafana -Explore does provide some deduplication features for Loki datasources). - -This integration uses Grafana Agent's embedded Loki-compatible `logs` subsystem -to ship entries, and a logs client and sink must be configured to use the -integration. Please see the sample Agent config below for an example -configuration. -[Pipelines](/docs/loki/latest/clients/promtail/pipelines/) -and relabel configuration are not yet supported, but these features will be -added soon. You should use the `job=eventhandler cluster=...` labels to query -your events (you can then use LogQL on top of the result set). - -If not running the integration in-cluster, the integration will use -`kubeconfig_path` to search for a valid Kubeconfig file, defaulting to a -kubeconfig in the user's home directory. If running in-cluster, the appropriate -`ServiceAccount` and Roles must be defined. Sample manifests are provided -below. 
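-
-As an illustration, the integration can be scoped to a single namespace and
-attach extra labels to every entry it ships. The namespace and label values
-below are examples, not defaults; the fields used are those from the
-configuration reference that follows:
-
-```yaml
-integrations:
-  eventhandler:
-    cache_path: "/etc/eventhandler/eventhandler.cache"
-    logs_instance: default
-    namespace: production
-    extra_labels:
-      environment: prod
-```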
- -Configuration reference: - -```yaml - # Provide an explicit value to uniquely identify this instance of the - # integration. If not provided, a reasonable default will be inferred based - # on the integration. - # - # The value here must be unique across all instances of the same integration. - [instance: ] - - ## Eventhandler hands watched events off to promtail using a promtail - ## client channel. This parameter configures how long to wait (in seconds) on the channel - ## before abandoning and moving on. - [send_timeout: | default = 60] - - ## Configures the path to a kubeconfig file. If not set, will fall back to using - ## an in-cluster config. If this fails, will fall back to checking the user's home - ## directory for a kubeconfig. - [kubeconfig_path: ] - - ## Path to a cache file that will store the last timestamp for a shipped event and events - ## shipped for that timestamp. Used to prevent double-shipping on integration restart. - [cache_path: | default = "./.eventcache/eventhandler.cache"] - - ## Name of logs subsystem instance to hand log entries off to. - [logs_instance: | default = "default"] - - ## K8s informer resync interval (seconds). You should use defaults here unless you are - ## familiar with K8s informers. - [informer_resync: | default = 120] - - ## The integration will flush the last event shipped out to disk every flush_interval seconds. - [flush_interval: | default = 10] - - ## If you would like to limit events to a given namespace, use this parameter. - [namespace: ] - - ## Configure extra labels to add to log lines - extra_labels: - { : } - - ## Format of the log line. The possible values are "logfmt" and "json". - ## The values are also LogQL parsers, which can be used for processing the logs - [log_format: | default = "logfmt"] -``` - -Sample agent config: - -```yaml -server: - log_level: info - -integrations: - eventhandler: - cache_path: "/etc/eventhandler/eventhandler.cache" - -logs: - configs: - - name: default - clients: - - url: https://logs-prod-us-central1.grafana.net/api/prom/push - basic_auth: - username: YOUR_LOKI_USER - password: YOUR_LOKI_API_KEY - external_labels: - cluster: "cloud" - positions: - filename: /tmp/positions0.yaml -``` - -Be sure to replace the Loki credentials with the appropriate values. - -Sample StatefulSet manifests. 
Please adjust these according to your needs: - -```yaml -apiVersion: v1 -kind: ServiceAccount -metadata: - name: grafana-agent-eventhandler - namespace: default ---- -apiVersion: rbac.authorization.k8s.io/v1 -kind: ClusterRole -metadata: - name: grafana-agent-eventhandler -rules: -- apiGroups: - - "" - resources: - - events - verbs: - - get - - list - - watch ---- -apiVersion: rbac.authorization.k8s.io/v1 -kind: ClusterRoleBinding -metadata: - name: grafana-agent-eventhandler -roleRef: - apiGroup: rbac.authorization.k8s.io - kind: ClusterRole - name: grafana-agent-eventhandler -subjects: -- kind: ServiceAccount - name: grafana-agent-eventhandler - namespace: default ---- -apiVersion: v1 -kind: Service -metadata: - name: grafana-agent-eventhandler-svc -spec: - ports: - - port: 12345 - name: http-metrics - clusterIP: None - selector: - name: grafana-agent-eventhandler ---- -kind: ConfigMap -metadata: - name: grafana-agent-eventhandler - namespace: default -apiVersion: v1 -data: - agent.yaml: | - server: - log_level: info - - integrations: - eventhandler: - cache_path: "/etc/eventhandler/eventhandler.cache" - - logs: - configs: - - name: default - clients: - - url: https://logs-prod-us-central1.grafana.net/api/prom/push - basic_auth: - username: YOUR_LOKI_USER - password: YOUR_LOKI_API_KEY - external_labels: - cluster: "cloud" - positions: - filename: /tmp/positions0.yaml ---- -apiVersion: apps/v1 -kind: StatefulSet -metadata: - name: grafana-agent-eventhandler - namespace: default -spec: - serviceName: "grafana-agent-eventhandler-svc" - selector: - matchLabels: - name: grafana-agent-eventhandler - replicas: 1 - template: - metadata: - labels: - name: grafana-agent-eventhandler - spec: - terminationGracePeriodSeconds: 10 - containers: - - name: agent - image: grafana/agent:main - imagePullPolicy: IfNotPresent - args: - - -config.file=/etc/agent/agent.yaml - - -enable-features=integrations-next - - -server.http.address=0.0.0.0:12345 - command: - - /bin/grafana-agent - env: - - name: HOSTNAME - valueFrom: - fieldRef: - fieldPath: spec.nodeName - ports: - - containerPort: 12345 - name: http-metrics - volumeMounts: - - name: grafana-agent - mountPath: /etc/agent - - name: eventhandler-cache - mountPath: /etc/eventhandler - serviceAccount: grafana-agent-eventhandler - volumes: - - configMap: - name: grafana-agent-eventhandler - name: grafana-agent - volumeClaimTemplates: - - metadata: - name: eventhandler-cache - spec: - accessModes: [ "ReadWriteOnce" ] - resources: - requests: - storage: 1Gi -``` diff --git a/docs/sources/static/configuration/integrations/integrations-next/snmp-config.md b/docs/sources/static/configuration/integrations/integrations-next/snmp-config.md deleted file mode 100644 index 2f331d6d69..0000000000 --- a/docs/sources/static/configuration/integrations/integrations-next/snmp-config.md +++ /dev/null @@ -1,178 +0,0 @@ ---- -aliases: -- ../../../../configuration/integrations/integrations-next/snmp-config/ -- /docs/grafana-cloud/monitor-infrastructure/agent/static/configuration/integrations/integrations-next/snmp-config/ -- /docs/grafana-cloud/send-data/agent/static/configuration/integrations/integrations-next/snmp-config/ -canonical: https://grafana.com/docs/agent/latest/static/configuration/integrations/integrations-next/snmp-config/ -description: Learn about snmp config next -title: snmp config next ---- - -# snmp config next - -The `snmp` block configures the `snmp` integration, -which is an embedded version of -[`snmp_exporter`](https://github.com/prometheus/snmp_exporter). 
This allows collection of SNMP metrics from the network devices with ease. - - -## Quick configuration example - -To get started, define SNMP targets in Grafana agent's integration block: - -```yaml -metrics: - wal_directory: /tmp/wal -integrations: - snmp: - snmp_targets: - - name: network_switch_1 - address: 192.168.1.2 - module: if_mib - walk_params: public - auth: public - - name: network_router_2 - address: 192.168.1.3 - module: mikrotik - walk_params: private - auth: private - walk_params: - private: - retries: 2 - public: - retries: 1 -``` - -## Prometheus service discovery use case - -If you need to scrape SNMP devices in more dynamic environment, and cannot define devices in `snmp_targets` because targets would change over time, you can use service discovery approach. For instance, with [DNS discovery](https://prometheus.io/docs/prometheus/2.45/configuration/configuration/#dns_sd_config): - -```yaml - -metrics: - wal_directory: /tmp/wal - configs: - - name: snmp_targets - scrape_configs: - - job_name: 'snmp' - dns_sd_configs: - - names: - - switches.srv.example.org - - routers.srv.example.org - params: - module: [if_mib] - walk_params: [private] - auth: [private] - metrics_path: /integrations/snmp/metrics - relabel_configs: - - source_labels: [__address__] - target_label: __param_target - - source_labels: [__param_target] - target_label: instance - - replacement: 127.0.0.1:12345 # address must match grafana agent -server.http.address flag - target_label: __address__ -integrations: - snmp: - autoscrape: - enable: false # set autoscrape to off - walk_params: - private: - retries: 2 -``` - - -Full reference of options: - -```yaml - # Provide an explicit value to uniquely identify this instance of the - # integration. If not provided, a reasonable default will be inferred based - # on the integration. - # - # The value here must be unique across all instances of the same integration. - [instance: ] - - # Override autoscrape defaults for this integration. - autoscrape: - # Enables autoscrape of integrations. - [enable: | default = ] - - # Specifies the metrics instance name to send metrics to. - [metrics_instance: | default = ] - - # Autoscrape interval and timeout. - [scrape_interval: | default = ] - [scrape_timeout: | default = ] - - # An optional extra set of labels to add to metrics from the integration target. These - # labels are only exposed via the integration service discovery HTTP API and - # added when autoscrape is used. They will not be found directly on the metrics - # page for an integration. - extra_labels: - [ : ... ] - - # - # Exporter-specific configuration options - # - - # SNMP configuration file with custom modules. - # See https://github.com/prometheus/snmp_exporter#generating-configuration for more details how to generate custom snmp.yml file. - # If not defined, embedded snmp_exporter default set of modules is used. - [config_file: | default = ""] - - # Embedded SNMP configuration. You can specify your modules here instead of an external config file. - # See https://github.com/prometheus/snmp_exporter/tree/main#generating-configuration for more details how to specify your SNMP modules. - # If this and config_file are not defined, embedded snmp_exporter default set of modules is used. - snmp_config: - [- ... ] - [- ... ] - - # List of SNMP targets to poll - snmp_targets: - [- ... ] - - # Map of SNMP connection profiles that can be used to override default SNMP settings. - walk_params: - [ : ... 
] - - -``` -## snmp_target config - -```yaml - # Name of a snmp_target - [name: ] - - # The address of SNMP device - [address: ] - - # SNMP module to use for polling - [module: | default = ""] - - # SNMP authentication profile to use - [auth: | default = ""] - - # walk_param config to use for this snmp_target - [walk_params: | default = ""] -``` - -## walk_param config - -```yaml - # How many objects to request with GET/GETBULK, defaults to 25. - # May need to be reduced for buggy devices. - [max_repetitions: | default = 25] - - # How many times to retry a failed request, defaults to 3. - [retries: | default = 3] - - # Timeout for each SNMP request, defaults to 5s. - [timeout: | default = 5s] -``` - - -## About SNMP modules - -SNMP module is the set of SNMP counters to be scraped together from the specific network device. - -SNMP modules available can be found in the embedded snmp.yml file [here](https://github.com/grafana/agent/blob/main/internal/static/integrations/snmp_exporter/common/snmp.yml). If not specified, `if_mib` module is used. - -If you need to use custom SNMP modules, you can [generate](https://github.com/prometheus/snmp_exporter#generating-configuration) your own snmp.yml file and specify it using `config_file` parameter. diff --git a/docs/sources/static/configuration/integrations/integrations-next/vsphere-config.md b/docs/sources/static/configuration/integrations/integrations-next/vsphere-config.md deleted file mode 100644 index b63523fe48..0000000000 --- a/docs/sources/static/configuration/integrations/integrations-next/vsphere-config.md +++ /dev/null @@ -1,86 +0,0 @@ ---- -aliases: -- ../../../../configuration/integrations/integrations-next/vsphere-config/ -- /docs/grafana-cloud/monitor-infrastructure/agent/static/configuration/integrations/integrations-next/vsphere-config/ -- /docs/grafana-cloud/send-data/agent/static/configuration/integrations/integrations-next/vsphere-config/ -canonical: https://grafana.com/docs/agent/latest/static/configuration/integrations/integrations-next/vsphere-config/ -description: Learn about vsphere_config next -menuTitle: vsphere_config next -title: vsphere config (beta) next ---- - -# vsphere config (beta) next - -The `vsphere_config` block configures the `vmware_exporter` integration, an embedded -version of [`vmware_exporter`](https://github.com/grafana/vmware_exporter), configured -to collect vSphere metrics. This integration is considered beta. - -Configuration reference: - -```yaml - autoscrape: - # Enables autoscrape of integrations. - [enable: | default = true] - - # Specifies the metrics instance name to send metrics to. Instance - # names are located at metrics.configs[].name from the top-level config. - # The instance must exist. - # - # As it is common to use the name "default" for your primary instance, - # we assume the same here. - [metrics_instance: | default = "default"] - - # Autoscrape interval and timeout. Defaults are inherited from the global - # section of the top-level metrics config. - [scrape_interval: | default = ] - [scrape_timeout: | default = ] - - # Integration instance name. This will default to the host:port of the configured - # vsphere_url. - [instance: | default = ] - - # Number of managed objects to include in each request to vsphere when - # fetching performance counters. - [request_chunk_size: | default = 256] - - # Number of concurrent requests to vsphere when fetching performance counters. - [collect_concurrency: | default = 8] - - # Interval on which to run vsphere managed object discovery. 
Setting this to a
-  # non-zero value will result in object discovery running in the background. Each
-  # scrape will use object data gathered during the last discovery.
-  # When this value is 0, object discovery occurs per scrape.
-  [discovery_interval: | default = 0]
-  [enable_exporter_metrics: | default = true]
-
-  # The URL of the vCenter SDK endpoint
-  vsphere_url:
-
-  # vCenter username
-  vsphere_user:
-
-  # vCenter password
-  vsphere_password:
-
-```
-
-## Quick configuration example
-
-```yaml
-integrations:
-  vsphere_configs:
-    - vsphere_url: https://127.0.0.1:8989/sdk
-      vsphere_user: user
-      vsphere_password: pass
-      request_chunk_size: 256
-      collect_concurrency: 8
-      instance: vsphere
-      autoscrape:
-        enable: true
-        metrics_instance: default
-
-metrics:
-  wal_directory: /tmp/grafana-agent-wal
-server:
-  log_level: debug
-```
diff --git a/docs/sources/static/configuration/integrations/kafka-exporter-config.md b/docs/sources/static/configuration/integrations/kafka-exporter-config.md
deleted file mode 100644
index 14c8e5e990..0000000000
--- a/docs/sources/static/configuration/integrations/kafka-exporter-config.md
+++ /dev/null
@@ -1,126 +0,0 @@
----
-aliases:
-- ../../../configuration/integrations/kafka-exporter-config/
-- /docs/grafana-cloud/monitor-infrastructure/agent/static/configuration/integrations/kafka-exporter-config/
-- /docs/grafana-cloud/send-data/agent/static/configuration/integrations/kafka-exporter-config/
-canonical: https://grafana.com/docs/agent/latest/static/configuration/integrations/kafka-exporter-config/
-description: Learn about kafka_exporter_config
-title: kafka_exporter_config
----
-
-# kafka_exporter_config
-
-The `kafka_exporter_config` block configures the `kafka_exporter`
-integration, which is an embedded version of [`kafka_exporter`](https://github.com/davidmparrott/kafka_exporter).
-This allows for the collection of Kafka lag metrics, exposing them as Prometheus metrics.
-
-We strongly recommend that you configure a separate user for the Agent, and give it only the strictly mandatory
-security privileges necessary for monitoring your node, as per the [documentation](https://github.com/lightbend/kafka-lag-exporter#required-permissions-for-kafka-acl).
-
-Full reference of options:
-
-```yaml
-  # Enables the kafka_exporter integration, allowing the Agent to automatically
-  # collect metrics from the configured Kafka server address
-  [enabled: | default = false]
-
-  # Sets an explicit value for the instance label when the integration is
-  # self-scraped. Overrides inferred values.
-  #
-  # The default value for this integration is inferred from the hostname
-  # portion of the first kafka_uri value. If there is more than one string
-  # in kafka_uri, the integration will fail to load and an instance value
-  # must be manually provided.
-  [instance: ]
-
-  # Automatically collect metrics from this integration. If disabled,
-  # the kafka_exporter integration will be run but not scraped and thus not
-  # remote-written. Metrics for the integration will be exposed at
-  # /integrations/kafka_exporter/metrics and can be scraped by an external
-  # process.
-  [scrape_integration: | default = ]
-
-  # How often should the metrics be collected? Defaults to
-  # prometheus.global.scrape_interval.
-  [scrape_interval: | default = ]
-
-  # The timeout before considering the scrape a failure. Defaults to
-  # prometheus.global.scrape_timeout.
-  [scrape_timeout: | default = ]
-
-  # Allows for relabeling labels on the target.
-  relabel_configs:
-  [- ...
] - - # Relabel metrics coming from the integration, allowing to drop series - # from the integration that you don't care about. - metric_relabel_configs: - [ - ... ] - - # How frequent to truncate the WAL for this integration. - [wal_truncate_frequency: | default = "60m"] - - # Address array (host:port) of Kafka server - [kafka_uris: <[]string>] - - # Connect using SASL/PLAIN - [use_sasl: ] - - # Only set this to false if using a non-Kafka SASL proxy - [use_sasl_handshake: | default = true] - - # SASL user name - [sasl_username: ] - - # SASL user password - [sasl_password: ] - - # The SASL SCRAM SHA algorithm sha256 or sha512 as mechanism - [sasl_mechanism: ] - - # Connect using TLS - [use_tls: ] - - # The optional certificate authority file for TLS client authentication - [ca_file: ] - - # The optional certificate file for TLS client authentication - [cert_file: ] - - # The optional key file for TLS client authentication - [key_file: ] - - # If true, the server's certificate will not be checked for validity. This will make your HTTPS connections insecure - [insecure_skip_verify: ] - - # Kafka broker version - [kafka_version: | default = "2.0.0"] - - # if you need to use a group from zookeeper - [use_zookeeper_lag: ] - - # Address array (hosts) of zookeeper server. - [zookeeper_uris: <[]string>] - - # Kafka cluster name - [kafka_cluster_name: ] - - # Metadata refresh interval - [metadata_refresh_interval: | default = "1m"] - - # If true, all scrapes will trigger kafka operations otherwise, they will share results. WARN: This should be disabled on large clusters - [allow_concurrency: | default = true] - - # Maximum number of offsets to store in the interpolation table for a partition - [max_offsets: | default = 1000] - - # How frequently should the interpolation table be pruned, in seconds - [prune_interval_seconds: | default = 30] - - # Regex filter for topics to be monitored - [topics_filter_regex: | default = ".*"] - - # Regex filter for consumer groups to be monitored - [groups_filter_regex: | default = ".*"] - -``` diff --git a/docs/sources/static/configuration/integrations/memcached-exporter-config.md b/docs/sources/static/configuration/integrations/memcached-exporter-config.md deleted file mode 100644 index a8fe548f7c..0000000000 --- a/docs/sources/static/configuration/integrations/memcached-exporter-config.md +++ /dev/null @@ -1,110 +0,0 @@ ---- -aliases: -- ../../../configuration/integrations/memcached-exporter-config/ -- /docs/grafana-cloud/monitor-infrastructure/agent/static/configuration/integrations/memcached-exporter-config/ -- /docs/grafana-cloud/send-data/agent/static/configuration/integrations/memcached-exporter-config/ -canonical: https://grafana.com/docs/agent/latest/static/configuration/integrations/memcached-exporter-config/ -description: Learn about memcached_exporter_config -title: memcached_exporter_config ---- - -# memcached_exporter_config - -The `memcached_exporter_config` block configures the `memcached_exporter` -integration, which is an embedded version of -[`memcached_exporter`](https://github.com/prometheus/memcached_exporter). This -allows for the collection of metrics from memcached servers. - -Note that currently, an Agent can only collect metrics from a single memcached -server. 
If you want to collect metrics from multiple servers, you can run -multiple Agents and add labels using `relabel_configs` to differentiate between -the servers: - -```yaml -memcached_exporter: - enabled: true - memcached_address: memcached-a:53 - relabel_configs: - - source_labels: [__address__] - target_label: instance - replacement: memcached-a -``` - -Full reference of options: - -```yaml - # Enables the memcached_exporter integration, allowing the Agent to automatically - # collect system metrics from the configured memcached server address - [enabled: | default = false] - - # Sets an explicit value for the instance label when the integration is - # self-scraped. Overrides inferred values. - # - # The default value for this integration is inferred from - # memcached_address. - [instance: ] - - # Automatically collect metrics from this integration. If disabled, - # the memcached_exporter integration will be run but not scraped and thus not - # remote-written. Metrics for the integration will be exposed at - # /integrations/memcached_exporter/metrics and can be scraped by an external - # process. - [scrape_integration: | default = ] - - # How often should the metrics be collected? Defaults to - # prometheus.global.scrape_interval. - [scrape_interval: | default = ] - - # The timeout before considering the scrape a failure. Defaults to - # prometheus.global.scrape_timeout. - [scrape_timeout: | default = ] - - # Allows for relabeling labels on the target. - relabel_configs: - [- ... ] - - # Relabel metrics coming from the integration, allowing to drop series - # from the integration that you don't care about. - metric_relabel_configs: - [ - ... ] - - # How frequent to truncate the WAL for this integration. - [wal_truncate_frequency: | default = "60m"] - - # - # Exporter-specific configuration options - # - - # Address of the memcached server in host:port form. - [memcached_address: | default = "localhost:53"] - - # Timeout for connecting to memcached. - [timeout: | default = "1s"] - - # TLS configuration for requests to the memcached server. - tls_config: - # The CA cert to use. - [ca: ] - # The client cert to use. - [cert: ] - # The client key to use. - [key: ] - - # Path to the CA cert file to use. - [ca_file: ] - # Path to the client cert file to use. - [cert_file: ] - # Path to the client key file to use. - [key_file: ] - - # Used to verify the hostname for the memcached server. - [server_name: ] - - # Disable memcached server certificate validation. - [insecure_skip_verify: | default = false] - - # Minimum TLS version. - [min_version: ] - # Maximum TLS version. 
- [max_version: ]
-```
diff --git a/docs/sources/static/configuration/integrations/mongodb_exporter-config.md b/docs/sources/static/configuration/integrations/mongodb_exporter-config.md
deleted file mode 100644
index 4ed4b14b2b..0000000000
--- a/docs/sources/static/configuration/integrations/mongodb_exporter-config.md
+++ /dev/null
@@ -1,92 +0,0 @@
----
-aliases:
-- ../../../configuration/integrations/mongodb_exporter-config/
-- /docs/grafana-cloud/monitor-infrastructure/agent/static/configuration/integrations/mongodb_exporter-config/
-- /docs/grafana-cloud/send-data/agent/static/configuration/integrations/mongodb_exporter-config/
-canonical: https://grafana.com/docs/agent/latest/static/configuration/integrations/mongodb_exporter-config/
-description: Learn about mongodb_exporter_config
-title: mongodb_exporter_config
----
-
-# mongodb_exporter_config
-
-The `mongodb_exporter_config` block configures the `mongodb_exporter` integration, which is an embedded version of Percona's [`mongodb_exporter`](https://github.com/percona/mongodb_exporter).
-
-In order for this integration to work properly, you have to connect each node of your MongoDB cluster to an Agent instance.
-That's because this exporter does not collect metrics from multiple nodes.
-Additionally, you need to define two custom labels for your metrics using `relabel_configs`.
-The first one is `service_name`, which is how you identify this node in your cluster (example: ReplicaSet1-Node1).
-The second one is `mongodb_cluster`, which is the name of your MongoDB cluster, and must be set to the same value for all nodes composing the cluster (example: prod-cluster).
-Here's an example:
-
-```yaml
-relabel_configs:
-  - source_labels: [__address__]
-    target_label: service_name
-    replacement: 'replicaset1-node1'
-  - source_labels: [__address__]
-    target_label: mongodb_cluster
-    replacement: 'prod-cluster'
-```
-
-We strongly recommend that you configure a separate user for the Agent, and give it only the strictly mandatory
-security privileges necessary for monitoring your node, as per the [official documentation](https://github.com/percona/mongodb_exporter#permissions).
-
-Besides that, there's not much to configure. Please refer to the full reference of options:
-
-```yaml
-  # Enables the mongodb_exporter integration
-  [enabled: | default = false]
-
-  # Sets an explicit value for the instance label when the integration is
-  # self-scraped. Overrides inferred values.
-  #
-  # The default value for this integration is inferred from the hostname
-  # portion of the mongodb_uri field.
-  [instance: ]
-
-  # Automatically collect metrics from this integration. If disabled,
-  # the mongodb_exporter integration will be run but not scraped and thus not
-  # remote-written. Metrics for the integration will be exposed at
-  # /integrations/mongodb_exporter/metrics and can be scraped by an external
-  # process.
-  [scrape_integration: | default = ]
-
-  # How often should the metrics be collected? Defaults to
-  # metrics.global.scrape_interval.
-  [scrape_interval: | default = ]
-
-  # The timeout before considering the scrape a failure. Defaults to
-  # metrics.global.scrape_timeout.
-  [scrape_timeout: | default = ]
-
-  # Allows for relabeling labels on the target.
-  relabel_configs:
-  [- ... ]
-
-  # Relabel metrics coming from the integration, allowing you to drop series
-  # from the integration that you don't care about.
-  metric_relabel_configs:
-  [ - ... ]
-
-  # How frequent to truncate the WAL for this integration.
- [wal_truncate_frequency: | default = "60m"] - - # - # Exporter-specific configuration options - # - - # MongoDB node connection URL, which must be in the [`Standard Connection String Format`](https://docs.mongodb.com/manual/reference/connection-string/#std-label-connections-standard-connection-string-format) - [mongodb_uri: ] - - # Whether or not a direct connect should be made. Direct connections are not valid if multiple hosts are specified or an SRV URI is used - [direct_connect: | default = true] - - # Enable autodiscover collections - [discovering_mode: | default = false] - - # Path to the file having Prometheus TLS config for basic auth. Only enable if you want to use TLS based authentication. - [tls_basic_auth_config_path: | default = ""] -``` - -For `tls_basic_auth_config_path`, check [`tls_config`](https://prometheus.io/docs/prometheus/latest/configuration/configuration/#tls_config) for reference on the file format to be used. diff --git a/docs/sources/static/configuration/integrations/mssql-config.md b/docs/sources/static/configuration/integrations/mssql-config.md deleted file mode 100644 index 9152414c4f..0000000000 --- a/docs/sources/static/configuration/integrations/mssql-config.md +++ /dev/null @@ -1,333 +0,0 @@ ---- -aliases: -- ../../../configuration/integrations/mssql-config/ -- /docs/grafana-cloud/monitor-infrastructure/agent/static/configuration/integrations/mssql-config/ -- /docs/grafana-cloud/send-data/agent/static/configuration/integrations/mssql-config/ -canonical: https://grafana.com/docs/agent/latest/static/configuration/integrations/mssql-config/ -description: Learn about mssql_config -title: mssql_config ---- - -# mssql_config - -The `mssql_config` block configures the `mssql` integration, an embedded version of [`sql_exporter`](https://github.com/burningalchemist/sql_exporter) that lets you collect [Microsoft SQL Server](https://www.microsoft.com/en-us/sql-server) metrics. - -It is recommended that you have a dedicated user set up for monitoring an mssql instance. -The user for monitoring must have the following grants in order to populate the metrics: -``` -GRANT VIEW ANY DEFINITION TO -GRANT VIEW SERVER STATE TO -``` - -## Quick configuration example - -To get started, define the MSSQL connection string in Grafana Agent's integration block: - -```yaml -metrics: - wal_directory: /tmp/wal -integrations: - mssql: - enabled: true - connection_string: "sqlserver://[user]:[pass]@localhost:1433" -``` - -Full reference of options: - -```yaml - # Enables the MSSQL integration, allowing the Agent to automatically - # collect metrics for the specified MSSQL instance. - [enabled: | default = false] - - # Sets an explicit value for the instance label when the integration is - # self-scraped. Overrides inferred values. - # - # The default value for this integration is the host:port of the provided connection_string. - [instance: ] - - # Automatically collect metrics from this integration. If disabled, - # the MSSQL integration is run but not scraped and thus not - # remote-written. Metrics for the integration are exposed at - # /integrations/mssql/metrics and can be scraped by an external - # process. - [scrape_integration: | default = ] - - # How often should the metrics be collected? Defaults to - # prometheus.global.scrape_interval. - [scrape_interval: | default = ] - - # The timeout before considering the scrape a failure. Defaults to - # prometheus.global.scrape_timeout. - [scrape_timeout: | default = ] - - # Allows for relabeling labels on the target. 
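  # As an illustrative sketch (the replacement value below is hypothetical,
  # not part of the reference), a relabel rule can override the inferred
  # instance label:
  #
  #   relabel_configs:
  #   - source_labels: [__address__]
  #     target_label: instance
  #     replacement: mssql-prod-01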
- relabel_configs:
-  [- ... ]
-
-  # Relabel metrics coming from the integration, lets you drop series
-  # that you don't care about from the integration.
-  metric_relabel_configs:
-  [ - ... ]
-
-  # How frequently the WAL is truncated for this integration.
-  [wal_truncate_frequency: | default = "60m"]
-
-  #
-  # Exporter-specific configuration options
-  #
-
-  # The connection_string to use to connect to the MSSQL instance.
-  # It is specified in the form of: "sqlserver://:@:"
-  connection_string:
-
-  # The maximum number of open database connections to the MSSQL instance.
-  [max_open_connections: | default = 3]
-
-  # The maximum number of idle database connections to the MSSQL instance.
-  [max_idle_connections: | default = 3]
-
-  # The timeout for scraping metrics from the MSSQL instance.
-  [timeout: | default = "10s"]
-
-  # Embedded MSSQL query configuration for specifying custom MSSQL Prometheus metrics.
-  # See https://github.com/burningalchemist/sql_exporter#collectors for more details on how to specify your metric configurations.
-  query_config:
-  [- ... ]
-  [- ... ]
-```
-
-### Authentication
-By default, the `USERNAME` and `PASSWORD` used within the `connection_string` argument correspond to a SQL Server username and password.
-
-If Grafana Agent is running in the same Windows domain as the SQL Server, then you can use the parameter `authenticator=winsspi` within the `connection_string` to authenticate without any additional credentials.
-
-```conn
-sqlserver://@:?authenticator=winsspi
-```
-
-If you want to use Windows credentials to authenticate, instead of SQL Server credentials, you can use the parameter `authenticator=ntlm` within the `connection_string`.
-The `USERNAME` and `PASSWORD` then correspond to a Windows username and password.
-The Windows domain may need to be prefixed to the username with a trailing `\`.
-
-```conn
-sqlserver://:@:?authenticator=ntlm
-```
-
-## Custom metrics
-You can use the optional `query_config` parameter to retrieve custom Prometheus metrics for an MSSQL instance.
-
-If this is defined, the new configuration will be used to query your MSSQL instance and create whatever Prometheus metrics are defined.
-If you want additional metrics on top of the default metrics, the default configuration must be used as a base.
-
-The default configuration used by this integration is as follows:
-```
-collector_name: mssql_standard
-
-metrics:
-  - metric_name: mssql_local_time_seconds
-    type: gauge
-    help: 'Local time in seconds since epoch (Unix time).'
-    values: [unix_time]
-    query: |
-      SELECT DATEDIFF(second, '19700101', GETUTCDATE()) AS unix_time
-  - metric_name: mssql_connections
-    type: gauge
-    help: 'Number of active connections.'
-    key_labels:
-      - db
-    values: [count]
-    query: |
-      SELECT DB_NAME(sp.dbid) AS db, COUNT(sp.spid) AS count
-      FROM sys.sysprocesses sp
-      GROUP BY DB_NAME(sp.dbid)
-  #
-  # Collected from sys.dm_os_performance_counters
-  #
-  - metric_name: mssql_deadlocks_total
-    type: counter
-    help: 'Number of lock requests that resulted in a deadlock.'
-    values: [cntr_value]
-    query: |
-      SELECT cntr_value
-      FROM sys.dm_os_performance_counters WITH (NOLOCK)
-      WHERE counter_name = 'Number of Deadlocks/sec' AND instance_name = '_Total'
-  - metric_name: mssql_user_errors_total
-    type: counter
-    help: 'Number of user errors.'
- values: [cntr_value] - query: | - SELECT cntr_value - FROM sys.dm_os_performance_counters WITH (NOLOCK) - WHERE counter_name = 'Errors/sec' AND instance_name = 'User Errors' - - metric_name: mssql_kill_connection_errors_total - type: counter - help: 'Number of severe errors that caused SQL Server to kill the connection.' - values: [cntr_value] - query: | - SELECT cntr_value - FROM sys.dm_os_performance_counters WITH (NOLOCK) - WHERE counter_name = 'Errors/sec' AND instance_name = 'Kill Connection Errors' - - metric_name: mssql_page_life_expectancy_seconds - type: gauge - help: 'The minimum number of seconds a page will stay in the buffer pool on this node without references.' - values: [cntr_value] - query: | - SELECT top(1) cntr_value - FROM sys.dm_os_performance_counters WITH (NOLOCK) - WHERE counter_name = 'Page life expectancy' - - metric_name: mssql_batch_requests_total - type: counter - help: 'Number of command batches received.' - values: [cntr_value] - query: | - SELECT cntr_value - FROM sys.dm_os_performance_counters WITH (NOLOCK) - WHERE counter_name = 'Batch Requests/sec' - - metric_name: mssql_log_growths_total - type: counter - help: 'Number of times the transaction log has been expanded, per database.' - key_labels: - - db - values: [cntr_value] - query: | - SELECT rtrim(instance_name) AS db, cntr_value - FROM sys.dm_os_performance_counters WITH (NOLOCK) - WHERE counter_name = 'Log Growths' AND instance_name <> '_Total' - - metric_name: mssql_buffer_cache_hit_ratio - type: gauge - help: 'Ratio of requests that hit the buffer cache' - values: [BufferCacheHitRatio] - query: | - SELECT (a.cntr_value * 1.0 / b.cntr_value) * 100.0 as BufferCacheHitRatio - FROM sys.dm_os_performance_counters a - JOIN (SELECT cntr_value, OBJECT_NAME - FROM sys.dm_os_performance_counters - WHERE counter_name = 'Buffer cache hit ratio base' - AND OBJECT_NAME = 'SQLServer:Buffer Manager') b ON a.OBJECT_NAME = b.OBJECT_NAME - WHERE a.counter_name = 'Buffer cache hit ratio' - AND a.OBJECT_NAME = 'SQLServer:Buffer Manager' - - - metric_name: mssql_checkpoint_pages_sec - type: gauge - help: 'Checkpoint Pages Per Second' - values: [cntr_value] - query: | - SELECT cntr_value - FROM sys.dm_os_performance_counters - WHERE [counter_name] = 'Checkpoint pages/sec' - # - # Collected from sys.dm_io_virtual_file_stats - # - - metric_name: mssql_io_stall_seconds_total - type: counter - help: 'Stall time in seconds per database and I/O operation.' - key_labels: - - db - value_label: operation - values: - - read - - write - query_ref: mssql_io_stall - - # - # Collected from sys.dm_os_process_memory - # - - metric_name: mssql_resident_memory_bytes - type: gauge - help: 'SQL Server resident memory size (AKA working set).' - values: [resident_memory_bytes] - query_ref: mssql_process_memory - - - metric_name: mssql_virtual_memory_bytes - type: gauge - help: 'SQL Server committed virtual memory size.' - values: [virtual_memory_bytes] - query_ref: mssql_process_memory - - - metric_name: mssql_available_commit_memory_bytes - type: gauge - help: 'SQL Server available to be committed memory size.' - values: [available_commit_limit_bytes] - query_ref: mssql_process_memory - - - metric_name: mssql_memory_utilization_percentage - type: gauge - help: 'The percentage of committed memory that is in the working set.' 
- values: [memory_utilization_percentage] - query_ref: mssql_process_memory - - - metric_name: mssql_page_fault_count_total - type: counter - help: 'The number of page faults that were incurred by the SQL Server process.' - values: [page_fault_count] - query_ref: mssql_process_memory - - # - # Collected from sys.dm_os_sys_info - # - - metric_name: mssql_server_total_memory_bytes - type: gauge - help: 'SQL Server committed memory in the memory manager.' - values: [committed_memory_bytes] - query_ref: mssql_os_sys_info - - - metric_name: mssql_server_target_memory_bytes - type: gauge - help: 'SQL Server target committed memory set for the memory manager.' - values: [committed_memory_target_bytes] - query_ref: mssql_os_sys_info - - # - # Collected from sys.dm_os_sys_memory - # - - metric_name: mssql_os_memory - type: gauge - help: 'OS physical memory, used and available.' - value_label: 'state' - values: [used, available] - query: | - SELECT - (total_physical_memory_kb - available_physical_memory_kb) * 1024 AS used, - available_physical_memory_kb * 1024 AS available - FROM sys.dm_os_sys_memory - - metric_name: mssql_os_page_file - type: gauge - help: 'OS page file, used and available.' - value_label: 'state' - values: [used, available] - query: | - SELECT - (total_page_file_kb - available_page_file_kb) * 1024 AS used, - available_page_file_kb * 1024 AS available - FROM sys.dm_os_sys_memory -queries: - # Populates `mssql_io_stall` and `mssql_io_stall_total` - - query_name: mssql_io_stall - query: | - SELECT - cast(DB_Name(a.database_id) as varchar) AS [db], - sum(io_stall_read_ms) / 1000.0 AS [read], - sum(io_stall_write_ms) / 1000.0 AS [write] - FROM - sys.dm_io_virtual_file_stats(null, null) a - INNER JOIN sys.master_files b ON a.database_id = b.database_id AND a.file_id = b.file_id - GROUP BY a.database_id - # Populates `mssql_resident_memory_bytes`, `mssql_virtual_memory_bytes`, mssql_available_commit_memory_bytes, - # and `mssql_memory_utilization_percentage`, and `mssql_page_fault_count_total` - - query_name: mssql_process_memory - query: | - SELECT - physical_memory_in_use_kb * 1024 AS resident_memory_bytes, - virtual_address_space_committed_kb * 1024 AS virtual_memory_bytes, - available_commit_limit_kb * 1024 AS available_commit_limit_bytes, - memory_utilization_percentage, - page_fault_count - FROM sys.dm_os_process_memory - # Populates `mssql_server_total_memory_bytes` and `mssql_server_target_memory_bytes`. 
- - query_name: mssql_os_sys_info
-    query: |
-      SELECT
-      committed_kb * 1024 AS committed_memory_bytes,
-      committed_target_kb * 1024 AS committed_memory_target_bytes
-      FROM sys.dm_os_sys_info
-```
diff --git a/docs/sources/static/configuration/integrations/mysqld-exporter-config.md b/docs/sources/static/configuration/integrations/mysqld-exporter-config.md
deleted file mode 100644
index 8f266787ad..0000000000
--- a/docs/sources/static/configuration/integrations/mysqld-exporter-config.md
+++ /dev/null
@@ -1,173 +0,0 @@
----
-aliases:
-- ../../../configuration/integrations/mysqld-exporter-config/
-- /docs/grafana-cloud/monitor-infrastructure/agent/static/configuration/integrations/mysqld-exporter-config/
-- /docs/grafana-cloud/send-data/agent/static/configuration/integrations/mysqld-exporter-config/
-canonical: https://grafana.com/docs/agent/latest/static/configuration/integrations/mysqld-exporter-config/
-description: Learn about mysqld_exporter_config
-title: mysqld_exporter_config
----
-
-# mysqld_exporter_config
-
-The `mysqld_exporter_config` block configures the `mysqld_exporter` integration,
-which is an embedded version of
-[`mysqld_exporter`](https://github.com/prometheus/mysqld_exporter)
-and allows for the collection of metrics from MySQL servers.
-
-Note that currently, an Agent can only collect metrics from a single MySQL
-server. If you want to collect metrics from multiple servers, run multiple
-Agents and add labels using `relabel_configs` to differentiate between the MySQL
-servers:
-
-```yaml
-mysqld_exporter:
-  enabled: true
-  data_source_name: root@(server-a:3306)/
-  relabel_configs:
-  - source_labels: [__address__]
-    target_label: instance
-    replacement: server-a
-```
-
-We strongly recommend that you configure a separate user for the Agent, and give it only the strictly mandatory
-security privileges necessary for monitoring your node, as per the [official documentation](https://github.com/prometheus/mysqld_exporter#required-grants).
-
-Full reference of options:
-
-```yaml
-  # Enables the mysqld_exporter integration, allowing the Agent to collect
-  # metrics from a MySQL server.
-  [enabled: | default = false]
-
-  # Sets an explicit value for the instance label when the integration is
-  # self-scraped. Overrides inferred values.
-  #
-  # The default value for this integration is a truncated version of the
-  # connection DSN, containing only the server and db name. (Credentials
-  # are not included.)
-  [instance: ]
-
-  # Automatically collect metrics from this integration. If disabled,
-  # the mysqld_exporter integration will be run but not scraped and thus not
-  # remote-written. Metrics for the integration will be exposed at
-  # /integrations/mysqld_exporter/metrics and can be scraped by an external
-  # process.
-  [scrape_integration: | default = ]
-
-  # How often should the metrics be collected? Defaults to
-  # prometheus.global.scrape_interval.
-  [scrape_interval: | default = ]
-
-  # The timeout before considering the scrape a failure. Defaults to
-  # prometheus.global.scrape_timeout.
-  [scrape_timeout: | default = ]
-
-  # Allows for relabeling labels on the target.
-  relabel_configs:
-  [- ... ]
-
-  # Relabel metrics coming from the integration, allowing you to drop series
-  # from the integration that you don't care about.
-  metric_relabel_configs:
-  [ - ... ]
-
-  # How frequent to truncate the WAL for this integration.
-  [wal_truncate_frequency: | default = "60m"]
-
-  # Data Source Name specifies the MySQL server to connect to.
This is REQUIRED
-  # but may also be specified by the MYSQLD_EXPORTER_DATA_SOURCE_NAME
-  # environment variable. If neither is set, the integration will fail to
-  # start.
-  #
-  # The format of this is specified here: https://github.com/go-sql-driver/mysql#dsn-data-source-name
-  #
-  # A working example value for a server with no required password
-  # authentication is: "root@(localhost:3306)/"
-  data_source_name:
-
-  # A list of collector names to enable on top of the default set.
-  enable_collectors:
-  [ - ]
-  # A list of collector names to disable from the default set.
-  disable_collectors:
-  [ - ]
-  # A list of collectors to run. Fully overrides the default set.
-  set_collectors:
-  [ - ]
-
-  # Set a lock_wait_timeout on the connection to avoid long metadata locking.
-  [lock_wait_timeout: | default = 2]
-  # Add a log_slow_filter to avoid slow query logging of scrapes. NOT supported
-  # by Oracle MySQL.
-  [log_slow_filter: | default = false]
-
-  ## Collector-specific options
-
-  # Minimum time a thread must be in each state to be counted.
-  [info_schema_processlist_min_time: | default = 0]
-  # Enable collecting the number of processes by user.
-  [info_schema_processlist_processes_by_user: | default = true]
-  # Enable collecting the number of processes by host.
-  [info_schema_processlist_processes_by_host: | default = true]
-  # The list of databases to collect table stats for. * for all
-  [info_schema_tables_databases: | default = "*"]
-  # Limit the number of events statements digests by response time.
-  [perf_schema_eventsstatements_limit: | default = 250]
-  # Limit how old the 'last_seen' events statements can be, in seconds.
-  [perf_schema_eventsstatements_time_limit: | default = 86400]
-  # Maximum length of the normalized statement text.
-  [perf_schema_eventsstatements_digtext_text_limit: | default = 120]
-  # Regex file_name filter for performance_schema.file_summary_by_instance
-  [perf_schema_file_instances_filter: | default = ".*"]
-  # Remove path prefix in performance_schema.file_summary_by_instance
-  [perf_schema_file_instances_remove_prefix: | default = "/var/lib/mysql"]
-  # Remove instrument prefix in performance_schema.memory_summary_global_by_event_name
-  [perf_schema_memory_events_remove_prefix: | default = "memory/"]
-  # Database from where to collect heartbeat data.
-  [heartbeat_database: | default = "heartbeat"]
-  # Table from where to collect heartbeat data.
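  # As an illustrative sketch (assuming pt-heartbeat writes to the default
  # heartbeat.heartbeat table), the heartbeat collector can be enabled with:
  #
  #   enable_collectors:
  #   - heartbeat
  #   heartbeat_database: heartbeat
  #   heartbeat_table: heartbeat
  #   heartbeat_utc: true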
- [heartbeat_table: | default = "heartbeat"] - # Use UTC for timestamps of the current server (`pt-heartbeat` is called with `--utc`) - [heartbeat_utc: | default = false] - # Enable collecting user privileges from mysql.user - [mysql_user_privileges: | default = false] -``` - -The full list of collectors that are supported for `mysqld_exporter` is: - -| Name | Description | Enabled by default | -| ------------------------------------------------ | ----------- | ------------------ | -| auto_increment.columns | Collect auto_increment columns and max values from information_schema | no | -| binlog_size | Collect the current size of all registered binlog files | no | -| engine_innodb_status | Collect from SHOW ENGINE INNODB STATUS | no | -| engine_tokudb_status | Collect from SHOW ENGINE TOKUDB STATUS | no | -| global_status | Collect from SHOW GLOBAL STATUS | yes | -| global_variables | Collect from SHOW GLOBAL VARIABLES | yes | -| heartbeat | Collect from heartbeat | no | -| info_schema.clientstats | If running with userstat=1, enable to collect client statistics | no | -| info_schema.innodb_cmpmem | Collect metrics from information_schema.innodb_cmpmem | yes | -| info_schema.innodb_metrics | Collect metrics from information_schema.innodb_metrics | yes | -| info_schema.innodb_tablespaces | Collect metrics from information_schema.innodb_sys_tablespaces | no | -| info_schema.processlist | Collect current thread state counts from the information_schema.processlist | no | -| info_schema.query_response_time | Collect query response time distribution if query_response_time_stats is ON | yes | -| info_schema.replica_host | Collect metrics from information_schema.replica_host_status | no | -| info_schema.schemastats | If running with userstat=1, enable to collect schema statistics | no | -| info_schema.tables | Collect metrics from information_schema.tables | no | -| info_schema.tablestats | If running with userstat=1, enable to collect table statistics | no | -| info_schema.userstats | If running with userstat=1, enable to collect user statistics | no | -| mysql.user | Collect data from mysql.user | no | -| perf_schema.eventsstatements | Collect metrics from performance_schema.events_statements_summary_by_digest | no | -| perf_schema.eventsstatementssum | Collect metrics of grand sums from performance_schema.events_statements_summary_by_digest | no | -| perf_schema.eventswaits | Collect metrics from performance_schema.events_waits_summary_global_by_event_name | no | -| perf_schema.file_events | Collect metrics from performance_schema.file_summary_by_event_name | no | -| perf_schema.file_instances | Collect metrics from performance_schema.file_summary_by_instance | no | -| perf_schema.indexiowaits | Collect metrics from performance_schema.table_io_waits_summary_by_index_usage | no | -| perf_schema.memory_events | Collect metrics from performance_schema.memory_summary_global_by_event_name |no | -| perf_schema.replication_applier_status_by_worker | Collect metrics from performance_schema.replication_applier_status_by_worker | no | -| perf_schema.replication_group_member_stats | Collect metrics from performance_schema.replication_group_member_stats | no | -| perf_schema.replication_group_members | Collect metrics from performance_schema.replication_group_members | no | -| perf_schema.tableiowaits | Collect metrics from performance_schema.table_io_waits_summary_by_table | no | -| perf_schema.tablelocks | Collect metrics from performance_schema.table_lock_waits_summary_by_table | no | -| slave_hosts | 
Scrape information from 'SHOW SLAVE HOSTS' | no |
-| slave_status | Scrape information from SHOW SLAVE STATUS | yes |
diff --git a/docs/sources/static/configuration/integrations/node-exporter-config.md b/docs/sources/static/configuration/integrations/node-exporter-config.md
deleted file mode 100644
index 9919464056..0000000000
--- a/docs/sources/static/configuration/integrations/node-exporter-config.md
+++ /dev/null
@@ -1,435 +0,0 @@
----
-aliases:
-- ../../../configuration/integrations/node-exporter-config/
-- /docs/grafana-cloud/monitor-infrastructure/agent/static/configuration/integrations/node-exporter-config/
-- /docs/grafana-cloud/send-data/agent/static/configuration/integrations/node-exporter-config/
-canonical: https://grafana.com/docs/agent/latest/static/configuration/integrations/node-exporter-config/
-description: Learn about node_exporter_config
-title: node_exporter_config
----
-
-# node_exporter_config
-
-The `node_exporter_config` block configures the `node_exporter` integration,
-which is an embedded version of
-[`node_exporter`](https://github.com/prometheus/node_exporter)
-and allows for collecting metrics from the UNIX system that `node_exporter` is
-running on. It provides a significant number of collectors that are responsible
-for monitoring various aspects of the host system.
-
-Note that if running the Agent in a container, you will need to bind mount
-folders from the host system so the integration can monitor them. You can use
-the example below, making sure to replace `/path/to/config.yaml` with a path on
-your host machine where an Agent configuration file is located:
-
-```
-docker run \
-  --net="host" \
-  --pid="host" \
-  --cap-add=SYS_TIME \
-  -v "/:/host/root:ro,rslave" \
-  -v "/sys:/host/sys:ro,rslave" \
-  -v "/proc:/host/proc:ro,rslave" \
-  -v /tmp/agent:/etc/agent \
-  -v /path/to/config.yaml:/etc/agent-config/agent.yaml \
-  grafana/agent:{{< param "AGENT_RELEASE" >}} \
-  --config.file=/etc/agent-config/agent.yaml
-```
-
-Use this configuration file for testing out `node_exporter` support, replacing
-the `remote_write` settings with settings appropriate for you:
-
-```yaml
-server:
-  log_level: info
-
-metrics:
-  wal_directory: /tmp/agent
-  global:
-    scrape_interval: 60s
-    remote_write:
-    - url: https://prometheus-us-central1.grafana.net/api/prom/push
-      basic_auth:
-        username: user-id
-        password: api-token
-
-integrations:
-  node_exporter:
-    enabled: true
-    rootfs_path: /host/root
-    sysfs_path: /host/sys
-    procfs_path: /host/proc
-    udev_data_path: /host/root/run/udev/data
-```
-
-For running on Kubernetes, make sure to set the equivalent mounts and capabilities
-there as well:
-
-```yaml
-apiVersion: v1
-kind: Pod
-metadata:
-  name: agent
-spec:
-  containers:
-  - image: grafana/agent:{{< param "AGENT_RELEASE" >}}
-    name: agent
-    args:
-    - --config.file=/etc/agent-config/agent.yaml
-    securityContext:
-      capabilities:
-        add: ["SYS_TIME"]
-      privileged: true
-      runAsUser: 0
-    volumeMounts:
-    - name: rootfs
-      mountPath: /host/root
-      readOnly: true
-    - name: sysfs
-      mountPath: /host/sys
-      readOnly: true
-    - name: procfs
-      mountPath: /host/proc
-      readOnly: true
-  hostPID: true
-  hostNetwork: true
-  dnsPolicy: ClusterFirstWithHostNet
-  volumes:
-  - name: rootfs
-    hostPath:
-      path: /
-  - name: sysfs
-    hostPath:
-      path: /sys
-  - name: procfs
-    hostPath:
-      path: /proc
-```
-
-The manifest and Tanka configs provided by this repository do not have the
-mounts or capabilities required for running this integration.
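-
-If you only want a small, explicit set of collectors, a minimal sketch looks
-like the following (the collector names are taken from the table below; which
-ones you need depends on your host):
-
-```yaml
-integrations:
-  node_exporter:
-    enabled: true
-    # Run only these collectors; all others are disabled.
-    set_collectors:
-      - cpu
-      - meminfo
-      - filesystem
-      - netdev
-```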
- -Some collectors only work on specific operating systems, documented in the -table below. Enabling a collector that is not supported by the operating system -the Agent is running on is a no-op. - -| Name | Description | OS | Enabled by default | -| ---------------- | ----------- | -- | ------------------ | -| arp | Exposes ARP statistics from /proc/net/arp. | Linux | yes | -| bcache | Exposes bcache statistics from /sys/fs/bcache. | Linux | yes | -| bonding | Exposes the number of configured and active slaves of Linux bonding interfaces. | Linux | yes | -| boottime | Exposes system boot time derived from the kern.boottime sysctl. | Darwin, Dragonfly, FreeBSD, NetBSD, OpenBSD, Solaris | yes | -| btrfs | Exposes statistics on btrfs. | Linux | yes | -| buddyinfo | Exposes statistics of memory fragments as reported by /proc/buddyinfo. | Linux | no | -| cgroups | Exposes number of active and enabled cgroups. | Linux | no | -| conntrack | Shows conntrack statistics (does nothing if no /proc/sys/net/netfilter/ present). | Linux | yes | -| cpu | Exposes CPU statistics. | Darwin, Dragonfly, FreeBSD, Linux, Solaris, NetBSD | yes | -| cpufreq | Exposes CPU frequency statistics. | Linux, Solaris | yes | -| devstat | Exposes device statistics. | Dragonfly, FreeBSD | no | -| diskstats | Exposes disk I/O statistics. | Darwin, Linux, OpenBSD | yes | -| dmi | Exposes DMI information. | Linux | yes | -| drbd | Exposes Distributed Replicated Block Device statistics (to version 8.4). | Linux | no | -| drm | Exposes GPU card info from /sys/class/drm/card?/device | Linux | no | -| edac | Exposes error detection and correction statistics. | Linux | yes | -| entropy | Exposes available entropy. | Linux | yes | -| ethtool | Exposes ethtool stats | Linux | no | -| exec | Exposes execution statistics. | Dragonfly, FreeBSD | yes | -| fibrechannel | Exposes FibreChannel statistics. | Linux | yes | -| filefd | Exposes file descriptor statistics from /proc/sys/fs/file-nr. | Linux | yes | -| filesystem | Exposes filesystem statistics, such as disk space used. | Darwin, Dragonfly, FreeBSD, Linux, OpenBSD | yes | -| hwmon | Exposes hardware monitoring and sensor data from /sys/class/hwmon. | Linux | yes | -| infiniband | Exposes network statistics specific to InfiniBand and Intel OmniPath configurations. | Linux | yes | -| interrupts | Exposes detailed interrupts statistics. | Linux, OpenBSD | no | -| ipvs | Exposes IPVS status from /proc/net/ip_vs and stats from /proc/net/ip_vs_stats. | Linux | yes | -| ksmd | Exposes kernel and system statistics from /sys/kernel/mm/ksm. | Linux | no | -| lnstat | Exposes Linux network cache stats | Linux | no | -| loadavg | Exposes load average. | Darwin, Dragonfly, FreeBSD, Linux, NetBSD, OpenBSD, Solaris | yes | -| logind | Exposes session counts from logind. | Linux | no | -| mdadm | Exposes statistics about devices in /proc/mdstat (does nothing if no /proc/mdstat present). | Linux | yes | -| meminfo | Exposes memory statistics. | Darwin, Dragonfly, FreeBSD, Linux, OpenBSD, NetBSD | yes | -| meminfo_numa | Exposes memory statistics from /proc/meminfo_numa. | Linux | no | -| mountstats | Exposes filesystem statistics from /proc/self/mountstats. Exposes detailed NFS client statistics. | Linux | no | -| netclass | Exposes network interface info from /sys/class/net. | Linux | yes | -| netisr | Exposes netisr statistics. | FreeBSD | yes | -| netdev | Exposes network interface statistics such as bytes transferred. 
| Darwin, Dragonfly, FreeBSD, Linux, OpenBSD | yes | -| netstat | Exposes network statistics from /proc/net/netstat. This is the same information as netstat -s. | Linux | yes | -| network_route | Exposes network route statistics. | Linux | no | -| nfs | Exposes NFS client statistics from /proc/net/rpc/nfs. This is the same information as nfsstat -c. | Linux | yes | -| nfsd | Exposes NFS kernel server statistics from /proc/net/rpc/nfsd. This is the same information as nfsstat -s. | Linux | yes | -| ntp | Exposes local NTP daemon health to check time. | any | no | -| nvme | Exposes NVMe statistics. | Linux | yes | -| os | Exposes os-release information. | Linux | yes | -| perf | Exposes perf based metrics (Warning: Metrics are dependent on kernel configuration and settings). | Linux | no | -| powersupplyclass | Collects information on power supplies. | any | yes | -| pressure | Exposes pressure stall statistics from /proc/pressure/. | Linux (kernel 4.20+ and/or CONFIG_PSI) | yes | -| processes | Exposes aggregate process statistics from /proc. | Linux | no | -| qdisc | Exposes queuing discipline statistics. | Linux | no | -| rapl | Exposes various statistics from /sys/class/powercap. | Linux | yes | -| runit | Exposes service status from runit. | any | no | -| schedstat | Exposes task scheduler statistics from /proc/schedstat. | Linux | yes | -| selinux | Exposes SELinux statistics. | Linux | yes | -| slabinfo | Exposes slab statistics from `/proc/slabinfo`. | Linux | no | -| softirqs | Exposes detailed softirq statistics from `/proc/softirqs`. | Linux | no | -| sockstat | Exposes various statistics from /proc/net/sockstat. | Linux | yes | -| softnet | Exposes statistics from /proc/net/softnet_stat. | Linux | yes | -| stat | Exposes various statistics from /proc/stat. This includes boot time, forks and interrupts. | Linux | yes | -| supervisord | Exposes service status from supervisord. | any | no | -| sysctl | Expose sysctl values from `/proc/sys`. | Linux | no | -| systemd | Exposes service and system status from systemd. | Linux | no | -| tapestats | Exposes tape device stats. | Linux | yes | -| tcpstat | Exposes TCP connection status information from /proc/net/tcp and /proc/net/tcp6. (Warning: the current version has potential performance issues in high load situations). | Linux | no | -| textfile | Collects metrics from files in a directory matching the filename pattern *.prom. The files must be using the text format defined here: https://prometheus.io/docs/instrumenting/exposition_formats/ | any | yes | -| thermal | Exposes thermal statistics. | Darwin | yes | -| thermal_zone | Exposes thermal zone & cooling device statistics from /sys/class/thermal. | Linux | yes | -| time | Exposes the current system time. | any | yes | -| timex | Exposes selected adjtimex(2) system call stats. | Linux | yes | -| udp_queues | Exposes UDP total lengths of the rx_queue and tx_queue from /proc/net/udp and /proc/net/udp6. | Linux | yes | -| uname | Exposes system information as provided by the uname system call. | Darwin, FreeBSD, Linux, OpenBSD, NetBSD | yes | -| vmstat | Exposes statistics from /proc/vmstat. | Linux | yes | -| wifi | Exposes WiFi device and station statistics. | Linux | no | -| xfs | Exposes XFS runtime statistics. | Linux (kernel 4.4+) | yes | -| zfs | Exposes ZFS performance statistics. | Linux, Solaris | yes | -| zoneinfo | Exposes zone stats. 
| Linux | no |
-
-```yaml
-  # Enables the node_exporter integration, allowing the Agent to automatically
-  # collect system metrics from the host UNIX system.
-  [enabled: | default = false]
-
-  # Sets an explicit value for the instance label when the integration is
-  # self-scraped. Overrides inferred values.
-  #
-  # The default value for this integration is inferred from the agent hostname
-  # and HTTP listen port, delimited by a colon.
-  [instance: ]
-
-  # Automatically collect metrics from this integration. If disabled,
-  # the node_exporter integration will be run but not scraped and thus not remote-written. Metrics for the
-  # integration will be exposed at /integrations/node_exporter/metrics and can
-  # be scraped by an external process.
-  [scrape_integration: | default = ]
-
-  # How often should the metrics be collected? Defaults to
-  # prometheus.global.scrape_interval.
-  [scrape_interval: | default = ]
-
-  # The timeout before considering the scrape a failure. Defaults to
-  # prometheus.global.scrape_timeout.
-  [scrape_timeout: | default = ]
-
-  # Allows for relabeling labels on the target.
-  relabel_configs:
-  [- ... ]
-
-  # Relabel metrics coming from the integration, allowing you to drop series
-  # from the integration that you don't care about.
-  metric_relabel_configs:
-  [ - ... ]
-
-  # How frequent to truncate the WAL for this integration.
-  [wal_truncate_frequency: | default = "60m"]
-
-  # Monitor the exporter itself and include those metrics in the results.
-  [include_exporter_metrics: | default = false]
-
-  # Optionally defines the list of enabled-by-default collectors.
-  # Anything not provided in the list below will be disabled by default,
-  # but the list requires at least one element to be treated as defined.
-  #
-  # This is useful if you have a very explicit set of collectors you wish
-  # to run.
-  set_collectors:
-  - []
-
-  # Additional collectors to enable on top of the default set of enabled
-  # collectors or on top of the list provided by set_collectors.
-  #
-  # This is useful if you have a few collectors you wish to run that are
-  # not enabled by default, but do not want to explicitly provide an entire
-  # list through set_collectors.
-  enable_collectors:
-  - []
-
-  # Additional collectors to disable on top of the default set of disabled
-  # collectors. Takes precedence over enable_collectors.
-  #
-  # This is useful if you have a few collectors you do not want to run that
-  # are enabled by default, but do not want to explicitly provide an entire
-  # list through set_collectors.
-  disable_collectors:
-  - []
-
-  # procfs mountpoint.
-  [procfs_path: | default = "/proc"]
-
-  # sysfs mountpoint.
-  [sysfs_path: | default = "/sys"]
-
-  # rootfs mountpoint. If running in docker, the root filesystem of the host
-  # machine should be mounted and this value should be changed to the mount
-  # directory.
-  [rootfs_path: | default = "/"]
-
-  # udev data path needed for diskstats from Node exporter. When running
-  # in Kubernetes it should be set to /host/root/run/udev/data.
-  [udev_data_path: | default = "/run/udev/data"]
-
-  # Expose expensive bcache priority stats.
-  [enable_bcache_priority_stats: ]
-
-  # Regexp of `bugs` field in cpu info to filter.
-  [cpu_bugs_include: ]
-
-  # Enable the node_cpu_guest_seconds_total metric.
-  [enable_cpu_guest_seconds_metric: | default = true]
-
-  # Enable the cpu_info metric for the cpu collector.
-  [enable_cpu_info_metric: | default = true]
-
-  # Regexp of `flags` field in cpu info to filter.
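  # (As an illustrative, hypothetical value: cpu_flags_include: "^(aes|avx2)$"
  # keeps only the aes and avx2 flags.)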
- [cpu_flags_include: ]
-
-  # Regexp of devices to ignore for diskstats.
-  [diskstats_device_exclude: | default = "^(ram|loop|fd|(h|s|v|xv)d[a-z]|nvme\\d+n\\d+p)\\d+$"]
-
-  # Regexp of devices to include for diskstats. If set, the diskstats_device_exclude field is ignored.
-  [diskstats_device_include: ]
-
-  # Regexp of ethtool devices to exclude (mutually exclusive with ethtool_device_include)
-  [ethtool_device_exclude: ]
-
-  # Regexp of ethtool devices to include (mutually exclusive with ethtool_device_exclude)
-  [ethtool_device_include: ]
-
-  # Regexp of ethtool stats to include.
-  [ethtool_metrics_include: | default = ".*"]
-
-  # Regexp of mount points to ignore for filesystem collector.
-  [filesystem_mount_points_exclude: | default = "^/(dev|proc|sys|var/lib/docker/.+)($|/)"]
-
-  # Regexp of filesystem types to ignore for filesystem collector.
-  [filesystem_fs_types_exclude: | default = "^(autofs|binfmt_misc|bpf|cgroup2?|configfs|debugfs|devpts|devtmpfs|fusectl|hugetlbfs|iso9660|mqueue|nsfs|overlay|proc|procfs|pstore|rpc_pipefs|securityfs|selinuxfs|squashfs|sysfs|tracefs)$"]
-
-  # How long to wait for a mount to respond before marking it as stale.
-  [filesystem_mount_timeout: | default = "5s"]
-
-  # Array of IPVS backend stats labels.
-  #
-  # The default is [local_address, local_port, remote_address, remote_port, proto, local_mark].
-  ipvs_backend_labels:
-  [- ]
-
-  # NTP server to use for ntp collector
-  [ntp_server: | default = "127.0.0.1"]
-
-  # NTP protocol version
-  [ntp_protocol_version: | default = 4]
-
-  # Certify that the server address is not a public ntp server.
-  [ntp_server_is_local: | default = false]
-
-  # IP TTL to use while sending NTP query.
-  [ntp_ip_ttl: | default = 1]
-
-  # Max accumulated distance to the root.
-  [ntp_max_distance: | default = "3466080us"]
-
-  # Offset between local clock and local ntpd time to tolerate.
-  [ntp_local_offset_tolerance: | default = "1ms"]
-
-  # Regexp of net devices to ignore for netclass collector.
-  [netclass_ignored_devices: | default = "^$"]
-
-  # Ignore net devices with invalid speed values. This will default to true in
-  # node_exporter 2.0.
-  [netclass_ignore_invalid_speed_device: | default = false]
-
-  # Enable collecting address-info for every device.
-  [netdev_address_info: ]
-
-  # Regexp of net devices to exclude (mutually exclusive with include)
-  [netdev_device_exclude: | default = ""]
-
-  # Regexp of net devices to include (mutually exclusive with exclude)
-  [netdev_device_include: | default = ""]
-
-  # Regexp of fields to return for netstat collector.
-  [netstat_fields: | default = "^(.*_(InErrors|InErrs)|Ip_Forwarding|Ip(6|Ext)_(InOctets|OutOctets)|Icmp6?_(InMsgs|OutMsgs)|TcpExt_(Listen.*|Syncookies.*|TCPSynRetrans|TCPTimeouts)|Tcp_(ActiveOpens|InSegs|OutSegs|OutRsts|PassiveOpens|RetransSegs|CurrEstab)|Udp6?_(InDatagrams|OutDatagrams|NoPorts|RcvbufErrors|SndbufErrors))$"]
-
-  # List of CPUs from which perf metrics should be collected.
-  [perf_cpus: | default = ""]
-
-  # Array of perf tracepoints that should be collected.
-  perf_tracepoint:
-  [- ]
-
-  # Disable perf hardware profilers.
-  [perf_disable_hardware_profilers: | default = false]
-
-  # Perf hardware profilers that should be collected.
-  perf_hardware_profilers:
-  [- ]
-
-  # Disable perf software profilers.
-  [perf_disable_software_profilers: | default = false]
-
-  # Perf software profilers that should be collected.
-  perf_software_profilers:
-  [- ]
-
-  # Disable perf cache profilers.
- [perf_disable_cache_profilers: | default = false] - - # Perf cache profilers that should be collected. - perf_cache_profilers: - [- ] - - # Regexp of power supplies to ignore for the powersupplyclass collector. - [powersupply_ignored_supplies: | default = "^$"] - - # Path to runit service directory. - [runit_service_dir: | default = "/etc/service"] - - # XML RPC endpoint for the supervisord collector. - # - # Setting SUPERVISORD_URL in the environment will override the default value. - # An explicit value in the YAML config takes precedence over the environment - # variable. - [supervisord_url: | default = "http://localhost:9001/RPC2"] - - # Numeric sysctl values to expose. - # For sysctl with multiple numeric values, - # an optional mapping can be given to expose each value as its own metric. - sysctl_include: - [- ] - - # String sysctl values to expose. - sysctl_include_info: - [- ] - - # Regexp of systemd units to include. Units must both match include and not - # match exclude to be collected. - [systemd_unit_include: | default = ".+"] - - # Regexp of systemd units to exclude. Units must both match include and not - # match exclude to be collected. - [systemd_unit_exclude: | default = ".+\\.(automount|device|mount|scope|slice)"] - - # Enables service unit tasks metrics unit_tasks_current and unit_tasks_max - [systemd_enable_task_metrics: | default = false] - - # Enables service unit metric service_restart_total - [systemd_enable_restarts_metrics: | default = false] - - # Enables service unit metric unit_start_time_seconds - [systemd_enable_start_time_metrics: | default = false] - - # Regexp of tapestats devices to ignore. - [tapestats_ignored_devices: | default = "^$"] - - # Directory to read *.prom files from for the textfile collector. - [textfile_directory: | default = ""] - - # Regexp of fields to return for the vmstat collector. - [vmstat_fields: | default = "^(oom_kill|pgpg|pswp|pg.*fault).*"] -``` diff --git a/docs/sources/static/configuration/integrations/oracledb-config.md b/docs/sources/static/configuration/integrations/oracledb-config.md deleted file mode 100644 index 2937c9f4d2..0000000000 --- a/docs/sources/static/configuration/integrations/oracledb-config.md +++ /dev/null @@ -1,90 +0,0 @@ ---- -aliases: -- ../../../configuration/integrations/oracledb-config/ -- /docs/grafana-cloud/monitor-infrastructure/agent/static/configuration/integrations/oracledb-config/ -- /docs/grafana-cloud/send-data/agent/static/configuration/integrations/oracledb-config/ -canonical: https://grafana.com/docs/agent/latest/static/configuration/integrations/oracledb-config/ -description: Learn about oracledb_config -title: oracledb_config ---- - -# oracledb_config - -The `oracledb_config` block configures the `oracledb` integration, -which is an embedded version of a forked version of the -[`oracledb_exporter`](https://github.com/observiq/oracledb_exporter). This allows the collection of third party [OracleDB](https://www.oracle.com/database/) metrics. - -Full reference of options: - -```yaml - # Enables the oracledb integration, allowing the Agent to automatically - # collect metrics for the specified oracledb instance. - [enabled: | default = false] - - # Sets an explicit value for the instance label when the integration is - # self-scraped. Overrides inferred values. - # - # The default value for this integration is the configured host:port of the connection string. - [instance: ] - - # Automatically collect metrics from this integration. 
If disabled,
-  # the oracledb integration is run but not scraped and thus not
-  # remote-written. Metrics for the integration are exposed at
-  # /integrations/oracledb/metrics and can be scraped by an external
-  # process.
-  [scrape_integration: | default = ]
-
-  # How often should the metrics be collected? Defaults to
-  # prometheus.global.scrape_interval.
-  [scrape_interval: | default = ]
-
-  # The timeout before considering the scrape a failure. Defaults to
-  # prometheus.global.scrape_timeout.
-  [scrape_timeout: | default = ]
-
-  # Allows for relabeling labels on the target.
-  relabel_configs:
-  [- ... ]
-
-  # Relabel metrics coming from the integration, lets you drop series
-  # that you don't care about from the integration.
-  metric_relabel_configs:
-  [ - ... ]
-
-  # How frequently the WAL is truncated for this integration.
-  [wal_truncate_frequency: | default = "60m"]
-
-  #
-  # Exporter-specific configuration options
-  #
-
-  # The connection string used to connect to the OracleDB instance in the format
-  # of oracle://:@:/.
-  # i.e. "oracle://user:password@localhost:1521/orcl.localnet"
-  [connection_string: ]
-
-  # The maximum number of connections the exporter is allowed to keep idle.
-  [max_idle_connections: ]
-  # The maximum number of connections the exporter is allowed to have open.
-  [max_open_connections: ]
-
-  # The query timeout, in seconds, used when the exporter queries the
-  # OracleDB instance.
-  [query_timeout: | default = 5]
-```
-
-## Configuration example
-
-```yaml
-integrations:
-  oracledb:
-    enabled: true
-    connection_string: oracle://user:password@localhost:1521/orcl.localnet
-    scrape_interval: 1m
-    scrape_timeout: 1m
-    scrape_integration: true
-metrics:
-  wal_directory: /tmp/grafana-agent-wal
-server:
-  log_level: debug
-```
diff --git a/docs/sources/static/configuration/integrations/postgres-exporter-config.md b/docs/sources/static/configuration/integrations/postgres-exporter-config.md
deleted file mode 100644
index 1bd2354c9e..0000000000
--- a/docs/sources/static/configuration/integrations/postgres-exporter-config.md
+++ /dev/null
@@ -1,110 +0,0 @@
----
-aliases:
-- ../../../configuration/integrations/postgres-exporter-config/
-- /docs/grafana-cloud/monitor-infrastructure/agent/static/configuration/integrations/postgres-exporter-config/
-- /docs/grafana-cloud/send-data/agent/static/configuration/integrations/postgres-exporter-config/
-canonical: https://grafana.com/docs/agent/latest/static/configuration/integrations/postgres-exporter-config/
-description: Learn about postgres_exporter_config
-title: postgres_exporter_config
----
-
-# postgres_exporter_config
-
-The `postgres_exporter_config` block configures the `postgres_exporter`
-integration, which is an embedded version of
-[`postgres_exporter`](https://github.com/prometheus-community/postgres_exporter). This
-allows for the collection of metrics from Postgres servers.
-
-We strongly recommend that you configure a separate user for the Agent, and give it only the strictly mandatory
-security privileges necessary for monitoring your node, as per the [official documentation](https://github.com/prometheus-community/postgres_exporter#running-as-non-superuser).
-
-Full reference of options:
-
-```yaml
-  # Enables the postgres_exporter integration, allowing the Agent to automatically
-  # collect system metrics from the configured postgres server address
-  [enabled: | default = false]
-
-  # Sets an explicit value for the instance label when the integration is
-  # self-scraped.
Overrides inferred values.
-  #
-  # The default value for this integration is inferred from a truncated version of
-  # the first DSN in data_source_names. The truncated DSN includes the hostname
-  # and database name (if used) of the server, but does not include any user
-  # information.
-  #
-  # If data_source_names contains more than one entry, the integration will fail to
-  # load and a value for instance must be manually provided.
-  [instance: ]
-
-  # Automatically collect metrics from this integration. If disabled,
-  # the postgres_exporter integration will be run but not scraped and thus not
-  # remote-written. Metrics for the integration will be exposed at
-  # /integrations/postgres_exporter/metrics and can be scraped by an external
-  # process.
-  [scrape_integration: | default = ]
-
-  # How often should the metrics be collected? Defaults to
-  # prometheus.global.scrape_interval.
-  [scrape_interval: | default = ]
-
-  # The timeout before considering the scrape a failure. Defaults to
-  # prometheus.global.scrape_timeout.
-  [scrape_timeout: | default = ]
-
-  # Allows for relabeling labels on the target.
-  relabel_configs:
-  [- ... ]
-
-  # Relabel metrics coming from the integration, allowing you to drop series
-  # from the integration that you don't care about.
-  metric_relabel_configs:
-  [ - ... ]
-
-  # How frequent to truncate the WAL for this integration.
-  [wal_truncate_frequency: | default = "60m"]
-
-  #
-  # Exporter-specific configuration options
-  #
-
-  # Data Source Names specifies the Postgres server(s) to connect to. This is
-  # REQUIRED but may also be specified by the POSTGRES_EXPORTER_DATA_SOURCE_NAME
-  # environment variable, where DSNs in the environment variable are separated by
-  # commas. If neither is set, the integration will fail to start.
-  #
-  # The format of this is specified here: https://pkg.go.dev/github.com/lib/pq#ParseURL
-  #
-  # A working example value for a server with a password is:
-  # "postgresql://username:password@localhost:5432/database?sslmode=disable"
-  #
-  # Multiple DSNs may be provided here, allowing for scraping from multiple
-  # servers.
-  data_source_names:
-  - 
-
-  # Disables collection of metrics from pg_settings.
-  [disable_settings_metrics: | default = false]
-
-  # Autodiscover databases to collect metrics from. If false, only collects
-  # metrics from databases collected from data_source_names.
-  [autodiscover_databases: | default = false]
-
-  # Excludes specific databases from being collected when autodiscover_databases
-  # is true.
-  exclude_databases:
-  [ - ]
-
-  # Includes only specific databases (excluding all others) when autodiscover_databases
-  # is true.
-  include_databases:
-  [ - ]
-
-  # Path to a YAML file containing custom queries to run. Check out
-  # postgres_exporter's queries.yaml for examples of the format:
-  # https://github.com/prometheus-community/postgres_exporter/blob/master/queries.yaml
-  [query_path: | default = ""]
-
-  # When true, only exposes metrics supplied from query_path.
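  # As an illustrative sketch (the file path is hypothetical), a custom
  # queries file pairs naturally with disable_default_metrics:
  #
  #   query_path: /etc/agent/postgres-queries.yaml
  #   disable_default_metrics: true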
- [disable_default_metrics: | default = false]
-```
diff --git a/docs/sources/static/configuration/integrations/process-exporter-config.md b/docs/sources/static/configuration/integrations/process-exporter-config.md
deleted file mode 100644
index c6e888df77..0000000000
--- a/docs/sources/static/configuration/integrations/process-exporter-config.md
+++ /dev/null
@@ -1,186 +0,0 @@
----
-aliases:
-- ../../../configuration/integrations/process-exporter-config/
-- /docs/grafana-cloud/monitor-infrastructure/agent/static/configuration/integrations/process-exporter-config/
-- /docs/grafana-cloud/send-data/agent/static/configuration/integrations/process-exporter-config/
-canonical: https://grafana.com/docs/agent/latest/static/configuration/integrations/process-exporter-config/
-description: Learn about process_exporter_config
-title: process_exporter_config
----
-
-# process_exporter_config
-
-The `process_exporter_config` block configures the `process_exporter` integration,
-which is an embedded version of
-[`process-exporter`](https://github.com/ncabatoff/process-exporter)
-and allows for the collection of metrics based on the /proc filesystem on Linux
-systems. Note that on non-Linux systems, enabling this exporter is a no-op.
-
-Note that if running the Agent in a container, you will need to bind mount
-folders from the host system so the integration can monitor them:
-
-```
-docker run \
-  -v "/proc:/proc:ro" \
-  -v /tmp/agent:/etc/agent \
-  -v /path/to/config.yaml:/etc/agent-config/agent.yaml \
-  grafana/agent:{{< param "AGENT_RELEASE" >}} \
-  --config.file=/etc/agent-config/agent.yaml
-```
-
-Replace `/path/to/config.yaml` with the appropriate path on your host system
-where an Agent config file can be found.
-
-For running on Kubernetes, make sure to set the equivalent mounts and capabilities
-there as well:
-
-```yaml
-apiVersion: v1
-kind: Pod
-metadata:
-  name: agent
-spec:
-  containers:
-  - image: grafana/agent:{{< param "AGENT_RELEASE" >}}
-    name: agent
-    args:
-    - --config.file=/etc/agent-config/agent.yaml
-    volumeMounts:
-    - name: procfs
-      mountPath: /proc
-      readOnly: true
-  volumes:
-  - name: procfs
-    hostPath:
-      path: /proc
-```
-
-The manifest and Tanka configs provided by this repository do not have the
-mounts or capabilities required for running this integration.
-
-An example config for `process_exporter_config` that tracks all processes is the
-following:
-
-```
-enabled: true
-process_names:
-- name: "{{.Comm}}"
-  cmdline:
-  - '.+'
-```
-
-Full reference of options:
-
-```yaml
-  # Enables the process_exporter integration, allowing the Agent to automatically
-  # collect system metrics from the host UNIX system.
-  [enabled: | default = false]
-
-  # Sets an explicit value for the instance label when the integration is
-  # self-scraped. Overrides inferred values.
-  #
-  # The default value for this integration is inferred from the agent hostname
-  # and HTTP listen port, delimited by a colon.
-  [instance: ]
-
-  # Automatically collect metrics from this integration. If disabled,
-  # the process_exporter integration will be run but not scraped and thus not
-  # remote-written. Metrics for the integration will be exposed at
-  # /integrations/process_exporter/metrics and can be scraped by an external
-  # process.
-  [scrape_integration: | default = ]
-
-  # How often should the metrics be collected? Defaults to
-  # prometheus.global.scrape_interval.
-  [scrape_interval: | default = ]
-
-  # The timeout before considering the scrape a failure. Defaults to
-  # prometheus.global.scrape_timeout.
-  [scrape_timeout: <duration> | default = <global_config.scrape_timeout>]
-
-  # Allows for relabeling labels on the target.
-  relabel_configs:
-    [- <relabel_config> ... ]
-
-  # Relabel metrics coming from the integration, allowing you to drop series
-  # from the integration that you don't care about.
-  metric_relabel_configs:
-    [ - <relabel_config> ... ]
-
-  # How frequently to truncate the WAL for this integration.
-  [wal_truncate_frequency: <duration> | default = "60m"]
-
-  # procfs mountpoint.
-  [procfs_path: <string> | default = "/proc"]
-
-  # If a proc is tracked, track with it any children that aren't a part of their
-  # own group.
-  [track_children: <boolean> | default = true]
-
-  # Report on per-threadname metrics as well.
-  [track_threads: <boolean> | default = true]
-
-  # Gather metrics from the smaps file, which contains proportional resident memory
-  # size.
-  [gather_smaps: <boolean> | default = true]
-
-  # Recheck process names on each scrape.
-  [recheck_on_scrape: <boolean> | default = false]
-
-  # A collection of matching rules to use for deciding which processes to
-  # monitor. Each config can match multiple processes to be tracked as a single
-  # process "group."
-  process_names:
-    [- <process_matcher_config>]
-```
-
-## process_matcher_config
-
-```yaml
-# The name to use for identifying the process group in the metric. By
-# default, it uses the basename of the executable.
-#
-# The following template variables are available:
-#
-# - {{.Comm}}: Basename of the original executable from /proc/<pid>/stat
-# - {{.ExeBase}}: Basename of the executable from argv[0]
-# - {{.ExeFull}}: Fully qualified path of the executable
-# - {{.Username}}: Username of the effective user
-# - {{.Matches}}: Map containing all regex capture groups resulting from
-#   matching a process with the cmdline rule group.
-# - {{.PID}}: PID of the process. Note that the PID is copied from the
-#   first executable found.
-# - {{.StartTime}}: The start time of the process. This is useful when combined
-#   with PID, as PIDs get reused over time.
-# - {{.Cgroups}}: The cgroups of the process, if supported (/proc/self/cgroup).
-#   This is particularly useful for identifying which container a process
-#   belongs to.
-#
-# NOTE: Using PID or StartTime is discouraged, as it is almost never what you
-# want, and is likely to result in high-cardinality metrics.
-
-[name: <string> | default = "{{.ExeBase}}"]
-
-# A list of strings that match the base executable name for a process, truncated
-# at 15 characters. It is derived from reading the second field of
-# /proc/<pid>/stat minus the parens.
-#
-# If any of the strings match, the process will be tracked.
-comm:
-  [- <string>]
-
-# A list of strings that match argv[0] for a process. If there are no slashes,
-# only the basename of argv[0] needs to match. Otherwise, the name must be an
-# exact match. For example, "postgres" may match any postgres binary, but
-# "/usr/local/bin/postgres" can only match a postgres at that exact path.
-#
-# If any of the strings match, the process will be tracked.
-exe:
-  [- <string>]
-
-# A list of regular expressions applied to the argv of the process. Each
-# regex here must match the corresponding argv for the process to be tracked.
-# The first element that is matched is argv[1].
-#
-# Regex captures are added to the .Matches map for use in the name.
-cmdline:
-  [- <string>]
-```
diff --git a/docs/sources/static/configuration/integrations/redis-exporter-config.md b/docs/sources/static/configuration/integrations/redis-exporter-config.md
deleted file mode 100644
index 392fcb359c..0000000000
--- a/docs/sources/static/configuration/integrations/redis-exporter-config.md
+++ /dev/null
@@ -1,170 +0,0 @@
----
-aliases:
-- ../../../configuration/integrations/redis-exporter-config/
-- /docs/grafana-cloud/monitor-infrastructure/agent/static/configuration/integrations/redis-exporter-config/
-- /docs/grafana-cloud/send-data/agent/static/configuration/integrations/redis-exporter-config/
-canonical: https://grafana.com/docs/agent/latest/static/configuration/integrations/redis-exporter-config/
-description: Learn about redis_exporter_config
-title: redis_exporter_config
----
-
-# redis_exporter_config
-
-The `redis_exporter_config` block configures the `redis_exporter` integration, which is an embedded version of [`redis_exporter`](https://github.com/oliver006/redis_exporter). This allows for the collection of metrics from Redis servers.
-
-Note that currently, an Agent can only collect metrics from a single Redis server. If you want to collect metrics from multiple Redis servers, you can run multiple Agents and add labels using `relabel_configs` to differentiate between the Redis servers:
-
-```yaml
-redis_exporter:
-  enabled: true
-  redis_addr: "redis-2:6379"
-  relabel_configs:
-    - source_labels: [__address__]
-      target_label: instance
-      replacement: redis-2
-```
-
-We strongly recommend that you configure a separate user for the Agent, and give it
-only the minimum security privileges necessary for monitoring your node, as described
-in the [official documentation](https://github.com/oliver006/redis_exporter#authenticating-with-redis).
-
-Full reference of options:
-
-```yaml
-  # Enables the redis_exporter integration, allowing the Agent to automatically
-  # collect metrics from the configured Redis address.
-  [enabled: <boolean> | default = false]
-
-  # Sets an explicit value for the instance label when the integration is
-  # self-scraped. Overrides inferred values.
-  #
-  # The default value for this integration is inferred from the hostname
-  # portion of redis_addr.
-  [instance: <string>]
-
-  # Automatically collect metrics from this integration. If disabled,
-  # the redis_exporter integration will be run but not scraped and thus not
-  # remote-written. Metrics for the integration will be exposed at
-  # /integrations/redis_exporter/metrics and can be scraped by an external
-  # process.
-  [scrape_integration: <boolean> | default = <integrations_config.scrape_integrations>]
-
-  # How often the metrics should be collected. Defaults to
-  # prometheus.global.scrape_interval.
-  [scrape_interval: <duration> | default = <global_config.scrape_interval>]
-
-  # The timeout before considering the scrape a failure. Defaults to
-  # prometheus.global.scrape_timeout.
-  [scrape_timeout: <duration> | default = <global_config.scrape_timeout>]
-
-  # Allows for relabeling labels on the target.
-  relabel_configs:
-    [- <relabel_config> ... ]
-
-  # Relabel metrics coming from the integration, allowing you to drop series
-  # from the integration that you don't care about.
-  metric_relabel_configs:
-    [ - <relabel_config> ... ]
-
-  # How frequently to truncate the WAL for this integration.
-  [wal_truncate_frequency: <duration> | default = "60m"]
-
-  # Monitor the exporter itself and include those metrics in the results.
-  [include_exporter_metrics: <boolean> | default = false]
-
-  #
-  # Exporter-specific configuration options
-  #
-
-  # Address of the Redis instance.
-  redis_addr: <string>
-
-  # User name to use for authentication (Redis ACL for Redis 6.0 and newer).
-  [redis_user: <string>]
-
-  # Password of the Redis instance.
-  [redis_password: <string>]
-
-  # Path of a file containing a password. If this is defined, it takes precedence
-  # over redis_password.
-  [redis_password_file: <string>]
-
-  # Path of a file containing a JSON object which maps Redis URIs [string] to passwords [string]
-  # (e.g. {"redis://localhost:6379": "sample_password"}).
-  [redis_password_map_file: <string>]
-
-  # Namespace for the metrics.
-  [namespace: <string> | default = "redis"]
-
-  # What to use for the CONFIG command.
-  [config_command: <string> | default = "CONFIG"]
-
-  # Comma-separated list of key patterns to export value and length/size for,
-  # searched for with SCAN.
-  [check_keys: <string>]
-
-  # Comma-separated list of Lua regexes for grouping keys. When unset, no key
-  # groups will be made.
-  [check_key_groups: <string>]
-
-  # Check key or key groups batch size hint for the underlying SCAN. The name is
-  # kept for backwards compatibility, but this applies to both the key and key
-  # groups batch size configuration.
-  [check_key_groups_batch_size: <int> | default = 10000]
-
-  # The maximum number of distinct key groups with the most memory utilization
-  # to present as distinct metrics per database. The leftover key groups will be
-  # aggregated in the 'overflow' bucket.
-  [max_distinct_key_groups: <int> | default = 100]
-
-  # Comma-separated list of single keys to export value and length/size for.
-  [check_single_keys: <string>]
-
-  # Comma-separated list of stream patterns to export info about streams, groups,
-  # and consumers for, searched for with SCAN.
-  [check_streams: <string>]
-
-  # Comma-separated list of single streams to export info about streams, groups,
-  # and consumers for.
-  [check_single_streams: <string>]
-
-  # Whether to export key values as labels when using `check_keys` or `check_single_keys`.
-  [export_key_values: <boolean> | default = true]
-
-  # Comma-separated list of individual keys to export counts for.
-  [count_keys: <string>]
-
-  # Comma-separated list of paths to Lua Redis scripts for collecting extra metrics.
-  [script_path: <string>]
-
-  # Timeout for connection to the Redis instance (in Golang duration format).
-  [connection_timeout: <duration> | default = "15s"]
-
-  # Name of the client key file (including full path) if the server requires TLS client authentication.
-  [tls_client_key_file: <string>]
-
-  # Name of the client certificate file (including full path) if the server requires TLS client authentication.
-  [tls_client_cert_file: <string>]
-
-  # Name of the CA certificate file (including full path) if the server requires TLS client authentication.
-  [tls_ca_cert_file: <string>]
-
-  # Whether to set the client name to redis_exporter.
-  [set_client_name: <boolean>]
-
-  # Whether to scrape Tile38-specific metrics.
-  [is_tile38: <boolean>]
-
-  # Whether this is a Redis cluster. Enable this if you need to fetch key-level data on a Redis cluster.
-  [is_cluster: <boolean> | default = false]
-
-  # Whether to scrape Client List specific metrics.
-  [export_client_list: <boolean>]
-
-  # Whether to include the client's port when exporting the client list. Note
-  # that including this will increase the cardinality of all Redis metrics.
-  [export_client_port: <boolean>]
-
-  # Whether to also export Go runtime metrics.
-  [redis_metrics_only: <boolean>]
-
-  # Whether to ping the Redis instance after connecting.
-  [ping_on_connect: <boolean>]
-
-  # Whether to include system metrics such as redis_total_system_memory_bytes.
-  [incl_system_metrics: <boolean>]
-
-  # Whether to skip TLS verification.
-  [skip_tls_verification: <boolean>]
-```
diff --git a/docs/sources/static/configuration/integrations/snmp-config.md b/docs/sources/static/configuration/integrations/snmp-config.md
deleted file mode 100644
index bd8cfcfe62..0000000000
--- a/docs/sources/static/configuration/integrations/snmp-config.md
+++ /dev/null
@@ -1,193 +0,0 @@
----
-aliases:
-- ../../../configuration/integrations/snmp-config/
-- /docs/grafana-cloud/monitor-infrastructure/agent/static/configuration/integrations/snmp-config/
-- /docs/grafana-cloud/send-data/agent/static/configuration/integrations/snmp-config/
-canonical: https://grafana.com/docs/agent/latest/static/configuration/integrations/snmp-config/
-description: Learn about snmp config
-title: snmp config
----
-
-# snmp config
-
-The `snmp` block configures the `snmp` integration,
-which is an embedded version of
-[`snmp_exporter`](https://github.com/prometheus/snmp_exporter). This allows for
-the collection of SNMP metrics from network devices.
-
-{{< admonition type="note" >}}
-`snmp config` uses the latest configuration introduced in version 0.23 of the Prometheus `snmp_exporter`.
-{{< /admonition >}}
-
-## Quick configuration example
-
-To get started, define SNMP targets in the Grafana Agent's integrations block:
-
-```yaml
-metrics:
-  wal_directory: /tmp/wal
-integrations:
-  snmp:
-    enabled: true
-    snmp_targets:
-      - name: network_switch_1
-        address: 192.168.1.2
-        module: if_mib
-        walk_params: public
-        auth: public
-      - name: network_router_2
-        address: 192.168.1.3
-        module: mikrotik
-        walk_params: private
-        auth: private
-    walk_params:
-      private:
-        retries: 2
-      public:
-        retries: 1
-```
-
-## Prometheus service discovery use case
-
-If you need to scrape SNMP devices in a more dynamic environment, and can't define
-the devices in `snmp_targets` because the targets change over time, you can use a
-service discovery approach. For instance, with [DNS discovery](https://prometheus.io/docs/prometheus/2.45/configuration/configuration/#dns_sd_config):
-
-```yaml
-metrics:
-  wal_directory: /tmp/wal
-  configs:
-    - name: snmp_targets
-      scrape_configs:
-        - job_name: 'snmp'
-          dns_sd_configs:
-            - names:
-                - switches.srv.example.org
-                - routers.srv.example.org
-          params:
-            module: [if_mib]
-            walk_params: [private]
-            auth: [private]
-          metrics_path: /integrations/snmp/metrics
-          relabel_configs:
-            - source_labels: [__address__]
-              target_label: __param_target
-            - source_labels: [__param_target]
-              target_label: instance
-            - replacement: 127.0.0.1:12345 # address must match the grafana agent -server.http.address flag
-              target_label: __address__
-integrations:
-  snmp:
-    enabled: true
-    scrape_integration: false # set autoscrape to off
-    walk_params:
-      private:
-        retries: 2
-```
-
-Full reference of options:
-
-```yaml
-  # Enables the snmp integration, allowing the Agent to automatically
-  # collect metrics for the specified SNMP targets.
-  [enabled: <boolean> | default = false]
-
-  # Sets an explicit value for the instance label when the integration is
-  # self-scraped. Overrides inferred values.
-  #
-  # The default value for this integration is inferred from the agent hostname
-  # and HTTP listen port, delimited by a colon.
-  [instance: <string>]
-
-  # Automatically collect metrics from this integration. If disabled,
-  # the snmp integration will be run but not scraped and thus not
-  # remote-written. Metrics for the integration will be exposed at
-  # /integrations/snmp/metrics and can be scraped by an external
-  # process.
-  [scrape_integration: <boolean> | default = <integrations_config.scrape_integrations>]
-
-  # How often the metrics should be collected. Defaults to
-  # prometheus.global.scrape_interval.
-  [scrape_interval: <duration> | default = <global_config.scrape_interval>]
-
-  # The timeout before considering the scrape a failure. Defaults to
-  # prometheus.global.scrape_timeout.
-  [scrape_timeout: <duration> | default = <global_config.scrape_timeout>]
-
-  # Allows for relabeling labels on the target.
-  relabel_configs:
-    [- <relabel_config> ... ]
-
-  # Relabel metrics coming from the integration, allowing you to drop series
-  # from the integration that you don't care about.
-  metric_relabel_configs:
-    [ - <relabel_config> ... ]
-
-  # How frequently to truncate the WAL for this integration.
-  [wal_truncate_frequency: <duration> | default = "60m"]
-
-  #
-  # Exporter-specific configuration options
-  #
-
-  # SNMP configuration file with custom modules.
-  # See https://github.com/prometheus/snmp_exporter#generating-configuration for
-  # details on how to generate a custom snmp.yml file.
-  [config_file: <string> | default = ""]
-
-  # Embedded SNMP configuration. You can specify your modules here instead of in an
-  # external config file.
-  # See https://github.com/prometheus/snmp_exporter/tree/main#generating-configuration
-  # for details on how to specify your SNMP modules.
-  # If neither this nor config_file is defined, the embedded snmp_exporter default
-  # set of modules is used.
-  snmp_config:
-    [- <module> ... ]
-    [- <auth> ... ]
-
-  # List of SNMP targets to poll.
-  snmp_targets:
-    [- <snmp_target> ... ]
-
-  # Map of SNMP connection profiles that can be used to override default SNMP settings.
-  walk_params:
-    [ <string>: <walk_param> ... ]
-```
-
-## snmp_target config
-
-```yaml
-  # Name of an snmp_target.
-  [name: <string>]
-
-  # The address of the SNMP device.
-  [address: <string>]
-
-  # SNMP module to use for polling.
-  [module: <string> | default = ""]
-
-  # SNMP authentication profile to use.
-  [auth: <string> | default = ""]
-
-  # walk_param config to use for this snmp_target.
-  [walk_params: <string> | default = ""]
-```
-
-## walk_param config
-
-```yaml
-  # How many objects to request with GET/GETBULK, defaults to 25.
-  # May need to be reduced for buggy devices.
-  [max_repetitions: <int> | default = 25]
-
-  # How many times to retry a failed request, defaults to 3.
-  [retries: <int> | default = 3]
-
-  # Timeout for each SNMP request, defaults to 5s.
-  [timeout: <duration> | default = 5s]
-```
-
-## About SNMP modules
-
-An SNMP module is the set of SNMP counters to be scraped together from a specific network device.
-
-The available SNMP modules can be found in the embedded snmp.yml file [here](https://github.com/grafana/agent/blob/main/internal/static/integrations/snmp_exporter/common/snmp.yml). If no module is specified, the `if_mib` module is used.
-
-If you need to use custom SNMP modules, you can [generate](https://github.com/prometheus/snmp_exporter#generating-configuration) your own snmp.yml file and specify it using the `config_file` parameter.
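-
-For example, assuming you have generated a custom snmp.yml with the generator
-(the file path, target, and module name below are illustrative, not defaults),
-the custom module can be wired up like this:
-
-```yaml
-integrations:
-  snmp:
-    enabled: true
-    # Points at the custom snmp.yml produced by the snmp_exporter generator.
-    config_file: /etc/agent/snmp.yml
-    snmp_targets:
-      - name: storage_array_1
-        address: 192.168.1.50
-        # custom_module must be defined in the custom snmp.yml above.
-        module: custom_module
-```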
diff --git a/docs/sources/static/configuration/integrations/snowflake-config.md b/docs/sources/static/configuration/integrations/snowflake-config.md
deleted file mode 100644
index c648445a2d..0000000000
--- a/docs/sources/static/configuration/integrations/snowflake-config.md
+++ /dev/null
@@ -1,88 +0,0 @@
----
-aliases:
-- ../../../configuration/integrations/snowflake-config/
-- /docs/grafana-cloud/monitor-infrastructure/agent/static/configuration/integrations/snowflake-config/
-- /docs/grafana-cloud/send-data/agent/static/configuration/integrations/snowflake-config/
-canonical: https://grafana.com/docs/agent/latest/static/configuration/integrations/snowflake-config/
-description: Learn about snowflake_config
-title: snowflake_config
----
-
-# snowflake_config
-
-The `snowflake_config` block configures the `snowflake` integration,
-which is an embedded version of
-[`snowflake-prometheus-exporter`](https://github.com/grafana/snowflake-prometheus-exporter). This allows the collection of [Snowflake](https://www.snowflake.com/) metrics.
-
-Full reference of options:
-
-```yaml
-  # Enables the snowflake integration, allowing the Agent to automatically
-  # collect metrics for the specified snowflake account.
-  [enabled: <boolean> | default = false]
-
-  # Sets an explicit value for the instance label when the integration is
-  # self-scraped. Overrides inferred values.
-  #
-  # The default value for this integration is the configured account_name.
-  [instance: <string>]
-
-  # Automatically collect metrics from this integration. If disabled,
-  # the snowflake integration is run but not scraped and thus not
-  # remote-written. Metrics for the integration are exposed at
-  # /integrations/snowflake/metrics and can be scraped by an external
-  # process.
-  [scrape_integration: <boolean> | default = <integrations_config.scrape_integrations>]
-
-  # How often the metrics should be collected. Defaults to
-  # prometheus.global.scrape_interval.
-  [scrape_interval: <duration> | default = <global_config.scrape_interval>]
-
-  # The timeout before considering the scrape a failure. Defaults to
-  # prometheus.global.scrape_timeout.
-  [scrape_timeout: <duration> | default = <global_config.scrape_timeout>]
-
-  # Allows for relabeling labels on the target.
-  relabel_configs:
-    [- <relabel_config> ... ]
-
-  # Relabel metrics coming from the integration, letting you drop series
-  # from the integration that you don't care about.
-  metric_relabel_configs:
-    [ - <relabel_config> ... ]
-
-  # How frequently the WAL is truncated for this integration.
-  [wal_truncate_frequency: <duration> | default = "60m"]
-
-  #
-  # Exporter-specific configuration options
-  #
-
-  # The account name of the snowflake account to monitor.
-  account_name: <string>
-
-  # Username for the database user used to scrape metrics.
-  username: <string>
-
-  # Password for the database user used to scrape metrics.
-  password: <string>
-
-  # The warehouse to use when querying metrics.
-  warehouse: <string>
-
-  # The role to use when connecting to the database. The ACCOUNTADMIN role is used by default.
-  [role: <string> | default = "ACCOUNTADMIN"]
-```
-
-## Quick configuration example
-
-```yaml
-integrations:
-  snowflake:
-    enabled: true
-    account_name: XXXXXXX-YYYYYYY
-    username: snowflake-user
-    password: snowflake-pass
-    warehouse: SNOWFLAKE_WAREHOUSE
-    role: ACCOUNTADMIN
-```
diff --git a/docs/sources/static/configuration/integrations/squid-config.md b/docs/sources/static/configuration/integrations/squid-config.md
deleted file mode 100644
index 6bfff685a3..0000000000
--- a/docs/sources/static/configuration/integrations/squid-config.md
+++ /dev/null
@@ -1,86 +0,0 @@
----
-aliases:
-- ../../../configuration/integrations/squid-config/
-- /docs/grafana-cloud/monitor-infrastructure/agent/static/configuration/integrations/squid-config/
-- /docs/grafana-cloud/send-data/agent/static/configuration/integrations/squid-config/
-canonical: https://grafana.com/docs/agent/latest/static/configuration/integrations/squid-config/
-description: Learn about squid_config
-title: squid_config
----
-
-# squid_config
-
-The `squid_config` block configures the `squid` integration,
-which is an embedded fork of the
-[`squid-exporter`](https://github.com/boynux/squid-exporter). This integration
-allows you to collect metrics from a [Squid](http://www.squid-cache.org/) proxy.
-
-Full reference of options:
-
-```yaml
-  # Enables the Squid integration, allowing the Agent to automatically
-  # collect metrics for the specified Squid instance.
-  [enabled: <boolean> | default = false]
-
-  # Sets an explicit value for the instance label when the integration is
-  # self-scraped. Overrides inferred values.
-  #
-  # The default value for this integration is the host:port of the configured
-  # connection string.
-  [instance: <string>]
-
-  # Automatically collect metrics from this integration. If disabled,
-  # the Squid integration is run but not scraped and thus not
-  # remote-written. Metrics for the integration are exposed at
-  # /integrations/squid/metrics and can be scraped by an external
-  # process.
-  [scrape_integration: <boolean> | default = <integrations_config.scrape_integrations>]
-
-  # How often the metrics should be collected. Defaults to
-  # prometheus.global.scrape_interval.
-  [scrape_interval: <duration> | default = <global_config.scrape_interval>]
-
-  # The timeout before considering the scrape a failure. Defaults to
-  # prometheus.global.scrape_timeout.
-  [scrape_timeout: <duration> | default = <global_config.scrape_timeout>]
-
-  # Allows for relabeling labels on the target.
-  relabel_configs:
-    [- <relabel_config> ... ]
-
-  # Relabel metrics coming from the integration, letting you drop series
-  # from the integration that you don't care about.
-  metric_relabel_configs:
-    [ - <relabel_config> ... ]
-
-  # How frequently the WAL is truncated for this integration.
-  [wal_truncate_frequency: <duration> | default = "60m"]
-
-  #
-  # Exporter-specific configuration options
-  #
-
-  # The address used to connect to the Squid instance, in the format
-  # of <host>:<port>, e.g. "localhost:3128".
-  [address: <string>]
-
-  # The username for the Squid instance.
-  [username: <string>]
-
-  # The password for the username above.
-  [password: <string>]
-```
-
-## Configuration example
-
-```yaml
-integrations:
-  squid:
-    enabled: true
-    address: localhost:3128
-    scrape_interval: 1m
-    scrape_timeout: 1m
-    scrape_integration: true
-metrics:
-  wal_directory: /tmp/grafana-agent-wal
-server:
-  log_level: debug
-```
diff --git a/docs/sources/static/configuration/integrations/statsd-exporter-config.md b/docs/sources/static/configuration/integrations/statsd-exporter-config.md
deleted file mode 100644
index 87c3145895..0000000000
--- a/docs/sources/static/configuration/integrations/statsd-exporter-config.md
+++ /dev/null
@@ -1,128 +0,0 @@
----
-aliases:
-- ../../../configuration/integrations/statsd-exporter-config/
-- /docs/grafana-cloud/monitor-infrastructure/agent/static/configuration/integrations/statsd-exporter-config/
-- /docs/grafana-cloud/send-data/agent/static/configuration/integrations/statsd-exporter-config/
-canonical: https://grafana.com/docs/agent/latest/static/configuration/integrations/statsd-exporter-config/
-description: Learn about statsd_exporter_config
-title: statsd_exporter_config
----
-
-# statsd_exporter_config
-
-The `statsd_exporter_config` block configures the `statsd_exporter`
-integration, which is an embedded version of
-[`statsd_exporter`](https://github.com/prometheus/statsd_exporter). This allows
-for the collection of statsd metrics, exposing them as Prometheus metrics.
-
-Full reference of options:
-
-```yaml
-  # Enables the statsd_exporter integration, allowing the Agent to automatically
-  # collect system metrics from the configured statsd server address.
-  [enabled: <boolean> | default = false]
-
-  # Sets an explicit value for the instance label when the integration is
-  # self-scraped. Overrides inferred values.
-  #
-  # The default value for this integration is inferred from the agent hostname
-  # and HTTP listen port, delimited by a colon.
-  [instance: <string>]
-
-  # Automatically collect metrics from this integration. If disabled,
-  # the statsd_exporter integration will be run but not scraped and thus not
-  # remote-written. Metrics for the integration will be exposed at
-  # /integrations/statsd_exporter/metrics and can be scraped by an external
-  # process.
-  [scrape_integration: <boolean> | default = <integrations_config.scrape_integrations>]
-
-  # How often the metrics should be collected. Defaults to
-  # prometheus.global.scrape_interval.
-  [scrape_interval: <duration> | default = <global_config.scrape_interval>]
-
-  # The timeout before considering the scrape a failure. Defaults to
-  # prometheus.global.scrape_timeout.
-  [scrape_timeout: <duration> | default = <global_config.scrape_timeout>]
-
-  # Allows for relabeling labels on the target.
-  relabel_configs:
-    [- <relabel_config> ... ]
-
-  # Relabel metrics coming from the integration, allowing you to drop series
-  # from the integration that you don't care about.
-  metric_relabel_configs:
-    [ - <relabel_config> ... ]
-
-  # How frequently to truncate the WAL for this integration.
-  [wal_truncate_frequency: <duration> | default = "60m"]
-
-  #
-  # Exporter-specific configuration options
-  #
-
-  # The UDP address on which to receive statsd metric lines. An empty string
-  # will disable UDP collection.
-  [listen_udp: <string> | default = ":9125"]
-
-  # The TCP address on which to receive statsd metric lines. An empty string
-  # will disable TCP collection.
-  [listen_tcp: <string> | default = ":9125"]
-
-  # The Unixgram socket path to receive statsd metric lines on. An empty string
-  # will disable unixgram collection.
-  [listen_unixgram: <string> | default = ""]
-
-  # The permission mode of the unixgram socket, when enabled.
-  [unix_socket_mode: <string> | default = "755"]
-
-  # An optional mapping config that can translate dot-separated StatsD metrics
-  # into labeled Prometheus metrics. For full instructions on how to write this
-  # object, see the official documentation from the statsd_exporter:
-  #
-  # https://github.com/prometheus/statsd_exporter#metric-mapping-and-configuration
-  #
-  # Note that a SIGHUP will not reload this config.
-  [mapping_config: <mapping_config>]
-
-  # Size (in bytes) of the operating system's transmit read buffer associated
-  # with the UDP or unixgram connection. Please make sure the kernel parameter
-  # net.core.rmem_max is set to a value greater than the value specified.
-  [read_buffer: <int> | default = 0]
-
-  # Maximum size of your metric mapping cache. Relies on a least recently used
-  # replacement policy if the max size is reached.
-  [cache_size: <int> | default = 1000]
-
-  # Metric mapping cache type. Valid values are "lru" and "random".
-  [cache_type: <string> | default = "lru"]
-
-  # Size of the internal queue for processing events.
-  [event_queue_size: <int> | default = 10000]
-
-  # Number of events to hold in the queue before flushing.
-  [event_flush_threshold: <int> | default = 1000]
-
-  # Maximum time between event queue flushes.
-  [event_flush_interval: <duration> | default = "200ms"]
-
-  # Parse DogStatsd style tags.
-  [parse_dogstatsd_tags: <boolean> | default = true]
-
-  # Parse InfluxDB style tags.
-  [parse_influxdb_tags: <boolean> | default = true]
-
-  # Parse Librato style tags.
-  [parse_librato_tags: <boolean> | default = true]
-
-  # Parse SignalFX style tags.
-  [parse_signalfx_tags: <boolean> | default = true]
-
-  # Optional: relay address configuration. This setting, if provided,
-  # specifies the destination to forward your metrics to.
-  # Note that it must be a UDP endpoint in the format 'host:port'.
-  [relay_address: <string>]
-
-  # Maximum relay output packet length to avoid fragmentation.
-  [relay_packet_length: <int> | default = 1400]
-```
diff --git a/docs/sources/static/configuration/integrations/windows-exporter-config.md b/docs/sources/static/configuration/integrations/windows-exporter-config.md
deleted file mode 100644
index bcb753b086..0000000000
--- a/docs/sources/static/configuration/integrations/windows-exporter-config.md
+++ /dev/null
@@ -1,175 +0,0 @@
----
-aliases:
-- ../../../configuration/integrations/windows-exporter-config/
-- /docs/grafana-cloud/monitor-infrastructure/agent/static/configuration/integrations/windows-exporter-config/
-- /docs/grafana-cloud/send-data/agent/static/configuration/integrations/windows-exporter-config/
-canonical: https://grafana.com/docs/agent/latest/static/configuration/integrations/windows-exporter-config/
-description: Learn about windows_exporter_config
-title: windows_exporter_config
----
-
-# windows_exporter_config
-
-The `windows_exporter_config` block configures the `windows_exporter`
-integration, which is an embedded version of
-[`windows_exporter`](https://github.com/grafana/windows_exporter). This allows
-for the collection of Windows metrics, exposing them as Prometheus metrics.
-
-Full reference of options:
-
-```yaml
-  # Enables the windows_exporter integration, allowing the Agent to automatically
-  # collect system metrics from the local Windows instance.
-  [enabled: <boolean> | default = false]
-
-  # Sets an explicit value for the instance label when the integration is
-  # self-scraped. Overrides inferred values.
-  #
-  # The default value for this integration is inferred from the agent hostname
-  # and HTTP listen port, delimited by a colon.
-  [instance: <string>]
-
-  # Automatically collect metrics from this integration. If disabled,
-  # the windows_exporter integration will be run but not scraped and thus not
-  # remote-written. Metrics for the integration will be exposed at
-  # /integrations/windows_exporter/metrics and can be scraped by an external
-  # process.
-  [scrape_integration: <boolean> | default = <integrations_config.scrape_integrations>]
-
-  # How often the metrics should be collected. Defaults to
-  # prometheus.global.scrape_interval.
-  [scrape_interval: <duration> | default = <global_config.scrape_interval>]
-
-  # The timeout before considering the scrape a failure. Defaults to
-  # prometheus.global.scrape_timeout.
-  [scrape_timeout: <duration> | default = <global_config.scrape_timeout>]
-
-  # Allows for relabeling labels on the target.
-  relabel_configs:
-    [- <relabel_config> ... ]
-
-  # Relabel metrics coming from the integration, allowing you to drop series
-  # from the integration that you don't care about.
-  metric_relabel_configs:
-    [ - <relabel_config> ... ]
-
-  # How frequently to truncate the WAL for this integration.
-  [wal_truncate_frequency: <duration> | default = "60m"]
-
-  #
-  # Exporter-specific configuration options
-  #
-
-  # List of collectors to enable. Any non-experimental collector from the
-  # embedded version of windows_exporter can be enabled here.
-  [enabled_collectors: <string> | default = "cpu,cs,logical_disk,net,os,service,system"]
-
-  # Settings for collectors which accept configuration. Settings specified here
-  # are only used if the corresponding collector is enabled in
-  # enabled_collectors.
-
-  # Configuration for the Exchange mail server
-  exchange:
-    # Comma-separated list of collectors to use. Defaults to all, if not specified.
-    # Maps to collectors.exchange.enabled in windows_exporter
-    [enabled_list: <string>]
-
-  # Configuration for the IIS web server
-  iis:
-    # Regexp of sites to whitelist. Site name must both match whitelist and not match blacklist to be included.
-    # Maps to collector.iis.site-whitelist in windows_exporter
-    [site_whitelist: <string> | default = ".+"]
-
-    # Regexp of sites to blacklist. Site name must both match whitelist and not match blacklist to be included.
-    # Maps to collector.iis.site-blacklist in windows_exporter
-    [site_blacklist: <string> | default = ""]
-
-    # Regexp of apps to whitelist. App name must both match whitelist and not match blacklist to be included.
-    # Maps to collector.iis.app-whitelist in windows_exporter
-    [app_whitelist: <string> | default = ".+"]
-
-    # Regexp of apps to blacklist. App name must both match whitelist and not match blacklist to be included.
-    # Maps to collector.iis.app-blacklist in windows_exporter
-    [app_blacklist: <string> | default = ".+"]
-
-  # Configuration for reading metrics from text files in a directory
-  text_file:
-    # Directory to read text files with metrics from.
-    # Maps to collector.textfile.directory in windows_exporter
-    [text_file_directory: <string> | default = "C:\Program Files\windows_exporter\textfile_inputs"]
-
-  # Configuration for SMTP metrics
-  smtp:
-    # Regexp of virtual servers to whitelist. Server name must both match whitelist and not match blacklist to be included.
-    # Maps to collector.smtp.server-whitelist in windows_exporter
-    [whitelist: <string> | default = ".+"]
-
-    # Regexp of virtual servers to blacklist. Server name must both match whitelist and not match blacklist to be included.
-    # Maps to collector.smtp.server-blacklist in windows_exporter
-    [blacklist: <string> | default = ""]
-
-  # Configuration for Windows Services
-  service:
-    # WQL 'where' clause to use in WMI metrics query. Limits the response to the services you specify and reduces the size of the response.
-    # Maps to collector.service.services-where in windows_exporter
-    [where_clause: <string> | default = ""]
-
-  # Configuration for physical disk on Windows
-  physical_disk:
-    # Regexp of disks to include. Disk name must both match include and not match exclude to be included.
-    # Maps to collector.physical_disk.disk-include in windows_exporter.
-    [include: <string> | default = ".+"]
-
-    # Regexp of disks to exclude. Disk name must both match include and not match exclude to be included.
-    # Maps to collector.physical_disk.disk-exclude in windows_exporter.
-    [exclude: <string> | default = ".+"]
-
-  # Configuration for Windows Processes
-  process:
-    # Regexp of processes to include. Process name must both match whitelist and not match blacklist to be included.
-    # Maps to collector.process.whitelist in windows_exporter
-    [whitelist: <string> | default = ".+"]
-
-    # Regexp of processes to exclude. Process name must both match whitelist and not match blacklist to be included.
-    # Maps to collector.process.blacklist in windows_exporter
-    [blacklist: <string> | default = ""]
-
-  # Configuration for NICs
-  network:
-    # Regexp of NICs to whitelist. NIC name must both match whitelist and not match blacklist to be included.
-    # Maps to collector.net.nic-whitelist in windows_exporter
-    [whitelist: <string> | default = ".+"]
-
-    # Regexp of NICs to blacklist. NIC name must both match whitelist and not match blacklist to be included.
-    # Maps to collector.net.nic-blacklist in windows_exporter
-    [blacklist: <string> | default = ""]
-
-  # Configuration for Microsoft SQL Server
-  mssql:
-    # Comma-separated list of mssql WMI classes to use.
-    # Maps to collectors.mssql.classes-enabled in windows_exporter
-    [enabled_classes: <string> | default = "accessmethods,availreplica,bufman,databases,dbreplica,genstats,locks,memmgr,sqlstats,sqlerrors,transactions"]
-
-  # Configuration for Microsoft Message Queuing (MSMQ)
-  msmq:
-    # WQL 'where' clause to use in WMI metrics query. Limits the response to the msmqs you specify and reduces the size of the response.
-    # Maps to collector.msmq.msmq-where in windows_exporter
-    [where_clause: <string> | default = ""]
-
-  # Configuration for disk information
-  logical_disk:
-    # Regexp of volumes to whitelist. Volume name must both match whitelist and not match blacklist to be included.
-    # Maps to collector.logical_disk.volume-whitelist in windows_exporter
-    [whitelist: <string> | default = ".+"]
-
-    # Regexp of volumes to blacklist. Volume name must both match whitelist and not match blacklist to be included.
-    # Maps to collector.logical_disk.volume-blacklist in windows_exporter
-    [blacklist: <string> | default = ".+"]
-
-  # Configuration for Windows Task Scheduler
-  scheduled_task:
-    # Regexp of tasks to include.
-    [include: <string> | default = ".+"]
-    # Regexp of tasks to exclude.
-    [exclude: <string> | default = ""]
-```
diff --git a/docs/sources/static/configuration/logs-config.md b/docs/sources/static/configuration/logs-config.md
deleted file mode 100644
index 56d8d06773..0000000000
--- a/docs/sources/static/configuration/logs-config.md
+++ /dev/null
@@ -1,136 +0,0 @@
----
-aliases:
-- ../../configuration/logs-config/
-- ../../configuration/loki-config/
-- /docs/grafana-cloud/monitor-infrastructure/agent/static/configuration/logs-config/
-- /docs/grafana-cloud/send-data/agent/static/configuration/logs-config/
-canonical: https://grafana.com/docs/agent/latest/static/configuration/logs-config/
-description: Learn about logs_config
-title: logs_config
-weight: 300
----
-
-# logs_config
-
-The `logs_config` block configures how the Agent collects logs and sends them to
-a Loki push API endpoint.
-`logs_config` is identical to how Promtail is configured, except deprecated
-fields have been removed and the `server_config` block is not supported.
-
-Refer to the
-[Promtail documentation](/docs/loki/latest/clients/promtail/configuration/#clients)
-for the supported values for these fields.
-
-```yaml
-# Directory to store Loki Promtail positions files in. Positions files are
-# required to read logs, and are used to store the last read offset of log
-# sources. The positions files will be stored in
-# <positions_directory>/<config name>.yml.
-#
-# Optional only if every config has a positions.filename manually provided.
-#
-# This directory will be automatically created if it doesn't exist.
-[positions_directory: <string>]
-
-# Configure values for all Loki Promtail instances.
-[global: <global_config>]
-
-# Loki Promtail instances to run for log collection.
-configs:
-  - [<logs_instance_config>]
-```
-
-## global_config
-
-The `global_config` block configures global values for all launched Loki Promtail
-instances.
-
-```yaml
-clients:
-  - [<promtail.client_config>]
-
-# Configure how frequently log files from disk get polled for changes.
-[file_watch_config: <file_watch_config>]
-```
-
-> **Note:** More information on the following types can be found on the
-> documentation for Promtail:
->
-> * [`promtail.client_config`](/docs/loki/latest/clients/promtail/configuration/#clients)
-
-## file_watch_config
-
-The `file_watch_config` block configures how often to poll log files from disk
-for changes:
-
-```yaml
-# Minimum frequency to poll for files. Any time file changes are detected, the
-# poll frequency gets reset to this duration.
-[min_poll_frequency: <duration> | default = "250ms"]
-
-# Maximum frequency to poll for files. Any time no file changes are detected,
-# the poll frequency doubles in value up to the maximum duration specified by
-# this value.
-#
-# The default is set to the same as min_poll_frequency.
-[max_poll_frequency: <duration> | default = "250ms"]
-```
-
-## logs_instance_config
-
-The `logs_instance_config` block is an individual instance of Promtail with its
-own set of scrape rules and its own destinations to forward logs to. It is
-identical to how Promtail is configured, except deprecated fields have been
-removed and the `server_config` block is not supported.
-
-```yaml
-# Name of this config. Required, and must be unique across all Loki configs.
-# The name of the config will be the value of a logs_config label for all
-# Loki Promtail metrics.
-name: <string>
-
-clients:
-  - [<promtail.client_config>]
-
-# Optional configuration for where to store the positions files. If
-# positions.filename is left empty, the file will be stored in
-# <positions_directory>/<config name>.yml.
-#
-# The directory of the positions file will automatically be created on startup
-# if it doesn't already exist.
-[positions: <positions_config>]
-
-scrape_configs:
-  - [<promtail.scrape_config>]
-
-[target_config: <promtail.target_config>]
-
-[limits_config: <promtail.limits_config>]
-```
-
-> **Note:** More information on the following types can be found on the
-> documentation for Promtail:
->
-> * [`promtail.client_config`](/docs/loki/latest/clients/promtail/configuration/#clients)
-> * [`promtail.scrape_config`](/docs/loki/latest/clients/promtail/configuration/#scrape_configs)
-> * [`promtail.target_config`](/docs/loki/latest/clients/promtail/configuration/#target_config)
-> * [`promtail.limits_config`](/docs/loki/latest/clients/promtail/configuration/#limits_config)
-
-> **Note:** Backticks in values are not supported.
-
-> **Note:** Because of how YAML treats backslashes in double-quoted strings,
-> all backslashes in a regex expression must be escaped when using double
-> quotes.
-> But because of double processing, in the Grafana Agent config file
-> you must use a quadruple backslash (`\\\\`) construction to add backslashes
-> to regular expressions, as in this example for the `name=(\w+)\s` regex:
-```
- selector: '{app="my-app"} |~ "name=(\\\\w+)\\\\s"'
-```
-
-Using a single or double backslash construction produces the error:
-```
-failed to make file target manager: invalid match stage config: invalid selector syntax for match stage: parse error at line 1, col 40: literal not terminated
-```
-Using backticks produces the error:
-```
-invalid match stage config: invalid selector syntax for match stage: parse error at line 1, col 51: syntax error: unexpected IDENTIFIER, expecting STRING
-```
diff --git a/docs/sources/static/configuration/metrics-config.md b/docs/sources/static/configuration/metrics-config.md
deleted file mode 100644
index 296d3700b0..0000000000
--- a/docs/sources/static/configuration/metrics-config.md
+++ /dev/null
@@ -1,352 +0,0 @@
----
-aliases:
-- ../../configuration/metrics-config/
-- ../../configuration/prometheus-config/
-- /docs/grafana-cloud/monitor-infrastructure/agent/static/configuration/metrics-config/
-- /docs/grafana-cloud/send-data/agent/static/configuration/metrics-config/
-canonical: https://grafana.com/docs/agent/latest/static/configuration/metrics-config/
-description: Learn about metrics_config
-title: metrics_config
-weight: 200
----
-
-# metrics_config
-
-The `metrics_config` block is used to define a collection of metrics
-instances. Each instance defines a collection of Prometheus-compatible
-scrape_configs and remote_write rules. Most users will only need to
-define one instance.
-
-```yaml
-# Configures the optional scraping service to cluster agents.
-[scraping_service: <scraping_service_config>]
-
-# Configures the gRPC client used for agents to connect to other
-# clustered agents.
-[scraping_service_client: <scraping_service_client_config>]
-
-# Configure values for all Prometheus instances.
-[global: <global_config>]
-
-# Configure the directory used by instances to store their WAL.
-#
-# The Grafana Agent assumes that all folders within wal_directory are managed by
-# the agent itself. This means if you are using a PVC, you must point
-# wal_directory to a subdirectory of the PVC mount.
-[wal_directory: <string> | default = "data-agent/"]
-
-# Configures how long ago an abandoned (not associated with an instance) WAL
-# may have last been written to before becoming eligible for deletion.
-[wal_cleanup_age: <duration> | default = "12h"]
-
-# Configures how often checks for abandoned WALs to be deleted are performed.
-# A value of 0 disables periodic cleanup of abandoned WALs.
-[wal_cleanup_period: <duration> | default = "30m"]
-
-# Allows disabling HTTP Keep-Alives when scraping; the Agent will use each
-# outgoing connection for only a single request.
-[http_disable_keepalives: <boolean> | default = false]
-
-# Configures the maximum amount of time a Keep-Alive connection can remain
-# idle before closing itself. Zero means no limit.
-# The setting is ignored when `http_disable_keepalives` is enabled.
-[http_idle_conn_timeout: <duration> | default = "5m"]
-
-# The list of Prometheus instances to launch with the agent.
-configs:
-  [- <metrics_instance_config>]
-
-# If an instance crashes abnormally, how long to wait before trying to
-# restart it. 0s disables the backoff period and restarts the instance
-# immediately.
-[instance_restart_backoff: <duration> | default = "5s"]
-
-# How to spawn instances based on instance configs. Supported values: shared,
-# distinct.
-[instance_mode: <string> | default = "shared"]
-```
-
-## scraping_service_config
-
-The `scraping_service` block configures the [scraping service][scrape], an operational
-mode where configurations are stored centrally in a KV store and a cluster of
-agents distributes discovery and scrape load between nodes.
-
-```yaml
-# Whether to enable scraping service mode. When enabled, local configs
-# cannot be used.
-[enabled: <boolean> | default = false]
-
-# Note: due to backwards compatibility, the naming of the next three
-# configuration options is less than ideal.
-
-# How often the agent should manually refresh the configuration. Useful
-# if KV change events are not sent by the store.
-[reshard_interval: <duration> | default = "1m"]
-
-# The timeout for configuration refreshes. This can occur on cluster events or
-# on the reshard interval. A timeout of 0 indicates no timeout.
-[reshard_timeout: <duration> | default = "30s"]
-
-# The timeout for cluster reshard events. A timeout of 0 indicates no timeout.
-[cluster_reshard_event_timeout: <duration> | default = "30s"]
-
-# Configuration for the KV store used to store configurations.
-kvstore: <kvstore_config>
-
-# When set, allows configs pushed to the KV store to specify configuration
-# fields that can read secrets from files.
-#
-# This is disabled by default. When enabled, a malicious user can craft an
-# instance config that reads arbitrary files on the machine the Agent runs
-# on and sends their contents to a specially crafted remote_write endpoint.
-#
-# If enabled, ensure that no untrusted users have access to the Agent API.
-[dangerous_allow_reading_files: <boolean>]
-
-# Configuration for how agents will cluster together.
-lifecycler: <lifecycler_config>
-```
-
-## kvstore_config
-
-The `kvstore_config` block configures the KV store used as storage for
-configurations in the scraping service mode.
-
-```yaml
-# Which underlying KV store to use. Can be either consul or etcd.
-[store: <string> | default = ""]
-
-# Key prefix to store all configurations with. Must end in /.
-[prefix: <string> | default = "configurations/"]
-
-# Configuration for a Consul client. Only applies if store
-# is "consul".
-consul:
-  # The hostname and port of Consul.
-  [host: <string> | default = "localhost:8500"]
-
-  # The ACL Token used to interact with Consul.
-  [acltoken: <string>]
-
-  # The HTTP timeout when communicating with Consul.
-  [httpclienttimeout: <duration> | default = 20s]
-
-  # Whether or not consistent reads to Consul are enabled.
-  [consistentreads: <boolean> | default = true]
-
-# Configuration for an ETCD v3 client. Only applies if
-# store is "etcd".
-etcd:
-  # The ETCD endpoints to connect to.
-  endpoints:
-    - <string>
-
-  # The dial timeout for the ETCD connection.
-  [dial_timeout: <duration> | default = 10s]
-
-  # The maximum number of retries to do for failed ops to ETCD.
-  [max_retries: <int> | default = 10]
-```
-
-## lifecycler_config
-
-The `lifecycler_config` block configures the lifecycler, the component that
-Agents use to cluster together.
-
-```yaml
-# Configures the distributed hash ring storage.
-ring:
-  # KV store for getting and sending distributed hash ring updates.
-  kvstore: <kvstore_config>
-
-  # Specifies when other agents in the cluster should be considered
-  # unhealthy if they haven't sent a heartbeat within this duration.
-  [heartbeat_timeout: <duration> | default = "1m"]
-
-# Number of tokens to generate for the distributed hash ring.
-[num_tokens: <int> | default = 128]
-
-# How often agents should send a heartbeat to the distributed hash
-# ring.
-[heartbeat_period: <duration> | default = "5s"]
-
-# How long to wait for tokens from other agents after generating
-# a new set to resolve collisions.
-# Useful only when using a gossip KV store.
-[observe_period: <duration> | default = "0s"]
-
-# Period to wait before joining the ring. 0s means to join immediately.
-[join_after: <duration> | default = "0s"]
-
-# Minimum duration to wait before marking the agent as ready to receive
-# traffic. Used to work around race conditions for multiple agents exiting
-# the distributed hash ring at the same time.
-[min_ready_duration: <duration> | default = "1m"]
-
-# Network interfaces to resolve addresses defined by other agents
-# registered in the distributed hash ring.
-[interface_names: <list of string> | default = ["eth0", "en0"]]
-
-# Duration to sleep before exiting. Ensures that metrics get scraped
-# before the process quits.
-[final_sleep: <duration> | default = "30s"]
-
-# File path to store tokens. If empty, tokens will not be stored during
-# shutdown and will not be restored at startup.
-[tokens_file_path: <string> | default = ""]
-
-# Availability zone of the host the agent is running on. Default is an
-# empty string, which disables zone awareness for writes.
-[availability_zone: <string> | default = ""]
-```
-
-## scraping_service_client_config
-
-The `scraping_service_client_config` block configures how clustered Agents will
-generate gRPC clients to connect to each other.
-
-```yaml
-grpc_client_config:
-  # Maximum size in bytes the gRPC client will accept from the connected server.
-  [max_recv_msg_size: <int> | default = 104857600]
-
-  # Maximum size in bytes the gRPC client will send to the connected server.
-  [max_send_msg_size: <int> | default = 16777216]
-
-  # Whether messages should be gzipped.
-  [use_gzip_compression: <boolean> | default = false]
-
-  # The rate limit for gRPC clients; 0 means no rate limit.
-  [rate_limit: <float> | default = 0]
-
-  # gRPC burst allowed for rate limits.
-  [rate_limit_burst: <int> | default = 0]
-
-  # Controls whether the client should retry the request when a rate
-  # limit is hit.
-  [backoff_on_ratelimits: <boolean> | default = false]
-
-  # Configures the retry backoff when backoff_on_ratelimits is
-  # true.
-  backoff_config:
-    # The minimum delay when backing off.
-    [min_period: <duration> | default = "100ms"]
-
-    # The maximum delay when backing off.
-    [max_period: <duration> | default = "10s"]
-
-    # The number of times to backoff and retry before failing.
-    [max_retries: <int> | default = 10]
-```
-
-## global_config
-
-The `global_config` block configures global values for all launched Prometheus
-instances.
-
-```yaml
-# How frequently Prometheus instances should scrape.
-[scrape_interval: <duration> | default = "1m"]
-
-# How long to wait before timing out a scrape from a target.
-[scrape_timeout: <duration> | default = "10s"]
-
-# A list of static labels to add to all metrics.
-external_labels:
-  { <string>: <string> }
-
-# Default set of remote_write endpoints. If an instance doesn't define any
-# remote_writes, it will use this list.
-remote_write:
-  - [<remote_write>]
-```
-
-> **Note:** For more information on remote_write, refer to the [Prometheus documentation](https://prometheus.io/docs/prometheus/2.45/configuration/configuration/#remote_write).
->
-> The following default values set by Grafana Agent Static Mode are different from the defaults set by Prometheus:
-> - `remote_write`: `send_exemplars` default value is `true`
-> - `remote_write`: `queue_config`: `retry_on_http_429` default value is `true`
-
-## metrics_instance_config
-
-The `metrics_instance_config` block configures an individual metrics
-instance, which acts as its own mini Prometheus-compatible agent, though
-without support for the TSDB.
-
-```yaml
-# Name of the instance. Must be present. Will be added as a label to agent
-# metrics.
-name: <string>
-
-# Whether this agent instance should only scrape targets running on the
-# same machine as the agent process.
-[host_filter: <boolean> | default = false]
-
-# Relabel configs to apply against discovered targets. The relabeling is
-# temporary and only used for filtering targets.
-host_filter_relabel_configs:
-  [ - <relabel_config> ... ]
-
-# How frequently the WAL truncation process should run. Every iteration of
-# the truncation will checkpoint old series and remove old samples. If data
-# has not been sent within this window, some of it may be lost.
-#
-# The size of the WAL will increase with less frequent truncations. Making
-# truncations more frequent reduces the size of the WAL but increases the
-# chances of data loss when remote_write is failing for longer than the
-# specified frequency.
-[wal_truncate_frequency: <duration> | default = "60m"]
-
-# The minimum amount of time that series and samples should exist in the WAL
-# before being considered for deletion. The consumed disk space of the WAL will
-# increase by making this value larger.
-#
-# Setting this value to 0s is valid, but may delete series before all
-# remote_write shards have been able to write all data, and may cause errors on
-# slower machines.
-[min_wal_time: <duration> | default = "5m"]
-
-# The maximum amount of time that series and samples may exist within the WAL
-# before being considered for deletion. Series that have not received writes
-# within this period will be removed, and all samples older than this period
-# will be removed.
-#
-# This value is useful for long-running network outages, preventing the WAL
-# from growing forever.
-#
-# Must be larger than min_wal_time.
-[max_wal_time: <duration> | default = "4h"]
-
-# Deadline for flushing data when a Prometheus instance shuts down
-# before giving up and letting the shutdown proceed.
-[remote_flush_deadline: <duration> | default = "1m"]
-
-# When true, writes staleness markers for all active series to
-# remote_write.
-[write_stale_on_shutdown: <boolean> | default = false]
-
-# A list of scrape configuration rules.
-scrape_configs:
-  - [<scrape_config>]
-
-# A list of remote_write targets.
-remote_write:
-  - [<remote_write>]
-```
-
-> **Note:** More information on the following types can be found on the Prometheus
-> website:
->
-> * [`relabel_config`](https://prometheus.io/docs/prometheus/2.45/configuration/configuration/#relabel_config)
-> * [`scrape_config`](https://prometheus.io/docs/prometheus/2.45/configuration/configuration/#scrape_config)
-> * [`remote_write`](https://prometheus.io/docs/prometheus/2.45/configuration/configuration/#remote_write)
-
-## Data retention
-
-{{< docs/shared source="agent" lookup="/wal-data-retention.md" version="<AGENT_VERSION>" >}}
-
-{{% docs/reference %}}
-[scrape]: "/docs/agent/ -> /docs/agent/<AGENT_VERSION>/static/configuration/scraping-service"
-[scrape]: "/docs/grafana-cloud/ -> ./scraping-service"
-{{% /docs/reference %}}
diff --git a/docs/sources/static/configuration/scraping-service.md b/docs/sources/static/configuration/scraping-service.md
deleted file mode 100644
index ccfb2c67c6..0000000000
--- a/docs/sources/static/configuration/scraping-service.md
+++ /dev/null
@@ -1,193 +0,0 @@
----
-aliases:
-- ../../configuration/scraping-service/
-- ../../scraping-service/
-- /docs/grafana-cloud/monitor-infrastructure/agent/static/configuration/scraping-service/
-- /docs/grafana-cloud/send-data/agent/static/configuration/scraping-service/
-canonical: https://grafana.com/docs/agent/latest/static/configuration/scraping-service/
-description: Learn about the scraping service
-menuTitle: Scraping service
-title: Scraping service (Beta)
-weight: 600
----
-
-# Scraping service (Beta)
-
-The Grafana Agent scraping service allows you to cluster a set of Agent processes and distribute the scrape load.
-
-Determining what to scrape is done by writing instance configuration files to an
-[API][api], which then stores the configuration files in a KV store backend.
-All agents in the cluster **must** use the same KV store to see the same set
-of configuration files.
-
-Each process of the Grafana Agent can be running multiple independent
-"instances" at once, where an "instance" refers to the combination of:
-
-- Discovering targets from all `scrape_configs` within the loaded configuration
-- Scraping metrics from all discovered targets
-- Storing data in its own Write-Ahead Log specific to the loaded configuration
-- Remote-writing scraped metrics to the `remote_write` destinations specified
-  within the loaded configuration
-
-The "instance configuration file," then, is the configuration file that
-specifies the set of `scrape_configs` and `remote_write` endpoints. For example,
-a small instance configuration file looks like this:
-
-```yaml
-scrape_configs:
-  - job_name: self-scrape
-    static_configs:
-      - targets: ['localhost:9090']
-        labels:
-          process: 'agent'
-remote_write:
-  - url: http://cortex:9009/api/prom/push
-```
-
-The full set of supported options for an instance configuration file is
-available in the
-[`metrics-config.md` file][metrics].
-
-Multiple instance configuration files are necessary for sharding. Each
-configuration file is distributed to a particular agent in the cluster based on
-the hash of its name.
-
-When the scraping service is enabled, Agents **disallow** specifying
-instance configurations locally in the configuration file; using the KV store
-is required. [`agentctl`](#agentctl) can be used to manually sync
-instance configuration files to the Agent's API server.
-
-## Distributed hash ring
-
-The scraping service uses a Distributed Hash Ring (commonly called "the
-ring") to cluster agents and to shard configurations within that ring.
-Each Agent joins the ring with a random distinct set of _tokens_ that are used
-for sharding. The default number of generated tokens is 128.
-
-The Distributed Hash Ring is also stored in a KV store. Since a KV store is
-also needed for storing configuration files, you are encouraged to reuse
-the same KV store for the ring.
-
-When sharding, the Agent currently uses the name of a configuration file
-stored in the KV store for load distribution. Configuration names are guaranteed to be
-unique keys. The hash of the name is used as the _lookup key_ in the ring and
-determines which agent (based on token) should be responsible for that configuration.
-"Price is Right" rules are used for the Agent lookup; the Agent owning the token
-with the closest value to the key without going over is responsible for the
-configuration.
-
-All Agents simultaneously watch the KV store for changes to the set of
-configuration files. When a configuration file is added or updated in the configuration
-store, each Agent runs the configuration name hash through its copy of the Hash
-Ring to determine if it is responsible for that config.
-
-When an Agent receives a new configuration that it is responsible for, it launches a
-new instance from the instance configuration. If a configuration is deleted from the KV store,
-this is detected by the owning Agent, which then stops the metric collection
-process for that configuration file.
-
-When an Agent receives an event for an updated configuration file that it used to
-own but no longer does, the associated instance for that
-configuration file is stopped on that Agent. This can happen when the cluster
-size changes.
-
-The scraping service currently does not support replication. Only one agent
-at a time will be responsible for scraping a given configuration.
-
-### Resharding
-
-When a new Agent joins or leaves the cluster, the set of tokens in the ring may
-cause configurations to hash to a new Agent. The process of responding to this
-change is called "resharding."
-
-Resharding is run:
-
-1. When an Agent joins the ring.
-2. When an Agent leaves the ring.
-3. When the KV store sends a notification indicating a configuration has changed.
-4. On a specified interval, if KV change events have not fired.
-
-The resharding process involves each Agent retrieving the full set of
-configurations stored in the KV store and determining whether:
-
-1. A configuration owned by the resharding Agent has changed and needs to
-   be reloaded.
-2. A configuration is no longer owned by the resharding Agent and the
-   associated instance should be stopped.
-3. A configuration has been deleted and the associated instance should be stopped.
-
-## Best practices
-
-Because distribution is determined by the number of configuration files, and not by
-how many targets exist per configuration file, the most even distribution is
-achieved when each configuration file has as few targets as possible. The best
-distribution results when each configuration file stored in the KV store is
-limited to one static configuration with only one target, as in the sketch below.
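-
-For example, an instance configuration file that follows this practice (the job
-name and target below are illustrative) looks like:
-
-```yaml
-scrape_configs:
-  - job_name: node_exporter_host_a
-    static_configs:
-      # One target per configuration file gives the cluster the most granular
-      # unit of work to distribute.
-      - targets: ['host-a:9100']
-remote_write:
-  - url: http://cortex:9009/api/prom/push
-```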
- -## Example - -Here's an example `agent.yaml` configuration file that uses the same `etcd` server for -both configuration storage and the distributed hash ring storage: - -```yaml -server: - log_level: debug - -metrics: - global: - scrape_interval: 1m - scraping_service: - enabled: true - kvstore: - store: etcd - etcd: - endpoints: - - etcd:2379 - lifecycler: - ring: - replication_factor: 1 - kvstore: - store: etcd - etcd: - endpoints: - - etcd:2379 -``` - -Note that there are no instance configurations present in this example; instance -configurations must be passed to the API for the Agent to start scraping metrics. - -## agentctl - -`agentctl` is a tool included with this repository that helps users interact -with the new Config Management API. The `agentctl config-sync` subcommand uses -local YAML files as a source of truth and syncs their contents with the API. -Entries in the API not in the synced directory will be deleted. - -`agentctl` is distributed in binary form with each release and as a Docker -container with the `grafana/agentctl` image. Tanka configurations that -utilize `grafana/agentctl` and sync a set of configurations to the API -are planned for the future. - -## Debug Ring endpoint - -You can use the `/debug/ring` endpoint to troubleshoot issues with the scraping service in Scraping Service Mode. -It provides information about the Distributed Hash Ring and the current distribution of configurations among Agents in the cluster. -It also allows you to forget an instance in the ring manually. - -You can access this endpoint by making an HTTP request to the Agent's API server. - -Information returned by the `/debug/ring` endpoint includes: - -- The list of Agents in the cluster, and their respective tokens used for sharding. -- The list of configuration files in the KV store and associated hash values used for lookup in the ring. -- The unique instance ID assigned to each instance of the Agent running in the cluster. - The instance ID is a unique identifier assigned to each running instance of the Agent within the cluster. - The exact details of the instance ID generation might be specific to the implementation of the Grafana Agent. -- The time of the "Last Heartbeat" of each instance. The Last Heartbeat is the last time the instance was active in the ring. - -{{% docs/reference %}} -[api]: "/docs/agent/ -> /docs/agent//static/api" -[api]: "/docs/grafana-cloud/ -> ../api" -[metrics]: "/docs/agent/ -> /docs/agent//static/configuration/metrics-config" -[metrics]: "/docs/grafana-cloud/ -> ./metrics-config" -{{% /docs/reference %}} diff --git a/docs/sources/static/configuration/server-config.md b/docs/sources/static/configuration/server-config.md deleted file mode 100644 index aaba5fee0c..0000000000 --- a/docs/sources/static/configuration/server-config.md +++ /dev/null @@ -1,113 +0,0 @@ ---- -aliases: -- ../../configuration/server-config/ -- /docs/grafana-cloud/monitor-infrastructure/agent/static/configuration/server-config/ -- /docs/grafana-cloud/send-data/agent/static/configuration/server-config/ -canonical: https://grafana.com/docs/agent/latest/static/configuration/server-config/ -description: Learn about server_config -title: server_config -weight: 100 ---- - -# server_config - -The `server_config` block configures the Agent's behavior as an HTTP server, -gRPC server, and the log level for the whole process. - -The Agent exposes an HTTP server for scraping its own metrics and gRPC for the -scraping service mode. 
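-
-For example, a minimal `server` block (a sketch; values are illustrative)
-looks like this:
-
-```yaml
-server:
-  log_level: info
-  log_format: logfmt
-```
-
-The full set of supported options is as follows.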
-
-```yaml
-# Log only messages with the given severity or above. Supported values [debug,
-# info, warn, error]. This level affects logging for all Agent-level logs, not
-# just the HTTP and gRPC server.
-#
-# Note that some integrations use their own loggers which ignore this
-# setting.
-[log_level: <string> | default = "info"]
-
-# Log messages with the given format. Supported values [logfmt, json].
-# This affects logging for all Agent-level logs, not just the HTTP and gRPC
-# server.
-#
-# Note that some integrations use their own loggers which ignore this
-# setting.
-[log_format: <string> | default = "logfmt"]
-
-# TLS configuration for the HTTP server. Required when the
-# -server.http.tls-enabled flag is provided, ignored otherwise.
-[http_tls_config: <server_tls_config>]
-
-# TLS configuration for the gRPC server. Required when the
-# -server.grpc.tls-enabled flag is provided, ignored otherwise.
-[grpc_tls_config: <server_tls_config>]
-```
-
-## server_tls_config
-
-The `server_tls_config` block configures TLS.
-
-```yaml
-# File path to the server certificate.
-[cert_file: <string>]
-
-# File path to the server key.
-[key_file: <string>]
-
-# Tells the server what is acceptable from the client; this drives the options
-# in client_tls_config.
-[client_auth_type: <string>]
-
-# File path to the signing CA certificate, needed if the CA is not trusted.
-[client_ca_file: <string>]
-
-# Windows certificate filter allows selecting the client CA and server
-# certificate from the Windows Certificate store.
-[windows_certificate_filter: <windows_certificate_filter_config>]
-```
-
-## windows_certificate_filter_config
-
-The `windows_certificate_filter_config` block configures the use of the Windows
-Certificate store. Setting `cert_file`, `key_file`, or `client_ca_file` is
-invalid when using the windows_certificate_filter.
-
-```yaml
-# Client configuration, optional. If nothing is specified, the default client
-# CA root is used.
-[client: <windows_client_config>]
-
-# Name of the store to look for the Client Certificate, e.g. My, CA.
-server: <windows_server_config>
-```
-
-### windows_client_config
-
-```yaml
-# Array of issuer common names to check against.
-issuer_common_names:
-  [- <string> ... ]
-
-# Regular expression to match the Subject name.
-[subject_regex: <string>]
-
-# Client Template ID to match, in ASN1 format, e.g. "1.2.3".
-[template_id: <string>]
-```
-
-### windows_server_config
-
-```yaml
-# Name of the system store to look for the Server Certificate, e.g.
-# LocalMachine, CurrentUser.
-system_store: <string>
-
-# Name of the store to look for the Server Certificate, e.g. My, CA.
-store: <string>
-
-# Array of issuer common names to check against.
-issuer_common_names:
-  [- <string> ... ]
-
-# Server Template ID to match, in ASN1 format, e.g. "1.2.3".
-[template_id: <string>]
-
-# How often to refresh the server certificate, e.g. 5m, 1h.
-[refresh_interval: <duration>]
-```
diff --git a/docs/sources/static/configuration/traces-config.md b/docs/sources/static/configuration/traces-config.md
deleted file mode 100644
index 4ff3bfc85e..0000000000
--- a/docs/sources/static/configuration/traces-config.md
+++ /dev/null
@@ -1,482 +0,0 @@
----
-aliases:
-- ../../configuration/tempo-config/
-- ../../configuration/traces-config/
-- /docs/grafana-cloud/monitor-infrastructure/agent/static/configuration/traces-config/
-- /docs/grafana-cloud/send-data/agent/static/configuration/traces-config/
-canonical: https://grafana.com/docs/agent/latest/static/configuration/traces-config/
-description: Learn about traces_config
-title: traces_config
-weight: 400
----
-
-# traces_config
-
-The `traces_config` block configures a set of Tempo instances, each of which
-configures its own tracing pipeline. 
Having multiple configs allows you to
-configure multiple distinct pipelines, each of which collects spans and sends
-them to a different location.
-
-{{< admonition type="note" >}}
-If you are using multiple configs, you must manually set port numbers for
-each receiver, otherwise they will all try to use the same port and fail to
-start.
-{{< /admonition >}}
-
-```yaml
-configs:
-  [- <traces_instance_config> ... ]
-```
-
-## traces_instance_config
-
-```yaml
-# Name configures the name of this Tempo instance. Names must be non-empty and
-# unique across all Tempo instances. The value of the name here will appear in
-# logs and as a label on metrics.
-name: <string>
-
-# This field allows for the general manipulation of tags on spans that pass
-# through this agent. A common use may be to add an environment or cluster
-# variable.
-[ attributes: <attributes.config> ]
-
-# This field allows you to configure the grouping of spans into batches.
-# Batching helps compress the data and reduces the number of outgoing
-# connections required to transmit the data.
-[ batch: <batch.config> ]
-
-remote_write:
-  # host:port to send traces to.
-  # This must be the port of the gRPC receiver, not the Tempo default port.
-  # Example for cloud instances: `tempo-us-central1.grafana.net:443`
-  # For local / on-premises instances: `localhost:55680` or `tempo.example.com:14250`
-  # Note: for non-encrypted connections you must also set `insecure: true`
-  - endpoint: <string>
-
-    # Custom HTTP headers to be sent along with each remote write request.
-    # Be aware that the 'authorization' header will be overwritten in the
-    # presence of basic_auth.
-    headers:
-      [ <string>: <string> ... ]
-
-    # Controls whether compression is enabled.
-    [ compression: <string> | default = "gzip" | supported = "none", "gzip" ]
-
-    # Controls what protocol to use when exporting traces.
-    # Only "grpc" is supported in Grafana Cloud.
-    [ protocol: <string> | default = "grpc" | supported = "grpc", "http" ]
-
-    # Controls what format to use when exporting traces, in combination with protocol.
-    # protocol/format supported combinations are grpc/otlp and http/otlp.
-    # Only grpc/otlp is supported in Grafana Cloud.
-    [ format: <string> | default = "otlp" | supported = "otlp" ]
-
-    # Controls whether or not TLS is required. See https://godoc.org/google.golang.org/grpc#WithInsecure
-    [ insecure: <boolean> | default = false ]
-
-    # Deprecated in favor of tls_config.
-    # If both `insecure_skip_verify` and `tls_config.insecure_skip_verify` are used,
-    # the latter takes precedence.
-    [ insecure_skip_verify: <boolean> | default = false ]
-
-    # Configures opentelemetry exporters to use the OpenTelemetry auth extension `oauth2clientauthextension`.
-    # Cannot be used in combination with `basic_auth`.
-    # See https://github.com/open-telemetry/opentelemetry-collector-contrib/blob/{{< param "OTEL_VERSION" >}}/extension/oauth2clientauthextension/README.md
-    oauth2:
-      # Configures the TLS settings specific to the oauth2 client.
-      # The client identifier issued to the oauth client.
-      [ client_id: <string> ]
-      # The secret string associated with the oauth client.
-      [ client_secret: <string> ]
-      # Additional parameters for requests to the token endpoint.
-      [ endpoint_params: ]
-      # The resource server's token endpoint URL.
-      [ token_url: <string> ]
-      # Optional, requested permissions associated with the oauth client.
-      [ scopes: [] ]
-      # Optional, specifies the timeout fetching tokens from the token_url. Default: no timeout.
-      [ timeout: <duration> ]
-      # TLS client configuration for the underlying client to the authorization server.
- # https://github.com/open-telemetry/opentelemetry-collector/blob/{{< param "OTEL_VERSION" >}}/config/configtls/README.md - tls: - # Disable validation of the server certificate. - [ insecure: | default = false ] - # InsecureSkipVerify will enable TLS but not verify the certificate. - [ insecure_skip_verify: | default = false ] - # ServerName requested by client for virtual hosting. - # This sets the ServerName in the TLSConfig. Please refer to - # https://godoc.org/crypto/tls#Config for more information. - [ server_name_override: ] - # Path to the CA cert. For a client this verifies the server certificate. If empty uses system root CA. - [ ca_file: ] - # In memory PEM encoded cert. - [ ca_pem: ] - # Path to the TLS cert to use for TLS required connections - [ cert_file: ] - # In memory PEM encoded TLS cert to use for TLS required connections. - [ cert_pem: ] - # Path to the TLS key to use for TLS required connections - [ key_file: ] - # In memory PEM encoded TLS key to use for TLS required connections. - [ key_pem: ] - # Minimum acceptable TLS version. - [ min_version: | default = "1.2" ] - # Maximum acceptable TLS version. - # If not set, it is handled by crypto/tls - currently it is "1.3". - [ max_version: | default = "" ] - # ReloadInterval specifies the duration after which the certificate will be reloaded. - # If not set, it will never be reloaded. - [ reload_interval: ] - - # Controls TLS settings of the exporter's client: - # https://prometheus.io/docs/prometheus/2.45/configuration/configuration/#tls_config - # This should be used only if `insecure` is set to false - tls_config: - # Path to the CA cert. For a client this verifies the server certificate. If empty uses system root CA. - [ ca_file: ] - # Path to the TLS cert to use for TLS required connections - [ cert_file: ] - # Path to the TLS key to use for TLS required connections - [ key_file: ] - # Disable validation of the server certificate. - [ insecure_skip_verify: | default = false ] - - # Sets the `Authorization` header on every trace push with the - # configured username and password. - # password and password_file are mutually exclusive. - basic_auth: - [ username: ] - [ password: ] - [ password_file: ] - - [ sending_queue: ] - [ retry_on_failure: ] - -# This processor writes a well formatted log line to a logs instance for each span, root, or process -# that passes through the Agent. This allows for automatically building a mechanism for trace -# discovery and building metrics from traces using Loki. It should be considered experimental. -automatic_logging: - # Indicates where the stream of log lines should go. Either supports writing - # to a logs instance defined in this same config or to stdout. - [ backend: | default = "stdout" | supported = "stdout", "logs_instance" ] - # Indicates the logs instance to write logs to. - # Required if backend is set to logs_instance. - [ logs_instance_name: ] - # Log one line per span. Warning! possibly very high volume - [ spans: ] - # Log one line for every root span of a trace. - [ roots: ] - # Log one line for every process - [ processes: ] - # Additional span attributes to log - [ span_attributes: ] - # Additional process attributes to log - [ process_attributes: ] - # Timeout on writing logs to Loki when backend is "logs_instance." 
-  [ timeout: <duration> | default = 1ms ]
-  # Configures a set of key-value pairs that will be logged as labels.
-  # They need to be span or process attributes logged in the log line.
-  #
-  # This feature only applies when `backend = logs_instance`.
-  #
-  # Loki only accepts alphanumeric and "_" as valid characters for labels.
-  # Labels are sanitized by replacing invalid characters with underscores.
-  [ labels: ]
-  overrides:
-    [ logs_instance_tag: <string> | default = "traces" ]
-    [ service_key: <string> | default = "svc" ]
-    [ span_name_key: <string> | default = "span" ]
-    [ status_key: <string> | default = "status" ]
-    [ duration_key: <string> | default = "dur" ]
-    [ trace_id_key: <string> | default = "tid" ]
-
-# Receiver configurations are mapped directly into the OpenTelemetry receivers
-# block. At least one receiver is required.
-# The Agent uses OpenTelemetry {{< param "OTEL_VERSION" >}}. Refer to the corresponding receiver's config.
-#
-# Supported receivers: otlp, jaeger, kafka, opencensus and zipkin.
-receivers: <receivers>
-
-# A list of prometheus scrape configs. Targets discovered through these scrape
-# configs have their __address__ matched against the IP on incoming spans. If a
-# match is found, relabeling rules are applied.
-scrape_configs:
-  [- <scrape_config> ... ]
-# Defines what method is used when adding k/v to spans.
-# Options are `update`, `insert` and `upsert`.
-# `update` only modifies an existing k/v and `insert` only appends if the k/v
-# is not present. `upsert` does both.
-[ prom_sd_operation_type: <string> | default = "upsert" ]
-# Configures what methods to use to do association between spans and pods.
-# The PromSD processor matches the IP address of the metadata labels from the k8s API
-# with the IP address obtained from the specified pod association method.
-# If a match is found, the span is labeled.
-#
-# Options are `ip`, `net.host.ip`, `k8s.pod.ip`, `hostname` and `connection`.
-# - `ip`, `net.host.ip`, `k8s.pod.ip`, and `hostname` match span tags.
-# - `connection` inspects the context from the incoming requests (gRPC and HTTP).
-#
-# Tracing instrumentation is commonly responsible for tagging spans
-# with the IP address in the labels mentioned above.
-# If running on Kubernetes, `k8s.pod.ip` can be automatically attached via the
-# downward API. For example, if you're using OTel instrumentation libraries, set
-# OTEL_RESOURCE_ATTRIBUTES=k8s.pod.ip=$(POD_IP) to inject spans with the sender
-# pod's IP.
-#
-# By default, all methods are enabled, and evaluated in the order specified above.
-# Order of evaluation is honored when multiple methods are enabled.
-prom_sd_pod_associations:
-  [- <string> ... ]
-
-# spanmetrics supports aggregating Request, Error and Duration (R.E.D) metrics
-# from span data.
-#
-# spanmetrics generates two metrics from spans and uses remote_write or
-# OpenTelemetry Prometheus exporters to serve the metrics locally.
-#
-# In order to use the remote_write exporter, you have to configure a Prometheus
-# instance in the Agent and pass its name to the `metrics_instance` field.
-#
-# If you want to use the OpenTelemetry Prometheus exporter, you have to
-# configure handler_endpoint and then scrape that endpoint.
-#
-# The first generated metric is `calls`, a counter to compute requests.
-# The second generated metric is `latency`, a histogram to compute the
-# operation's duration.
-#
-# If you want to rename the generated metrics, you can configure the `namespace`
-# option of the prometheus exporter.
-#
-# This is an experimental feature of the OpenTelemetry Collector, and the
-# behavior may change in the future.
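-#
-# As an illustration (a sketch; "default" is an assumed metrics instance
-# name), a minimal spanmetrics setup that remote writes the generated metrics
-# could look like:
-#
-#   spanmetrics:
-#     metrics_instance: default
-#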
-spanmetrics:
-  # latency_histogram_buckets and dimensions are the same as the configs in
-  # spanmetricsprocessor.
-  [ latency_histogram_buckets: <spanmetricsprocessor.latency_histogram_buckets> ]
-  [ dimensions: <spanmetricsprocessor.dimensions> ]
-  # const_labels are labels that will always get applied to the exported
-  # metrics.
-  const_labels:
-    [ <string>: <string> ... ]
-  # Metrics are namespaced to `traces_spanmetrics` by default.
-  # They can be further namespaced, i.e. `{namespace}_traces_spanmetrics`.
-  [ namespace: <string> ]
-  # metrics_instance is the metrics instance used to remote write metrics.
-  [ metrics_instance: <string> ]
-  # handler_endpoint defines the endpoint where the OTel prometheus exporter will be exposed.
-  [ handler_endpoint: <string> ]
-  # dimensions_cache_size defines the size of the cache for storing Dimensions.
-  [ dimensions_cache_size: <int> | default = 1000 ]
-  # aggregation_temporality configures whether to reset the metrics after flushing.
-  # It can be either AGGREGATION_TEMPORALITY_CUMULATIVE or AGGREGATION_TEMPORALITY_DELTA.
-  [ aggregation_temporality: <string> | default = "AGGREGATION_TEMPORALITY_CUMULATIVE" ]
-  # metrics_flush_interval configures how often to flush generated metrics.
-  [ metrics_flush_interval: <duration> | default = 15s ]
-
-# tail_sampling supports tail-based sampling of traces in the agent.
-#
-# Policies can be defined that determine what traces are sampled and sent to the
-# backends and what traces are dropped.
-#
-# In order to make a correct sampling decision it's important that the agent has
-# a complete trace. This is achieved by waiting a given time for all the spans
-# before evaluating the trace.
-#
-# Tail sampling also supports multi-agent deployments, allowing you to group all
-# spans of a trace in the same agent by load balancing the spans by trace ID
-# between the instances.
-# * To make use of this feature, check load_balancing below *
-tail_sampling:
-  # policies define the rules by which traces will be sampled. Multiple policies
-  # can be added to the same pipeline.
-  policies:
-    [- <tailsamplingprocessor.policies> ... ]
-
-  # Time to wait before making a decision for a trace.
-  # Longer wait times reduce the probability of sampling an incomplete trace at
-  # the cost of higher memory usage.
-  [ decision_wait: <duration> | default = 5s ]
-
-  # Optional, number of traces kept in memory.
-  [ num_traces: <int> | default = 50000 ]
-
-  # Optional, expected number of new traces (helps in allocating data structures).
-  [ expected_new_traces_per_sec: <int> | default = 0 ]
-
-# load_balancing configures load balancing of spans across multi-agent deployments.
-# It ensures that all spans of a trace are sampled in the same instance.
-# It works by exporting spans based on their traceID via consistent hashing.
-#
-# Enabling this feature is required for "tail_sampling", "spanmetrics", and "service_graphs"
-# to work correctly when spans are ingested by multiple agent instances.
-#
-# Load balancing works by layering two pipelines and consistently exporting
-# spans belonging to a trace to the same agent instance.
-# Agent instances need to be able to communicate with each other via gRPC.
-#
-# When load_balancing is enabled:
-# 1. An Agent receives spans from the configured "receivers".
-# 2. If the "attributes" processor is configured, it runs through all the spans.
-# 3. The spans are exported using the "load_balancing" configuration to any of the Agent instances.
-#    This may or may not be the same Agent which has already received the span.
-# 4. The Agent which received the span from the load balancer runs these processors,
-#    in this order, if they are configured:
-#      1. "spanmetrics"
-#      2. 
"service_graphs" -# 3. "tail_sampling" -# 4. "automatic_logging" -# 5. "batch" -# 5. The spans are then remote written using the "remote_write" configuration. -# -# Load balancing significantly increases CPU usage. This is because spans are -# exported an additional time between agents. -load_balancing: - # resolver configures the resolution strategy for the involved backends - # It can be either "static", "dns" or "kubernetes". - resolver: - static: - # A fixed list of hostnames. - hostnames: - [ - ... ] - dns: - # DNS hostname from which to resolve IP addresses. - hostname: - # Port number to use with the resolved IP address when exporting spans. - [ port: | default = 4317 ] - # Resolver interval - [ interval: | default = 5s ] - # Resolver timeout - [ timeout: | default = 1s ] - # The kubernetes resolver receives IP addresses of a Kubernetes service - # from the Kubernetes API. It does not require polling. The Kubernetes API - # notifies the Agent when a new pod is available and when an old pod has exited. - # - # For the kubernetes resolver to work, Agent must be running under - # a system account with "list", "watch" and "get" permissions. - kubernetes: - service: - [ ports: | default = 4317 ] - - # routing_key can be either "traceID" or "service": - # * "service": exports spans based on their service name. - # * "traceID": exports spans based on their traceID. - [ routing_key: | default = "traceID" ] - - # receiver_port is the port the instance will use to receive load balanced traces - receiver_port: [ | default = 4318 ] - - # Load balancing is done via an otlp exporter. - # The remaining configuration is common with the remote_write block. - exporter: - # Controls whether compression is enabled. - [ compression: | default = "gzip" | supported = "none", "gzip"] - - # Controls whether or not TLS is required. - [ insecure: | default = false ] - - # Disable validation of the server certificate. Only used when insecure is set - # to false. - [ insecure_skip_verify: | default = false ] - - # Sets the `Authorization` header on every trace push with the - # configured username and password. - # password and password_file are mutually exclusive. - basic_auth: - [ username: ] - [ password: ] - [ password_file: ] - -# service_graphs configures processing of traces for building service graphs in -# the form of prometheus metrics. The generated metrics represent edges between -# nodes in the graph. Nodes are represented by `client` and `server` labels. -# -# e.g. tempo_service_graph_request_total{client="app", server="db"} 20 -# -# Service graphs works by inspecting spans and looking for the tag `span.kind`. -# If it finds the span kind to be client or server, it stores the request in a -# local in-memory store. -# -# That request waits until its corresponding client or server pair span is -# processed or until the maximum waiting time has passed. -# When either of those conditions is reached, the request is processed and -# removed from the local store. If the request is complete by that time, it'll -# be recorded as an edge in the graph. -# -# Service graphs supports multi-agent deployments, allowing to group all spans -# of a trace in the same agent by load balancing the spans by trace ID between -# the instances. -# * To make use of this feature, check load_balancing above * -service_graphs: - [ enabled: | default = false ] - - # configures the time the processor will wait since a span is consumed until - # it's considered expired if its paired has not been processed. 
- # - # increasing the waiting time will increase the percentage of paired spans. - # retaining unpaired spans for longer will make reaching max_items more likely. - [ wait: | default = 10s ] - - # configures the max amount of edges that will be stored in memory. - # - # spans that arrive to the processor that do not pair with an already - # processed span are dropped. - # - # a higher max number of items increases the max throughput of processed spans - # with a higher memory consumption. - [ max_items: | default = 10_000 ] - - # configures the number of workers that will process completed edges concurrently. - # as edges are completed, they get queued to be collected as metrics for the graph. - [ workers: | default = 10 ] - - # configures what status codes are considered as successful (e.g. HTTP 404). - # - # by default, a request is considered failed in the following cases: - # 1. HTTP status is not 2XX - # 1. gRPC status code is not OK - # 1. span status is Error - success_codes: - # http status codes not to be considered as failure - http: - [ - ... ] - # grpc status codes not to be considered as failure - grpc: - [ - ... ] - -# jaeger_remote_sampling configures one or more jaeger remote sampling extensions. -# For more details about the configuration please consult the OpenTelemetry documentation: -# https://github.com/open-telemetry/opentelemetry-collector-contrib/tree/{{< param "OTEL_VERSION" >}}/extension/jaegerremotesampling -# -# Example config: -# -# jaeger_remote_sampling: -# - source: -# remote: -# endpoint: jaeger-collector:14250 -# tls: -# insecure: true -# - source: -# reload_interval: 1s -# file: /etc/otelcol/sampling_strategies.json -# -jaeger_remote_sampling: - [ - ... ] -``` - -More information on the following types can be found on the documentation for their respective projects: - -* [`attributes.config`: OpenTelemetry-Collector](https://github.com/open-telemetry/opentelemetry-collector-contrib/tree/{{< param "OTEL_VERSION" >}}/processor/attributesprocessor) -* [`batch.config`: OpenTelemetry-Collector](https://github.com/open-telemetry/opentelemetry-collector/tree/{{< param "OTEL_VERSION" >}}/processor/batchprocessor) -* [`otlpexporter.sending_queue`: OpenTelemetry-Collector](https://github.com/open-telemetry/opentelemetry-collector/tree/{{< param "OTEL_VERSION" >}}/exporter/otlpexporter) -* [`otlpexporter.retry_on_failure`: OpenTelemetry-Collector](https://github.com/open-telemetry/opentelemetry-collector/tree/{{< param "OTEL_VERSION" >}}/exporter/otlpexporter) -* `receivers`: - * [`jaegerreceiver`: OpenTelemetry-Collector-Contrib](https://github.com/open-telemetry/opentelemetry-collector-contrib/tree/{{< param "OTEL_VERSION" >}}/receiver/jaegerreceiver) - * [`kafkareceiver`: OpenTelemetry-Collector-Contrib](https://github.com/open-telemetry/opentelemetry-collector-contrib/tree/{{< param "OTEL_VERSION" >}}/receiver/kafkareceiver) - * [`otlpreceiver`: OpenTelemetry-Collector](https://github.com/open-telemetry/opentelemetry-collector/tree/{{< param "OTEL_VERSION" >}}/receiver/otlpreceiver) - * [`opencensusreceiver`: OpenTelemetry-Collector-Contrib](https://github.com/open-telemetry/opentelemetry-collector-contrib/tree/{{< param "OTEL_VERSION" >}}/receiver/opencensusreceiver) - * [`zipkinreceiver`: OpenTelemetry-Collector-Contrib](https://github.com/open-telemetry/opentelemetry-collector-contrib/tree/{{< param "OTEL_VERSION" >}}/receiver/zipkinreceiver) -* [`scrape_config`: 
Prometheus](https://prometheus.io/docs/prometheus/2.45/configuration/configuration/#scrape_config) -* [`spanmetricsprocessor.latency_histogram_buckets`: OpenTelemetry-Collector-Contrib](https://github.com/open-telemetry/opentelemetry-collector-contrib/blob/{{< param "OTEL_VERSION" >}}/processor/spanmetricsprocessor/config.go#L37-L39) -* [`spanmetricsprocessor.dimensions`: OpenTelemetry-Collector-Contrib](https://github.com/open-telemetry/opentelemetry-collector-contrib/blob/{{< param "OTEL_VERSION" >}}/processor/spanmetricsprocessor/config.go#L41-L48) -* [`tailsamplingprocessor.policies`: OpenTelemetry-Collector-Contrib](https://github.com/open-telemetry/opentelemetry-collector-contrib/tree/{{< param "OTEL_VERSION" >}}/processor/tailsamplingprocessor) diff --git a/docs/sources/static/operation-guide/_index.md b/docs/sources/static/operation-guide/_index.md deleted file mode 100644 index f50c335574..0000000000 --- a/docs/sources/static/operation-guide/_index.md +++ /dev/null @@ -1,205 +0,0 @@ ---- -aliases: -- ../operation-guide/ -- /docs/grafana-cloud/monitor-infrastructure/agent/static/operation-guide/ -- /docs/grafana-cloud/send-data/agent/static/operation-guide/ -canonical: https://grafana.com/docs/agent/latest/static/operation-guide/ -description: Learn how to operate Grafana Agent -title: Operation guide -weight: 700 ---- - -# Operation guide - -This guide helps you operate Grafana Agent. - -## Horizontal Scaling - -There are three options to horizontally scale your deployment of Grafana Agents: - -- [Host filtering](#host-filtering-beta) requires you to run one Agent on every - machine you wish to collect metrics from. Agents will only collect metrics - from the machines they run on. -- [Hashmod sharding](#hashmod-sharding-stable) allows you to roughly shard the - discovered set of targets by using hashmod/keep relabel rules. -- The [scraping service][scrape] allows you to cluster Grafana - Agents and have them distribute per-tenant configs throughout the cluster. - -Each has their own set of tradeoffs: - -- Host Filtering (Beta) - - Pros - - Does not need specialized configs per agent - - No external dependencies required to operate - - Cons - - Can cause significant load on service discovery APIs - - Requires each Agent to have the same list of scrape configs/remote_writes -- Hashmod sharding (Stable) - - Pros - - Exact control on the number of shards to run - - Smaller load on SD compared to host filtering (as there are a smaller # of - Agents) - - No external dependencies required to operate - - Cons - - Each Agent must have a specialized config with their shard number inserted - into the hashmod/keep relabel rule pair. - - Requires each Agent to have the same list of scrape configs/remote_writes, - with the exception of the hashmod rule being different. - - Hashmod is not [consistent hashing](https://en.wikipedia.org/wiki/Consistent_hashing), - so up to 100% of jobs will move to a new machine when scaling shards. -- Scraping service (Beta) - - Pros - - Agents don't have to have a synchronized set of scrape configs / remote_writes - (they pull from a centralized location). - - Exact control on the number of shards to run. - - Uses [consistent hashing](https://en.wikipedia.org/wiki/Consistent_hashing), - so only 1/N jobs will move to a new machine when scaling shards. - - Smallest load on SD compared to host filtering, as only one Agent is - responsible for a config. - - Cons - - Centralized configs must discover a [minimal set of targets][targets] - to distribute evenly. 
- - Requires running a separate KV store to store the centralized configs. - - Managing centralized configs adds operational burden over managing a config - file. - -## Host filtering (Beta) - -Host filtering implements a form of "dumb sharding," where operators may deploy -one Grafana Agent instance per machine in a cluster, all using the same -configuration, and the Grafana Agents will only scrape targets that are -running on the same node as the Agent. - -Running with `host_filter: true` means that if you have a target whose host -machine is not also running a Grafana Agent process, _that target will not -be scraped!_ - -Host filtering is usually paired with a dedicated Agent process that is used for -scraping targets that are running outside of a given cluster. For example, when -running the Grafana Agent on GKE, you would have a DaemonSet with -`host_filter` for scraping in-cluster targets, and a single dedicated Deployment -for scraping other targets that are not running on a cluster node, such as the -Kubernetes control plane API. - -If you want to scale your scrape load without host filtering, you can use the -[scraping service][scrape] instead. - -The host name of the Agent is determined by reading `$HOSTNAME`. If `$HOSTNAME` -isn't defined, the Agent will use Go's [os.Hostname](https://golang.org/pkg/os/#Hostname) -to determine the hostname. - -The following meta-labels are used to determine if a target is running on the -same machine as the Agent: - -- `__address__` -- `__meta_consul_node` -- `__meta_dockerswarm_node_id` -- `__meta_dockerswarm_node_hostname` -- `__meta_dockerswarm_node_address` -- `__meta_kubernetes_pod_node_name` -- `__meta_kubernetes_node_name` -- `__host__` - -The final label, `__host__`, isn't a label added by any Prometheus service -discovery mechanism. Rather, `__host__` can be generated by using -`host_filter_relabel_configs`. This allows for custom relabeling -rules to determine the hostname where the predefined ones fail. Relabeling rules -added with `host_filter_relabel_configs` are temporary and just used for the -host_filtering mechanism. Full relabeling rules should be applied in the -appropriate `scrape_config` instead. - -Note that scrape_config `relabel_configs` do not apply to the host filtering -logic; only `host_filter_relabel_configs` will work. - -If the determined hostname matches any of the meta labels, the discovered target -is allowed. Otherwise, the target is ignored, and will not show up in the -[targets API][api]. - -## Hashmod sharding (Stable) - -Grafana Agents can be sharded by using a pair of hashmod/keep relabel rules. -These rules will hash the address of a target and modulus it with the number -of Agent shards that are running. - -```yaml -scrape_configs: -- job_name: some_job - # Add usual service discovery here, such as static_configs - relabel_configs: - - source_labels: [__address__] - modulus: 4 # 4 shards - target_label: __tmp_hash - action: hashmod - - source_labels: [__tmp_hash] - regex: ^1$ # This is the 2nd shard - action: keep -``` - -Add the `relabel_configs` to all of your scrape_config blocks. Ensure that each -running Agent shard has a different value for the `regex`; the first Agent shard -should have `^0$`, the second should have `^1$`, and so on, up to `^3$`. - -This sharding mechanism means each Agent will ignore roughly 1/N of the total -targets, where N is the number of shards. This allows for horizontal scaling the -number of Agents and distributing load between them. 
- -Note that the hashmod used here is not a consistent hashing algorithm; this -means that changing the number of shards may cause any number of targets to move -to a new shard, up to 100%. When moving to a new shard, any existing data in the -WAL from the old machine is effectively discarded. - -## Prometheus instances - -The Grafana Agent defines a concept of a Prometheus _Instance_, which is -its own mini Prometheus-lite server. The instance runs a combination of -Prometheus service discovery, scraping, a WAL for storage, and `remote_write`. - -Instances allow for fine grained control of what data gets scraped and where it -gets sent. Users can easily define two Instances that scrape different subsets -of metrics and send them to two completely different remote_write systems. - -Instances are especially relevant to the [scraping service mode][scrape], -where breaking up your scrape configs into multiple Instances is required for -sharding and balancing scrape load across a cluster of Agents. - -## Instance sharing (Stable) - -The v0.5.0 release of the Agent introduced the concept of _instance sharing_, -which combines scrape_configs from compatible instance configs into a single, -shared Instance. Instance configs are compatible when they have no differences -in configuration with the exception of what they scrape. `remote_write` configs -may also differ in the order which endpoints are declared, but the unsorted -`remote_writes` must still be an exact match. - -In the shared instances mode, the `name` field of `remote_write` configs is -ignored. The resulting `remote_write` configs will have a name identical to the -first six characters of the group name and the first six characters of the hash -from that `remote_write` config separated by a `-`. - -The shared instances mode is the new default, and the previous behavior is -deprecated. If you wish to restore the old behavior, set `instance_mode: distinct` -in the [`metrics_config`][metrics] block of your config file. - -Shared instances are completely transparent to the user with the exception of -exposed metrics. With `instance_mode: shared`, metrics for Prometheus components -(WAL, service discovery, remote_write, etc.) have a `instance_group_name` label, -which is the hash of all settings used to determine the shared instance. When -`instance_mode: distinct` is set, the metrics for Prometheus components will -instead have an `instance_name` label, which matches the name set on the -individual Instance config. It is recommended to use the default of -`instance_mode: shared` unless you don't mind the performance hit and really -need granular metrics. - -Users can use the [targets API][api] to see all scraped targets, and the name -of the shared instance they were assigned to. 
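-
-For example (a sketch; names and URLs are illustrative), the following two
-instance configs are compatible and would be combined into one shared
-instance, because they differ only in what they scrape:
-
-```yaml
-configs:
-  - name: app-a
-    scrape_configs:
-      - job_name: app-a
-        static_configs:
-          - targets: ['app-a:9090']
-    remote_write:
-      - url: http://cortex:9009/api/prom/push
-  - name: app-b
-    scrape_configs:
-      - job_name: app-b
-        static_configs:
-          - targets: ['app-b:9090']
-    remote_write:
-      - url: http://cortex:9009/api/prom/push
-```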
-
-{{% docs/reference %}}
-[scrape]: "/docs/agent/ -> /docs/agent//static/configuration/scraping-service"
-[scrape]: "/docs/grafana-cloud/ -> ../configuration/scraping-service"
-[targets]: "/docs/agent/ -> /docs/agent//static/configuration/scraping-service#best-practices"
-[targets]: "/docs/grafana-cloud/ -> ../configuration/scraping-service#best-practices"
-[api]: "/docs/agent/ -> /docs/agent//static/api#agent-api"
-[api]: "/docs/grafana-cloud/ -> ../api#agent-api"
-[metrics]: "/docs/agent/ -> /docs/agent//static/configuration/metrics-config"
-[metrics]: "/docs/grafana-cloud/ -> ../configuration/metrics-config"
-{{% /docs/reference %}}
diff --git a/docs/sources/static/release-notes.md b/docs/sources/static/release-notes.md
deleted file mode 100644
index 90afd41dfc..0000000000
--- a/docs/sources/static/release-notes.md
+++ /dev/null
@@ -1,1140 +0,0 @@
----
-aliases:
-- ../upgrade-guide/
-- ./upgrade-guide/
-- /docs/grafana-cloud/monitor-infrastructure/agent/static/release-notes/
-- /docs/grafana-cloud/send-data/agent/static/release-notes/
-canonical: https://grafana.com/docs/agent/latest/static/release-notes/
-description: Release notes for Grafana Agent static mode
-menuTitle: Release notes
-title: Release notes
-weight: 999
----
-
-# Release notes
-
-The release notes provide information about deprecations and breaking changes in Grafana Agent static mode.
-
-For a complete list of changes to Grafana Agent, with links to pull requests and related issues when available, refer to the [Changelog](https://github.com/grafana/agent/blob/main/CHANGELOG.md).
-
-> **Note:** These release notes are specific to Grafana Agent static mode.
-> Other release notes for the different Grafana Agent variants are contained on separate pages:
->
-> * [Static mode Kubernetes operator release notes][release-notes-operator]
-> * [Flow mode release notes][release-notes-flow]
-
-{{% docs/reference %}}
-[release-notes-operator]: "/docs/agent/ -> /docs/agent//operator/release-notes"
-[release-notes-operator]: "/docs/grafana-cloud/ -> ../operator/release-notes"
-
-[release-notes-flow]: "/docs/agent/ -> /docs/agent//flow/release-notes"
-[release-notes-flow]: "/docs/grafana-cloud/ -> /docs/agent//flow/release-notes"
-
-[Modules]: "/docs/agent/ -> /docs/agent//flow/concepts/modules"
-[Modules]: "/docs/grafana-cloud/ -> /docs/agent//flow/concepts/modules"
-{{% /docs/reference %}}
-
-## v0.38
-
-### Breaking change: support for exporting Jaeger traces removed
-
-The deprecated support for exporting Jaeger-formatted traces has been removed.
-To send traces to Jaeger, export OTLP-formatted data to a version of Jaeger
-that supports OTLP.
-
-## v0.37
-
-### Breaking change: The default value of `retry_on_http_429` is overridden to `true` for the `queue_config` in `remote_write` in `metrics` config.
-
-{{< admonition type="note" >}}
-The default set by Grafana Agent Static Mode is different from the default set by Prometheus.
-{{< /admonition >}}
-
-The Prometheus default value for `retry_on_http_429` is `false` for the `queue_config` in `remote_write`.
-This changed default setting allows the agent to retry sending data when it receives an HTTP 429 error and helps avoid losing data in metric pipelines.
-
-* If you explicitly set `retry_on_http_429`, no action is required.
-* If you do not explicitly set `retry_on_http_429` and you do *not* want to retry on HTTP 429, make sure you set it to `false` when you upgrade to this new version.
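-
-For example (a sketch; the endpoint is illustrative), to keep the old behavior
-you would set the field explicitly:
-
-```yaml
-metrics:
-  configs:
-    - name: default
-      remote_write:
-        - url: https://example.com/api/prom/push
-          queue_config:
-            retry_on_http_429: false
-```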
- -### Breaking change: Renamed `non_indexed_labels` Loki processing stage to `structured_metadata`. - -If you use the Loki processing stage in your Agent configuration, you must rename the `non_indexed_labels` pipeline stage definition to `structured_metadata`. - - -Old configuration example: - -```yaml - pipeline_stages: - - logfmt: - mapping: - app: - - non_indexed_labels: - app: -``` - -New configuration example: - -```yaml - pipeline_stages: - - logfmt: - mapping: - app: - - structured_metadata: - app: -``` - -## v0.35 - -### Breaking change: Jaeger remote sampling no longer configurable using the Jaeger receiver - -Jaeger remote sampling used to be configured using the Jaeger receiver configuration. This receiver was updated to a new version, where support for remote sampling in the receiver was removed. - -Jaeger remote sampling is available as a separate configuration field starting in v0.35.3. - -Old configuration example: - -```yaml -receivers: - jaeger: - protocols: - grpc: - remote_sampling: - strategy_file: /etc/agent/strategies.json - strategy_file_reload_interval: 1s -``` - -New configuration example: - -```yaml -jaeger_remote_sampling: - - source: - file: /etc/agent/strategies.json - reload_interval: 1s -``` - -### Breaking change: `auth` and `version` attributes from `walk_params` block of SNMP integration have been removed - -The SNMP integrations (both v1 and v2) wrap a new version of SNMP exporter which introduces a new configuration file format. -This new format separates the walk and metric mappings from the connection and authentication settings. This allows for easier configuration of different -auth params without having to duplicate the full walk and metric mapping. - -Old configuration example: - -```yaml - snmp_targets: - - name: network_switch_1 - address: 192.168.1.2 - module: if_mib - walk_params: public - auth: public - walk_params: - public: - retries: 2 - version: 2 - auth: - community: public -``` - -New configuration example: - -```yaml - snmp_targets: - - name: network_switch_1 - address: 192.168.1.2 - module: if_mib - walk_params: public - auth: public - walk_params: - public: - retries: 2 -``` - -See [Module and Auth Split Migration](https://github.com/prometheus/snmp_exporter/blob/main/auth-split-migration.md) for more details. - -### Removal of Dynamic Configuration - -The experimental feature Dynamic Configuration has been removed. The use case of dynamic configuration will be replaced -with [Modules][] in Grafana Agent Flow. - -### Breaking change: Removed and renamed tracing metrics - -In the traces subsystem for Static mode some metrics are removed and others are renamed. -The reason for the removal is a bug which caused the metrics to be incorrect if more than one instance of a traces configuration is specified. 
-
-Removed metrics:
-- "blackbox_exporter_config_last_reload_success_timestamp_seconds" (gauge)
-- "blackbox_exporter_config_last_reload_successful" (gauge)
-- "blackbox_module_unknown_total" (counter)
-- "traces_processor_tail_sampling_count_traces_sampled" (counter)
-- "traces_processor_tail_sampling_new_trace_id_received" (counter)
-- "traces_processor_tail_sampling_sampling_decision_latency" (histogram)
-- "traces_processor_tail_sampling_sampling_decision_timer_latency" (histogram)
-- "traces_processor_tail_sampling_sampling_policy_evaluation_error" (counter)
-- "traces_processor_tail_sampling_sampling_trace_dropped_too_early" (counter)
-- "traces_processor_tail_sampling_sampling_traces_on_memory" (gauge)
-- "traces_receiver_accepted_spans" (counter)
-- "traces_receiver_refused_spans" (counter)
-- "traces_exporter_enqueue_failed_log_records" (counter)
-- "traces_exporter_enqueue_failed_metric_points" (counter)
-- "traces_exporter_enqueue_failed_spans" (counter)
-- "traces_exporter_queue_capacity" (gauge)
-- "traces_exporter_queue_size" (gauge)
-
-Renamed metrics:
-- "traces_receiver_refused_spans" is renamed to "traces_receiver_refused_spans_total"
-- "traces_receiver_accepted_spans" is renamed to "traces_receiver_accepted_spans_total"
-- "traces_exporter_sent_metric_points" is renamed to "traces_exporter_sent_metric_points_total"
-
-## v0.33
-
-### Symbolic links in Docker containers removed
-
-We've removed the deprecated symbolic links to `/bin/agent*` in Docker
-containers, as planned in v0.31. If you're setting a custom entrypoint,
-use the new binaries that are prefixed with `/bin/grafana*`.
-
-### Deprecation of Dynamic Configuration
-
-[Dynamic Configuration](/docs/agent/v0.33/cookbook/dynamic-configuration/) will be removed in v0.34.
-The use case of dynamic configuration will be replaced with Modules in Grafana Agent Flow.
-
-## v0.32
-
-### Breaking change: `node_exporter` configuration options changed
-
-With the update of the `node_exporter` integration to use v1.5.0, configuration
-options for the `diskstats` collector have changed names:
-
-- `diskstats_ignored_devices` is now `diskstats_device_exclude` in the static
-  mode configuration.
-- `ignored_devices` is now `device_exclude` in the Flow component
-  configuration.
-
-## v0.31.1
-
-### Breaking change: all Windows executables are now zipped
-
-All release Windows `.exe` files are now zipped. Prior to v0.31, only
-`grafana-agent-installer.exe` was unzipped.
-
-This fixes an issue from v0.31.0 where all `.exe` files were accidentally left
-unzipped.
-
-## v0.31
-
-### Breaking change: binary names are now prefixed with `grafana-`
-
-As first announced in v0.29, release binary names are now prefixed with
-`grafana-`:
-
-- `agent` is now `grafana-agent`.
-- `agentctl` is now `grafana-agentctl`.
-
-For the `grafana/agent` Docker container, the entrypoint is now
-`/bin/grafana-agent`. A symbolic link from `/bin/agent` to the new binary has
-been added.
-
-For the `grafana/agentctl` Docker container, the entrypoint is now
-`/bin/grafana-agentctl`. A symbolic link from `/bin/agentctl` to the new binary
-has been added.
-
-These symbolic links will be removed in v0.33. Custom entrypoints must be
-updated prior to v0.33 to use the new binaries before the symbolic links get
-removed.
-
-## v0.30
-
-### Breaking change: `ebpf_exporter` integration removed
-
-The `ebpf_exporter` version bundled in the Agent used [bcc][] to compile eBPF
-programs at runtime. 
This made it hard to run successfully, as the -dynamic linking approach required a compiler, the correct kernel headers, as -well as an exact match of the libbpf toolchain on the host system. For these -reasons, we've decided to remove the `ebpf_exporter` integration. - -Running the `ebpf_exporter` integration is now deprecated and will result in -configuration errors. To continue using the same configuration file, remove the -`ebpf` block. - -[bcc]: https://github.com/iovisor/bcc - -## v0.29 - -### Breaking change: JSON-encoded traces from OTLP versions below 0.16.0 are no longer supported - -Grafana Agent's OpenTelemetry Collector dependency has been updated from -v0.55.0 to v0.61.0. OpenTelemetry Collector v0.58.0 [no longer -translates][translation-removal] from InstrumentationLibrary to Scope. - -This means that JSON-encoded traces that still use InstrumentationLibrary will -be dropped. To work around this issue, either send traces using protobuf or -update your OTLP protocol version to v0.16.0 or newer. - -[translation-removal]: https://github.com/open-telemetry/opentelemetry-collector/pull/5819 - -### Deprecation: binary names will be prefixed with `grafana-` in v0.31.0 - -The binary names `agent` and `agentctl` have been deprecated -and will be renamed to `grafana-agent` and `grafana-agentctl` respectively in -the v0.31.0 release. - -As part of this change, the Docker containers for the v0.31.0 release will -include symbolic links from the old binary names to the new binary names. - -There is no action to take at this time. - -## v0.26 - -### Breaking change: Deprecated YAML fields in `server` block removed - -The YAML fields which were first [deprecated in the v0.24.0 -release](#deprecation-on-yaml-fields-in-server-block-that-have-flags) have now -been removed, replaced by equivalent command line flags. Please refer to the -original deprecation notice for instructions for how to migrate to the command -line flags. - -### Breaking change: Reconcile sampling policies between Agent and OTel - -Configuring sampling policies in the `tail_sampling` block of the `traces` -block has been changed to be equal with the upstream configuration of the OTel -processor. It now requires that the policy `type` is specified. - -Old configuration: - -```yaml -traces: - configs: - - name: default - ... - tail_sampling: - policies: - - latency: - threshold_ms: 100 -``` - -New configuration: - -```yaml -traces: - configs: - - name: default - ... - tail_sampling: - policies: - - type: latency - latency: - threshold_ms: 100 -``` - -## v0.24 - -### Breaking change: Integrations renamed when `integrations-next` feature flag is used - -This change only applies to users utilizing the `integrations-next` feature -flag. Nothing is changed for configuring integrations when the feature flag is -not used. - -Most `integrations-next` integrations have been renamed to describe what -telemetry data they generate instead of the projects they are powered by. 
-
-* `consul_exporter` is now `consul`
-* `dnsmasq_exporter` is now `dnsmasq`
-* `elasticsearch_exporter` is now `elasticsearch`
-* `github_exporter` is now `github`
-* `kafka_exporter` is now `kafka`
-* `memcached_exporter` is now `memcached`
-* `mongodb_exporter` is now `mongodb`
-* `mysqld_exporter` is now `mysql`
-  * Note that it is `mysql` and _not_ `mysqld`
-* `postgres_exporter` is now `postgres`
-* `process_exporter` is now `process`
-* `redis_exporter` is now `redis`
-* `statsd_exporter` is now `statsd`
-* `windows_exporter` is now `windows`
-
-Keys in the `integrations` config block have changed to match the above:
-
-* `integrations.consul_exporter_configs` is now `integrations.consul_configs`
-* `integrations.dnsmasq_exporter_configs` is now `integrations.dnsmasq_configs`
-* `integrations.elasticsearch_exporter_configs` is now `integrations.elasticsearch_configs`
-* `integrations.github_exporter_configs` is now `integrations.github_configs`
-* `integrations.kafka_exporter_configs` is now `integrations.kafka_configs`
-* `integrations.memcached_exporter_configs` is now `integrations.memcached_configs`
-* `integrations.mongodb_exporter_configs` is now `integrations.mongodb_configs`
-* `integrations.mysqld_exporter_configs` is now `integrations.mysql_configs`
-* `integrations.postgres_exporter_configs` is now `integrations.postgres_configs`
-* `integrations.process_exporter` is now `integrations.process`
-* `integrations.redis_exporter_configs` is now `integrations.redis_configs`
-* `integrations.statsd_exporter` is now `integrations.statsd`
-* `integrations.windows_exporter` is now `integrations.windows`
-
-Integrations not listed here have not changed; `node_exporter` still has the
-same name.
-
-This change propagates to the label values generated by these integrations. For
-example, `job="integrations/redis_exporter"` will now be `job="redis"`.
-
-### Change: Separating YAML and command line flags
-
-As of this release, we are starting to separate what can be configured within
-the YAML file and what can be configured by command line flag. Previously,
-there was a lot of overlap: many things could be set by both command line flag
-and configuration file, with command line flags taking precedence.
-
-The configuration file will be used for settings that can be updated at runtime
-using the `/-/reload` endpoint or sending SIGHUP. Meanwhile, command line flags
-will be used for settings that must remain consistent throughout the process
-lifetime, such as the HTTP listen port.
-
-This conceptual change will require a number of breaking changes. This
-release focuses on the `server` block of the YAML, which has historically
-caused the most issues with the `/-/reload` endpoint working correctly.
-
-There may be more breaking changes in the future as we identify more settings
-that must remain static and move them to flags. These changes will either be
-moving a YAML field to a flag or moving a flag to a YAML field. After we are
-done with this migration, there will be no overlap between flags and the YAML
-file.
-
-### Deprecation on YAML fields in `server` block that have flags
-
-The `server` block is the most impacted by the separation of flags/fields.
-Instead of making a breaking change immediately, we are deprecating these
-fields. An example of the migration follows this note.
-
-> **NOTE**: These deprecated fields will be removed in the v0.26.0 release. We
-> will communicate removal timelines for other deprecated features once those
-> timelines are established.
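-
-For example (a sketch; the address and port are illustrative), a config file
-that previously set:
-
-```yaml
-server:
-  http_listen_address: 0.0.0.0
-  http_listen_port: 8080
-```
-
-would now pass the equivalent command line flag instead:
-
-```
--server.http.address=0.0.0.0:8080
-```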
-
-The following fields are now deprecated in favor of command line flags:
-
-* `server.register_instrumentation`
-* `server.graceful_shutdown_timeout`
-* `server.log_source_ips_enabled`
-* `server.log_source_ips_header`
-* `server.log_source_ips_regex`
-* `server.http_listen_network`
-* `server.http_listen_address`
-* `server.http_listen_port`
-* `server.http_listen_conn_limit`
-* `server.http_server_read_timeout`
-* `server.http_server_write_timeout`
-* `server.http_server_idle_timeout`
-* `server.grpc_listen_network`
-* `server.grpc_listen_address`
-* `server.grpc_listen_port`
-* `server.grpc_listen_conn_limit`
-* `server.grpc_server_max_recv_msg_size`
-* `server.grpc_server_max_send_msg_size`
-* `server.grpc_server_max_concurrent_streams`
-* `server.grpc_server_max_connection_idle`
-* `server.grpc_server_max_connection_age`
-* `server.grpc_server_max_connection_age_grace`
-* `server.grpc_server_keepalive_time`
-* `server.grpc_server_keepalive_timeout`
-* `server.grpc_server_min_time_between_pings`
-* `server.grpc_server_ping_without_stream_allowed`
-
-This is most of the fields; the remaining non-deprecated fields are
-`server.log_level`, `server.log_format`, `server.http_tls_config`, and
-`server.grpc_tls_config`, which support dynamic updating.
-
-### Breaking change: Removing support for dynamically updating deprecated server fields
-
-`/-/reload` will now fail if any of the deprecated server block fields have
-changed. It is still valid to change a non-deprecated field (for example,
-changing the log level).
-
-### Breaking change: Server-specific command line flags have changed
-
-The following flags are _new_:
-
-* `-server.http.enable-tls`
-* `-server.grpc.enable-tls`
-* `-server.http.address`
-* `-server.grpc.address`
-
-The following flags have been _removed_:
-
-* `-log.level` (replacement: use YAML field `server.log_level`)
-* `-log.format` (replacement: use YAML field `server.log_format`)
-* `-server.http-tls-cert-path` (replacement: use YAML field `server.http_tls_config`)
-* `-server.http-tls-key-path` (replacement: use YAML field `server.http_tls_config`)
-* `-server.http-tls-client-auth` (replacement: use YAML field `server.http_tls_config`)
-* `-server.http-tls-ca-path` (replacement: use YAML field `server.http_tls_config`)
-* `-server.grpc-tls-cert-path` (replacement: use YAML field `server.grpc_tls_config`)
-* `-server.grpc-tls-key-path` (replacement: use YAML field `server.grpc_tls_config`)
-* `-server.grpc-tls-client-auth` (replacement: use YAML field `server.grpc_tls_config`)
-* `-server.grpc-tls-ca-path` (replacement: use YAML field `server.grpc_tls_config`)
-* `-server.http-listen-address` (replacement: use the new `-server.http.address` flag, which combines host and port)
-* `-server.http-listen-port` (replacement: use the new `-server.http.address` flag, which combines host and port)
-* `-server.grpc-listen-address` (replacement: use the new `-server.grpc.address` flag, which combines host and port)
-* `-server.grpc-listen-port` (replacement: use the new `-server.grpc.address` flag, which combines host and port)
-* `-server.path-prefix` (no replacement; this flag was unsupported and caused undefined behavior when set)
-
-The following flags have been _renamed_:
-
-* `-server.log-source-ips-enabled` has been renamed to `-server.log.source-ips.enabled`
-* `-server.log-source-ips-header` has been renamed to `-server.log.source-ips.header`
-* `-server.log-source-ips-regex` has been renamed to `-server.log.source-ips.regex`
-* `-server.http-listen-network` has been renamed 
-* `-server.http-conn-limit` has been renamed to `-server.http.conn-limit`
-* `-server.http-read-timeout` has been renamed to `-server.http.read-timeout`
-* `-server.http-write-timeout` has been renamed to `-server.http.write-timeout`
-* `-server.http-idle-timeout` has been renamed to `-server.http.idle-timeout`
-* `-server.grpc-listen-network` has been renamed to `-server.grpc.network`
-* `-server.grpc-conn-limit` has been renamed to `-server.grpc.conn-limit`
-* `-server.grpc-max-recv-msg-size-bytes` has been renamed to `-server.grpc.max-recv-msg-size-bytes`
-* `-server.grpc-max-send-msg-size-bytes` has been renamed to `-server.grpc.max-send-msg-size-bytes`
-* `-server.grpc-max-concurrent-streams` has been renamed to `-server.grpc.max-concurrent-streams`
-
-### Breaking change: New TLS flags required for enabling TLS
-
-The two new flags, `-server.http.enable-tls` and `-server.grpc.enable-tls`,
-must now be provided for TLS support to be enabled.
-
-This is a change over the previous behavior, where TLS was automatically
-enabled when a certificate pair was provided.
-
-### Breaking change: Default HTTP/gRPC address changes
-
-The HTTP and gRPC listen addresses now default to `127.0.0.1:12345` and
-`127.0.0.1:12346` respectively.
-
-If you are running inside a container, you must change these to `0.0.0.0` to
-communicate with the agent's HTTP server externally.
-
-The listen addresses may be changed via `-server.http.address` and
-`-server.grpc.address` respectively.
-
-### Breaking change: Removal of `-reload-addr` and `-reload-port` flags
-
-The `-reload-addr` and `-reload-port` flags have been removed. They were
-initially added to work around an issue where reloading a changed server block
-would cause the primary HTTP server to restart. As the HTTP server settings are
-now static, this can no longer happen, and as such the flags have been removed.
-
-### Change: In-memory autoscrape for integrations-next
-
-This change is only relevant to those using the `integrations-next` feature flag.
-
-In-memory connections will now be used for autoscraping-enabled integrations.
-This is a change over the previous behavior, where autoscraping integrations
-would connect to themselves over the network. As a result of this change, the
-`integrations.client_config` field is no longer necessary and has been removed.
-
-## v0.22
-
-### `node_exporter` integration deprecated field names
-
-The following field names for the `node_exporter` integration are now deprecated:
-
-* `netdev_device_whitelist` is deprecated in favor of `netdev_device_include`.
-* `netdev_device_blacklist` is deprecated in favor of `netdev_device_exclude`.
-* `systemd_unit_whitelist` is deprecated in favor of `systemd_unit_include`.
-* `systemd_unit_blacklist` is deprecated in favor of `systemd_unit_exclude`.
-* `filesystem_ignored_mount_points` is deprecated in favor of
-  `filesystem_mount_points_exclude`.
-* `filesystem_ignored_fs_types` is deprecated in favor of
-  `filesystem_fs_types_exclude`.
-
-This change aligns with the equivalent flag names also being deprecated in the
-upstream node_exporter.
-
-Support for the old field names will be removed in a future version. A warning
-will be logged if the old field names are used when the integration is enabled.
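-
-As an illustration, migrating a `node_exporter` block is a one-for-one key
-rename. This is a minimal sketch: the field names come from the list above,
-while the regular expression values are only examples, not recommendations:
-
-```yaml
-integrations:
-  node_exporter:
-    enabled: true
-    # Previously netdev_device_blacklist:
-    netdev_device_exclude: "^lo$"
-    # Previously systemd_unit_whitelist:
-    systemd_unit_include: ".+\\.service"
-    # Previously filesystem_ignored_fs_types:
-    filesystem_fs_types_exclude: "^(tmpfs|overlay)$"
-```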
-
-## v0.21.2, v0.20.1
-
-### Disabling of config retrieval endpoints
-
-These two patch releases, as part of a fix for
-[CVE-2021-41090](https://github.com/grafana/agent/security/advisories/GHSA-9c4x-5hgq-q3wh),
-disable the `/-/config` and `/agent/api/v1/configs/{name}` endpoints by
-default. Pass the `--config.enable-read-api` flag at the command line to
-re-enable them.
-
-## v0.21
-
-### Integrations: Change in how instance labels are handled (Breaking change)
-
-Integrations will now use a SUO-specific `instance` label value. Integrations
-that apply to a whole machine or agent will continue to use
-`<agent machine hostname>:<agent listen port>`, but integrations that connect to an external
-system will now infer an appropriate value based on the config for that specific
-integration. Please refer to the documentation for each integration for which
-defaults are used.
-
-*Note:* In some cases, a default value for `instance` cannot be inferred. This
-is the case for mongodb_exporter and postgres_exporter if more than one SUO is
-being connected to. In these cases, the instance value can be manually set by
-configuring the `instance` field on the integration. This can also be useful if
-two agents infer the same value for instance for the same integration.
-
-As part of this change, the `agent_hostname` label is permanently affixed to
-self-scraped integrations and cannot be disabled. This disambiguates multiple
-agents using the same instance label for an integration, and allows users to
-identify which agents need to be updated with an override for `instance`.
-
-`use_hostname_label` and `replace_instance_label` are now both deprecated
-and ignored in the YAML file, permanently treated as true. A future release
-will remove these fields, causing YAML errors on load instead of being silently
-ignored.
-
-## v0.20
-
-### Traces: Changes to receiver's TLS config (Breaking change)
-
-Upgrading to OpenTelemetry v0.36.0 introduces a change in the receivers' TLS config.
-TLS params have been changed from being squashed to being in their own block.
-This affects the jaeger receiver's `remote_sampling` config.
-
-Example old config:
-
-```yaml
-receivers:
-  jaeger:
-    protocols:
-      grpc: null
-    remote_sampling:
-      strategy_file:
-      insecure: true
-```
-
-Example new config:
-
-```yaml
-receivers:
-  jaeger:
-    protocols:
-      grpc: null
-    remote_sampling:
-      strategy_file:
-      tls:
-        insecure: true
-```
-
-### Traces: push_config is no longer supported (Breaking change)
-
-`push_config` was deprecated in favor of `remote_write` in v0.14.0, while
-maintaining backwards compatibility.
-Refer to the [deprecation announcement](#tempo-push_config-deprecation) for how to upgrade.
-
-### Traces: legacy OTLP gRPC port no longer default port
-
-OTLP gRPC receivers listen at port `4317` by default, instead of at port `55680`.
-This is in line with the deprecation of the legacy OTLP port.
-
-To upgrade, point the client instrumentation push endpoint to `:4317` if using
-the default OTLP gRPC endpoint.
-
-## v0.19
-
-### Traces: Deprecation of "tempo" in config and metrics. (Deprecation)
-
-The term `tempo` in the config has been deprecated in favor of `traces`. This
-change is to make the intent clearer.
-
-Example old config:
-
-```yaml
-tempo:
-  configs:
-  - name: default
-    receivers:
-      jaeger:
-        protocols:
-          thrift_http:
-```
-
-Example new config:
-
-```yaml
-traces:
-  configs:
-  - name: default
-    receivers:
-      jaeger:
-        protocols:
-          thrift_http:
-```
-
-Any tempo metrics have been renamed from `tempo_*` to `traces_*`.
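-
-If you have dashboards, alerts, or recording rules that still reference the
-old metric names, a recursive search is a quick way to find them before
-upgrading. This is a minimal sketch; the directory names are only examples:
-
-```shell
-# List every file that still references the old tempo_* metric prefix.
-grep -rn 'tempo_' dashboards/ rules/
-```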
-
-
-### Tempo: split grouping by trace from tail sampling config (Breaking change)
-
-Load balancing traces between agent instances has been moved from an embedded
-functionality in tail sampling to its own configuration block.
-This is done because more processors, such as service graphs, benefit from
-consistently receiving all spans for a trace in the same agent.
-
-As a consequence, `tail_sampling.load_balancing` has been deprecated in favor of
-a `load_balancing` block. Also, `port` has been renamed to `receiver_port` and
-moved to the new `load_balancing` block.
-
-Example old config:
-
-```yaml
-tail_sampling:
-  policies:
-    - always_sample:
-  port: 4318
-  load_balancing:
-    exporter:
-      insecure: true
-    resolver:
-      dns:
-        hostname: agent
-        port: 4318
-```
-
-Example new config:
-
-```yaml
-tail_sampling:
-  policies:
-    - always_sample:
-load_balancing:
-  exporter:
-    insecure: true
-  resolver:
-    dns:
-      hostname: agent
-      port: 4318
-  receiver_port: 4318
-```
-
-### Metrics: Deprecation of "prometheus" in config. (Deprecation)
-
-The term `prometheus` in the config has been deprecated in favor of `metrics`.
-This change is to make it clearer when text refers to Prometheus or another
-Prometheus-like database, and when it refers to the configuration of Grafana
-Agent that sends metrics to one of those systems.
-
-Old configs will continue to work for now, but support for the old format will
-eventually be removed. To migrate your config, change the `prometheus` key to
-`metrics`.
-
-Example old config:
-
-```yaml
-prometheus:
-  configs:
-  - name: default
-    host_filter: false
-    scrape_configs:
-      - job_name: local_scrape
-        static_configs:
-          - targets: ['127.0.0.1:12345']
-            labels:
-              cluster: 'localhost'
-    remote_write:
-      - url: http://localhost:9009/api/prom/push
-```
-
-Example new config:
-
-```yaml
-metrics:
-  configs:
-  - name: default
-    host_filter: false
-    scrape_configs:
-      - job_name: local_scrape
-        static_configs:
-          - targets: ['127.0.0.1:12345']
-            labels:
-              cluster: 'localhost'
-    remote_write:
-      - url: http://localhost:9009/api/prom/push
-```
-
-### Tempo: prom_instance rename (Breaking change)
-
-As part of `prometheus` being renamed to `metrics`, the spanmetrics
-`prom_instance` field has been renamed to `metrics_instance`. This is a breaking
-change, and the old name will no longer work.
-
-Example old config:
-
-```yaml
-tempo:
-  configs:
-  - name: default
-    spanmetrics:
-      prom_instance: default
-```
-
-Example new config:
-
-```yaml
-tempo:
-  configs:
-  - name: default
-    spanmetrics:
-      metrics_instance: default
-```
-
-### Logs: Deprecation of "loki" in config. (Deprecation)
-
-The term `loki` in the config has been deprecated in favor of `logs`. This
-change is to make it clearer when text refers to Grafana Loki, and when it
-refers to the configuration of Grafana Agent that sends logs to Grafana Loki.
-
-Old configs will continue to work for now, but support for the old format will
-eventually be removed. To migrate your config, change the `loki` key to `logs`.
-
-Example old config:
-
-```yaml
-loki:
-  positions_directory: /tmp/loki-positions
-  configs:
-  - name: default
-    clients:
-      - url: http://localhost:3100/loki/api/v1/push
-    scrape_configs:
-    - job_name: system
-      static_configs:
-      - targets: ['localhost']
-        labels:
-          job: varlogs
-          __path__: /var/log/*log
-```
-
-Example new config:
-
-```yaml
-logs:
-  positions_directory: /tmp/loki-positions
-  configs:
-  - name: default
-    clients:
-      - url: http://localhost:3100/loki/api/v1/push
-    scrape_configs:
-    - job_name: system
-      static_configs:
-      - targets: ['localhost']
-        labels:
-          job: varlogs
-          __path__: /var/log/*log
-```
-
-### Tempo: Deprecation of "loki" in config. (Deprecation)
-
-As part of the `loki` to `logs` rename, parts of the automatic_logging component
-in Tempo have been updated to refer to `logs_instance` instead.
-
-Old configurations using `loki_name`, `loki_tag`, or `backend: loki` will
-continue to work as of this version, but support for the old config format
-will eventually be removed.
-
-Example old config:
-
-```yaml
-tempo:
-  configs:
-  - name: default
-    automatic_logging:
-      backend: loki
-      loki_name: default
-      spans: true
-      processes: true
-      roots: true
-      overrides:
-        loki_tag: tempo
-```
-
-Example new config:
-
-```yaml
-tempo:
-  configs:
-  - name: default
-    automatic_logging:
-      backend: logs_instance
-      logs_instance_name: default
-      spans: true
-      processes: true
-      roots: true
-      overrides:
-        logs_instance_tag: tempo
-```
-
-## v0.18
-
-### Tempo: Remote write TLS config
-
-Tempo `remote_write` now supports configuring TLS settings in the trace
-exporter's client. `insecure_skip_verify` is moved into this setting's block.
-
-Old configurations with `insecure_skip_verify` outside `tls_config` will continue
-to work as of this version, but support will eventually be removed.
-If both `insecure_skip_verify` and `tls_config.insecure_skip_verify` are used,
-then the latter takes precedence.
-
-Example old config:
-
-```
-tempo:
-  configs:
-  - name: default
-    remote_write:
-      - endpoint: otel-collector:55680
-        insecure: true
-        insecure_skip_verify: true
-```
-
-Example new config:
-
-```
-tempo:
-  configs:
-  - name: default
-    remote_write:
-      - endpoint: otel-collector:55680
-        insecure: true
-        tls_config:
-          insecure_skip_verify: true
-```
-
-## v0.15
-
-### Tempo: `automatic_logging` changes
-
-Tempo automatic logging previously assumed that the operator wanted to log
-to a Loki instance. With the addition of an option to log to stdout, a new
-field is required to maintain the old behavior.
-
-Example old config:
-
-```
-tempo:
-  configs:
-  - name: default
-    automatic_logging:
-      loki_name:
-```
-
-Example new config:
-
-```
-tempo:
-  configs:
-  - name: default
-    automatic_logging:
-      backend: loki
-      loki_name:
-```
-
-## v0.14
-
-### Scraping Service security change
-
-v0.14.0 changes the default behavior of the scraping service config management
-API to reject all configuration files that read credentials from a file on disk.
-This prevents malicious users from crafting an instance config file that reads
-arbitrary files on disk and sends their contents to remote endpoints.
-
-To revert to the old behavior, add `dangerous_allow_reading_files: true` in your
-`scraping_service` config.
-
-Example old config:
-
-```yaml
-prometheus:
-  scraping_service:
-    # ...
-```
-
-Example new config:
-
-```yaml
-prometheus:
-  scraping_service:
-    dangerous_allow_reading_files: true
-    # ...
-```
-
-### SigV4 config change
-
-v0.14.0 updates the internal Prometheus dependency to 2.26.0, which includes
-native support for SigV4, but uses a slightly different configuration structure
-than the Grafana Agent did.
-
-To migrate, remove the `enabled` key from your `sigv4` configs. If `enabled` was
-the only key, define sigv4 as an empty object: `sigv4: {}`.
-
-Example old config:
-
-```yaml
-sigv4:
-  enabled: true
-  region: us-east-1
-```
-
-Example new config:
-
-```yaml
-sigv4:
-  region: us-east-1
-```
-
-### Tempo: `push_config` deprecation
-
-`push_config` is now deprecated in favor of a `remote_write` array, which allows sending spans to multiple endpoints.
-`push_config` will be removed in a future release, and it is recommended to migrate to `remote_write` as soon as possible.
-
-To migrate, move the batch options outside the `push_config` block.
-Then, add a `remote_write` array and move the remainder of your `push_config` block inside it.
-
-Example old config:
-
-```yaml
-tempo:
-  configs:
-  - name: default
-    receivers:
-      otlp:
-        protocols:
-          grpc:
-    push_config:
-      endpoint: otel-collector:55680
-      insecure: true
-      batch:
-        timeout: 5s
-        send_batch_size: 100
-```
-
-Example migrated config:
-
-```yaml
-tempo:
-  configs:
-  - name: default
-    receivers:
-      otlp:
-        protocols:
-          grpc:
-    remote_write:
-      - endpoint: otel-collector:55680
-        insecure: true
-    batch:
-      timeout: 5s
-      send_batch_size: 100
-```
-
-
-## v0.12
-
-v0.12 had two breaking changes: the `tempo` and `loki` sections have been changed to require a list of `tempo`/`loki` configs rather than just one.
-
-### Tempo Config Change
-
-The Tempo config (`tempo` in the config file) has been changed to store
-configs within a `configs` list. This allows for defining multiple Tempo
-instances for collecting traces and forwarding them to different OTLP
-endpoints.
-
-To migrate, add a `configs:` array and move your existing config inside it.
-Give the element a `name: default` field.
-
-Each config must have a unique non-empty name. `default` is recommended for users
-that don't have other configs. The name of the config will be added as a
-`tempo_config` label for metrics.
-
-Example old config:
-
-```yaml
-tempo:
-  receivers:
-    jaeger:
-      protocols:
-        thrift_http:
-  attributes:
-    actions:
-    - action: upsert
-      key: env
-      value: prod
-  push_config:
-    endpoint: otel-collector:55680
-    insecure: true
-    batch:
-      timeout: 5s
-      send_batch_size: 100
-```
-
-Example migrated config:
-
-```yaml
-tempo:
-  configs:
-  - name: default
-    receivers:
-      jaeger:
-        protocols:
-          thrift_http:
-    attributes:
-      actions:
-      - action: upsert
-        key: env
-        value: prod
-    push_config:
-      endpoint: otel-collector:55680
-      insecure: true
-      batch:
-        timeout: 5s
-        send_batch_size: 100
-```
-
-### Loki Promtail Config Change
-
-The Loki Promtail config (`loki` in the config file) has been changed to store
-configs within a `configs` list. This allows for defining multiple Loki
-Promtail instances for collecting logs and forwarding them to different Loki
-servers.
-
-To migrate, add a `configs:` array and move your existing config inside it.
-Give the element a `name: default` field.
-
-Each config must have a unique non-empty name. `default` is recommended for users
-that don't have other configs. The name of the config will be added as a
-`loki_config` label for Loki Promtail metrics.
-
-Example old config:
-
-```yaml
-loki:
-  positions:
-    filename: /tmp/positions.yaml
-  clients:
-    - url: http://loki:3100/loki/api/v1/push
-  scrape_configs:
-  - job_name: system
-    static_configs:
-    - targets:
-      - localhost
-      labels:
-        job: varlogs
-        __path__: /var/log/*log
-```
-
-Example migrated config:
-
-```yaml
-loki:
-  configs:
-  - name: default
-    positions:
-      filename: /tmp/positions.yaml
-    clients:
-      - url: http://loki:3100/loki/api/v1/push
-    scrape_configs:
-    - job_name: system
-      static_configs:
-      - targets:
-        - localhost
-        labels:
-          job: varlogs
-          __path__: /var/log/*log
-```
diff --git a/docs/sources/static/set-up/_index.md b/docs/sources/static/set-up/_index.md
deleted file mode 100644
index 93fb9171bf..0000000000
--- a/docs/sources/static/set-up/_index.md
+++ /dev/null
@@ -1,17 +0,0 @@
----
-aliases:
-- ../set-up/
-- /docs/grafana-cloud/monitor-infrastructure/agent/static/set-up/
-- /docs/grafana-cloud/send-data/agent/static/set-up/
-canonical: https://grafana.com/docs/agent/latest/static/set-up/
-description: Learn how to set up Grafana Agent in static mode
-menuTitle: Set up static mode
-title: Set up Grafana Agent in static mode
-weight: 100
----
-
-# Set up Grafana Agent in static mode
-
-This section includes information that helps you get Grafana Agent in static mode installed and configured.
-
-{{< section >}}
diff --git a/docs/sources/static/set-up/deploy-agent.md b/docs/sources/static/set-up/deploy-agent.md
deleted file mode 100644
index 5325d3b71c..0000000000
--- a/docs/sources/static/set-up/deploy-agent.md
+++ /dev/null
@@ -1,393 +0,0 @@
----
-aliases:
-- /docs/grafana-cloud/monitor-infrastructure/agent/static/set-up/deploy-agent/
-- /docs/grafana-cloud/send-data/agent/static/set-up/deploy-agent/
-canonical: https://grafana.com/docs/agent/latest/static/set-up/deploy-agent/
-description: Learn how to deploy Grafana Agent in different topologies
-menuTitle: Deploy static mode
-title: Deploy Grafana Agent in static mode
-weight: 300
----
-
-{{< docs/shared source="agent" lookup="/deploy-agent.md" version="" >}}
-
-## For scalable ingestion of traces
-
-For small workloads, it is normal to have just one Agent handle all incoming spans with no need for load balancing.
-However, for large workloads, it is desirable to spread out the load of processing spans over multiple Agent instances.
-
-To scale the Agent for trace ingestion, do the following:
-1. Set up the `load_balancing` section of the Agent's `traces` config.
-2. Start multiple Agent instances, all with the same configuration, so that:
-   * Each Agent load balances using the same strategy.
-   * Each Agent processes spans in the same way.
-3. The cluster of Agents is now set up for load balancing. It works as follows:
-   1. Any of the Agents can receive spans from instrumented applications via the configured `receivers`.
-   2. When an Agent first receives spans, it forwards them to any of the Agents in the cluster according to the `load_balancing` configuration.
-
-
-
-### tail_sampling
-
-If some of the spans for a trace end up in a different Agent, `tail_sampling` will not sample correctly.
-Enabling `load_balancing` is necessary if `tail_sampling` is enabled and more than one Agent instance could be processing spans for the same trace.
-`load_balancing` will make sure that all spans of a given trace are processed by the same Agent instance.
-
-### spanmetrics
-
-All spans for a given `service.name` must be processed by the same `spanmetrics` Agent.
-
-To make sure that this is the case, set up `load_balancing` with `routing_key: service`.
-A configuration sketch follows after the next section.
-
-### service_graphs
-
-It is challenging to scale `service_graphs` over multiple Agent instances.
-* For `service_graphs` to work correctly, each "client" span must be paired
-  with a "server" span in order to calculate metrics such as span duration.
-* If a "client" span goes to one Agent, but a "server" span goes to another Agent,
-  then no single Agent will be able to pair the spans and a metric won't be generated.
-
-`load_balancing` can solve this problem partially if it is configured with `routing_key: traceID`.
-  * Each Agent will then be able to calculate a service graph for each "client"/"server" pair in a trace.
-  * However, it is possible to have a span with similar "server"/"client" values
-    in a different trace, processed by another Agent.
-    * If two different Agents process similar "server"/"client" spans,
-      they will generate the same service graph metric series.
-    * If the series from two Agents are the same, this will lead to issues
-      when writing them to the backend database.
-      * Users could differentiate the series by adding a label such as `"agent_id"`.
-        * Unfortunately, there is currently no method in the Agent to aggregate those series from different Agents and merge them into one series.
-        * A PromQL query could be used to aggregate the metrics from different Agents.
-        * If the metrics are stored in Grafana Mimir, cardinality issues due to `"agent_id"` labels can be solved using [Adaptive Metrics][adaptive-metrics].
-
-A simpler, more scalable alternative to generating service graph metrics in the Agent is to generate them entirely in the backend database.
-For example, service graphs can be [generated][tempo-servicegraphs] in Grafana Cloud by the Tempo traces database.
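-
-As a rough sketch, the `load_balancing` block described above ties these pieces
-together. The block below is illustrative rather than a complete, verified
-configuration: the hostname and ports are placeholders, and `routing_key` would
-be `service` for spanmetrics or `traceID` for tail sampling and service graphs:
-
-```yaml
-traces:
-  configs:
-    - name: default
-      receivers:
-        otlp:
-          protocols:
-            grpc:
-      load_balancing:
-        routing_key: service
-        exporter:
-          insecure: true
-        resolver:
-          dns:
-            # Placeholder DNS name that resolves to every Agent in the cluster.
-            hostname: agent-cluster.example.local
-            port: 34621
-        receiver_port: 34621
-```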
- -[tempo-servicegraphs]: https://grafana.com/docs/tempo/latest/metrics-generator/service_graphs/ -[adaptive-metrics]: https://grafana.com/docs/grafana-cloud/cost-management-and-billing/reduce-costs/metrics-costs/control-metrics-usage-via-adaptive-metrics/ - -### Example Kubernetes configuration -{{< collapse title="Example Kubernetes configuration with DNS load balancing" >}} -```yaml -apiVersion: v1 -kind: Namespace -metadata: - name: grafana-cloud-monitoring ---- -apiVersion: v1 -kind: Service -metadata: - name: agent-traces - namespace: grafana-cloud-monitoring -spec: - ports: - - name: agent-traces-otlp-grpc - port: 9411 - protocol: TCP - targetPort: 9411 - selector: - name: agent-traces ---- -apiVersion: apps/v1 -kind: Deployment -metadata: - name: k6-trace-generator - namespace: grafana-cloud-monitoring -spec: - minReadySeconds: 10 - replicas: 1 - revisionHistoryLimit: 1 - selector: - matchLabels: - name: k6-trace-generator - template: - metadata: - labels: - name: k6-trace-generator - spec: - containers: - - env: - - name: ENDPOINT - value: agent-traces-headless.grafana-cloud-monitoring.svc.cluster.local:9411 - image: ghcr.io/grafana/xk6-client-tracing:v0.0.2 - imagePullPolicy: IfNotPresent - name: k6-trace-generator ---- -apiVersion: apps/v1 -kind: Deployment -metadata: - name: agent-traces - namespace: grafana-cloud-monitoring -spec: - minReadySeconds: 10 - replicas: 3 - revisionHistoryLimit: 1 - selector: - matchLabels: - name: agent-traces - template: - metadata: - labels: - name: agent-traces - spec: - containers: - - args: - - -config.file=/etc/agent/agent.yaml - command: - - /bin/grafana-agent - image: grafana/agent:v0.38.0 - imagePullPolicy: IfNotPresent - name: agent-traces - ports: - - containerPort: 9411 - name: otlp-grpc - protocol: TCP - - containerPort: 34621 - name: agent-lb - protocol: TCP - volumeMounts: - - mountPath: /etc/agent - name: agent-traces - volumes: - - configMap: - name: agent-traces - name: agent-traces ---- -apiVersion: v1 -kind: Service -metadata: - name: agent-traces-headless - namespace: grafana-cloud-monitoring -spec: - clusterIP: None - ports: - - name: agent-lb - port: 34621 - protocol: TCP - targetPort: agent-lb - selector: - name: agent-traces - type: ClusterIP ---- -apiVersion: v1 -kind: ConfigMap -metadata: - name: agent-traces - namespace: grafana-cloud-monitoring -data: - agent.yaml: | - traces: - configs: - - name: default - load_balancing: - exporter: - insecure: true - resolver: - dns: - hostname: agent-traces-headless.grafana-cloud-monitoring.svc.cluster.local - port: 34621 - timeout: 5s - interval: 60s - receiver_port: 34621 - receivers: - otlp: - protocols: - grpc: - endpoint: 0.0.0.0:9411 - remote_write: - - basic_auth: - username: 111111 - password: pass - endpoint: tempo-prod-06-prod-gb-south-0.grafana.net:443 - retry_on_failure: - enabled: false -``` -{{< /collapse >}} - -{{< collapse title="Example Kubernetes configuration with Kubernetes load balancing" >}} - -```yaml -apiVersion: v1 -kind: Namespace -metadata: - name: grafana-cloud-monitoring ---- -apiVersion: v1 -kind: ServiceAccount -metadata: - name: grafana-agent-traces - namespace: grafana-cloud-monitoring ---- -apiVersion: rbac.authorization.k8s.io/v1 -kind: Role -metadata: - name: grafana-agent-traces-role - namespace: grafana-cloud-monitoring -rules: -- apiGroups: - - "" - resources: - - endpoints - verbs: - - list - - watch - - get ---- -apiVersion: rbac.authorization.k8s.io/v1 -kind: RoleBinding -metadata: - name: grafana-agent-traces-rolebinding - namespace: 
grafana-cloud-monitoring -roleRef: - apiGroup: rbac.authorization.k8s.io - kind: Role - name: grafana-agent-traces-role -subjects: -- kind: ServiceAccount - name: grafana-agent-traces - namespace: grafana-cloud-monitoring ---- -apiVersion: v1 -kind: Service -metadata: - name: agent-traces - namespace: grafana-cloud-monitoring -spec: - ports: - - name: agent-traces-otlp-grpc - port: 9411 - protocol: TCP - targetPort: 9411 - selector: - name: agent-traces ---- -apiVersion: apps/v1 -kind: Deployment -metadata: - name: k6-trace-generator - namespace: grafana-cloud-monitoring -spec: - minReadySeconds: 10 - replicas: 1 - revisionHistoryLimit: 1 - selector: - matchLabels: - name: k6-trace-generator - template: - metadata: - labels: - name: k6-trace-generator - spec: - containers: - - env: - - name: ENDPOINT - value: agent-traces-headless.grafana-cloud-monitoring.svc.cluster.local:9411 - image: ghcr.io/grafana/xk6-client-tracing:v0.0.2 - imagePullPolicy: IfNotPresent - name: k6-trace-generator ---- -apiVersion: apps/v1 -kind: Deployment -metadata: - name: agent-traces - namespace: grafana-cloud-monitoring -spec: - minReadySeconds: 10 - replicas: 3 - revisionHistoryLimit: 1 - selector: - matchLabels: - name: agent-traces - template: - metadata: - labels: - name: agent-traces - spec: - containers: - - args: - - -config.file=/etc/agent/agent.yaml - command: - - /bin/grafana-agent - image: grafana/agent:v0.38.0 - imagePullPolicy: IfNotPresent - name: agent-traces - ports: - - containerPort: 9411 - name: otlp-grpc - protocol: TCP - - containerPort: 34621 - name: agent-lb - protocol: TCP - volumeMounts: - - mountPath: /etc/agent - name: agent-traces - serviceAccount: grafana-agent-traces - volumes: - - configMap: - name: agent-traces - name: agent-traces ---- -apiVersion: v1 -kind: Service -metadata: - name: agent-traces-headless - namespace: grafana-cloud-monitoring -spec: - clusterIP: None - ports: - - name: agent-lb - port: 34621 - protocol: TCP - targetPort: agent-lb - selector: - name: agent-traces - type: ClusterIP ---- -apiVersion: v1 -kind: ConfigMap -metadata: - name: agent-traces - namespace: grafana-cloud-monitoring -data: - agent.yaml: | - traces: - configs: - - name: default - load_balancing: - exporter: - insecure: true - resolver: - kubernetes: - service: agent-traces-headless - ports: - - 34621 - receiver_port: 34621 - receivers: - otlp: - protocols: - grpc: - endpoint: 0.0.0.0:9411 - remote_write: - - basic_auth: - username: 111111 - password: pass - endpoint: tempo-prod-06-prod-gb-south-0.grafana.net:443 - retry_on_failure: - enabled: false``` -``` - -{{< /collapse >}} - -You need to fill in correct OTLP credentials prior to running the above examples. 
-
-The example above can be started by using [k3d][]:
-
-```bash
-k3d cluster create grafana-agent-lb-test
-kubectl apply -f kubernetes_config.yaml
-```
-
-To delete the cluster, run:
-
-```bash
-k3d cluster delete grafana-agent-lb-test
-```
-
-[k3d]: https://k3d.io/v5.6.0/
diff --git a/docs/sources/static/set-up/install/_index.md b/docs/sources/static/set-up/install/_index.md
deleted file mode 100644
index 3e62fdbdf8..0000000000
--- a/docs/sources/static/set-up/install/_index.md
+++ /dev/null
@@ -1,47 +0,0 @@
----
-aliases:
-- ../
-- ../set-up/
-- /docs/grafana-cloud/monitor-infrastructure/agent/static/set-up/install/
-- /docs/grafana-cloud/send-data/agent/static/set-up/install/
-canonical: https://grafana.com/docs/agent/latest/static/set-up/install/
-description: Learn how to install Grafana Agent in static mode
-menuTitle: Install static mode
-title: Install Grafana Agent in static mode
-weight: 100
----
-
-# Install Grafana Agent in static mode
-
-You can install Grafana Agent in static mode on Docker, Kubernetes, Linux, macOS, or Windows.
-
-The following architectures are supported:
-
-- Linux: AMD64, ARM64
-- Windows: AMD64
-- macOS: AMD64 (Intel), ARM64 (Apple Silicon)
-- FreeBSD: AMD64
-
-{{< admonition type="note" >}}
-ppc64le builds are considered secondary release targets and do not have the same level of support and testing as other platforms.
-{{< /admonition >}}
-
-{{< section >}}
-
-{{< admonition type="note" >}}
-Installing Grafana Agent on other operating systems is possible, but is not recommended or supported.
-{{< /admonition >}}
-
-## Grafana Cloud
-
-Use the Grafana Agent [Kubernetes configuration](/docs/grafana-cloud/monitor-infrastructure/kubernetes-monitoring/configuration/) or follow the instructions for installing Grafana Agent in the [Walkthrough](/docs/grafana-cloud/monitor-infrastructure/integrations/get-started/).
-
-## Data collection
-
-By default, Grafana Agent sends anonymous usage information to Grafana Labs. Refer to [data collection][] for more information
-about what data is collected and how you can opt out.
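-
-For example, usage reporting can be turned off at startup. This is a sketch
-that assumes the `-disable-reporting` command line flag described in the data
-collection documentation; check that page for the exact flag supported by your
-version:
-
-```shell
-# Start the agent with anonymous usage reporting disabled (assumed flag name).
-grafana-agent -config.file=/etc/grafana-agent.yaml -disable-reporting
-```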
-
-{{% docs/reference %}}
-[data collection]: "/docs/agent/ -> /docs/agent//data-collection.md"
-[data collection]: "/docs/grafana-cloud/ -> /docs/grafana-cloud/send-data/agent/data-collection.md"
-{{% /docs/reference %}}
\ No newline at end of file
diff --git a/docs/sources/static/set-up/install/install-agent-binary.md b/docs/sources/static/set-up/install/install-agent-binary.md
deleted file mode 100644
index 8d53d83768..0000000000
--- a/docs/sources/static/set-up/install/install-agent-binary.md
+++ /dev/null
@@ -1,66 +0,0 @@
----
-aliases:
-- ../../set-up/install-agent-binary/
-- ../set-up/install-agent-binary/
-- /docs/grafana-cloud/monitor-infrastructure/agent/static/set-up/install/install-agent-binary/
-- /docs/grafana-cloud/send-data/agent/static/set-up/install/install-agent-binary/
-canonical: https://grafana.com/docs/agent/latest/static/set-up/install/install-agent-binary/
-description: Learn how to install Grafana Agent in static mode as a standalone binary
-menuTitle: Standalone
-title: Install Grafana Agent in static mode as a standalone binary
-weight: 700
----
-
-# Install Grafana Agent in static mode as a standalone binary
-
-Grafana Agent is distributed as a standalone binary for the following operating systems and architectures:
-
-* Linux: AMD64, ARM64, PPC64, S390X
-* macOS: AMD64 (Intel), ARM64 (Apple Silicon)
-* Windows: AMD64
-
-{{< admonition type="note" >}}
-ppc64le builds are considered secondary release targets and do not have the same level of support and testing as other platforms.
-{{< /admonition >}}
-
-The binary executable will run Grafana Agent in standalone mode. If you want to run Grafana Agent as a service, refer to the installation instructions for:
-
-* [Linux][linux]
-* [macOS][macos]
-* [Windows][windows]
-
-## Download Grafana Agent
-
-To download Grafana Agent as a standalone binary, perform the following steps.
-
-1. Navigate to the current Grafana Agent [release](https://github.com/grafana/agent/releases) page.
-
-1. Scroll down to the **Assets** section.
-
-1. Download the `grafana-agent` zip file that matches your operating system and machine’s architecture.
-
-1. Extract the package contents into a directory.
-
-1. If you are installing Grafana Agent on Linux, macOS, or FreeBSD, run the following command in a terminal:
-
-   ```shell
-   chmod +x EXTRACTED_BINARY
-   ```
-
-## Next steps
-
-* [Start Grafana Agent][start]
-* [Configure Grafana Agent][configure]
-
-{{% docs/reference %}}
-[linux]: "/docs/agent/ -> /docs/agent//static/set-up/install/install-agent-linux"
-[linux]: "/docs/grafana-cloud/ -> ./install-agent-linux"
-[macos]: "/docs/agent/ -> /docs/agent//static/set-up/install/install-agent-macos"
-[macos]: "/docs/grafana-cloud/ -> ./install-agent-macos"
-[windows]: "/docs/agent/ -> /docs/agent//static/set-up/install/install-agent-on-windows"
-[windows]: "/docs/grafana-cloud/ -> ./install-agent-on-windows"
-[start]: "/docs/agent/ -> /docs/agent//static/set-up/start-agent#standalone-binary"
-[start]: "/docs/grafana-cloud/ -> ../start-agent#standalone-binary"
-[configure]: "/docs/agent/ -> /docs/agent//static/configuration"
-[configure]: "/docs/grafana-cloud/ -> ../../configuration"
-{{% /docs/reference %}}
diff --git a/docs/sources/static/set-up/install/install-agent-docker.md b/docs/sources/static/set-up/install/install-agent-docker.md
deleted file mode 100644
index bece555966..0000000000
--- a/docs/sources/static/set-up/install/install-agent-docker.md
+++ /dev/null
@@ -1,78 +0,0 @@
----
-aliases:
-- ../../set-up/install-agent-docker/
-- ../set-up/install-agent-docker/
-- /docs/grafana-cloud/monitor-infrastructure/agent/static/set-up/install/install-agent-docker/
-- /docs/grafana-cloud/send-data/agent/static/set-up/install/install-agent-docker/
-canonical: https://grafana.com/docs/agent/latest/static/set-up/install/install-agent-docker/
-description: Learn how to run Grafana Agent in static mode in a Docker container
-menuTitle: Docker
-title: Run Grafana Agent in static mode in a Docker container
-weight: 200
----
-
-# Run Grafana Agent in static mode in a Docker container
-
-Grafana Agent is available as a Docker container image on the following platforms:
-
-* [Linux containers][] for AMD64 and ARM64.
-* [Windows containers][] for AMD64.
-
-[Linux containers]: #run-a-linux-docker-container
-[Windows containers]: #run-a-windows-docker-container
-
-## Before you begin
-
-* Install [Docker][] on your computer.
-* Create and save a Grafana Agent YAML [configuration file][configure] on your computer.
-
-[Docker]: https://docker.io
-
-## Run a Linux Docker container
-
-To run a Grafana Agent Docker container on Linux, run the following command in a terminal window:
-
-```shell
-docker run \
-  -v WAL_DATA_DIRECTORY:/etc/agent/data \
-  -v CONFIG_FILE_PATH:/etc/agent/agent.yaml \
-  grafana/agent:{{< param "AGENT_RELEASE" >}}
-```
-
-Replace the following:
-
-* `CONFIG_FILE_PATH`: The configuration file path on your Linux host system.
-* `WAL_DATA_DIRECTORY`: The directory used to store your metrics before sending them to Prometheus.
-
-{{< admonition type="note" >}}
-For the flags to work correctly, you must expose the paths on your Linux host to the Docker container through a bind mount.
-{{< /admonition >}}
-
-## Run a Windows Docker container
-
-To run a Grafana Agent Docker container on Windows, run the following command in a Windows command prompt:
-
-```shell
-docker run ^
-  -v WAL_DATA_DIRECTORY:C:\etc\grafana-agent\data ^
-  -v CONFIG_FILE_PATH:C:\etc\grafana-agent ^
-  grafana/agent:{{< param "AGENT_RELEASE" >}}-windows
-```
-
-Replace the following:
-
-* `CONFIG_FILE_PATH`: The configuration file path on your Windows host system.
-* `WAL_DATA_DIRECTORY`: The directory used to store your metrics before sending them to Prometheus. Old WAL data is cleaned up every hour and is used for recovery if the process crashes.
-
-{{< admonition type="note" >}}
-For the flags to work correctly, you must expose the paths on your Windows host to the Docker container through a bind mount.
-{{< /admonition >}}
-
-## Next steps
-
-- [Start Grafana Agent][start]
-- [Configure Grafana Agent][configure]
-
-{{% docs/reference %}}
-[start]: "/docs/agent/ -> /docs/agent//static/set-up/start-agent"
-[start]: "/docs/grafana-cloud/ -> ../start-agent"
-[configure]: "/docs/agent/ -> /docs/agent//static/configuration/create-config-file"
-[configure]: "/docs/grafana-cloud/ -> ../../configuration/create-config-file"
-{{% /docs/reference %}}
diff --git a/docs/sources/static/set-up/install/install-agent-kubernetes.md b/docs/sources/static/set-up/install/install-agent-kubernetes.md
deleted file mode 100644
index d55a7d9af2..0000000000
--- a/docs/sources/static/set-up/install/install-agent-kubernetes.md
+++ /dev/null
@@ -1,63 +0,0 @@
----
-aliases:
-- /docs/grafana-cloud/monitor-infrastructure/agent/static/set-up/install/install-agent-kubernetes/
-- /docs/grafana-cloud/send-data/agent/static/set-up/install/install-agent-kubernetes/
-canonical: https://grafana.com/docs/agent/latest/static/set-up/install/install-agent-kubernetes/
-description: Learn how to deploy Grafana Agent in static mode on Kubernetes
-menuTitle: Kubernetes
-title: Deploy Grafana Agent in static mode on Kubernetes
-weight: 300
----
-
-# Deploy Grafana Agent in static mode on Kubernetes
-
-You can use the Helm chart for Grafana Agent to deploy Grafana Agent in static mode on Kubernetes.
-
-## Before you begin
-
-* Install [Helm][] on your computer.
-* Configure a Kubernetes cluster that you can use for Grafana Agent.
-* Configure your local Kubernetes context to point to the cluster.
-
-[Helm]: https://helm.sh
-
-## Deploy
-
-{{< admonition type="note" >}}
-These instructions show you how to install the generic [Helm chart](https://github.com/grafana/agent/tree/main/operations/helm/charts/grafana-agent) for Grafana Agent.
-You can deploy Grafana Agent in static mode or flow mode. The Helm chart deploys flow mode by default.
-{{< /admonition >}}
-
-To deploy Grafana Agent in static mode on Kubernetes using Helm, run the following commands in a terminal window:
-
-1. Add the Grafana Helm chart repository:
-
-   ```shell
-   helm repo add grafana https://grafana.github.io/helm-charts
-   ```
-
-1. Update the Grafana Helm chart repository:
-
-   ```shell
-   helm repo update
-   ```
-
-1. Install Grafana Agent in static mode:
-
-   ```shell
-   helm install <RELEASE_NAME> grafana/grafana-agent --set agent.mode=static
-   ```
-
-   Replace the following:
-
-   - _`<RELEASE_NAME>`_: The name to use for your Grafana Agent installation, such as `grafana-agent`.
-
-   {{< admonition type="warning" >}}
-   Always pass `--set agent.mode=static` in `helm install` or `helm upgrade` commands to ensure Grafana Agent gets installed in static mode.
-   Alternatively, set `agent.mode` to `static` in your values.yaml file.
-   {{< /admonition >}}
-
-For more information on the Grafana Agent Helm chart, refer to the Helm chart documentation on [Artifact Hub][].
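-
-As an alternative to passing `--set agent.mode=static` on every command, you
-can keep the mode in a values file, as the warning above suggests. This is a
-minimal sketch; the file name `values.yaml` is only a convention:
-
-```yaml
-# values.yaml: pin the chart to static mode.
-agent:
-  mode: static
-```
-
-Pass the file to Helm with `-f values.yaml` on `helm install` and every
-subsequent `helm upgrade` so the installation stays in static mode.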
-
-[Artifact Hub]: https://artifacthub.io/packages/helm/grafana/grafana-agent
-
diff --git a/docs/sources/static/set-up/install/install-agent-linux.md b/docs/sources/static/set-up/install/install-agent-linux.md
deleted file mode 100644
index 716a48df2a..0000000000
--- a/docs/sources/static/set-up/install/install-agent-linux.md
+++ /dev/null
@@ -1,225 +0,0 @@
----
-aliases:
-- ../../set-up/install-agent-linux/
-- ../set-up/install-agent-linux/
-- /docs/grafana-cloud/monitor-infrastructure/agent/static/set-up/install/install-agent-linux/
-- /docs/grafana-cloud/send-data/agent/static/set-up/install/install-agent-linux/
-canonical: https://grafana.com/docs/agent/latest/static/set-up/install/install-agent-linux/
-description: Learn how to install Grafana Agent in static mode on Linux
-menuTitle: Linux
-title: Install Grafana Agent in static mode on Linux
-weight: 400
----
-
-# Install Grafana Agent in static mode on Linux
-
-You can install Grafana Agent in static mode on Linux.
-
-## Install on Debian or Ubuntu
-
-To install Grafana Agent in static mode on Debian or Ubuntu, run the following commands in a terminal window.
-
-1. Import the GPG key and add the Grafana package repository:
-
-   ```shell
-   sudo mkdir -p /etc/apt/keyrings/
-   wget -q -O - https://apt.grafana.com/gpg.key | gpg --dearmor | sudo tee /etc/apt/keyrings/grafana.gpg > /dev/null
-   echo "deb [signed-by=/etc/apt/keyrings/grafana.gpg] https://apt.grafana.com stable main" | sudo tee /etc/apt/sources.list.d/grafana.list
-   ```
-
-1. Update the repositories:
-
-   ```shell
-   sudo apt-get update
-   ```
-
-1. Install Grafana Agent:
-
-   ```shell
-   sudo apt-get install grafana-agent
-   ```
-
-### Uninstall on Debian or Ubuntu
-
-To uninstall Grafana Agent on Debian or Ubuntu, run the following commands in a terminal window.
-
-1. Stop the systemd service for Grafana Agent:
-
-   ```shell
-   sudo systemctl stop grafana-agent
-   ```
-
-1. Uninstall Grafana Agent:
-
-   ```shell
-   sudo apt-get remove grafana-agent
-   ```
-
-1. Optional: Remove the Grafana repository:
-
-   ```shell
-   sudo rm -i /etc/apt/sources.list.d/grafana.list
-   ```
-
-## Install on RHEL or Fedora
-
-To install Grafana Agent in static mode on RHEL or Fedora, run the following commands in a terminal window.
-
-1. Import the GPG key:
-
-   ```shell
-   wget -q -O gpg.key https://rpm.grafana.com/gpg.key
-   sudo rpm --import gpg.key
-   ```
-
-1. Create `/etc/yum.repos.d/grafana.repo` with the following content:
-
-   ```shell
-   [grafana]
-   name=grafana
-   baseurl=https://rpm.grafana.com
-   repo_gpgcheck=1
-   enabled=1
-   gpgcheck=1
-   gpgkey=https://rpm.grafana.com/gpg.key
-   sslverify=1
-   sslcacert=/etc/pki/tls/certs/ca-bundle.crt
-   ```
-
-1. Optional: Verify the Grafana repository configuration:
-
-   ```shell
-   cat /etc/yum.repos.d/grafana.repo
-   ```
-
-1. Install Grafana Agent:
-
-   ```shell
-   sudo dnf install grafana-agent
-   ```
-
-### Uninstall on RHEL or Fedora
-
-To uninstall Grafana Agent on RHEL or Fedora, run the following commands in a terminal window:
-
-1. Stop the systemd service for Grafana Agent:
-
-   ```shell
-   sudo systemctl stop grafana-agent
-   ```
-
-1. Uninstall Grafana Agent:
-
-   ```shell
-   sudo dnf remove grafana-agent
-   ```
-
-1. Optional: Remove the Grafana repository:
-
-   ```shell
-   sudo rm -i /etc/yum.repos.d/grafana.repo
-   ```
-
-## Install on SUSE or openSUSE
-
-To install Grafana Agent in static mode on SUSE or openSUSE, run the following commands in a terminal window.
-
-1. Import the GPG key and add the Grafana package repository:
-
-   ```shell
-   wget -q -O gpg.key https://apt.grafana.com/gpg.key
-   sudo rpm --import gpg.key
-   sudo zypper addrepo https://rpm.grafana.com grafana
-   ```
-
-1. Update the repositories:
-
-   ```shell
-   sudo zypper update
-   ```
-
-1. Install Grafana Agent:
-
-   ```shell
-   sudo zypper install grafana-agent
-   ```
-
-### Uninstall on SUSE or openSUSE
-
-To uninstall Grafana Agent on SUSE or openSUSE, run the following commands in a terminal:
-
-1. Stop the systemd service for Grafana Agent:
-
-   ```shell
-   sudo systemctl stop grafana-agent
-   ```
-
-1. Uninstall Grafana Agent:
-
-   ```shell
-   sudo zypper remove grafana-agent
-   ```
-
-1. Optional: Remove the Grafana repository:
-
-   ```shell
-   sudo zypper removerepo grafana
-   ```
-
-## Operation guide
-
-Grafana Agent is configured as a [systemd](https://systemd.io/) service.
-
-### Start the Agent
-
-To run Grafana Agent, run the following in a terminal:
-
-   ```shell
-   sudo systemctl start grafana-agent
-   ```
-
-To check the status of Grafana Agent, run the following command in a terminal:
-
-   ```shell
-   sudo systemctl status grafana-agent
-   ```
-
-### Run Grafana Agent on startup
-
-To automatically run Grafana Agent when the system starts, run the following command in a terminal:
-
-   ```shell
-   sudo systemctl enable grafana-agent.service
-   ```
-
-### Configure Grafana Agent
-
-To configure Grafana Agent when installed on Linux, perform the following steps:
-
-1. Edit the default configuration file at `/etc/grafana-agent.yaml`.
-
-1. Run the following command in a terminal to reload the configuration file:
-
-   ```shell
-   sudo systemctl reload grafana-agent
-   ```
-
-### View Grafana Agent logs
-
-You can view Grafana Agent logs by running the following command in a terminal:
-
-   ```shell
-   sudo journalctl -u grafana-agent
-   ```
-
-## Next steps
-
-- [Start Grafana Agent][start]
-- [Configure Grafana Agent][configure]
-
-{{% docs/reference %}}
-[start]: "/docs/agent/ -> /docs/agent//static/set-up/start-agent"
-[start]: "/docs/grafana-cloud/ -> ../start-agent"
-[configure]: "/docs/agent/ -> /docs/agent//static/configuration/create-config-file"
-[configure]: "/docs/grafana-cloud/ -> ../../configuration/create-config-file"
-{{% /docs/reference %}}
diff --git a/docs/sources/static/set-up/install/install-agent-macos.md b/docs/sources/static/set-up/install/install-agent-macos.md
deleted file mode 100644
index c23bd59ec5..0000000000
--- a/docs/sources/static/set-up/install/install-agent-macos.md
+++ /dev/null
@@ -1,93 +0,0 @@
----
-aliases:
-- ../../set-up/install-agent-macos/
-- ../set-up/install-agent-macos/
-- /docs/grafana-cloud/monitor-infrastructure/agent/static/set-up/install/install-agent-macos/
-- /docs/grafana-cloud/send-data/agent/static/set-up/install/install-agent-macos/
-canonical: https://grafana.com/docs/agent/latest/static/set-up/install/install-agent-macos/
-description: Learn how to install Grafana Agent in static mode on macOS
-menuTitle: macOS
-title: Install Grafana Agent in static mode on macOS
-weight: 500
----
-
-# Install Grafana Agent in static mode on macOS
-
-You can install Grafana Agent in static mode on macOS with Homebrew.
-
-## Before you begin
-
-Install [Homebrew][] on your computer.
-
-{{< admonition type="note" >}}
-The default prefix for Homebrew on Intel is `/usr/local`. The default prefix for Homebrew on Apple Silicon is `/opt/homebrew`. To verify the default prefix for Homebrew on your computer, open a terminal window and type `brew --prefix`.
-
-{{< /admonition >}}
-
-[Homebrew]: https://brew.sh
-
-## Install
-
-To install Grafana Agent on macOS, run the following commands in a terminal window.
-
-1. Update Homebrew:
-
-   ```shell
-   brew update
-   ```
-
-1. Install Grafana Agent:
-
-   ```shell
-   brew install grafana-agent
-   ```
-
-Grafana Agent is installed by default at `$(brew --prefix)/Cellar/grafana-agent/VERSION`.
-
-## Upgrade
-
-To upgrade Grafana Agent on macOS, run the following commands in a terminal window.
-
-1. Upgrade Grafana Agent:
-
-   ```shell
-   brew upgrade grafana-agent
-   ```
-
-1. Restart Grafana Agent:
-
-   ```shell
-   brew services restart grafana-agent
-   ```
-
-## Uninstall
-
-To uninstall Grafana Agent on macOS, run the following command in a terminal window:
-
-```shell
-brew uninstall grafana-agent
-```
-
-## Configure
-
-1. To create the Agent `config.yml` file, open a terminal and run the following command:
-
-   ```shell
-   touch $(brew --prefix)/etc/grafana-agent/config.yml
-   ```
-
-1. Edit `$(brew --prefix)/etc/grafana-agent/config.yml` and add the configuration blocks for your specific telemetry needs. Refer to [Configure Grafana Agent][configure] for more information.
-
-{{< admonition type="note" >}}
-To send your data to Grafana Cloud, set up Grafana Agent using the Grafana Cloud integration. Refer to [how to install an integration](/docs/grafana-cloud/data-configuration/integrations/install-and-manage-integrations/) and [macOS integration](/docs/grafana-cloud/data-configuration/integrations/integration-reference/integration-macos-node/).
-{{< /admonition >}}
-
-## Next steps
-
-- [Start Grafana Agent][start]
-- [Configure Grafana Agent][configure]
-
-{{% docs/reference %}}
-[start]: "/docs/agent/ -> /docs/agent//static/set-up/start-agent"
-[start]: "/docs/grafana-cloud/ -> ../start-agent"
-[configure]: "/docs/agent/ -> /docs/agent//static/configuration/create-config-file"
-[configure]: "/docs/grafana-cloud/ -> ../../configuration/create-config-file"
-{{% /docs/reference %}}
diff --git a/docs/sources/static/set-up/install/install-agent-on-windows.md b/docs/sources/static/set-up/install/install-agent-on-windows.md
deleted file mode 100644
index ddda581a53..0000000000
--- a/docs/sources/static/set-up/install/install-agent-on-windows.md
+++ /dev/null
@@ -1,177 +0,0 @@
----
-aliases:
-- ../../set-up/install-agent-on-windows/
-- ../set-up/install-agent-on-windows/
-- /docs/grafana-cloud/monitor-infrastructure/agent/static/set-up/install/install-agent-on-windows/
-- /docs/grafana-cloud/send-data/agent/static/set-up/install/install-agent-on-windows/
-canonical: https://grafana.com/docs/agent/latest/static/set-up/install/install-agent-on-windows/
-description: Learn how to install Grafana Agent in static mode on Windows
-menuTitle: Windows
-title: Install Grafana Agent in static mode on Windows
-weight: 600
----
-
-# Install Grafana Agent in static mode on Windows
-
-You can install Grafana Agent in static mode on Windows as a standard graphical install, or as a silent install.
-
-## Standard install
-
-To do a standard graphical install of Grafana Agent on Windows, perform the following steps.
-
-1. Navigate to the [latest release](https://github.com/grafana/agent/releases) on GitHub.
-
-1. Scroll down to the **Assets** section.
-
-1. Download the file called `grafana-agent-installer.exe.zip`.
-
-1. Unzip the downloaded file.
-
-1. Double-click on `grafana-agent-installer.exe` to install Grafana Agent.
-
-   Grafana Agent is installed into the default directory `C:\Program Files\Grafana Agent`.
-
-   The following options are available:
-
-   - The [windows_exporter integration][windows_exporter_config] can be enabled with all default windows_exporter options.
-   - The [-config.expand-env][flags] command line flag can be enabled.
-
-## Silent install
-
-To do a silent install of Grafana Agent on Windows, perform the following steps.
-
-1. Navigate to the [latest release](https://github.com/grafana/agent/releases) on GitHub.
-
-1. Scroll down to the **Assets** section.
-
-1. Download the file called `grafana-agent-installer.exe.zip`.
-
-1. Unzip the downloaded file.
-
-1. Run the following command in PowerShell or Command Prompt:
-
-   ```shell
-   PATH_TO_INSTALLER/grafana-agent-installer.exe /S
-   ```
-
-   Replace `PATH_TO_INSTALLER` with the path where the unzipped installer executable is located.
-
-## Silent install with `remote_write`
-
-If you are using `remote_write`, you must enable Windows Exporter and set the global remote_write configuration.
-
-1. Navigate to the [latest release](https://github.com/grafana/agent/releases) on GitHub.
-
-1. Scroll down to the **Assets** section.
-
-1. Download the file called `grafana-agent-installer.exe.zip`.
-
-1. Unzip the downloaded file.
-
-1. Run the following command in PowerShell or Command Prompt:
-
-   ```shell
-   PATH_TO_INSTALLER/grafana-agent-installer.exe /S /EnableExporter true /Username USERNAME /Password PASSWORD /Url "http://example.com"
-   ```
-
-   Replace the following:
-
-   - `PATH_TO_INSTALLER`: The path where the unzipped installer executable is located.
-   - `USERNAME`: Your username.
-   - `PASSWORD`: Your password.
-
-   If you are using PowerShell, make sure you use triple quotes `"""http://example.com"""` around the URL parameter.
-
-## Silent install with `-config.expand-env`
-
-You can enable [-config.expand-env][flags] during a silent install.
-
-1. Navigate to the [latest release](https://github.com/grafana/agent/releases) on GitHub.
-
-1. Scroll down to the **Assets** section.
-
-1. Download the file called `grafana-agent-installer.exe.zip`.
-
-1. Unzip the downloaded file.
-
-1. Run the following command in PowerShell or Command Prompt:
-
-   ```shell
-   PATH_TO_INSTALLER/grafana-agent-installer.exe /S /ExpandEnv true
-   ```
-
-## Verify the installation
-
-1. Make sure you can access `http://localhost:12345/-/healthy` and `http://localhost:12345/agent/api/v1/metrics/targets`.
-
-1. Optional: You can adjust `C:\Program Files\Grafana Agent\agent-config.yaml` to meet your specific needs. After changing the configuration file, restart the Grafana Agent service to load changes to the configuration.
-
-Existing configuration files are kept when reinstalling or upgrading Grafana Agent.
-
-## Security
-
-A configuration file for Grafana Agent is provided by default at `C:\Program Files\Grafana Agent`. Depending on your configuration, you can modify the default permissions of the file or move it to another directory.
-
-If you change the location of the configuration file, do the following steps.
-
-1. Update the Grafana Agent service to load the new path.
-
-1. Run the following with Administrator privileges in PowerShell or Command Prompt:
-
-   ```shell
-   sc config "Grafana Agent" binpath= "INSTALLED_DIRECTORY\agent-windows-amd64.exe -config.file=\"PATH_TO_CONFIG\agent-config.yaml\""
-   ```
-
-   Replace `INSTALLED_DIRECTORY` with the directory where Grafana Agent is installed, and `PATH_TO_CONFIG` with the full path to your Grafana Agent configuration file.
-
-## Uninstall Grafana Agent
-
-You can uninstall Grafana Agent with Windows Remove Programs or `C:\Program Files\Grafana Agent\uninstaller.exe`.
-Uninstalling Grafana Agent will stop the service and remove it from disk. This includes any configuration files in the installation directory. - -Grafana Agent can also be silently uninstalled by running `uninstall.exe /S` as Administrator. - -## Push Windows logs to Grafana Loki - -Grafana Agent can use the embedded [promtail](/docs/loki/latest/clients/promtail/) to push Windows Event Logs to [Grafana Loki](https://github.com/grafana/loki). Example configuration below: - -```yaml -server: - log_level: debug -logs: - # Choose a directory to save the last read position of log files at. - # This directory will be created if it doesn't already exist. - positions_directory: "C:\\path\\to\\directory" - configs: - - name: windows - # Loki endpoint to push logs to - clients: - - url: https://example.com - scrape_configs: - - job_name: windows - windows_events: - # Note the directory structure must already exist but the file will be created on demand - bookmark_path: "C:\\path\\to\\bookmark\\directory\\bookmark.xml" - use_incoming_timestamp: false - eventlog_name: "Application" - # Filter for logs - xpath_query: '*' - labels: - job: windows -``` - -Refer to [windows_events](/docs/loki/latest/clients/promtail/configuration/#windows_events) for additional configuration details. - -## Next steps - -- [Start Grafana Agent][start] -- [Configure Grafana Agent][configure] - -{{% docs/reference %}} -[flags]: "/docs/agent/ -> /docs/agent//static/configuration/flags" -[windows_exporter_config]: "/docs/agent/ -> /docs/agent//static/configuration/integrations/windows-exporter-config" -[start]: "/docs/agent/ -> /docs/agent//static/set-up/start-agent" -[start]: "/docs/grafana-cloud/ -> ../start-agent" -[configure]: "/docs/agent/ -> /docs/agent//static/configuration/create-config-file" -[configure]: "/docs/grafana-cloud/ -> ../../configuration/create-config-file" -{{% /docs/reference %}} diff --git a/docs/sources/static/set-up/quick-starts.md b/docs/sources/static/set-up/quick-starts.md deleted file mode 100644 index 848630ab5b..0000000000 --- a/docs/sources/static/set-up/quick-starts.md +++ /dev/null @@ -1,29 +0,0 @@ ---- -aliases: -- ../../set-up/quick-starts/ -- /docs/grafana-cloud/monitor-infrastructure/agent/static/set-up/quick-starts/ -- /docs/grafana-cloud/send-data/agent/static/set-up/quick-starts/ -canonical: https://grafana.com/docs/agent/latest/static/set-up/quick-starts/ -description: Learn how to get started with Grafana Agent in static mode -menuTitle: Get started -title: Grafana Agent quick starts -weight: 300 ---- - -# Grafana Agent quick starts - -The following quick starts help you get up and running with Grafana Agent. You’ll learn how to send your metrics, logs, and traces to the Grafana Stack or Grafana Cloud. - -## Grafana Stack quick starts - -- [Send metrics to Mimir](/docs/mimir/latest/get-started/) using Grafana Agent. - -- [Send traces to Tempo](/docs/tempo/latest/getting-started/#2-pipeline-grafana-agent) using Grafana Agent. - -- [Send logs to Loki](/docs/grafana-cloud/logs/collect-logs-with-agent/) using Grafana Agent. - -## Grafana Cloud quick starts - -- [Grafana Agent for Grafana Cloud](/docs/grafana-cloud/monitor-infrastructure/integrations/get-started/). -- [Monitoring a Linux host](/docs/grafana-cloud/quickstart/agent_linuxnode/) using the Linux Node integration. -- [Grafana Agent Kubernetes configuration](/docs/grafana-cloud/monitor-infrastructure/kubernetes-monitoring/configuration/). 
diff --git a/docs/sources/static/set-up/start-agent.md b/docs/sources/static/set-up/start-agent.md deleted file mode 100644 index dfbb9b0117..0000000000 --- a/docs/sources/static/set-up/start-agent.md +++ /dev/null @@ -1,162 +0,0 @@ ---- -aliases: -- /docs/grafana-cloud/monitor-infrastructure/agent/static/set-up/start-agent/ -- /docs/grafana-cloud/send-data/agent/static/set-up/start-agent/ -canonical: https://grafana.com/docs/agent/latest/static/set-up/start-agent/ -description: Learn how to start, restart, and stop Grafana Agent in static mode -menuTitle: Start static mode -title: Start, restart, and stop Grafana Agent in static mode -weight: 200 ---- - -# Start, restart, and stop Grafana Agent in static mode - -You can start, restart, and stop Grafana Agent after it is installed. - -## Linux - -Grafana Agent is installed as a [systemd][] service on Linux. - -[systemd]: https://systemd.io/ - -### Start Grafana Agent - -To start Grafana Agent, run the following command in a terminal window: - -```shell -sudo systemctl start grafana-agent -``` - -(Optional) Verify that the service is running: - -```shell -sudo systemctl status grafana-agent -``` - -### Configure Grafana Agent to start at boot - -To automatically run Grafana Agent when the system starts, run the following command in a terminal window: - -```shell -sudo systemctl enable grafana-agent.service -``` - -### Restart Grafana Agent - -To restart Grafana Agent, run the following command in a terminal window: - -```shell -sudo systemctl restart grafana-agent -``` - -### Stop Grafana Agent - -To stop Grafana Agent, run the following command in a terminal window: - -```shell -sudo systemctl stop grafana-agent -``` - -### View Grafana Agent logs on Linux - -To view the Grafana Agent log files, run the following command in a terminal window: - -```shell -sudo journalctl -u grafana-agent -``` - -## macOS - -Grafana Agent is installed as a launchd service on macOS. - -### Start Grafana Agent - -To start Grafana Agent, run the following command in a terminal window: - -```shell -brew services start grafana-agent -``` - -Grafana Agent automatically runs when the system starts. - -Optional: Verify that the service is running: - -```shell -brew services info grafana-agent -``` - -### Restart Grafana Agent - -To restart Grafana Agent, run the following command in a terminal window: - -```shell -brew services restart grafana-agent -``` - -### Stop Grafana Agent - -To stop Grafana Agent, run the following command in a terminal window: - -```shell -brew services stop grafana-agent -``` - -### View Grafana Agent logs on macOS - -By default, logs are written to `$(brew --prefix)/var/log/grafana-agent.log` and -`$(brew --prefix)/var/log/grafana-agent.err.log`. - -If you followed [Configure][configure] steps in the macOS install instructions and changed the path where logs are written, refer to your current copy of the Grafana Agent formula to locate your log files. - -## Windows - -Grafana Agent is installed as a Windows Service. The service is configured to automatically run on startup. - -To verify that Grafana Agent is running as a Windows Service: - -1. Open the Windows Services manager (services.msc): - - 1. Right click on the Start Menu and select **Run**. - - 1. Type: `services.msc` and click **OK**. - -1. Scroll down to find the **Grafana Agent** service and verify that the **Status** is **Running**. 
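-
-As an alternative to the graphical Services manager, the same check can be done
-from PowerShell. This is a minimal sketch that assumes the default service name
-`Grafana Agent`:
-
-```shell
-# Show the current status of the Grafana Agent Windows service.
-Get-Service -Name "Grafana Agent"
-```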
- -### View Grafana Agent logs - -When running on Windows, Grafana Agent writes its logs to Windows Event -Logs with an event source name of **Grafana Agent**. - -To view the logs, perform the following steps: - -1. Open the Event Viewer: - - 1. Right click on the Start Menu and select **Run**. - - 1. Type `eventvwr` and click **OK**. - -1. In the Event Viewer, click on **Windows Logs > Application**. - -1. Search for events with the source **Grafana Agent**. - -## Standalone binary - -If you downloaded the standalone binary, you must run the agent from a terminal or command window. - -### Start Grafana Agent on Linux, macOS, FreeBSD, or Windows - -To start Grafana Agent on Linux, macOS, FreeBSD, or Windows run the following command in a terminal window or command prompt: - -```shell -BINARY_PATH -config.file CONFIG_FILE -``` - -Replace the following: - -* `BINARY_PATH`: The path to the Grafana Agent binary file -* `CONFIG_FILE`: The path to the Grafana Agent configuration file. - -{{% docs/reference %}} -[configure]: "/docs/agent/ -> /docs/agent//static/set-up/install/install-agent-macos#configure" -[configure]: "/docs/grafana-cloud/ -> ./install/install-agent-macos/#configure" -{{% /docs/reference %}} diff --git a/docs/sources/tasks/_index.md b/docs/sources/tasks/_index.md new file mode 100644 index 0000000000..da85a2fe2e --- /dev/null +++ b/docs/sources/tasks/_index.md @@ -0,0 +1,13 @@ +--- +canonical: https://grafana.com/docs/alloy/latest/tasks/ +description: How to perform common tasks with Grafana Alloy +menuTitle: Tasks +title: Tasks with Grafana Alloy +weight: 200 +--- + +# Tasks with {{% param "PRODUCT_NAME" %}} + +This section details how to perform common tasks with {{< param "PRODUCT_NAME" >}}. + +{{< section >}} diff --git a/docs/sources/flow/tasks/collect-opentelemetry-data.md b/docs/sources/tasks/collect-opentelemetry-data.md similarity index 81% rename from docs/sources/flow/tasks/collect-opentelemetry-data.md rename to docs/sources/tasks/collect-opentelemetry-data.md index 22248f9f70..935639e96a 100644 --- a/docs/sources/flow/tasks/collect-opentelemetry-data.md +++ b/docs/sources/tasks/collect-opentelemetry-data.md @@ -1,16 +1,5 @@ --- -aliases: -- /docs/grafana-cloud/agent/flow/tasks/collect-opentelemetry-data/ -- /docs/grafana-cloud/monitor-infrastructure/agent/flow/tasks/collect-opentelemetry-data/ -- /docs/grafana-cloud/monitor-infrastructure/integrations/agent/flow/tasks/collect-opentelemetry-data/ -- /docs/grafana-cloud/send-data/agent/flow/tasks/collect-opentelemetry-data/ -# Previous page aliases for backwards compatibility: -- /docs/grafana-cloud/agent/flow/getting-started/collect-opentelemetry-data/ -- /docs/grafana-cloud/monitor-infrastructure/agent/flow/getting-started/collect-opentelemetry-data/ -- /docs/grafana-cloud/monitor-infrastructure/integrations/agent/flow/getting-started/collect-opentelemetry-data/ -- /docs/grafana-cloud/send-data/agent/flow/getting-started/collect-opentelemetry-data/ -- ../getting-started/collect-opentelemetry-data/ # /docs/agent/latest/flow/getting-started/collect-opentelemetry-data/ -canonical: https://grafana.com/docs/agent/latest/flow/tasks/collect-opentelemetry-data/ +canonical: https://grafana.com/docs/alloy/latest/tasks/collect-opentelemetry-data/ description: Learn how to collect OpenTelemetry data title: Collect OpenTelemetry data weight: 300 @@ -18,8 +7,7 @@ weight: 300 # Collect OpenTelemetry data -{{< param "PRODUCT_NAME" >}} can be configured to collect [OpenTelemetry][]-compatible -data and forward it to any 
OpenTelemetry-compatible endpoint. +{{< param "PRODUCT_NAME" >}} can be configured to collect [OpenTelemetry][]-compatible data and forward it to any OpenTelemetry-compatible endpoint. This topic describes how to: @@ -318,18 +306,9 @@ For more information on receiving OpenTelemetry data using the OpenTelemetry Pro [OpenTelemetry]: https://opentelemetry.io [Configure an OpenTelemetry Protocol exporter]: #configure-an-opentelemetry-protocol-exporter [Configure batching]: #configure-batching - -{{% docs/reference %}} -[otelcol.auth.basic]: "/docs/agent/ -> /docs/agent//flow/reference/components/otelcol.auth.basic.md" -[otelcol.auth.basic]: "/docs/grafana-cloud/ -> /docs/grafana-cloud/send-data/agent/flow/reference/components/otelcol.auth.basic.md" -[otelcol.exporter.otlp]: "/docs/agent/ -> /docs/agent//flow/reference/components/otelcol.exporter.otlp.md" -[otelcol.exporter.otlp]: "/docs/grafana-cloud/ -> /docs/grafana-cloud/send-data/agent/flow/reference/components/otelcol.exporter.otlp.md" -[otelcol.exporter.otlphttp]: "/docs/agent/ -> /docs/agent//flow/reference/components/otelcol.exporter.otlphttp.md" -[otelcol.exporter.otlphttp]: "/docs/grafana-cloud/ -> /docs/grafana-cloud/send-data/agent/flow/reference/components/otelcol.exporter.otlphttp.md" -[otelcol.processor.batch]: "/docs/agent/ -> /docs/agent//flow/reference/components/otelcol.processor.batch.md" -[otelcol.processor.batch]: "/docs/grafana-cloud/ -> /docs/grafana-cloud/send-data/agent/flow/reference/components/otelcol.processor.batch.md" -[otelcol.receiver.otlp]: "/docs/agent/ -> /docs/agent//flow/reference/components/otelcol.receiver.otlp.md" -[otelcol.receiver.otlp]: "/docs/grafana-cloud/ -> /docs/grafana-cloud/send-data/agent/flow/reference/components/otelcol.receiver.otlp.md" -[Components]: "/docs/agent/ -> /docs/agent//flow/concepts/components.md" -[Components]: "/docs/grafana-cloud/ -> /docs/grafana-cloud/send-data/agent/flow/concepts/components.md" -{{% /docs/reference %}} +[otelcol.auth.basic]: ../../reference/components/otelcol.auth.basic/ +[otelcol.exporter.otlp]: ../../reference/components/otelcol.exporter.otlp/ +[otelcol.exporter.otlphttp]: ../../reference/components/otelcol.exporter.otlphttp/ +[otelcol.processor.batch]: ../../reference/components/otelcol.processor.batch/ +[otelcol.receiver.otlp]: ../../reference/components/otelcol.receiver.otlp/ +[Components]: ../../concepts/components/ diff --git a/docs/sources/flow/tasks/collect-prometheus-metrics.md b/docs/sources/tasks/collect-prometheus-metrics.md similarity index 87% rename from docs/sources/flow/tasks/collect-prometheus-metrics.md rename to docs/sources/tasks/collect-prometheus-metrics.md index 350ce1ccfd..a317f57780 100644 --- a/docs/sources/flow/tasks/collect-prometheus-metrics.md +++ b/docs/sources/tasks/collect-prometheus-metrics.md @@ -1,16 +1,5 @@ --- -aliases: -- /docs/grafana-cloud/agent/flow/tasks/collect-prometheus-metrics/ -- /docs/grafana-cloud/monitor-infrastructure/agent/flow/tasks/collect-prometheus-metrics/ -- /docs/grafana-cloud/monitor-infrastructure/integrations/agent/flow/tasks/collect-prometheus-metrics/ -- /docs/grafana-cloud/send-data/agent/flow/tasks/collect-prometheus-metrics/ -# Previous page aliases for backwards compatibility: -- /docs/grafana-cloud/agent/flow/getting-started/collect-prometheus-metrics/ -- /docs/grafana-cloud/monitor-infrastructure/agent/flow/getting-started/collect-prometheus-metrics/ -- /docs/grafana-cloud/monitor-infrastructure/integrations/agent/flow/getting-started/collect-prometheus-metrics/ -- 
/docs/grafana-cloud/send-data/agent/flow/getting-started/collect-prometheus-metrics/ -- ../getting-started/collect-prometheus-metrics/ # /docs/agent/latest/flow/getting-started/collect-prometheus-metrics/ -canonical: https://grafana.com/docs/agent/latest/flow/tasks/collect-prometheus-metrics/ +canonical: https://grafana.com/docs/alloy/latest/tasks/collect-prometheus-metrics/ description: Learn how to collect and forward Prometheus metrics title: Collect and forward Prometheus metrics weight: 200 @@ -436,16 +425,8 @@ prometheus.remote_write "default" { [Field Selectors]: https://kubernetes.io/docs/concepts/overview/working-with-objects/field-selectors/ [Labels and Selectors]: https://kubernetes.io/docs/concepts/overview/working-with-objects/labels/#set-based-requirement [Configure metrics delivery]: #configure-metrics-delivery - -{{% docs/reference %}} -[discovery.kubernetes]: "/docs/agent/ -> /docs/agent//flow/reference/components/discovery.kubernetes.md" -[discovery.kubernetes]: "/docs/grafana-cloud/ -> /docs/grafana-cloud/send-data/agent/flow/reference/components/discovery.kubernetes.md" -[prometheus.remote_write]: "/docs/agent/ -> /docs/agent//flow/reference/components/prometheus.remote_write.md" -[prometheus.remote_write]: "/docs/grafana-cloud/ -> /docs/grafana-cloud/send-data/agent/flow/reference/components/prometheus.remote_write.md" -[prometheus.scrape]: "/docs/agent/ -> /docs/agent//flow/reference/components/prometheus.scrape.md" -[prometheus.scrape]: "/docs/grafana-cloud/ -> /docs/grafana-cloud/send-data/agent/flow/reference/components/prometheus.scrape.md" -[Components]: "/docs/agent/ -> /docs/agent//flow/concepts/components.md" -[Components]: "/docs/grafana-cloud/ -> /docs/grafana-cloud/send-data/agent/flow/concepts/components.md" -[Objects]: "/docs/agent/ -> /docs/agent//flow/concepts/config-language/expressions/types_and_values.md#objects" -[Objects]: "/docs/grafana-cloud/ -> /docs/grafana-cloud/send-data/agent/flow/concepts/config-language/expressions/types_and_values.md#objects" -{{% /docs/reference %}} +[discovery.kubernetes]: ../../reference/components/discovery.kubernetes/ +[prometheus.remote_write]: ../../reference/components/prometheus.remote_write/ +[prometheus.scrape]: ../../reference/components/prometheus.scrape/ +[Components]: ../../concepts/components/ +[Objects]: ../../concepts/config-language/expressions/types_and_values/#objects diff --git a/docs/sources/tasks/configure-agent-clustering.md b/docs/sources/tasks/configure-agent-clustering.md new file mode 100644 index 0000000000..024f8a5392 --- /dev/null +++ b/docs/sources/tasks/configure-agent-clustering.md @@ -0,0 +1,60 @@ +--- +canonical: https://grafana.com/docs/alloy/latest/tasks/configure-agent-clustering/ +description: Learn how to configure Grafana Alloy clustering in an existing installation +menuTitle: Configure clustering +title: Configure Grafana Alloy clustering in an existing installation +weight: 400 +--- + +# Configure {{% param "PRODUCT_NAME" %}} clustering in an existing installation + +You can configure {{< param "PRODUCT_NAME" >}} to run with [clustering][] so that individual {{< param "PRODUCT_ROOT_NAME" >}}s can work together for workload distribution and high availability. + +{{< admonition type="note" >}} +Clustering is a [beta][] feature. Beta features are subject to breaking changes and may be replaced with equivalent functionality that covers the same use case. + +[beta]: ../../stability/#beta +{{< /admonition >}} + +This topic describes how to add clustering to an existing installation. 
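+
+For installations that aren't managed by Helm, clustering is enabled with flags on the `run` command. The following is a minimal sketch, assuming two hosts that can reach each other on the default HTTP port 12345; the host names and `CONFIG_FILE` path are placeholders, and the exact flags are described in the `run` command reference:
+
+```shell
+grafana-agent-flow run --cluster.enabled=true --cluster.join-addresses=agent-1:12345,agent-2:12345 CONFIG_FILE
+```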
+
+## Configure {{% param "PRODUCT_NAME" %}} clustering with Helm Chart
+
+This section guides you through enabling clustering when {{< param "PRODUCT_NAME" >}} is installed on Kubernetes using the {{< param "PRODUCT_ROOT_NAME" >}} [Helm chart][install-helm].
+
+### Before you begin
+
+- Ensure that your `values.yaml` file has `controller.type` set to `statefulset`.
+
+### Steps
+
+To configure clustering:
+
+1. Amend your existing `values.yaml` file to add `clustering.enabled=true` inside the `agent` block.
+
+   ```yaml
+   agent:
+     clustering:
+       enabled: true
+   ```
+
+1. Upgrade your installation to use the new `values.yaml` file:
+
+   ```bash
+   helm upgrade -f values.yaml
+   ```
+
+   Replace the following:
+
+   - _``_: The name of the installation you chose when you installed the Helm chart.
+
+1. Use the {{< param "PRODUCT_NAME" >}} [UI][] to verify the cluster status:
+
+   1. Click **Clustering** in the navigation bar.
+
+   1. Ensure that all expected nodes appear in the resulting table.
+
+[clustering]: ../../concepts/clustering/
+[beta]: ../../stability/#beta
+[install-helm]: ../../get-started/install/kubernetes/
+[UI]: ../debug/#component-detail-page
diff --git a/docs/sources/tasks/configure/_index.md b/docs/sources/tasks/configure/_index.md
new file mode 100644
index 0000000000..b8bff7751a
--- /dev/null
+++ b/docs/sources/tasks/configure/_index.md
@@ -0,0 +1,22 @@
+---
+canonical: https://grafana.com/docs/alloy/latest/tasks/configure/
+description: Configure Grafana Alloy after it is installed
+menuTitle: Configure
+title: Configure Grafana Alloy
+weight: 90
+---
+
+# Configure {{% param "PRODUCT_NAME" %}}
+
+You can configure {{< param "PRODUCT_NAME" >}} after it is [installed][Install].
+The default River configuration file for {{< param "PRODUCT_NAME" >}} is located at:
+
+* Linux: `/etc/grafana-agent-flow.river`
+* macOS: `$(brew --prefix)/etc/grafana-agent-flow/config.river`
+* Windows: `C:\Program Files\Grafana Agent Flow\config.river`
+
+This section includes information that helps you configure {{< param "PRODUCT_NAME" >}}.
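+
+As a quick orientation before the platform-specific pages, a minimal River file can contain nothing more than a logging block. This is a sketch; the block and its `level` attribute are covered in the reference documentation:
+
+```river
+// Control the logs that the process emits about itself.
+logging {
+  level = "info"
+}
+```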
+ +{{< section >}} + +[Install]: ../../get-started/install/ diff --git a/docs/sources/flow/tasks/configure/configure-kubernetes.md b/docs/sources/tasks/configure/configure-kubernetes.md similarity index 55% rename from docs/sources/flow/tasks/configure/configure-kubernetes.md rename to docs/sources/tasks/configure/configure-kubernetes.md index 2941f68a42..822102f80f 100644 --- a/docs/sources/flow/tasks/configure/configure-kubernetes.md +++ b/docs/sources/tasks/configure/configure-kubernetes.md @@ -1,19 +1,8 @@ --- -aliases: -- /docs/grafana-cloud/agent/flow/tasks/configure/configure-kubernetes/ -- /docs/grafana-cloud/monitor-infrastructure/agent/flow/tasks/configure/configure-kubernetes/ -- /docs/grafana-cloud/monitor-infrastructure/integrations/agent/flow/tasks/configure/configure-kubernetes/ -- /docs/grafana-cloud/send-data/agent/flow/tasks/configure/configure-kubernetes/ -# Previous page aliases for backwards compatibility: -- /docs/grafana-cloud/agent/flow/setup/configure/configure-kubernetes/ -- /docs/grafana-cloud/monitor-infrastructure/agent/flow/setup/configure/configure-kubernetes/ -- /docs/grafana-cloud/monitor-infrastructure/integrations/agent/flow/setup/configure/configure-kubernetes/ -- /docs/grafana-cloud/send-data/agent/flow/setup/configure/configure-kubernetes/ -- ../../setup/configure/configure-kubernetes/ # /docs/agent/latest/flow/setup/configure/configure-kubernetes/ -canonical: https://grafana.com/docs/agent/latest/flow/tasks/configure/configure-kubernetes/ -description: Learn how to configure Grafana Agent Flow on Kubernetes +canonical: https://grafana.com/docs/alloy/latest/tasks/configure/configure-kubernetes/ +description: Learn how to configure Grafana Alloy on Kubernetes menuTitle: Kubernetes -title: Configure Grafana Agent Flow on Kubernetes +title: Configure Grafana Alloy on Kubernetes weight: 200 --- @@ -23,8 +12,7 @@ To configure {{< param "PRODUCT_NAME" >}} on Kubernetes, perform the following s 1. Download a local copy of [values.yaml][] for the Helm chart. -1. Make changes to your copy of `values.yaml` to customize settings for the - Helm chart. +1. Make changes to your copy of `values.yaml` to customize settings for the Helm chart. Refer to the inline documentation in the `values.yaml` for more information about each option. @@ -38,14 +26,10 @@ To configure {{< param "PRODUCT_NAME" >}} on Kubernetes, perform the following s 1. Replace `VALUES_PATH` with the path to your copy of `values.yaml` to use. -[values.yaml]: https://raw.githubusercontent.com/grafana/agent/main/operations/helm/charts/grafana-agent/values.yaml - ## Kustomize considerations -If you are using [Kustomize][] to inflate and install the [Helm chart][], be careful -when using a `configMapGenerator` to generate the ConfigMap containing the -configuration. By default, the generator appends a hash to the name and patches -the resource mentioning it, triggering a rolling update. +If you are using [Kustomize][] to inflate and install the [Helm chart][], be careful when using a `configMapGenerator` to generate the ConfigMap containing the configuration. +By default, the generator appends a hash to the name and patches the resource mentioning it, triggering a rolling update. This behavior is undesirable for {{< param "PRODUCT_NAME" >}} because the startup time can be significant depending on the size of the Write-Ahead Log. You can use the [Helm chart][] sidecar container to watch the ConfigMap and trigger a dynamic reload. 
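+The sidecar is enabled through the chart's values. The following is a sketch, assuming the `configReloader` value exposed in the chart's `values.yaml`; check your chart version for the exact key:
+
+```yaml
+configReloader:
+  enabled: true
+```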
@@ -60,6 +44,6 @@ configMapGenerator: options: disableNameSuffixHash: true ``` - +[values.yaml]: https://raw.githubusercontent.com/grafana/agent/main/operations/helm/charts/grafana-agent/values.yaml [Helm chart]: https://github.com/grafana/agent/tree/main/operations/helm/charts/grafana-agent [Kustomize]: https://kubernetes.io/docs/tasks/manage-kubernetes-objects/kustomization/ diff --git a/docs/sources/flow/tasks/configure/configure-linux.md b/docs/sources/tasks/configure/configure-linux.md similarity index 56% rename from docs/sources/flow/tasks/configure/configure-linux.md rename to docs/sources/tasks/configure/configure-linux.md index 4b0bd3344e..b0757e9338 100644 --- a/docs/sources/flow/tasks/configure/configure-linux.md +++ b/docs/sources/tasks/configure/configure-linux.md @@ -1,19 +1,8 @@ --- -aliases: -- /docs/grafana-cloud/agent/flow/tasks/configure/configure-linux/ -- /docs/grafana-cloud/monitor-infrastructure/agent/flow/tasks/configure/configure-linux/ -- /docs/grafana-cloud/monitor-infrastructure/integrations/agent/flow/tasks/configure/configure-linux/ -- /docs/grafana-cloud/send-data/agent/flow/tasks/configure/configure-linux/ -# Previous page aliases for backwards compatibility: -- /docs/grafana-cloud/agent/flow/setup/configure/configure-linux/ -- /docs/grafana-cloud/monitor-infrastructure/agent/flow/setup/configure/configure-linux/ -- /docs/grafana-cloud/monitor-infrastructure/integrations/agent/flow/setup/configure/configure-linux/ -- /docs/grafana-cloud/send-data/agent/flow/setup/configure/configure-linux/ -- ../../setup/configure/configure-linux/ # /docs/agent/latest/flow/setup/configure/configure-linux/ -canonical: https://grafana.com/docs/agent/latest/flow/tasks/configure/configure-linux/ -description: Learn how to configure Grafana Agent Flow on Linux +canonical: https://grafana.com/docs/alloy/latest/tasks/configure/configure-linux/ +description: Learn how to configure Grafana Alloy on Linux menuTitle: Linux -title: Configure Grafana Agent Flow on Linux +title: Configure Grafana Alloy on Linux weight: 300 --- @@ -36,8 +25,7 @@ To change the configuration file used by the service, perform the following step * Debian or Ubuntu: edit `/etc/default/grafana-agent-flow` * RHEL/Fedora or SUSE/openSUSE: edit `/etc/sysconfig/grafana-agent-flow` -1. Change the contents of the `CONFIG_FILE` environment variable to point to - the new configuration file to use. +1. Change the contents of the `CONFIG_FILE` environment variable to point at the new configuration file to use. 1. Restart the {{< param "PRODUCT_NAME" >}} service: @@ -47,13 +35,11 @@ To change the configuration file used by the service, perform the following step ## Pass additional command-line flags -By default, the {{< param "PRODUCT_NAME" >}} service launches with the [run][] -command, passing the following flags: +By default, the {{< param "PRODUCT_NAME" >}} service launches with the [run][] command, passing the following flags: * `--storage.path=/var/lib/grafana-agent-flow` -To pass additional command-line flags to the {{< param "PRODUCT_NAME" >}} binary, perform -the following steps: +To pass additional command-line flags to the {{< param "PRODUCT_NAME" >}} binary, perform the following steps: 1. Edit the environment file for the service: @@ -69,14 +55,12 @@ the following steps: sudo systemctl restart grafana-agent-flow ``` -To see the list of valid command-line flags that can be passed to the service, -refer to the documentation for the [run][] command. 
+To see the list of valid command-line flags that can be passed to the service, refer to the documentation for the [run][] command.
 
 ## Expose the UI to other machines
 
-By default, {{< param "PRODUCT_NAME" >}} listens on the local network for its HTTP
-server. This prevents other machines on the network from being able to access
-the [UI for debugging][UI].
+By default, {{< param "PRODUCT_NAME" >}} listens on the local network for its HTTP server.
+This prevents other machines on the network from being able to access the [UI for debugging][UI].
 
 To expose the UI to other machines, complete the following steps:
 
@@ -96,9 +80,5 @@ To expose the UI to other machines, complete the following steps:
 
 To listen on all interfaces, replace `LISTEN_ADDR` with `0.0.0.0`.
 
-{{% docs/reference %}}
-[run]: "/docs/agent/ -> /docs/agent//flow/reference/cli/run.md"
-[run]: "/docs/grafana-cloud/ -> /docs/grafana-cloud/send-data/agent/flow/reference/cli/run.md"
-[UI]: "/docs/agent/ -> /docs/agent//flow/tasks/debug.md#grafana-agent-flow-ui"
-[UI]: "/docs/grafana-cloud/ -> /docs/grafana-cloud/send-data/agent/flow/tasks/debug.md#grafana-agent-flow-ui"
-{{% /docs/reference %}}
+[run]: ../../../reference/cli/run/
+[UI]: ../../debug/#grafana-alloy-ui
diff --git a/docs/sources/flow/tasks/configure/configure-macos.md b/docs/sources/tasks/configure/configure-macos.md
similarity index 58%
rename from docs/sources/flow/tasks/configure/configure-macos.md
rename to docs/sources/tasks/configure/configure-macos.md
index 8b860a010d..d57885617e 100644
--- a/docs/sources/flow/tasks/configure/configure-macos.md
+++ b/docs/sources/tasks/configure/configure-macos.md
@@ -1,19 +1,8 @@
 ---
-aliases:
-- /docs/grafana-cloud/agent/flow/tasks/configure/configure-macos/
-- /docs/grafana-cloud/monitor-infrastructure/agent/flow/tasks/configure/configure-macos/
-- /docs/grafana-cloud/monitor-infrastructure/integrations/agent/flow/tasks/configure/configure-macos/
-- /docs/grafana-cloud/send-data/agent/flow/tasks/configure/configure-macos/
-# Previous page aliases for backwards compatibility:
-- /docs/grafana-cloud/agent/flow/setup/configure/configure-macos/
-- /docs/grafana-cloud/monitor-infrastructure/agent/flow/setup/configure/configure-macos/
-- /docs/grafana-cloud/monitor-infrastructure/integrations/agent/flow/setup/configure/configure-macos/
-- /docs/grafana-cloud/send-data/agent/flow/setup/configure/configure-macos/
-- ../../setup/configure/configure-macos/ # /docs/agent/latest/flow/setup/configure/configure-macos/
-canonical: https://grafana.com/docs/agent/latest/flow/tasks/configure/configure-macos/
-description: Learn how to configure Grafana Agent Flow on macOS
+canonical: https://grafana.com/docs/alloy/latest/tasks/configure/configure-macos/
+description: Learn how to configure Grafana Alloy on macOS
 menuTitle: macOS
-title: Configure Grafana Agent Flow on macOS
+title: Configure Grafana Alloy on macOS
 weight: 400
 ---
 
@@ -32,13 +21,10 @@ To configure {{< param "PRODUCT_NAME" >}} on macOS, perform the following steps:
 
 ## Configure the {{% param "PRODUCT_NAME" %}} service
 
 {{< admonition type="note" >}}
-Due to limitations in Homebrew, customizing the service used by
-{{< param "PRODUCT_NAME" >}} on macOS requires changing the Homebrew formula and
-reinstalling {{< param "PRODUCT_NAME" >}}.
+Due to limitations in Homebrew, customizing the service used by {{< param "PRODUCT_NAME" >}} on macOS requires changing the Homebrew formula and reinstalling {{< param "PRODUCT_NAME" >}}.
{{< /admonition >}} -To customize the {{< param "PRODUCT_NAME" >}} service on macOS, perform the following -steps: +To customize the {{< param "PRODUCT_NAME" >}} service on macOS, perform the following steps: 1. Run the following command in a terminal: @@ -70,9 +56,8 @@ steps: ## Expose the UI to other machines -By default, {{< param "PRODUCT_NAME" >}} listens on the local network for its HTTP -server. This prevents other machines on the network from being able to access -the [UI for debugging][UI]. +By default, {{< param "PRODUCT_NAME" >}} listens on the local network for its HTTP server. +This prevents other machines on the network from being able to access the [UI for debugging][UI]. To expose the UI to other machines, complete the following steps: @@ -87,7 +72,4 @@ To expose the UI to other machines, complete the following steps: To listen on all interfaces, replace `127.0.0.1` with `0.0.0.0`. -{{% docs/reference %}} -[UI]: "/docs/agent/ -> /docs/agent//flow/tasks/debug.md#grafana-agent-flow-ui" -[UI]: "/docs/grafana-cloud/ -> /docs/grafana-cloud/send-data/agent/flow/tasks/debug.md#grafana-agent-flow-ui" -{{% /docs/reference %}} +[UI]: ../../debug/#grafana-alloy-ui diff --git a/docs/sources/flow/tasks/configure/configure-windows.md b/docs/sources/tasks/configure/configure-windows.md similarity index 66% rename from docs/sources/flow/tasks/configure/configure-windows.md rename to docs/sources/tasks/configure/configure-windows.md index 806579ea13..93fc1a4f1a 100644 --- a/docs/sources/flow/tasks/configure/configure-windows.md +++ b/docs/sources/tasks/configure/configure-windows.md @@ -1,19 +1,8 @@ --- -aliases: -- /docs/grafana-cloud/agent/flow/tasks/configure/configure-windows/ -- /docs/grafana-cloud/monitor-infrastructure/agent/flow/tasks/configure/configure-windows/ -- /docs/grafana-cloud/monitor-infrastructure/integrations/agent/flow/tasks/configure/configure-windows/ -- /docs/grafana-cloud/send-data/agent/flow/tasks/configure/configure-windows/ -# Previous page aliases for backwards compatibility: -- /docs/grafana-cloud/agent/flow/setup/configure/configure-windows/ -- /docs/grafana-cloud/monitor-infrastructure/agent/flow/setup/configure/configure-windows/ -- /docs/grafana-cloud/monitor-infrastructure/integrations/agent/flow/setup/configure/configure-windows/ -- /docs/grafana-cloud/send-data/agent/flow/setup/configure/configure-windows/ -- ../../setup/configure/configure-windows/ # /docs/agent/latest/flow/setup/configure/configure-windows/ -canonical: https://grafana.com/docs/agent/latest/flow/tasks/configure/configure-windows/ -description: Learn how to configure Grafana Agent Flow on Windows +canonical: https://grafana.com/docs/alloy/latest/tasks/configure/configure-windows/ +description: Learn how to configure Grafana Alloy on Windows menuTitle: Windows -title: Configure Grafana Agent Flow on Windows +title: Configure Grafana Alloy on Windows weight: 500 --- @@ -95,8 +84,4 @@ To expose the UI to other machines, complete the following steps: To listen on all interfaces, replace `LISTEN_ADDR` with `0.0.0.0`. 
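+
+For example, with the default port, a sketch of what the service's run arguments might look like when listening on all interfaces; the `--server.http.listen-addr` flag is documented in the run command reference, and the configuration path shown is the default install location:
+
+```shell
+run --server.http.listen-addr=0.0.0.0:12345 "C:\Program Files\Grafana Agent Flow\config.river"
+```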
-{{% docs/reference %}}
-[UI]: "/docs/agent/ -> /docs/agent//flow/tasks/debug.md#grafana-agent-flow-ui"
-[UI]: "/docs/grafana-cloud/ -> /docs/grafana-cloud/send-data/agent/flow/tasks/debug.md#grafana-agent-flow-ui"
-{{% /docs/reference %}}
-
+[UI]: ../../debug/#grafana-alloy-ui
diff --git a/docs/sources/flow/tasks/debug.md b/docs/sources/tasks/debug.md
similarity index 61%
rename from docs/sources/flow/tasks/debug.md
rename to docs/sources/tasks/debug.md
index 331307a58d..4f2615dc5c 100644
--- a/docs/sources/flow/tasks/debug.md
+++ b/docs/sources/tasks/debug.md
@@ -1,18 +1,7 @@
 ---
-aliases:
-- /docs/grafana-cloud/agent/flow/tasks/debug/
-- /docs/grafana-cloud/monitor-infrastructure/agent/flow/tasks/debug/
-- /docs/grafana-cloud/monitor-infrastructure/integrations/agent/flow/tasks/debug/
-- /docs/grafana-cloud/send-data/agent/flow/tasks/debug/
-# Previous page aliases for backwards compatibility:
-- /docs/grafana-cloud/agent/flow/monitoring/debugging/
-- /docs/grafana-cloud/monitor-infrastructure/agent/flow/monitoring/debugging/
-- /docs/grafana-cloud/monitor-infrastructure/integrations/agent/flow/monitoring/debugging/
-- /docs/grafana-cloud/send-data/agent/flow/monitoring/debugging/
-- ../monitoring/debugging/ # /docs/agent/latest/flow/monitoring/debugging/
-canonical: https://grafana.com/docs/agent/latest/flow/monitoring/debugging/
-description: Learn about debugging issues with Grafana Agent Flow
-title: Debug issues with Grafana Agent Flow
+canonical: https://grafana.com/docs/alloy/latest/tasks/debug/
+description: Learn about debugging issues with Grafana Alloy
+title: Debug issues with Grafana Alloy
 menuTitle: Debug issues
 weight: 1000
 ---
 
@@ -28,14 +17,17 @@ Follow these steps to debug issues with {{< param "PRODUCT_NAME" >}}:
 
 {{< param "PRODUCT_NAME" >}} includes an embedded UI viewable from the {{< param "PRODUCT_ROOT_NAME" >}} HTTP server, which defaults to listening at `http://localhost:12345`.
 
-> **NOTE**: For security reasons, installations of {{< param "PRODUCT_NAME" >}} on non-containerized platforms default to listening on `localhost`.
-> This default prevents other machines on the network from being able to view the UI.
->
-> To expose the UI to other machines on the network on non-containerized platforms, refer to the documentation for how you [installed][install] {{< param "PRODUCT_NAME" >}}.
->
-> If you are running a custom installation of {{< param "PRODUCT_NAME" >}},
-> refer to the documentation for [the `grafana-agent run` command][grafana-agent run] to learn how to change the HTTP listen address,
-> and pass the appropriate flag when running {{< param "PRODUCT_NAME" >}}.
+{{< admonition type="note" >}}
+For security reasons, installations of {{< param "PRODUCT_NAME" >}} on non-containerized platforms default to listening on `localhost`.
+This default prevents other machines on the network from being able to view the UI.
+
+To expose the UI to other machines on the network on non-containerized platforms, refer to the documentation for how you [installed][install] {{< param "PRODUCT_NAME" >}}.
+
+If you are running a custom installation of {{< param "PRODUCT_NAME" >}}, refer to the documentation for the [`grafana-agent run` command][grafana-agent run] to learn how to change the HTTP listen address, and pass the appropriate flag when running {{< param "PRODUCT_NAME" >}}.
+
+[install]: ../../get-started/install/
+[grafana-agent run]: ../../reference/cli/run/
+{{< /admonition >}}
 
 ### Home page
 
@@ -113,16 +105,6 @@ To debug issues when using [clustering][], check for the following symptoms.
 
 - **Node stuck in terminating state**: The node attempted to gracefully shut down and set its state to Terminating, but it has not completely gone away.
   Check the clustering page to view the state of the peers and verify that the terminating {{< param "PRODUCT_ROOT_NAME" >}} has been shut down.
 
-{{% docs/reference %}}
-[logging]: "/docs/agent/ -> /docs/agent//flow/reference/config-blocks/logging.md"
-[logging]: "/docs/grafana-cloud/ -> /docs/grafana-cloud/send-data/agent/flow/reference/config-blocks/logging.md"
-[clustering]: "/docs/agent/ -> /docs/agent//flow/concepts/clustering.md"
-[clustering]: "/docs/grafana-cloud/ -> /docs/grafana-cloud/send-data/agent/flow/concepts/clustering.md"
-[install]: "/docs/agent/ -> /docs/agent//flow/get-started/install"
-[install]: "/docs/grafana-cloud/ -> /docs/grafana-cloud/send-data/agent/flow/get-started/install"
-[secret]: "/docs/agent/ -> /docs/agent//flow/concepts/config-language/expressions/types_and_values.md#secrets.md"
-[secret]: "/docs/grafana-cloud/ -> /docs/grafana-cloud/send-data/agent/flow/concepts/config-language/expressions/types_and_values.md#secrets.md"
-[grafana-agent run]: "/docs/agent/ -> /docs/agent//flow/reference/cli/run.md"
-[grafana-agent run]: "/docs/grafana-cloud/ -> /docs/grafana-cloud/send-data/agent/flow/reference/cli/run.md"
-{{% /docs/reference %}}
-
+[logging]: ../../reference/config-blocks/logging/
+[clustering]: ../../concepts/clustering/
+[secret]: ../../concepts/config-language/expressions/types_and_values/#secrets
diff --git a/docs/sources/tasks/distribute-prometheus-scrape-load.md b/docs/sources/tasks/distribute-prometheus-scrape-load.md
new file mode 100644
index 0000000000..9c7dbc41ee
--- /dev/null
+++ b/docs/sources/tasks/distribute-prometheus-scrape-load.md
@@ -0,0 +1,51 @@
+---
+canonical: https://grafana.com/docs/alloy/latest/tasks/distribute-prometheus-scrape-load/
+description: Learn how to distribute your Prometheus metrics scrape load
+menuTitle: Distribute Prometheus metrics scrape load
+title: Distribute Prometheus metrics scrape load
+weight: 500
+---
+
+# Distribute Prometheus metrics scrape load
+
+A good predictor for the size of an {{< param "PRODUCT_NAME" >}} deployment is the number of Prometheus targets each {{< param "PRODUCT_ROOT_NAME" >}} scrapes.
+[Clustering][] with target auto-distribution allows a fleet of {{< param "PRODUCT_ROOT_NAME" >}}s to work together to dynamically distribute their scrape load, providing high availability.
+
+{{< admonition type="note" >}}
+Clustering is a [beta][] feature. Beta features are subject to breaking changes and may be replaced with equivalent functionality that covers the same use case.
+
+[beta]: ../../stability/#beta
+{{< /admonition >}}
+
+## Before you begin
+
+- Familiarize yourself with how to [configure][] existing {{< param "PRODUCT_NAME" >}} installations.
+- [Configure Prometheus metrics collection][].
+- [Configure clustering][].
+- Ensure that all of your clustered {{< param "PRODUCT_ROOT_NAME" >}}s have the same configuration file.
+
+## Steps
+
+To distribute Prometheus metrics scrape load with clustering:
+
+1. Add the following block to all `prometheus.scrape` components that should use auto-distribution:
+
+   ```river
+   clustering {
+     enabled = true
+   }
+   ```
+
+1. Restart or reload the {{< param "PRODUCT_ROOT_NAME" >}}s so they use the new configuration.
+
+1. Validate that auto-distribution is functioning:
+
+   1. Using the {{< param "PRODUCT_ROOT_NAME" >}} [UI][] on each {{< param "PRODUCT_ROOT_NAME" >}}, navigate to the details page for one of the `prometheus.scrape` components you modified.
+
+   1. Compare the Debug Info sections between two different {{< param "PRODUCT_ROOT_NAME" >}}s to ensure that they aren't scraping the same sets of targets.
+
+[Clustering]: ../../concepts/clustering/
+[configure]: ../configure/
+[Configure Prometheus metrics collection]: ../collect-prometheus-metrics/
+[Configure clustering]: ../configure-agent-clustering/
+[UI]: ../debug/#component-detail-page
diff --git a/docs/sources/tasks/estimate-resource-usage.md b/docs/sources/tasks/estimate-resource-usage.md
new file mode 100644
index 0000000000..cac6956363
--- /dev/null
+++ b/docs/sources/tasks/estimate-resource-usage.md
@@ -0,0 +1,58 @@
+---
+canonical: https://grafana.com/docs/alloy/latest/tasks/resource-usage/
+description: Estimate expected Grafana Alloy resource usage
+headless: true
+title: Estimate resource usage
+menuTitle: Estimate resource usage
+weight: 190
+---
+
+# Estimate {{% param "PRODUCT_NAME" %}} resource usage
+
+This page provides guidance for the expected resource usage of {{< param "PRODUCT_NAME" >}} for each telemetry type, based on the operational experience of some of the {{< param "PRODUCT_NAME" >}} maintainers.
+
+{{< admonition type="note" >}}
+The resource usage depends on the workload, hardware, and the configuration used.
+The information on this page is a good starting point for most users, but your actual usage may be different.
+{{< /admonition >}}
+
+## Prometheus metrics
+
+The Prometheus metrics resource usage depends mainly on the number of active series that need to be scraped and the scrape interval.
+
+As a rule of thumb, **for each 1 million active series** and with the default scrape interval, you can expect to use approximately:
+
+* 0.4 CPU cores
+* 11 GiB of memory
+* 1.5 MiB/s of total network bandwidth, send and receive
+
+These recommendations are based on deployments that use [clustering][], but they broadly apply to other deployment modes.
+Refer to [Deploy {{< param "PRODUCT_NAME" >}}][deploy] for more information on how to deploy {{< param "PRODUCT_NAME" >}}.
+
+## Loki logs
+
+Loki logs resource usage depends mainly on the volume of logs ingested.
+
+As a rule of thumb, **for each 1 MiB/second of logs ingested**, you can expect to use approximately:
+
+* 1 CPU core
+* 120 MiB of memory
+
+These recommendations are based on Kubernetes DaemonSet deployments on clusters with a relatively small number of nodes and a high log volume on each.
+The resource usage per 1 MiB/second of logs can be higher if you have a large number of small nodes, due to the constant overhead of running {{< param "PRODUCT_NAME" >}} on each node.
+
+Additionally, factors such as the number of labels, the number of files, and the average log line length may all play a role in the resource usage.
+
+## Pyroscope profiles
+
+Pyroscope profiles resource usage depends mainly on the volume of profiles.
+
+As a rule of thumb, **for each 100 profiles/second**, you can expect to use approximately:
+
+* 1 CPU core
+* 10 GiB of memory
+
+Factors such as the size of each profile and the frequency of fetching them also play a role in the overall resource usage.
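+
+As a worked example of applying these rules of thumb: a {{< param "PRODUCT_ROOT_NAME" >}} scraping 2 million active series at the default interval would be expected to use roughly 0.8 CPU cores, 22 GiB of memory, and 3 MiB/s of network bandwidth, while one collecting 300 profiles/second would be expected to use roughly 3 CPU cores and 30 GiB of memory. Treat these numbers as starting points for capacity planning, not guarantees.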
+ +[deploy]: ../../get-started/deploy-alloy/ +[clustering]: ../../concepts/clustering/ diff --git a/docs/sources/tasks/migrate/_index.md b/docs/sources/tasks/migrate/_index.md new file mode 100644 index 0000000000..9ee1f3238a --- /dev/null +++ b/docs/sources/tasks/migrate/_index.md @@ -0,0 +1,13 @@ +--- +canonical: https://grafana.com/docs/alloy/latest/tasks/migrate/ +description: How to migrate to Grafana Alloy +menuTitle: Migrate +title: Migrate to Grafana Alloy +weight: 100 +--- + +# How to migrate to {{% param "PRODUCT_NAME" %}} + +This section details how to migrate to {{< param "PRODUCT_NAME" >}} from other common solutions. + +{{< section >}} diff --git a/docs/sources/flow/tasks/migrate/from-operator.md b/docs/sources/tasks/migrate/from-operator.md similarity index 69% rename from docs/sources/flow/tasks/migrate/from-operator.md rename to docs/sources/tasks/migrate/from-operator.md index f035f95484..58c62f792e 100644 --- a/docs/sources/flow/tasks/migrate/from-operator.md +++ b/docs/sources/tasks/migrate/from-operator.md @@ -1,15 +1,8 @@ --- -aliases: -- /docs/grafana-cloud/monitor-infrastructure/agent/flow/tasks/migrate/from-operator/ -- /docs/grafana-cloud/send-data/agent/flow/tasks/migrate/from-operator/ -# Previous page aliases for backwards compatibility: -- /docs/grafana-cloud/monitor-infrastructure/agent/flow/getting-started/migrating-from-operator/ -- /docs/grafana-cloud/send-data/agent/flow/getting-started/migrating-from-operator/ -- ../../getting-started/migrating-from-operator/ # /docs/agent/latest/flow/getting-started/migrating-from-operator/ -canonical: https://grafana.com/docs/agent/latest/flow/tasks/migrate/from-operator/ -description: Migrate from Grafana Agent Operator to Grafana Agent Flow +canonical: https://grafana.com/docs/alloy/latest/tasks/migrate/from-operator/ +description: Migrate from Grafana Agent Operator to Grafana Alloy menuTitle: Migrate from Operator -title: Migrate from Grafana Agent Operator to Grafana Agent Flow +title: Migrate from Grafana Agent Operator to Grafana Alloy weight: 320 --- @@ -275,38 +268,24 @@ The logging subsystem is very powerful and has many options for processing logs. ## Integrations The `Integration` CRD isn't supported with {{< param "PRODUCT_NAME" >}}. -However, all static mode integrations have an equivalent component in the [`prometheus.exporter`][] namespace. +However, all static mode integrations have an equivalent component in the [`prometheus.exporter`][prometheus.exporter] namespace. The [reference documentation][component documentation] should help convert those integrations to their {{< param "PRODUCT_NAME" >}} equivalent. 
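+
+For example, the static mode `node_exporter` integration maps to the `prometheus.exporter.unix` component. The following is a minimal sketch of the pattern, assuming a `prometheus.remote_write` component labeled `default` is defined elsewhere in the configuration; check the component reference for the arguments your version supports:
+
+```river
+// Expose node_exporter-style metrics as scrape targets.
+prometheus.exporter.unix "default" { }
+
+// Scrape the exporter and forward the samples to remote write.
+prometheus.scrape "integrations" {
+  targets    = prometheus.exporter.unix.default.targets
+  forward_to = [prometheus.remote_write.default.receiver]
+}
+```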
-[default values]: https://github.com/grafana/agent/blob/main/operations/helm/charts/grafana-agent/values.yaml - -{{% docs/reference %}} -[clustering]: "/docs/agent/ -> /docs/agent//flow/concepts/clustering" -[clustering]: "/docs/grafana-cloud/ -> /docs/grafana-cloud/send-data/agent/flow/concepts/clustering" -[deployment guide]: "/docs/agent/ -> /docs/agent//flow/get-started/deploy-agent" -[deployment guide]: "/docs/grafana-cloud/ -> /docs/grafana-cloud/send-data/flow/get-started/deploy-agent" -[operator guide]: "/docs/agent/ -> /docs/agent//operator/deploy-agent-operator-resources.md#deploy-a-metricsinstance-resource" -[operator guide]: "/docs/grafana-cloud/ -> /docs/grafana-cloud/send-data/operator/deploy-agent-operator-resources.md#deploy-a-metricsinstance-resource" -[Helm chart]: "/docs/agent/ -> /docs/agent//flow/get-started/install/kubernetes" -[Helm chart]: "/docs/grafana-cloud/ -> /docs/grafana-cloud/send-data/flow/get-started/install/kubernetes" -[remote.kubernetes.secret]: "/docs/agent/ -> /docs/agent//flow/reference/components/remote.kubernetes.secret.md" -[remote.kubernetes.secret]: "/docs/grafana-cloud/ -> /docs/grafana-cloud/send-data/flow/reference/components/remote.kubernetes.secret.md" -[prometheus.remote_write]: "/docs/agent/ -> /docs/agent//flow/reference/components/prometheus.remote_write.md" -[prometheus.remote_write]: "/docs/grafana-cloud/ -> /docs/grafana-cloud/send-data/flow/reference/components/prometheus.remote_write.md" -[prometheus.operator.podmonitors]: "/docs/agent/ -> /docs/agent//flow/reference/components/prometheus.operator.podmonitors.md" -[prometheus.operator.podmonitors]: "/docs/grafana-cloud/ -> /docs/grafana-cloud/send-data/flow/reference/components/prometheus.operator.podmonitors.md" -[prometheus.operator.servicemonitors]: "/docs/agent/ -> /docs/agent//flow/reference/components/prometheus.operator.servicemonitors.md" -[prometheus.operator.servicemonitors]: "/docs/grafana-cloud/ -> /docs/grafana-cloud/send-data/flow/reference/components/prometheus.operator.servicemonitors.md" -[prometheus.operator.probes]: "/docs/agent/ -> /docs/agent//flow/reference/components/prometheus.operator.probes.md" -[prometheus.operator.probes]: "/docs/grafana-cloud/ -> /docs/grafana-cloud/send-data/flow/reference/components/prometheus.operator.probes.md" -[prometheus.scrape]: "/docs/agent/ -> /docs/agent//flow/reference/components/prometheus.scrape.md" -[prometheus.scrape]: "/docs/grafana-cloud/ -> /docs/grafana-cloud/send-data/flow/reference/components/prometheus.scrape" -[loki.source.kubernetes]: "/docs/agent/ -> /docs/agent//flow/reference/components/loki.source.kubernetes.md" -[loki.source.kubernetes]: "/docs/grafana-cloud/ -> /docs/grafana-cloud/send-data/flow/reference/components/loki.source.kubernetes.md" -[loki.source.podlogs]: "/docs/agent/ -> /docs/agent//flow/reference/components/loki.source.podlogs.md" -[loki.source.podlogs]: "/docs/grafana-cloud/ -> /docs/grafana-cloud/send-data/flow/reference/components/loki.source.podlogs.md" -[component documentation]: "/docs/agent/ -> /docs/agent//flow/reference/components" -[component documentation]: "/docs/grafana-cloud/ -> /docs/grafana-cloud/send-data/flow/reference/components" -[`prometheus.exporter`]: "/docs/agent/ -> /docs/agent//flow/reference/components" -[`prometheus.exporter`]: "/docs/grafana-cloud/ -> /docs/grafana-cloud/send-data/flow/reference/components" -{{% /docs/reference %}} + +[default values]: https://github.com/grafana/alloy/blob/main/operations/helm/charts/grafana-agent/values.yaml +[clustering]: 
../../../concepts/clustering/ +[deployment guide]: ../../../get-started/deploy-alloy + +[operator guide]: https://grafana.com/docs/agent/latest/operator/deploy-agent-operator-resources/#deploy-a-metricsinstance-resource + +[Helm chart]: ../../../get-started/install/kubernetes/ +[remote.kubernetes.secret]: ../../../reference/components/remote.kubernetes.secret/ +[prometheus.remote_write]: ../../../reference/components/prometheus.remote_write/ +[prometheus.operator.podmonitors]: ../../../reference/components/prometheus.operator.podmonitors/ +[prometheus.operator.servicemonitors]: ../../../reference/components/prometheus.operator.servicemonitors/ +[prometheus.operator.probes]: ../../../reference/components/prometheus.operator.probes/ +[prometheus.scrape]: ../../../reference/components/prometheus.scrape/ +[loki.source.kubernetes]: ../../../reference/components/loki.source.kubernetes/ +[loki.source.podlogs]: ../../../reference/components/loki.source.podlogs/ +[component documentation]: ../../../reference/components/ +[prometheus.exporter]: ../../../reference/components/ diff --git a/docs/sources/flow/tasks/migrate/from-prometheus.md b/docs/sources/tasks/migrate/from-prometheus.md similarity index 71% rename from docs/sources/flow/tasks/migrate/from-prometheus.md rename to docs/sources/tasks/migrate/from-prometheus.md index 84241791ec..d5bdc6bd82 100644 --- a/docs/sources/flow/tasks/migrate/from-prometheus.md +++ b/docs/sources/tasks/migrate/from-prometheus.md @@ -1,19 +1,8 @@ --- -aliases: -- /docs/grafana-cloud/agent/flow/tasks/migrate/from-prometheus/ -- /docs/grafana-cloud/monitor-infrastructure/agent/flow/tasks/migrate/from-prometheus/ -- /docs/grafana-cloud/monitor-infrastructure/integrations/agent/flow/tasks/migrate/from-prometheus/ -- /docs/grafana-cloud/send-data/agent/flow/tasks/migrate/from-prometheus/ -# Previous page aliases for backwards compatibility: -- /docs/grafana-cloud/agent/flow/getting-started/migrating-from-prometheus/ -- /docs/grafana-cloud/monitor-infrastructure/agent/flow/getting-started/migrating-from-prometheus/ -- /docs/grafana-cloud/monitor-infrastructure/integrations/agent/flow/getting-started/migrating-from-prometheus/ -- /docs/grafana-cloud/send-data/agent/flow/getting-started/migrating-from-prometheus/ -- ../../getting-started/migrating-from-prometheus/ # /docs/agent/latest/flow/getting-started/migrating-from-prometheus/ -canonical: https://grafana.com/docs/agent/latest/flow/tasks/migrate/from-prometheus/ -description: Learn how to migrate from Prometheus to Grafana Agent Flow +canonical: https://grafana.com/docs/alloy/latest/tasks/migrate/from-prometheus/ +description: Learn how to migrate from Prometheus to Grafana Alloy menuTitle: Migrate from Prometheus -title: Migrate from Prometheus to Grafana Agent Flow +title: Migrate from Prometheus to Grafana Alloy weight: 320 --- @@ -71,10 +60,10 @@ This conversion will enable you to take full advantage of the many additional fe 1. If the `convert` command can't convert a Prometheus configuration, diagnostic information is sent to `stderr`.\ You can bypass any non-critical issues and output the {{< param "PRODUCT_NAME" >}} configuration using a best-effort conversion by including the `--bypass-errors` flag. - {{< admonition type="caution" >}} - If you bypass the errors, the behavior of the converted configuration may not match the original Prometheus configuration. - Make sure you fully test the converted configuration before using it in a production environment. 
-   {{< /admonition >}}
+   {{< admonition type="caution" >}}
+   If you bypass the errors, the behavior of the converted configuration may not match the original Prometheus configuration.
+   Make sure you fully test the converted configuration before using it in a production environment.
+   {{< /admonition >}}
 
    {{< code >}}
 
@@ -131,14 +120,14 @@ This allows you to try {{< param "PRODUCT_NAME" >}} without modifying your exist
 
 > In this task, you will use the [run][] CLI command to run {{< param "PRODUCT_NAME" >}}
 > using a Prometheus configuration.
 
-[Run][] {{< param "PRODUCT_NAME" >}} and include the command line flag `--config.format=prometheus`.
+[Run][run alloy] {{< param "PRODUCT_NAME" >}} and include the command line flag `--config.format=prometheus`.
 Your configuration file must be a valid Prometheus configuration file rather than a {{< param "PRODUCT_NAME" >}} configuration file.
 
 ### Debugging
 
 1. You can follow the convert CLI command [debugging][] instructions to generate a diagnostic report.
 
-1. Refer to the {{< param "PRODUCT_NAME" >}} [Debugging][DebuggingUI] for more information about a running {{< param "PRODUCT_NAME" >}}.
+1. Refer to [Debug {{< param "PRODUCT_NAME" >}}][DebuggingUI] for more information about debugging a running {{< param "PRODUCT_NAME" >}}.
 
 1. If your Prometheus configuration can't be converted and loaded directly into {{< param "PRODUCT_NAME" >}}, diagnostic information is sent to `stderr`.
    You can bypass any non-critical issues and start {{< param "PRODUCT_NAME" >}} by including the `--config.bypass-conversion-errors` flag in addition to `--config.format=prometheus`.
 
@@ -244,24 +233,12 @@ The following list is specific to the convert command and not {{< param "PRODUCT
 
 [Prometheus]: https://prometheus.io/docs/prometheus/latest/configuration/configuration/
 [debugging]: #debugging
 [example]: #example
-
-{{% docs/reference %}}
-[prometheus.scrape]: "/docs/agent/ -> /docs/agent//flow/reference/components/prometheus.scrape.md"
-[prometheus.scrape]: "/docs/grafana-cloud/ -> /docs/grafana-cloud/send-data/agent/flow/reference/components/prometheus.scrape.md"
-[prometheus.remote_write]: "/docs/agent/ -> /docs/agent//flow/reference/components/prometheus.remote_write.md"
-[prometheus.remote_write]: "/docs/grafana-cloud/ -> /docs/grafana-cloud/send-data/agent/flow/reference/components/prometheus.remote_write.md"
-[Components]: "/docs/agent/ -> /docs/agent//flow/concepts/components.md"
-[Components]: "/docs/grafana-cloud/ -> /docs/grafana-cloud/send-data/agent/flow/concepts/components.md"
-[convert]: "/docs/agent/ -> /docs/agent//flow/reference/cli/convert.md"
-[convert]: "/docs/grafana-cloud/ -> /docs/grafana-cloud/send-data/agent/flow/reference/cli/convert.md"
-[run]: "/docs/agent/ -> /docs/agent//flow/reference/cli/run.md"
-[run]: "/docs/grafana-cloud/ -> /docs/grafana-cloud/send-data/agent/flow/reference/cli/run.md"
-[Run]: "/docs/agent/ -> /docs/agent//flow/get-started/run/"
-[Run]: "/docs/grafana-cloud/ -> /docs/grafana-cloud/send-data/agent/flow/get-started/run/"
-[DebuggingUI]: "/docs/agent/ -> /docs/agent//flow/tasks/debug.md"
-[DebuggingUI]: "/docs/grafana-cloud/ -> /docs/grafana-cloud/send-data/agent/flow/tasks/debug.md"
-[River]: "/docs/agent/ -> /docs/agent//flow/concepts/config-language/_index.md"
-[River]: "/docs/grafana-cloud/ -> /docs/grafana-cloud/send-data/agent/flow/concepts/config-language/_index.md"
-[UI]: "/docs/agent/ -> /docs/agent//flow/tasks/debug#grafana-agent-flow-ui"
-[UI]: "/docs/grafana-cloud/ -> /docs/grafana-cloud/send-data/agent/flow/tasks/debug#grafana-agent-flow-ui"
-{{% /docs/reference %}}
+[prometheus.scrape]: ../../../reference/components/prometheus.scrape/
+[prometheus.remote_write]: ../../../reference/components/prometheus.remote_write/
+[Components]: ../../../concepts/components/
+[convert]: ../../../reference/cli/convert/
+[run]: ../../../reference/cli/run/
+[run alloy]: ../../../get-started/run/
+[DebuggingUI]: ../../debug/
+[River]: ../../../concepts/config-language/
+[UI]: ../../debug/#grafana-alloy-ui
diff --git a/docs/sources/flow/tasks/migrate/from-promtail.md b/docs/sources/tasks/migrate/from-promtail.md
similarity index 73%
rename from docs/sources/flow/tasks/migrate/from-promtail.md
rename to docs/sources/tasks/migrate/from-promtail.md
index 7a0dda9b92..6699ed03f2 100644
--- a/docs/sources/flow/tasks/migrate/from-promtail.md
+++ b/docs/sources/tasks/migrate/from-promtail.md
@@ -1,19 +1,8 @@
 ---
-aliases:
-- /docs/grafana-cloud/agent/flow/tasks/migrate/from-promtail/
-- /docs/grafana-cloud/monitor-infrastructure/agent/flow/tasks/migrate/from-promtail/
-- /docs/grafana-cloud/monitor-infrastructure/integrations/agent/flow/tasks/migrate/from-promtail/
-- /docs/grafana-cloud/send-data/agent/flow/tasks/migrate/from-promtail/
-# Previous page aliases for backwards compatibility:
-- /docs/grafana-cloud/agent/flow/getting-started/migrating-from-promtail/
-- /docs/grafana-cloud/monitor-infrastructure/agent/flow/getting-started/migrating-from-promtail/
-- /docs/grafana-cloud/monitor-infrastructure/integrations/agent/flow/getting-started/migrating-from-promtail/
-- /docs/grafana-cloud/send-data/agent/flow/getting-started/migrating-from-promtail/
-- ../../getting-started/migrating-from-promtail/ # /docs/agent/latest/flow/getting-started/migrating-from-promtail/
-canonical: https://grafana.com/docs/agent/latest/flow/tasks/migrate/from-promtail/
-description: Learn how to migrate from Promtail to Grafana Agent Flow
+canonical: https://grafana.com/docs/alloy/latest/tasks/migrate/from-promtail/
+description: Learn how to migrate from Promtail to Grafana Alloy
 menuTitle: Migrate from Promtail
-title: Migrate from Promtail to Grafana Agent Flow
+title: Migrate from Promtail to Grafana Alloy
 weight: 330
 ---
 
@@ -64,7 +53,7 @@ This conversion will enable you to take full advantage of the many additional fe
 
    * _``_: The full path to the Promtail configuration.
    * _``_: The full path to output the {{< param "PRODUCT_NAME" >}} configuration.
 
-1. [Run][] {{< param "PRODUCT_NAME" >}} using the new configuration from _``_:
+1. [Run][run alloy] {{< param "PRODUCT_NAME" >}} using the new configuration from _``_:
 
 ### Debugging
 
@@ -127,7 +116,7 @@ This allows you to try {{< param "PRODUCT_NAME" >}} without modifying your exist
 
 > In this task, you will use the [run][] CLI command to run {{< param "PRODUCT_NAME" >}} using a Promtail configuration.
 
-[Run][] {{< param "PRODUCT_NAME" >}} and include the command line flag `--config.format=promtail`.
+[Run][run alloy] {{< param "PRODUCT_NAME" >}} and include the command line flag `--config.format=promtail`.
 Your configuration file must be a valid Promtail configuration file rather than a {{< param "PRODUCT_NAME" >}} configuration file.
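+
+For example, a sketch of such an invocation, with a placeholder path to an existing Promtail configuration; the `--config.format` flag is described in the run command reference:
+
+```shell
+grafana-agent-flow run --config.format=promtail PROMTAIL_CONFIG_PATH
+```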
 
 ### Debugging
 
@@ -225,26 +214,13 @@ The following list is specific to the convert command and not {{< param "PRODUCT
 
 [Promtail]: https://www.grafana.com/docs/loki//clients/promtail/
 [debugging]: #debugging
 [expanded in the configuration file]: https://www.grafana.com/docs/loki//clients/promtail/configuration/#use-environment-variables-in-the-configuration
-
-{{% docs/reference %}}
-[local.file_match]: "/docs/agent/ -> /docs/agent//flow/reference/components/local.file_match.md"
-[local.file_match]: "/docs/grafana-cloud/ -> /docs/grafana-cloud/send-data/agent/flow/reference/components/local.file_match.md"
-[loki.source.file]: "/docs/agent/ -> /docs/agent//flow/reference/components/loki.source.file.md"
-[loki.source.file]: "/docs/grafana-cloud/ -> /docs/grafana-cloud/send-data/agent/flow/reference/components/loki.source.file.md"
-[loki.write]: "/docs/agent/ -> /docs/agent//flow/reference/components/loki.write.md"
-[loki.write]: "/docs/grafana-cloud/ -> /docs/grafana-cloud/send-data/agent/flow/reference/components/loki.write.md"
-[Components]: "/docs/agent/ -> /docs/agent//flow/concepts/components.md"
-[Components]: "/docs/grafana-cloud/ -> /docs/grafana-cloud/send-data/agent/flow/concepts/components.md"
-[convert]: "/docs/agent/ -> /docs/agent//flow/reference/cli/convert.md"
-[convert]: "/docs/grafana-cloud/ -> /docs/grafana-cloud/send-data/agent/flow/reference/cli/convert.md"
-[run]: "/docs/agent/ -> /docs/agent//flow/reference/cli/run.md"
-[run]: "/docs/grafana-cloud/ -> /docs/grafana-cloud/send-data/agent/flow/reference/cli/run.md"
-[Run]: "/docs/agent/ -> /docs/agent//flow/get-started/run/"
-[Run]: "/docs/grafana-cloud/ -> /docs/grafana-cloud/send-data/agent/flow/get-started/run/"
-[DebuggingUI]: "/docs/agent/ -> /docs/agent//flow/tasks/debug.md"
-[DebuggingUI]: "/docs/grafana-cloud/ -> /docs/grafana-cloud/send-data/agent/flow/tasks/debug.md"
-[River]: "/docs/agent/ -> /docs/agent//flow/concepts/config-language/_index.md"
-[River]: "/docs/grafana-cloud/ -> /docs/grafana-cloud/send-data/agent/flow/concepts/config-language/_index.md"
-[UI]: "/docs/agent/ -> /docs/agent//flow/tasks/debug#grafana-agent-flow-ui"
-[UI]: "/docs/grafana-cloud/ -> /docs/grafana-cloud/send-data/agent/flow/tasks/debug#grafana-agent-flow-ui"
-{{% /docs/reference %}}
+[local.file_match]: ../../../reference/components/local.file_match/
+[loki.source.file]: ../../../reference/components/loki.source.file/
+[loki.write]: ../../../reference/components/loki.write/
+[Components]: ../../../concepts/components/
+[convert]: ../../../reference/cli/convert/
+[run]: ../../../reference/cli/run/
+[run alloy]: ../../../get-started/run/
+[DebuggingUI]: ../../../tasks/debug/
+[River]: ../../../concepts/config-language/
+[UI]: ../../../tasks/debug/#grafana-alloy-ui
diff --git a/docs/sources/flow/tasks/migrate/from-static.md b/docs/sources/tasks/migrate/from-static.md
similarity index 60%
rename from docs/sources/flow/tasks/migrate/from-static.md
rename to docs/sources/tasks/migrate/from-static.md
index 5d1b73626f..0e82ff92ac 100644
--- a/docs/sources/flow/tasks/migrate/from-static.md
+++ b/docs/sources/tasks/migrate/from-static.md
@@ -1,25 +1,14 @@
 ---
-aliases:
-- /docs/grafana-cloud/agent/flow/tasks/migrate/from-static/
-- /docs/grafana-cloud/monitor-infrastructure/agent/flow/tasks/migrate/from-static/
-- /docs/grafana-cloud/monitor-infrastructure/integrations/agent/flow/tasks/migrate/from-static/
-- /docs/grafana-cloud/send-data/agent/flow/tasks/migrate/from-static/
-# Previous page aliases for backwards compatibility:
-- /docs/grafana-cloud/agent/flow/getting-started/migrating-from-static/
-- /docs/grafana-cloud/monitor-infrastructure/agent/flow/getting-started/migrating-from-static/
-- /docs/grafana-cloud/monitor-infrastructure/integrations/agent/flow/getting-started/migrating-from-static/
-- /docs/grafana-cloud/send-data/agent/flow/getting-started/migrating-from-static/
-- ../../getting-started/migrating-from-static/ # /docs/agent/latest/flow/getting-started/migrating-from-static/
-canonical: https://grafana.com/docs/agent/latest/flow/tasks/migrate/from-static/
-description: Learn how to migrate your configuration from Grafana Agent Static to Grafana Agent Flow
-menuTitle: Migrate from Static to Flow
-title: Migrate Grafana Agent Static to Grafana Agent Flow
+canonical: https://grafana.com/docs/alloy/latest/tasks/migrate/from-static/
+description: Learn how to migrate your configuration from Grafana Agent Static to Grafana Alloy
+menuTitle: Migrate from Grafana Agent Static
+title: Migrate Grafana Agent Static to Grafana Alloy
 weight: 340
 ---
 
-# Migrate from {{% param "PRODUCT_ROOT_NAME" %}} Static to {{% param "PRODUCT_NAME" %}}
+# Migrate from Grafana Agent Static to {{% param "PRODUCT_NAME" %}}
 
-The built-in {{< param "PRODUCT_ROOT_NAME" >}} convert command can migrate your [Static][] configuration to a {{< param "PRODUCT_NAME" >}} configuration.
+The built-in {{< param "PRODUCT_ROOT_NAME" >}} convert command can migrate your [Grafana Agent Static][Static] configuration to a {{< param "PRODUCT_NAME" >}} configuration.
 
 This topic describes how to:
 
@@ -42,7 +31,7 @@ This topic describes how to:
 
 ## Convert a Grafana Agent Static configuration
 
-To fully migrate Grafana Agent [Static][] to {{< param "PRODUCT_NAME" >}}, you must convert your Static configuration into a {{< param "PRODUCT_NAME" >}} configuration.
+To fully migrate Grafana Agent Static to {{< param "PRODUCT_NAME" >}}, you must convert your Grafana Agent Static configuration into a {{< param "PRODUCT_NAME" >}} configuration.
 This conversion will enable you to take full advantage of the many additional features available in {{< param "PRODUCT_NAME" >}}.
 
 > In this task, you will use the [convert][] CLI command to output a {{< param "PRODUCT_NAME" >}}
 
@@ -64,14 +53,14 @@ This conversion will enable you to take full advantage of the many additional fe
 
    Replace the following:
 
-   * _``_: The full path to the [Static][] configuration.
+   * _``_: The full path to the Grafana Agent Static configuration.
    * _``_: The full path to output the {{< param "PRODUCT_NAME" >}} configuration.
 
-1. [Run][] {{< param "PRODUCT_NAME" >}} using the new {{< param "PRODUCT_NAME" >}} configuration from _``_:
+1. [Run][run alloy] {{< param "PRODUCT_NAME" >}} using the new {{< param "PRODUCT_NAME" >}} configuration from _``_:
 
 ### Debugging
 
-1. If the convert command can't convert a [Static][] configuration, diagnostic information is sent to `stderr`.
+1. If the convert command can't convert a Grafana Agent Static configuration, diagnostic information is sent to `stderr`.
    You can use the `--bypass-errors` flag to bypass any non-critical issues and output the {{< param "PRODUCT_NAME" >}} configuration using a best-effort conversion.
 
   {{< admonition type="caution" >}}
 
@@ -93,7 +82,7 @@ This conversion will enable you to take full advantage of the many additional fe
 
   Replace the following:
 
-   * _``_: The full path to the [Static][] configuration.
+   * _``_: The full path to the Grafana Agent Static configuration.
* _``_: The full path to output the {{< param "PRODUCT_NAME" >}} configuration. 1. You can use the `--report` flag to output a diagnostic report. @@ -112,7 +101,7 @@ This conversion will enable you to take full advantage of the many additional fe Replace the following: - * _``_: The full path to the [Static][] configuration. + * _``_: The full path to the Grafana Agent Static configuration. * _``_: The full path to output the {{< param "PRODUCT_NAME" >}} configuration. * _``_: The output path for the report. @@ -122,16 +111,16 @@ This conversion will enable you to take full advantage of the many additional fe (Warning) Please review your agent command line flags and ensure they are set in your {{< param "PRODUCT_NAME" >}} configuration file where necessary. ``` -## Run a Static mode configuration +## Run a Grafana Agent Static mode configuration If you’re not ready to completely switch to a {{< param "PRODUCT_NAME" >}} configuration, you can run {{< param "PRODUCT_ROOT_NAME" >}} using your existing Grafana Agent Static configuration. -The `--config.format=static` flag tells {{< param "PRODUCT_ROOT_NAME" >}} to convert your [Static] configuration to {{< param "PRODUCT_NAME" >}} and load it directly without saving the new configuration. +The `--config.format=static` flag tells {{< param "PRODUCT_ROOT_NAME" >}} to convert your Grafana Agent Static configuration to {{< param "PRODUCT_NAME" >}} and load it directly without saving the new configuration. This allows you to try {{< param "PRODUCT_NAME" >}} without modifying your existing Grafana Agent Static configuration infrastructure. > In this task, you will use the [run][] CLI command to run {{< param "PRODUCT_NAME" >}} using a Static configuration. [Run][] {{< param "PRODUCT_NAME" >}} and include the command line flag `--config.format=static`. -Your configuration file must be a valid [Static] configuration file. +Your configuration file must be a valid Grafana Agent Static configuration file. ### Debugging @@ -139,7 +128,7 @@ Your configuration file must be a valid [Static] configuration file. 1. Refer to the {{< param "PRODUCT_NAME" >}} [debugging UI][DebuggingUI] for more information about running {{< param "PRODUCT_NAME" >}}. -1. If your [Static] configuration can't be converted and loaded directly into {{< param "PRODUCT_NAME" >}}, diagnostic information is sent to `stderr`. +1. If your Grafana Agent Static configuration can't be converted and loaded directly into {{< param "PRODUCT_NAME" >}}, diagnostic information is sent to `stderr`. You can use the `--config.bypass-conversion-errors` flag with `--config.format=static` to bypass any non-critical issues and start {{< param "PRODUCT_NAME" >}}. {{< admonition type="caution" >}} @@ -149,9 +138,9 @@ Your configuration file must be a valid [Static] configuration file. ## Example -This example demonstrates converting a [Static] configuration file to a {{< param "PRODUCT_NAME" >}} configuration file. +This example demonstrates converting a Grafana Agent Static configuration file to a {{< param "PRODUCT_NAME" >}} configuration file. -The following [Static] configuration file provides the input for the conversion. +The following Grafana Agent Static configuration file provides the input for the conversion. ```yaml server: @@ -223,7 +212,7 @@ grafana-agent-flow convert --source-format=static --output= Replace the following: -* _``_: The full path to the [Static][] configuration. +* _``_: The full path to the Grafana Agent Static configuration. 
* _``_: The full path to output the {{< param "PRODUCT_NAME" >}} configuration. The new {{< param "PRODUCT_NAME" >}} configuration file looks like this: @@ -320,9 +309,9 @@ grafana-agent-flow convert --source-format=static --extra-args="-enable-features {{< /code >}} Replace the following: - * _``_: The full path to the [Static][] configuration. + * _``_: The full path to the Grafana Agent Static configuration. * _``_: The full path to output the {{< param "PRODUCT_NAME" >}} configuration. - + ## Environment Vars You can use the `-config.expand-env` command line flag to interpret environment variables in your Grafana Agent Static configuration. @@ -343,7 +332,7 @@ The following list is specific to the convert command and not {{< param "PRODUCT Any additional unsupported features are returned as errors during conversion. * There is no gRPC server to configure for {{< param "PRODUCT_NAME" >}}, as any non-default configuration will show as unsupported during the conversion. * Check if you are using any extra command line arguments with Static that aren't present in your configuration file. For example, `-server.http.address`. -* Check if you are using any environment variables in your [Static][] configuration. +* Check if you are using any environment variables in your Grafana Agent Static configuration. These will be evaluated during conversion and you may want to replace them with the {{< param "PRODUCT_NAME" >}} Standard library [env][] function after conversion. * Review additional [Prometheus Limitations][] for limitations specific to your [Metrics][] configuration. * Review additional [Promtail Limitations][] for limitations specific to your [Logs][] configuration. @@ -353,49 +342,33 @@ The following list is specific to the convert command and not {{< param "PRODUCT [debugging]: #debugging [example]: #example -{{% docs/reference %}} -[Static]: "/docs/agent/ -> /docs/agent//static" -[Static]: "/docs/grafana-cloud/ -> /docs/grafana-cloud/send-data/agent/static" -[prometheus.scrape]: "/docs/agent/ -> /docs/agent//flow/reference/components/prometheus.scrape.md" -[prometheus.scrape]: "/docs/grafana-cloud/ -> /docs/grafana-cloud/send-data/agent/flow/reference/components/prometheus.scrape.md" -[prometheus.remote_write]: "/docs/agent/ -> /docs/agent//flow/reference/components/prometheus.remote_write.md" -[prometheus.remote_write]: "/docs/grafana-cloud/ -> /docs/grafana-cloud/send-data/agent/flow/reference/components/prometheus.remote_write.md" -[local.file_match]: "/docs/agent/ -> /docs/agent//flow/reference/components/local.file_match.md" -[local.file_match]: "/docs/grafana-cloud/ -> /docs/grafana-cloud/send-data/agent/flow/reference/components/local.file_match.md" -[loki.process]: "/docs/agent/ -> /docs/agent//flow/reference/components/loki.process.md" -[loki.process]: "/docs/grafana-cloud/ -> /docs/grafana-cloud/send-data/agent/flow/reference/components/loki.process.md" -[loki.source.file]: "/docs/agent/ -> /docs/agent//flow/reference/components/loki.source.file.md" -[loki.source.file]: "/docs/grafana-cloud/ -> /docs/grafana-cloud/send-data/agent/flow/reference/components/loki.source.file.md" -[loki.write]: "/docs/agent/ -> /docs/agent//flow/reference/components/loki.write.md" -[loki.write]: "/docs/grafana-cloud/ -> /docs/grafana-cloud/send-data/agent/flow/reference/components/loki.write.md" -[Components]: "/docs/agent/ -> /docs/agent//flow/concepts/components.md" -[Components]: "/docs/grafana-cloud/ -> /docs/grafana-cloud/send-data/agent/flow/concepts/components.md" -[convert]: 
"/docs/agent/ -> /docs/agent//flow/reference/cli/convert.md" -[convert]: "/docs/grafana-cloud/ -> /docs/grafana-cloud/send-data/agent/flow/reference/cli/convert.md" -[run]: "/docs/agent/ -> /docs/agent//flow/reference/cli/run.md" -[run]: "/docs/grafana-cloud/ -> /docs/grafana-cloud/send-data/agent/flow/reference/cli/run.md" -[Run]: "/docs/agent/ -> /docs/agent//flow/get-started/run/" -[Run]: "/docs/grafana-cloud/ -> /docs/grafana-cloud/send-data/agent/flow/get-started/run/" -[DebuggingUI]: "/docs/agent/ -> /docs/agent//flow/tasks/debug.md" -[DebuggingUI]: "/docs/grafana-cloud/ -> /docs/grafana-cloud/send-data/agent/flow/tasks/debug.md" -[River]: "/docs/agent/ -> /docs/agent//flow/concepts/config-language/" -[River]: "/docs/grafana-cloud/ -> /docs/grafana-cloud/send-data/agent/flow/concepts/config-language/" -[Integrations next]: "/docs/agent/ -> /docs/agent//static/configuration/integrations/integrations-next/_index.md" -[Integrations next]: "/docs/grafana-cloud/ -> /docs/grafana-cloud/send-data/agent/static/configuration/traces-config.md -[Traces]: "/docs/agent/ -> /docs/agent//static/configuration/traces-config.md" -[Traces]: "/docs/grafana-cloud/ -> /docs/grafana-cloud/send-data/agent/static/configuration/traces-config.md" -[Agent Management]: "/docs/agent/ -> /docs/agent//static/configuration/agent-management.md" -[Agent Management]: "/docs/grafana-cloud/ -> /docs/grafana-cloud/send-data/agent/static/configuration/agent-management.md" -[env]: "/docs/agent/ -> /docs/agent//flow/reference/stdlib/env.md" -[env]: "/docs/grafana-cloud/ -> /docs/grafana-cloud/send-data/agent/flow/reference/stdlib/env.md" -[Prometheus Limitations]: "/docs/agent/ -> /docs/agent//flow/tasks/migrate/from-prometheus.md#limitations" -[Prometheus Limitations]: "/docs/grafana-cloud/ -> /docs/grafana-cloud/send-data/agent/flow/tasks/migrate/from-prometheus.md#limitations" -[Promtail Limitations]: "/docs/agent/ -> /docs/agent//flow/tasks/migrate/from-promtail.md#limitations" -[Promtail Limitations]: "/docs/grafana-cloud/ -> /docs/grafana-cloud/send-data/agent/flow/tasks/migrate/from-promtail.md#limitations" -[Metrics]: "/docs/agent/ -> /docs/agent//static/configuration/metrics-config.md" -[Metrics]: "/docs/grafana-cloud/ -> /docs/grafana-cloud/send-data/agent/static/configuration/metrics-config.md" -[Logs]: "/docs/agent/ -> /docs/agent//static/configuration/logs-config.md" -[Logs]: "/docs/grafana-cloud/ -> /docs/grafana-cloud/send-data/agent/static/logs-config.md" -[UI]: "/docs/agent/ -> /docs/agent//flow/tasks/debug#grafana-agent-flow-ui" -[UI]: "/docs/grafana-cloud/ -> /docs/grafana-cloud/send-data/agent/flow/tasks/debug#grafana-agent-flow-ui" -{{% /docs/reference %}} + +[Static]: https://grafana.com/docs/agent/latest/static + +[prometheus.scrape]: ../../../reference/components/prometheus.scrape/ +[prometheus.remote_write]: ../../../reference/components/prometheus.remote_write/ +[local.file_match]: ../../../reference/components/local.file_match/ +[loki.process]: ../../../reference/components/loki.process/ +[loki.source.file]: ../../../reference/components/loki.source.file/ +[loki.write]: ../../../reference/components/loki.write/ +[Components]: ../../../concepts/components/ +[convert]: ../../../reference/cli/convert/ +[run]: ../../../reference/cli/run/ +[run alloy]: ../../../get-started/run/ +[DebuggingUI]: ../../debug/ +[River]: ../../../concepts/config-language/ + + +[Integrations next]: https://grafana.com/docs/agent/latest/static/configuration/integrations/integrations-next/ +[Traces]: 
https://grafana.com/docs/agent/latest/static/configuration/traces-config/
+[Agent Management]: https://grafana.com/docs/agent/latest/static/configuration/agent-management/
+
+[env]: ../../../reference/stdlib/env/
+[Prometheus Limitations]: ../from-prometheus/#limitations
+[Promtail Limitations]: ../from-promtail/#limitations
+
+
+[Metrics]: https://grafana.com/docs/agent/latest/static/configuration/metrics-config/
+[Logs]: https://grafana.com/docs/agent/latest/static/configuration/logs-config/
+
+[UI]: ../../debug/#grafana-agent-flow-ui
diff --git a/docs/sources/tasks/monitor/_index.md b/docs/sources/tasks/monitor/_index.md
new file mode 100644
index 0000000000..2dd265cfe1
--- /dev/null
+++ b/docs/sources/tasks/monitor/_index.md
@@ -0,0 +1,13 @@
+---
+canonical: https://grafana.com/docs/alloy/latest/tasks/monitor/
+description: Learn about monitoring Grafana Alloy
+title: Monitor Grafana Alloy
+menuTitle: Monitor
+weight: 110
+---
+
+# How to monitor {{% param "PRODUCT_NAME" %}}
+
+This section details various ways to monitor and debug {{< param "PRODUCT_NAME" >}}.
+
+{{< section >}}
diff --git a/docs/sources/tasks/monitor/component_metrics.md b/docs/sources/tasks/monitor/component_metrics.md
new file mode 100644
index 0000000000..65cdf81261
--- /dev/null
+++ b/docs/sources/tasks/monitor/component_metrics.md
@@ -0,0 +1,28 @@
+---
+canonical: https://grafana.com/docs/alloy/latest/tasks/monitor/component_metrics/
+description: Learn how to monitor component metrics
+title: Monitor components
+weight: 200
+---
+
+# How to monitor components
+
+{{< param "PRODUCT_NAME" >}} [components][] may optionally expose Prometheus metrics, which you can use to investigate the behavior of that component.
+These component-specific metrics are only generated when an instance of that component is running.
+
+> Component-specific metrics are different from any metrics being processed by the component.
+> Component-specific metrics are used to expose the state of a component for observability, alerting, and debugging.
+
+Component-specific metrics are exposed at the `/metrics` HTTP endpoint of the {{< param "PRODUCT_NAME" >}} HTTP server, which defaults to listening on `http://localhost:12345`.
+
+> The documentation for the [`grafana-agent run`][grafana-agent run] command describes how to modify the address {{< param "PRODUCT_NAME" >}} listens on for HTTP traffic.
+
+Component-specific metrics have a `component_id` label matching the component ID generating those metrics.
+For example, component-specific metrics for a `prometheus.remote_write` component labeled `production` have a `component_id` label with the value `prometheus.remote_write.production`.
+
+The [reference documentation][] for each component describes the list of component-specific metrics that the component exposes.
+Not all components expose metrics.
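+Because these are ordinary Prometheus series, one way to keep them queryable is to let the collector scrape its own HTTP server.
+The following is a minimal sketch rather than a prescribed setup: the component labels and the remote write URL are illustrative assumptions.
+
+```river
+// Scrape the collector's own /metrics endpoint (the default HTTP server
+// address) and forward everything, including the component-specific series
+// carrying the component_id label, to a Prometheus-compatible endpoint.
+prometheus.scrape "self" {
+  targets    = [{"__address__" = "localhost:12345"}]
+  forward_to = [prometheus.remote_write.local.receiver]
+}
+
+prometheus.remote_write "local" {
+  endpoint {
+    // Assumed: a local Prometheus started with remote write receiving enabled.
+    url = "http://localhost:9090/api/v1/write"
+  }
+}
+```
+
+Once stored, the `component_id` label lets you narrow a query down to a single component's series.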
+
+[components]: ../../../concepts/components/
+[grafana-agent run]: ../../../reference/cli/run/
+[reference documentation]: ../../../reference/components/
diff --git a/docs/sources/tasks/monitor/controller_metrics.md b/docs/sources/tasks/monitor/controller_metrics.md
new file mode 100644
index 0000000000..6ce2bf5010
--- /dev/null
+++ b/docs/sources/tasks/monitor/controller_metrics.md
@@ -0,0 +1,27 @@
+---
+canonical: https://grafana.com/docs/alloy/latest/tasks/monitor/controller_metrics/
+description: Learn how to monitor controller metrics
+title: Monitor controller
+weight: 100
+---
+
+# How to monitor the controller
+
+The {{< param "PRODUCT_NAME" >}} [component controller][] exposes Prometheus metrics, which you can use to investigate the controller state.
+
+Metrics for the controller are exposed at the `/metrics` HTTP endpoint of the {{< param "PRODUCT_NAME" >}} HTTP server, which defaults to listening on `http://localhost:12345`.
+
+> The documentation for the [`grafana-agent run`][grafana-agent run] command describes how to modify the address {{< param "PRODUCT_NAME" >}} listens on for HTTP traffic.
+
+The controller exposes the following metrics:
+
+* `agent_component_controller_evaluating` (Gauge): Set to `1` whenever the component controller is currently evaluating components.
+  This value may be misrepresented depending on how fast evaluations complete or how often evaluations occur.
+* `agent_component_controller_running_components` (Gauge): The current number of running components by health.
+  The health is represented in the `health_type` label.
+* `agent_component_evaluation_seconds` (Histogram): The time it takes to evaluate components after one of their dependencies is updated.
+* `agent_component_dependencies_wait_seconds` (Histogram): Time spent by components waiting to be evaluated after one of their dependencies is updated.
+* `agent_component_evaluation_queue_size` (Gauge): The current number of component evaluations waiting to be performed.
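+All of the series above share the `agent_component_` prefix, so a small pipeline can retain just the controller's own telemetry.
+This is a hypothetical sketch: the component labels, the keep rule, and the endpoint URL are assumptions for illustration.
+
+```river
+// Scrape the collector's own HTTP server, keep only metric names starting
+// with agent_component_, and forward the matching series for storage.
+prometheus.scrape "self" {
+  targets    = [{"__address__" = "localhost:12345"}]
+  forward_to = [prometheus.relabel.controller_only.receiver]
+}
+
+prometheus.relabel "controller_only" {
+  rule {
+    action        = "keep"
+    source_labels = ["__name__"]
+    regex         = "agent_component_.*"
+  }
+  forward_to = [prometheus.remote_write.local.receiver]
+}
+
+prometheus.remote_write "local" {
+  endpoint {
+    url = "http://localhost:9090/api/v1/write"
+  }
+}
+```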
+
+[component controller]: ../../../concepts/component_controller/
+[grafana-agent run]: ../../../reference/cli/run/
diff --git a/docs/sources/flow/tasks/opentelemetry-to-lgtm-stack.md b/docs/sources/tasks/opentelemetry-to-lgtm-stack.md
similarity index 76%
rename from docs/sources/flow/tasks/opentelemetry-to-lgtm-stack.md
rename to docs/sources/tasks/opentelemetry-to-lgtm-stack.md
index 2da9790783..7d78626a36 100644
--- a/docs/sources/flow/tasks/opentelemetry-to-lgtm-stack.md
+++ b/docs/sources/tasks/opentelemetry-to-lgtm-stack.md
@@ -1,18 +1,6 @@
---
-aliases:
-- /docs/grafana-cloud/agent/flow/tasks/opentelemetry-to-lgtm-stack/
-- /docs/grafana-cloud/monitor-infrastructure/agent/flow/tasks/opentelemetry-to-lgtm-stack/
-- /docs/grafana-cloud/monitor-infrastructure/integrations/agent/flow/tasks/opentelemetry-to-lgtm-stack/
-- /docs/grafana-cloud/send-data/agent/flow/tasks/opentelemetry-to-lgtm-stack/
-# Previous page aliases for backwards compatibility:
-- /docs/grafana-cloud/agent/flow/getting-started/opentelemetry-to-lgtm-stack/
-- /docs/grafana-cloud/monitor-infrastructure/agent/flow/getting-started/opentelemetry-to-lgtm-stack/
-- /docs/grafana-cloud/monitor-infrastructure/integrations/agent/flow/getting-started/opentelemetry-to-lgtm-stack/
-- /docs/grafana-cloud/send-data/agent/flow/getting-started/opentelemetry-to-lgtm-stack/
-- ../getting-started/opentelemetry-to-lgtm-stack/ # /docs/agent/latest/flow/getting-started/opentelemetry-to-lgtm-stack/
-canonical: https://grafana.com/docs/agent/latest/flow/tasks/opentelemetry-to-lgtm-stack/
-description: Learn how to collect OpenTelemetry data and forward it to the Grafana
-  stack
+canonical: https://grafana.com/docs/alloy/latest/tasks/opentelemetry-to-lgtm-stack/
+description: Learn how to collect OpenTelemetry data and forward it to the Grafana stack
title: OpenTelemetry to Grafana stack
weight: 350
---
@@ -44,7 +32,8 @@ This topic describes how to:
* Have a set of OpenTelemetry applications ready to push telemetry data to {{< param "PRODUCT_NAME" >}}.
* Identify where {{< param "PRODUCT_NAME" >}} will write received telemetry data.
* Be familiar with the concept of [Components][] in {{< param "PRODUCT_NAME" >}}.
-* Complete the [Collect open telemetry data][] task. You will pick up from where that guide ended.
+* Complete the [Collect open telemetry data][] task.
+  You will pick up from where that guide ended.

## The pipeline

@@ -98,7 +87,7 @@ Traces: OTel → batch processor → OTel exporter
## Grafana Loki

[Grafana Loki][] is a horizontally scalable, highly available, multi-tenant log aggregation system inspired by Prometheus.
-Similar to Prometheus, to send from OTLP to Loki, you can do a passthrough from the [otelcol.exporter.loki] component to [loki.write] component.
+Similar to Prometheus, to send from OTLP to Loki, you can do a passthrough from the [otelcol.exporter.loki][] component to the [loki.write][] component.
```river otelcol.exporter.loki "default" { @@ -310,7 +299,7 @@ ts=2023-05-09T09:37:15.304109Z component=otelcol.receiver.otlp.default level=inf ts=2023-05-09T09:37:15.304234Z component=otelcol.receiver.otlp.default level=info msg="Starting HTTP server" endpoint=0.0.0.0:4318 ``` -You can now check the pipeline graphically by visiting http://localhost:12345/graph +You can now check the pipeline graphically by visiting [http://localhost:12345/graph][] ![](../../../assets/tasks/otlp-lgtm-graph.png) @@ -320,26 +309,14 @@ You can now check the pipeline graphically by visiting http://localhost:12345/gr [Grafana Cloud Portal]: https://grafana.com/docs/grafana-cloud/account-management/cloud-portal#your-grafana-cloud-stack [Prometheus Remote Write]: https://prometheus.io/docs/operating/integrations/#remote-endpoints-and-storage [Grafana Mimir]: https://grafana.com/oss/mimir/ - -{{% docs/reference %}} -[Collect open telemetry data]: "/docs/agent/ -> /docs/agent//flow/tasks/collect-opentelemetry-data.md" -[Collect open telemetry data]: "/docs/grafana-cloud/ -> /docs/grafana-cloud/send-data/agent/flow/tasks/collect-opentelemetry-data.md" -[Components]: "/docs/agent/ -> /docs/agent//flow/concepts/components.md" -[Components]: "/docs/grafana-cloud/ -> /docs/grafana-cloud/send-data/agent/flow/concepts/components.md" -[loki.write]: "/docs/agent/ -> /docs/agent//flow/reference/components/loki.write.md" -[loki.write]: "/docs/grafana-cloud/ -> /docs/grafana-cloud/send-data/agent/flow/reference/components/loki.write.md" -[otelcol.auth.basic]: "/docs/agent/ -> /docs/agent//flow/reference/components/otelcol.auth.basic.md" -[otelcol.auth.basic]: "/docs/grafana-cloud/ -> /docs/grafana-cloud/send-data/agent/flow/reference/components/otelcol.auth.basic.md" -[otelcol.exporter.loki]: "/docs/agent/ -> /docs/agent//flow/reference/components/otelcol.exporter.loki.md" -[otelcol.exporter.loki]: "/docs/grafana-cloud/ -> /docs/grafana-cloud/send-data/agent/flow/reference/components/otelcol.exporter.loki.md" -[otelcol.exporter.otlp]: "/docs/agent/ -> /docs/agent//flow/reference/components/otelcol.exporter.otlp.md" -[otelcol.exporter.otlp]: "/docs/grafana-cloud/ -> /docs/grafana-cloud/send-data/agent/flow/reference/components/otelcol.exporter.otlp.md" -[otelcol.exporter.prometheus]: "/docs/agent/ -> /docs/agent//flow/reference/components/otelcol.exporter.prometheus.md" -[otelcol.exporter.prometheus]: "/docs/grafana-cloud/ -> /docs/grafana-cloud/send-data/agent/flow/reference/components/otelcol.exporter.prometheus.md" -[otelcol.processor.batch]: "/docs/agent/ -> /docs/agent//flow/reference/components/otelcol.processor.batch.md" -[otelcol.processor.batch]: "/docs/grafana-cloud/ -> /docs/grafana-cloud/send-data/agent/flow/reference/components/otelcol.processor.batch.md" -[otelcol.receiver.otlp]: "/docs/agent/ -> /docs/agent//flow/reference/components/otelcol.receiver.otlp.md" -[otelcol.receiver.otlp]: "/docs/grafana-cloud/ -> /docs/grafana-cloud/send-data/agent/flow/reference/components/otelcol.receiver.otlp.md" -[prometheus.remote_write]: "/docs/agent/ -> /docs/agent//flow/reference/components/prometheus.remote_write.md" -[prometheus.remote_write]: "/docs/grafana-cloud/ -> /docs/grafana-cloud/send-data/agent/flow/reference/components/prometheus.remote_write.md" -{{% /docs/reference %}} +[Collect open telemetry data]: ../collect-opentelemetry-data/ +[Components]: ../../concepts/components/ +[loki.write]: ../../reference/components/loki.write/ +[otelcol.auth.basic]: ../../reference/components/otelcol.auth.basic/ 
+[otelcol.exporter.loki]: ../../reference/components/otelcol.exporter.loki/ +[otelcol.exporter.otlp]: ../../reference/components/otelcol.exporter.otlp/ +[otelcol.exporter.prometheus]: ../../reference/components/otelcol.exporter.prometheus/ +[otelcol.processor.batch]: ../../reference/components/otelcol.processor.batch/ +[otelcol.receiver.otlp]: ../../reference/components/otelcol.receiver.otlp/ +[prometheus.remote_write]: ../../reference/components/prometheus.remote_write/ +[http://localhost:12345/graph]: http://localhost:12345/graph diff --git a/docs/sources/tutorials/_index.md b/docs/sources/tutorials/_index.md new file mode 100644 index 0000000000..03760aa536 --- /dev/null +++ b/docs/sources/tutorials/_index.md @@ -0,0 +1,12 @@ +--- +canonical: https://grafana.com/docs/alloy/latest/tutorials/ +description: Learn how to use Grafana Alloy +title: Tutorials +weight: 300 +--- + +# Tutorials + +This section provides tutorials for learning how to use {{< param "PRODUCT_NAME" >}}. + +{{< section >}} diff --git a/docs/sources/flow/tutorials/assets/docker-compose.yaml b/docs/sources/tutorials/assets/docker-compose.yaml similarity index 100% rename from docs/sources/flow/tutorials/assets/docker-compose.yaml rename to docs/sources/tutorials/assets/docker-compose.yaml diff --git a/docs/sources/flow/tutorials/assets/flow_configs/agent.river b/docs/sources/tutorials/assets/flow_configs/agent.river similarity index 100% rename from docs/sources/flow/tutorials/assets/flow_configs/agent.river rename to docs/sources/tutorials/assets/flow_configs/agent.river diff --git a/docs/sources/flow/tutorials/assets/flow_configs/example.river b/docs/sources/tutorials/assets/flow_configs/example.river similarity index 100% rename from docs/sources/flow/tutorials/assets/flow_configs/example.river rename to docs/sources/tutorials/assets/flow_configs/example.river diff --git a/docs/sources/flow/tutorials/assets/flow_configs/multiple-inputs.river b/docs/sources/tutorials/assets/flow_configs/multiple-inputs.river similarity index 100% rename from docs/sources/flow/tutorials/assets/flow_configs/multiple-inputs.river rename to docs/sources/tutorials/assets/flow_configs/multiple-inputs.river diff --git a/docs/sources/flow/tutorials/assets/flow_configs/relabel.river b/docs/sources/tutorials/assets/flow_configs/relabel.river similarity index 100% rename from docs/sources/flow/tutorials/assets/flow_configs/relabel.river rename to docs/sources/tutorials/assets/flow_configs/relabel.river diff --git a/docs/sources/flow/tutorials/assets/generate.sh b/docs/sources/tutorials/assets/generate.sh similarity index 100% rename from docs/sources/flow/tutorials/assets/generate.sh rename to docs/sources/tutorials/assets/generate.sh diff --git a/docs/sources/flow/tutorials/assets/grafana/config/grafana.ini b/docs/sources/tutorials/assets/grafana/config/grafana.ini similarity index 100% rename from docs/sources/flow/tutorials/assets/grafana/config/grafana.ini rename to docs/sources/tutorials/assets/grafana/config/grafana.ini diff --git a/docs/sources/flow/tutorials/assets/grafana/dashboards-provisioning/dashboards.yaml b/docs/sources/tutorials/assets/grafana/dashboards-provisioning/dashboards.yaml similarity index 100% rename from docs/sources/flow/tutorials/assets/grafana/dashboards-provisioning/dashboards.yaml rename to docs/sources/tutorials/assets/grafana/dashboards-provisioning/dashboards.yaml diff --git a/docs/sources/flow/tutorials/assets/grafana/dashboards/agent.json b/docs/sources/tutorials/assets/grafana/dashboards/agent.json 
similarity index 100% rename from docs/sources/flow/tutorials/assets/grafana/dashboards/agent.json rename to docs/sources/tutorials/assets/grafana/dashboards/agent.json diff --git a/docs/sources/flow/tutorials/assets/grafana/dashboards/template.jsonnet b/docs/sources/tutorials/assets/grafana/dashboards/template.jsonnet similarity index 100% rename from docs/sources/flow/tutorials/assets/grafana/dashboards/template.jsonnet rename to docs/sources/tutorials/assets/grafana/dashboards/template.jsonnet diff --git a/docs/sources/flow/tutorials/assets/grafana/datasources/datasource.yml b/docs/sources/tutorials/assets/grafana/datasources/datasource.yml similarity index 100% rename from docs/sources/flow/tutorials/assets/grafana/datasources/datasource.yml rename to docs/sources/tutorials/assets/grafana/datasources/datasource.yml diff --git a/docs/sources/flow/tutorials/assets/mimir/mimir.yaml b/docs/sources/tutorials/assets/mimir/mimir.yaml similarity index 100% rename from docs/sources/flow/tutorials/assets/mimir/mimir.yaml rename to docs/sources/tutorials/assets/mimir/mimir.yaml diff --git a/docs/sources/flow/tutorials/assets/runt.sh b/docs/sources/tutorials/assets/runt.sh similarity index 100% rename from docs/sources/flow/tutorials/assets/runt.sh rename to docs/sources/tutorials/assets/runt.sh diff --git a/docs/sources/flow/tutorials/chaining.md b/docs/sources/tutorials/chaining.md similarity index 64% rename from docs/sources/flow/tutorials/chaining.md rename to docs/sources/tutorials/chaining.md index 9be20dbc3a..63a97cabf6 100644 --- a/docs/sources/flow/tutorials/chaining.md +++ b/docs/sources/tutorials/chaining.md @@ -1,11 +1,5 @@ --- -aliases: -- ./chaining/ -- /docs/grafana-cloud/agent/flow/tutorials/chaining/ -- /docs/grafana-cloud/monitor-infrastructure/agent/flow/tutorials/chaining/ -- /docs/grafana-cloud/monitor-infrastructure/integrations/agent/flow/tutorials/chaining/ -- /docs/grafana-cloud/send-data/agent/flow/tutorials/chaining/ -canonical: https://grafana.com/docs/agent/latest/flow/tutorials/chaining/ +canonical: https://grafana.com/docs/alloy/latest/tutorials/chaining/ description: Learn how to chain Prometheus components menuTitle: Chain Prometheus components title: Chain Prometheus components @@ -16,7 +10,8 @@ weight: 400 This tutorial shows how to use [multiple-inputs.river][] to send data to several different locations. This tutorial uses the same base as [Filtering metrics][]. -A new concept introduced in Flow is chaining components together in a composable pipeline. This promotes the reusability of components while offering flexibility. +A new concept introduced in {{< param "PRODUCT_NAME" >}} is chaining components together in a composable pipeline. +This promotes the reusability of components while offering flexibility. ## Prerequisites @@ -33,10 +28,11 @@ curl https://raw.githubusercontent.com/grafana/agent/main/docs/sources/flow/tuto The `runt.sh` script does: 1. Downloads the configurations necessary for Mimir, Grafana, and {{< param "PRODUCT_ROOT_NAME" >}}. -2. Downloads the docker image for {{< param "PRODUCT_ROOT_NAME" >}} explicitly. -3. Runs the `docker-compose up` command to bring all the services up. +1. Downloads the docker image for {{< param "PRODUCT_ROOT_NAME" >}} explicitly. +1. Runs the `docker-compose up` command to bring all the services up. -Allow {{< param "PRODUCT_ROOT_NAME" >}} to run for two minutes, then navigate to [Grafana][] to see {{< param "PRODUCT_ROOT_NAME" >}} scrape metrics. The [node_exporter][] metrics also show up now. 
+Allow {{< param "PRODUCT_ROOT_NAME" >}} to run for two minutes, then navigate to [Grafana][] to see {{< param "PRODUCT_ROOT_NAME" >}} scrape metrics.
+The [node_exporter][] metrics also show up now.

There are two scrapes, each sending metrics to one filter. Note the `job` label lists the full name of the scrape component.
@@ -74,7 +70,8 @@ prometheus.remote_write "prom" {
}
```

-In the Flow block, `prometheus.relabel.service` is being forwarded metrics from two sources `prometheus.scrape.agent` and `prometheus.exporter.unix.default`. This allows for a single relabel component to be used with any number of inputs.
+In the {{< param "PRODUCT_ROOT_NAME" >}} configuration above, `prometheus.relabel.service` receives metrics from two sources: `prometheus.scrape.agent` and `prometheus.exporter.unix.default`.
+This allows a single relabel component to be used with any number of inputs.

## Adding another relabel

@@ -82,11 +79,7 @@ In `multiple-input.river` add a new `prometheus.relabel` component that adds a `

![Add a new label with the value v2](/media/docs/agent/screenshot-grafana-agent-chaining-scrape-v2.png)

-[multiple-inputs.river]: https://grafana.com/docs/agent//flow/tutorials/assets/flow_configs/multiple-inputs.river
+[multiple-inputs.river]: ../assets/flow_configs/multiple-inputs.river
+[Filtering metrics]: ../filtering-metrics/
[Grafana]: http://localhost:3000/explore?orgId=1&left=%5B%22now-1h%22,%22now%22,%22Mimir%22,%7B%22refId%22:%22A%22,%22instant%22:true,%22range%22:true,%22exemplar%22:true,%22expr%22:%22agent_build_info%7B%7D%22%7D%5D
[node_exporter]: http://localhost:3000/explore?orgId=1&left=%5B%22now-1h%22,%22now%22,%22Mimir%22,%7B%22refId%22:%22A%22,%22instant%22:true,%22range%22:true,%22exemplar%22:true,%22expr%22:%22node_cpu_seconds_total%22%7D%5D
-
-{{% docs/reference %}}
-[Filtering metrics]: "/docs/agent/ -> /docs/agent//flow/tutorials/filtering-metrics.md"
-[Filtering metrics]: "/docs/grafana-cloud/ -> /docs/grafana-cloud/send-data/agent/flow/tutorials/filtering-metrics.md"
-{{% /docs/reference %}}
diff --git a/docs/sources/flow/tutorials/collecting-prometheus-metrics.md b/docs/sources/tutorials/collecting-prometheus-metrics.md
similarity index 60%
rename from docs/sources/flow/tutorials/collecting-prometheus-metrics.md
rename to docs/sources/tutorials/collecting-prometheus-metrics.md
index a665474190..d5600a4010 100644
--- a/docs/sources/flow/tutorials/collecting-prometheus-metrics.md
+++ b/docs/sources/tutorials/collecting-prometheus-metrics.md
@@ -1,11 +1,5 @@
---
-aliases:
-- ./collecting-prometheus-metrics/
-- /docs/grafana-cloud/agent/flow/tutorials/collecting-prometheus-metrics/
-- /docs/grafana-cloud/monitor-infrastructure/agent/flow/tutorials/collecting-prometheus-metrics/
-- /docs/grafana-cloud/monitor-infrastructure/integrations/agent/flow/tutorials/collecting-prometheus-metrics/
-- /docs/grafana-cloud/send-data/agent/flow/tutorials/collecting-prometheus-metrics/
-canonical: https://grafana.com/docs/agent/latest/flow/tutorials/collecting-prometheus-metrics/
+canonical: https://grafana.com/docs/alloy/latest/tutorials/collecting-prometheus-metrics/
description: Learn how to collect Prometheus metrics
menuTitle: Collect Prometheus metrics
title: Collect Prometheus metrics
@@ -14,7 +8,8 @@ weight: 200

# Collect Prometheus metrics

-{{< param "PRODUCT_ROOT_NAME" >}} is a telemetry collector with the primary goal of moving telemetry data from one location to another. In this tutorial, you'll set up {{< param "PRODUCT_NAME" >}}.
+{{< param "PRODUCT_ROOT_NAME" >}} is a telemetry collector with the primary goal of moving telemetry data from one location to another.
+In this tutorial, you'll set up {{< param "PRODUCT_NAME" >}}.

## Prerequisites

@@ -31,8 +26,8 @@ curl https://raw.githubusercontent.com/grafana/agent/main/docs/sources/flow/tuto
The `runt.sh` script does:

1. Downloads the configurations necessary for Mimir, Grafana, and {{< param "PRODUCT_ROOT_NAME" >}}.
-2. Downloads the docker image for {{< param "PRODUCT_ROOT_NAME" >}} explicitly.
-3. Runs the docker-compose up command to bring all the services up.
+1. Downloads the docker image for {{< param "PRODUCT_ROOT_NAME" >}} explicitly.
+1. Runs the docker-compose up command to bring all the services up.

Allow {{< param "PRODUCT_ROOT_NAME" >}} to run for two minutes, then navigate to [Grafana][].

@@ -44,7 +39,8 @@ Navigate to `http://localhost:12345/graph` to view the {{< param "PRODUCT_NAME" 

![The User Interface](/media/docs/agent/screenshot-grafana-agent-collect-metrics-graph.png)

-{{< param "PRODUCT_ROOT_NAME" >}} displays the component pipeline in a dependency graph. See [Scraping component](#scraping-component) and [Remote Write component](#remote-write-component) for details about the components used in this configuration. Click the nodes to navigate to the associated component page. There, you can view the state, health information, and, if applicable, the debug information.
+{{< param "PRODUCT_ROOT_NAME" >}} displays the component pipeline in a dependency graph.
+See [Scraping component](#scraping-component) and [Remote Write component](#remote-write-component) for details about the components used in this configuration. Click the nodes to navigate to the associated component page. There, you can view the state, health information, and, if applicable, the debug information.

![Component information](/media/docs/agent/screenshot-grafana-agent-collect-metrics-comp-info.png)

@@ -67,11 +63,14 @@ prometheus.scrape "default" {
}
```

-The `prometheus.scrape "default"` annotation indicates the name of the component, `prometheus.scrape`, and its label, `default`. All components must have a unique combination of name and if applicable label.
+The `prometheus.scrape "default"` annotation indicates the name of the component, `prometheus.scrape`, and its label, `default`.
+All components must have a unique combination of name and, if applicable, label.

-The `targets` [attribute][] is an [argument][]. `targets` is a list of labels that specify the target via the special key `__address__`. The scraper is targeting the {{< param "PRODUCT_NAME" >}} `/metrics` endpoint. Both `http` and `/metrics` are implied but can be overridden.
+The `targets` [attribute][] is an [argument][]. `targets` is a list of labels that specify the target via the special key `__address__`.
+The scraper is targeting the {{< param "PRODUCT_NAME" >}} `/metrics` endpoint. Both `http` and `/metrics` are implied but can be overridden.

-The `forward_to` attribute is an argument that references the [export][] of the `prometheus.remote_write.prom` component. This is where the scraper will send the metrics for further processing.
+The `forward_to` attribute is an argument that references the [export][] of the `prometheus.remote_write.prom` component.
+This is where the scraper will send the metrics for further processing.
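+As a sketch of that override, a target can carry the standard Prometheus special labels alongside `__address__`.
+The extra labels below follow general Prometheus target conventions and are an assumption, not part of this tutorial's configuration:
+
+```river
+// Spell out the scheme and metrics path that the configuration above leaves
+// implied. forward_to reuses the tutorial's prometheus.remote_write.prom
+// component.
+prometheus.scrape "explicit_defaults" {
+  targets    = [{"__address__" = "localhost:12345", "__scheme__" = "http", "__metrics_path__" = "/metrics"}]
+  forward_to = [prometheus.remote_write.prom.receiver]
+}
+```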
## Remote Write component @@ -95,16 +94,8 @@ To try out {{< param "PRODUCT_ROOT_NAME" >}} without using Docker: [Docker]: https://www.docker.com/products/docker-desktop [Grafana]: http://localhost:3000/explore?orgId=1&left=%5B%22now-1h%22,%22now%22,%22Mimir%22,%7B%22refId%22:%22A%22,%22instant%22:true,%22range%22:true,%22exemplar%22:true,%22expr%22:%22agent_build_info%7B%7D%22%7D%5D - -{{% docs/reference %}} -[prometheus.scrape]: "/docs/agent/ -> /docs/agent//flow/reference/components/prometheus.scrape.md" -[prometheus.scrape]: "/docs/grafana-cloud/ -> /docs/grafana-cloud/send-data/agent/flow/reference/components/prometheus.scrape.md" -[attribute]: "/docs/agent/ -> /docs/agent//flow/concepts/config-language/#attributes" -[attribute]: "/docs/grafana-cloud/ -> /docs/grafana-cloud/send-data/agent/flow/concepts/config-language/#attributes" -[argument]: "/docs/agent/ -> /docs/agent//flow/concepts/components" -[argument]: "/docs/grafana-cloud/ -> /docs/grafana-cloud/send-data/agent/flow/concepts/components" -[export]: "/docs/agent/ -> /docs/agent//flow/concepts/components" -[export]: "/docs/grafana-cloud/ -> /docs/grafana-cloud/send-data/agent/flow/concepts/components" -[prometheus.remote_write]: "/docs/agent/ -> /docs/agent//flow/reference/components/prometheus.remote_write.md" -[prometheus.remote_write]: "/docs/grafana-cloud/ -> /docs/grafana-cloud/send-data/agent/flow/reference/components/prometheus.remote_write.md" -{{% /docs/reference %}} +[prometheus.scrape]: ../../reference/components/prometheus.scrape/ +[attribute]: ../../concepts/config-language/#attributes +[argument]: ../../concepts/components/ +[export]: ../../concepts/components/ +[prometheus.remote_write]: ../../reference/components/prometheus.remote_write/ diff --git a/docs/sources/flow/tutorials/filtering-metrics.md b/docs/sources/tutorials/filtering-metrics.md similarity index 64% rename from docs/sources/flow/tutorials/filtering-metrics.md rename to docs/sources/tutorials/filtering-metrics.md index ec942124ec..5d36c45c13 100644 --- a/docs/sources/flow/tutorials/filtering-metrics.md +++ b/docs/sources/tutorials/filtering-metrics.md @@ -1,11 +1,5 @@ --- -aliases: -- ./filtering-metrics/ -- /docs/grafana-cloud/agent/flow/tutorials/filtering-metrics/ -- /docs/grafana-cloud/monitor-infrastructure/agent/flow/tutorials/filtering-metrics/ -- /docs/grafana-cloud/monitor-infrastructure/integrations/agent/flow/tutorials/filtering-metrics/ -- /docs/grafana-cloud/send-data/agent/flow/tutorials/filtering-metrics/ -canonical: https://grafana.com/docs/agent/latest/flow/tutorials/filtering-metrics/ +canonical: https://grafana.com/docs/alloy/latest/tutorials/filtering-metrics/ description: Learn how to filter Prometheus metrics menuTitle: Filter Prometheus metrics title: Filter Prometheus metrics @@ -14,7 +8,8 @@ weight: 300 # Filter Prometheus metrics -In this tutorial, you'll add a new component [prometheus.relabel][] using [relabel.river][] to filter metrics. This tutorial uses the same base as [Collecting Prometheus metrics][]. +In this tutorial, you'll add a new component [prometheus.relabel][] using [relabel.river][] to filter metrics. +This tutorial uses the same base as [Collect Prometheus metrics][]. 
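+To give a flavor of such a component before the setup steps, here is a minimal sketch.
+It is illustrative only and not the tutorial's actual `relabel.river`: the label, the rule, and the downstream receiver are placeholder assumptions.
+
+```river
+// Drop every series whose metric name starts with "go_" and forward
+// everything else to an existing prometheus.remote_write component.
+prometheus.relabel "example_filter" {
+  rule {
+    action        = "drop"
+    source_labels = ["__name__"]
+    regex         = "go_.*"
+  }
+  forward_to = [prometheus.remote_write.prom.receiver]
+}
+```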
## Prerequisites
@@ -53,14 +48,8 @@ Open the `relabel.river` file that was downloaded and change the name of the ser

![Updated dashboard showing api_server_v2](/media/docs/agent/screenshot-grafana-agent-filtering-metrics-transition.png)

-
[Docker]: https://www.docker.com/products/docker-desktop
[Grafana]: http://localhost:3000/explore?orgId=1&left=%5B%22now-1h%22,%22now%22,%22Mimir%22,%7B%22refId%22:%22A%22,%22instant%22:true,%22range%22:true,%22exemplar%22:true,%22expr%22:%22agent_build_info%7B%7D%22%7D%5D
-[relabel.river]: https://grafana.com/docs/agent//flow/tutorials/assets/flow_configs/relabel.river
-
-{{% docs/reference %}}
-[prometheus.relabel]: "/docs/agent/ -> /docs/agent//flow/reference/components/prometheus.relabel.md"
-[prometheus.relabel]: "/docs/grafana-cloud/ -> /docs/grafana-cloud/send-data/agent/flow/reference/components/prometheus.relabel.md"
-[Collecting Prometheus metrics]: "/docs/agent/ -> /docs/agent//flow/tutorials/collecting-prometheus-metrics.md"
-[Collecting Prometheus metrics]: "/docs/grafana-cloud/ -> /docs/grafana-cloud/send-data/agent/flow/tutorials/collecting-prometheus-metrics.md"
-{{% /docs/reference %}}
+[relabel.river]: ../assets/flow_configs/relabel.river
+[prometheus.relabel]: ../../reference/components/prometheus.relabel/
+[Collect Prometheus metrics]: ../collecting-prometheus-metrics/
diff --git a/docs/sources/tutorials/flow-by-example/_index.md b/docs/sources/tutorials/flow-by-example/_index.md
new file mode 100644
index 0000000000..33f34193dd
--- /dev/null
+++ b/docs/sources/tutorials/flow-by-example/_index.md
@@ -0,0 +1,12 @@
+---
+canonical: https://grafana.com/docs/alloy/latest/tutorials/flow-by-example/
+description: Learn how to use Grafana Alloy
+title: Flow by example
+weight: 100
+---
+
+# Flow by example
+
+This section provides a set of step-by-step tutorials that show how to use {{< param "PRODUCT_NAME" >}}.
+
+{{< section >}}
diff --git a/docs/sources/flow/tutorials/flow-by-example/first-components-and-stdlib/index.md b/docs/sources/tutorials/flow-by-example/first-components-and-stdlib/index.md
similarity index 58%
rename from docs/sources/flow/tutorials/flow-by-example/first-components-and-stdlib/index.md
rename to docs/sources/tutorials/flow-by-example/first-components-and-stdlib/index.md
index 59bc59c5d1..363a4e8294 100644
--- a/docs/sources/flow/tutorials/flow-by-example/first-components-and-stdlib/index.md
+++ b/docs/sources/tutorials/flow-by-example/first-components-and-stdlib/index.md
@@ -1,10 +1,5 @@
---
-aliases:
-- /docs/grafana-cloud/agent/flow/tutorials/flow-by-example/first-components-and-stdlib/
-- /docs/grafana-cloud/monitor-infrastructure/agent/flow/tutorials/flow-by-example/first-components-and-stdlib/
-- /docs/grafana-cloud/monitor-infrastructure/integrations/agent/flow/tutorials/flow-by-example/first-components-and-stdlib/
-- /docs/grafana-cloud/send-data/agent/flow/tutorials/first-components-and-stdlib/
-canonical: https://grafana.com/docs/agent/latest/flow/tutorials/flow-by-example/first-components-and-stdlib/
+canonical: https://grafana.com/docs/alloy/latest/tutorials/flow-by-example/first-components-and-stdlib/
description: Learn about the basics of River and the configuration language
title: First components and introducing the standard library
weight: 20
@@ -12,20 +7,18 @@ weight: 20

# First components and the standard library

-This tutorial covers the basics of the River language and the standard library. It introduces a basic pipeline that collects metrics from the host and sends them to Prometheus.
+This tutorial covers the basics of the River language and the standard library.
+It introduces a basic pipeline that collects metrics from the host and sends them to Prometheus.

## River basics

-[Configuration language]: https://grafana.com/docs/agent//flow/concepts/config-language/
-[Configuration language concepts]: https://grafana.com/docs/agent//flow/concepts/configuration_language/
-[Standard library documentation]: https://grafana.com/docs/agent//flow/reference/stdlib/
-
**Recommended reading**

- [Configuration language][]
- [Configuration language concepts][]

-[River](https://github.com/grafana/river) is an HCL-inspired configuration language used to configure {{< param "PRODUCT_NAME" >}}. A River file is comprised of three things:
+[River][] is an HCL-inspired configuration language used to configure {{< param "PRODUCT_NAME" >}}.
+A River file is composed of three things:

1. **Attributes**

@@ -37,11 +30,15 @@ This tutorial covers the basics of the River language and the standard library.

1. **Expressions**

-   Expressions are used to compute values. They can be constant values (for example, `"localhost:9090"`), or they can be more complex (for example, referencing a component's export: `prometheus.exporter.unix.targets`. They can also be a mathematical expression: `(1 + 2) * 3`, or a standard library function call: `env("HOME")`). We will use more expressions as we go along the examples. If you are curious, you can find a list of available standard library functions in the [Standard library documentation][].
+   Expressions are used to compute values.
+   They can be constant values (for example, `"localhost:9090"`), or they can be more complex (for example, referencing a component's export: `prometheus.exporter.unix.targets`).
+   They can also be a mathematical expression: `(1 + 2) * 3`, or a standard library function call: `env("HOME")`. We will use more expressions as we go through the examples.
+   If you are curious, you can find a list of available standard library functions in the [Standard library documentation][].

1. **Blocks**

-   Blocks are used to configure components with groups of attributes or nested blocks. The following example block can be used to configure the logging output of {{< param "PRODUCT_NAME" >}}:
+   Blocks are used to configure components with groups of attributes or nested blocks.
+   The following example block can be used to configure the logging output of {{< param "PRODUCT_NAME" >}}:

```river
logging {
@@ -64,11 +61,6 @@ Comments in River are prefixed with `//` and are single-line only. For example:

## Components

-[Components]: https://grafana.com/docs/agent//flow/concepts/components/
-[Component controller]: https://grafana.com/docs/agent//flow/concepts/component_controller/
-[Components configuration language]: https://grafana.com/docs/agent//flow/concepts/config-language/components/
-[env]: https://grafana.com/docs/agent//flow/reference/stdlib/env/
-
**Recommended reading**

- [Components][]

@@ -97,31 +89,34 @@ prometheus.remote_write "local_prom" {
}
```

{{< admonition type="note" >}}
-[Component reference]: https://grafana.com/docs/agent//flow/reference/components/
+A list of all available components can be found in the [Component reference][].
+Each component has a link to its documentation, which contains a description of what the component does, its arguments, its exports, and examples.

-A list of all available components can be found in the [Component reference][].
Each component has a link to its documentation, which contains a description of what the component does, its arguments, its exports, and examples. +[Component reference]: ../../../reference/components/ {{< /admonition >}} -This pipeline has two components: `local.file` and `prometheus.remote_write`. The `local.file` component is configured with a single argument, `path`, which is set by calling the [env][] standard library function to retrieve the value of the `HOME` environment variable and concatenating it with the string `"file.txt"`. The `local.file` component has a single export, `content`, which contains the contents of the file. +This pipeline has two components: `local.file` and `prometheus.remote_write`. +The `local.file` component is configured with a single argument, `path`, which is set by calling the [env][] standard library function to retrieve the value of the `HOME` environment variable and concatenating it with the string `"file.txt"`. +The `local.file` component has a single export, `content`, which contains the contents of the file. -The `prometheus.remote_write` component is configured with an `endpoint` block, containing the `url` attribute and a `basic_auth` block. The `url` attribute is set to the URL of the Prometheus remote write endpoint. The `basic_auth` block contains the `username` and `password` attributes, which are set to the string `"admin"` and the `content` export of the `local.file` component, respectively. The `content` export is referenced by using the syntax `local.file.example.content`, where `local.file.example` is the fully qualified name of the component (the component's type + its label) and `content` is the name of the export. +The `prometheus.remote_write` component is configured with an `endpoint` block, containing the `url` attribute and a `basic_auth` block. +The `url` attribute is set to the URL of the Prometheus remote write endpoint. +The `basic_auth` block contains the `username` and `password` attributes, which are set to the string `"admin"` and the `content` export of the `local.file` component, respectively. +The `content` export is referenced by using the syntax `local.file.example.content`, where `local.file.example` is the fully qualified name of the component (the component's type + its label) and `content` is the name of the export.
*Flow of example pipeline with local.file and prometheus.remote_write components*
{{< admonition type="note" >}} -The `local.file` component's label is set to `"example"`, so the fully qualified name of the component is `local.file.example`. The `prometheus.remote_write` component's label is set to `"local_prom"`, so the fully qualified name of the component is `prometheus.remote_write.local_prom`. +The `local.file` component's label is set to `"example"`, so the fully qualified name of the component is `local.file.example`. +The `prometheus.remote_write` component's label is set to `"local_prom"`, so the fully qualified name of the component is `prometheus.remote_write.local_prom`. {{< /admonition >}} This example pipeline still doesn't do anything, so let's add some more components to it. ## Shipping your first metrics -[prometheus.exporter.unix]: https://grafana.com/docs/agent//flow/reference/components/prometheus.exporter.unix/ -[prometheus.scrape]: https://grafana.com/docs/agent//flow/reference/components/prometheus.scrape/ -[prometheus.remote_write]: https://grafana.com/docs/agent//flow/reference/components/prometheus.remote_write/ - **Recommended reading** - Optional: [prometheus.exporter.unix][] @@ -158,7 +153,9 @@ Run {{< param "PRODUCT_NAME" >}} with: /path/to/agent run config.river ``` -Navigate to [http://localhost:3000/explore](http://localhost:3000/explore) in your browser. After ~15-20 seconds, you should be able to see the metrics from the `prometheus.exporter.unix` component! Try querying for `node_memory_Active_bytes` to see the active memory of your host. +Navigate to [http://localhost:3000/explore][] in your browser. +After ~15-20 seconds, you should be able to see the metrics from the `prometheus.exporter.unix` component. +Try querying for `node_memory_Active_bytes` to see the active memory of your host.

Screenshot of node_memory_Active_bytes query in Grafana @@ -175,17 +172,18 @@ The following diagram is an example pipeline: The preceding configuration defines three components: - `prometheus.scrape` - A component that scrapes metrics from components that export targets. -- `prometheus.exporter.unix` - A component that exports metrics from the host, built around [node_exporter](https://github.com/prometheus/node_exporter). +- `prometheus.exporter.unix` - A component that exports metrics from the host, built around [node_exporter][]. - `prometheus.remote_write` - A component that sends metrics to a Prometheus remote-write compatible endpoint. -The `prometheus.scrape` component references the `prometheus.exporter.unix` component's targets export, which is a list of scrape targets. The `prometheus.scrape` component then forwards the scraped metrics to the `prometheus.remote_write` component. +The `prometheus.scrape` component references the `prometheus.exporter.unix` component's targets export, which is a list of scrape targets. +The `prometheus.scrape` component then forwards the scraped metrics to the `prometheus.remote_write` component. -One rule is that components can't form a cycle. This means that a component can't reference itself directly or indirectly. This is to prevent infinite loops from forming in the pipeline. +One rule is that components can't form a cycle. +This means that a component can't reference itself directly or indirectly. +This is to prevent infinite loops from forming in the pipeline. ## Exercise for the reader -[prometheus.exporter.redis]: https://grafana.com/docs/agent//flow/reference/components/prometheus.exporter.redis/ - **Recommended Reading** - Optional: [prometheus.exporter.redis][] @@ -196,7 +194,8 @@ Let's start a container running Redis and configure {{< param "PRODUCT_NAME" >}} docker container run -d --name flow-redis -p 6379:6379 --rm redis ``` -Try modifying the pipeline to scrape metrics from the Redis exporter. You can refer to the [prometheus.exporter.redis][] component documentation for more information on how to configure it. +Try modifying the pipeline to scrape metrics from the Redis exporter. +You can refer to the [prometheus.exporter.redis][] component documentation for more information on how to configure it. To give a visual hint, you want to create a pipeline that looks like this: @@ -205,9 +204,9 @@ To give a visual hint, you want to create a pipeline that looks like this:

{{< admonition type="note" >}} -[concat]: https://grafana.com/docs/agent//flow/reference/stdlib/concat/ - You may find the [concat][] standard library function useful. + +[concat]: ../../../reference/stdlib/concat/ {{< /admonition >}} You can run {{< param "PRODUCT_NAME" >}} with the new configuration file by running: @@ -216,7 +215,8 @@ You can run {{< param "PRODUCT_NAME" >}} with the new configuration file by runn /path/to/agent run config.river ``` -Navigate to [http://localhost:3000/explore](http://localhost:3000/explore) in your browser. After the first scrape, you should be able to query for `redis` metrics as well as `node` metrics. +Navigate to [http://localhost:3000/explore][] in your browser. +After the first scrape, you should be able to query for `redis` metrics as well as `node` metrics. To shut down the Redis container, run: @@ -225,10 +225,11 @@ docker container stop flow-redis ``` If you get stuck, you can always view a solution here: + {{< collapse title="Solution" >}} ```river -// Configure your first components, learn about the standard library, and learn how to run Grafana Agent +// Configure your first components, learn about the standard library, and learn how to run Grafana Alloy // prometheus.exporter.redis collects information about Redis and exposes // targets for other components to use @@ -267,8 +268,27 @@ prometheus.remote_write "local_prom" { ## Finishing up and next steps -You might have noticed that running {{< param "PRODUCT_NAME" >}} with the configurations created a directory called `data-agent` in the directory you ran {{< param "PRODUCT_NAME" >}} from. This directory is where components can store data, such as the `prometheus.exporter.unix` component storing its WAL (Write Ahead Log). If you look in the directory, do you notice anything interesting? The directory for each component is the fully qualified name. - -If you'd like to store the data elsewhere, you can specify a different directory by supplying the `--storage.path` flag to {{< param "PRODUCT_ROOT_NAME" >}}'s run command, for example, `/path/to/agent run config.river --storage.path /etc/grafana-agent`. Generally, you can use a persistent directory for this, as some components may use the data stored in this directory to perform their function. - -In the next tutorial, you will look at how to configure {{< param "PRODUCT_NAME" >}} to collect logs from a file and send them to Loki. You will also look at using different components to process metrics and logs before sending them. +You might have noticed that running {{< param "PRODUCT_NAME" >}} with the configurations created a directory called `data-agent` in the directory you ran {{< param "PRODUCT_NAME" >}} from. +This directory is where components can store data, such as the `prometheus.exporter.unix` component storing its WAL (Write Ahead Log). +If you look in the directory, do you notice anything interesting? The directory for each component is the fully qualified name. + +If you'd like to store the data elsewhere, you can specify a different directory by supplying the `--storage.path` flag to {{< param "PRODUCT_ROOT_NAME" >}}'s run command, for example, `/path/to/agent run config.river --storage.path /etc/grafana-agent`. +Generally, you can use a persistent directory for this, as some components may use the data stored in this directory to perform their function. + +In the next tutorial, you will look at how to configure {{< param "PRODUCT_NAME" >}} to collect logs from a file and send them to Loki. 
+You will also look at using different components to process metrics and logs before sending them. + +[Configuration language]: ../../../concepts/config-language/ +[Configuration language concepts]: ../../../concepts/configuration_language/ +[Standard library documentation]: ../../../reference/stdlib/ +[node_exporter]: https://github.com/prometheus/node_exporter +[River]: https://github.com/grafana/river +[prometheus.exporter.redis]: ../../../reference/components/prometheus.exporter.redis/ +[http://localhost:3000/explore]: http://localhost:3000/explore +[prometheus.exporter.unix]: ../../../reference/components/prometheus.exporter.unix/ +[prometheus.scrape]: ../../../reference/components/prometheus.scrape/ +[prometheus.remote_write]: ../../../reference/components/prometheus.remote_write/ +[Components]: ../../../concepts/components/ +[Component controller]: ../../../concepts/component_controller/ +[Components configuration language]: ../../../concepts/config-language/components/ +[env]: ../../../reference/stdlib/env/ diff --git a/docs/sources/tutorials/flow-by-example/get-started.md b/docs/sources/tutorials/flow-by-example/get-started.md new file mode 100644 index 0000000000..93d3fc0285 --- /dev/null +++ b/docs/sources/tutorials/flow-by-example/get-started.md @@ -0,0 +1,97 @@ +--- +canonical: https://grafana.com/docs/alloy/latest/tutorials/flow-by-example/get-started/ +description: Getting started with Flow-by-Example Tutorials +title: Get started +weight: 10 +--- + +## Who is this for? + +This set of tutorials contains a collection of examples that build on each other to demonstrate how to configure and use [{{< param "PRODUCT_NAME" >}}][alloy]. +It assumes you have a basic understanding of what {{< param "PRODUCT_ROOT_NAME" >}} is and telemetry collection in general. +It also assumes a base level of familiarity with Prometheus and PromQL, Loki and LogQL, and basic Grafana navigation. +It assumes no knowledge of {{< param "PRODUCT_NAME" >}} or River concepts. + +## What is {{% param "PRODUCT_NAME" %}}? + +{{< param "PRODUCT_NAME" >}} uses a declarative configuration language that allows you to define a pipeline of telemetry collection, processing, and output. +It is built on top of the [River][] configuration language, which is designed to be fast, simple, and debuggable. + +## What do I need to get started? + +You will need a Linux or Unix environment with Docker installed. +The examples are designed to be run on a single host so that you can run them on your laptop or in a VM. +You are encouraged to follow along with the examples using a `config.river` file and experiment with the examples yourself. + +To run the examples, you should have a {{< param "PRODUCT_NAME" >}} binary available. +You can follow the instructions on how to [Install {{< param "PRODUCT_NAME" >}} as a Standalone Binary][install] to get a binary. + +## How should I follow along? + +You can use this Docker-compose file to set up a local Grafana instance alongside Loki and Prometheus pre-configured as datasources. +The examples are designed to be run locally, so you can follow along and experiment with them yourself. 
+
+```yaml
+version: '3'
+services:
+  loki:
+    image: grafana/loki:2.9.0
+    ports:
+      - "3100:3100"
+    command: -config.file=/etc/loki/local-config.yaml
+  prometheus:
+    image: prom/prometheus:v2.47.0
+    command:
+      - --web.enable-remote-write-receiver
+      - --config.file=/etc/prometheus/prometheus.yml
+    ports:
+      - "9090:9090"
+  grafana:
+    environment:
+      - GF_PATHS_PROVISIONING=/etc/grafana/provisioning
+      - GF_AUTH_ANONYMOUS_ENABLED=true
+      - GF_AUTH_ANONYMOUS_ORG_ROLE=Admin
+    entrypoint:
+      - sh
+      - -euc
+      - |
+        mkdir -p /etc/grafana/provisioning/datasources
+        cat <<EOF > /etc/grafana/provisioning/datasources/ds.yaml
+        apiVersion: 1
+        datasources:
+        - name: Loki
+          type: loki
+          access: proxy
+          orgId: 1
+          url: http://loki:3100
+          basicAuth: false
+          isDefault: false
+          version: 1
+          editable: false
+        - name: Prometheus
+          type: prometheus
+          orgId: 1
+          url: http://prometheus:9090
+          basicAuth: false
+          isDefault: true
+          version: 1
+          editable: false
+        EOF
+        /run.sh
+    image: grafana/grafana:latest
+    ports:
+      - "3000:3000"
+```
+
+After running `docker-compose up`, open [http://localhost:3000](http://localhost:3000) in your browser to view the Grafana UI.
+
+The tutorials are designed to be followed in order and generally build on each other.
+Each example explains what it does and how it works.
+They are designed to be run locally, so you can follow along and experiment with them yourself.
+
+The Recommended Reading sections in each tutorial provide a list of documentation topics.
+To help you understand the concepts used in the example, read the recommended topics in the order given.
+
+[alloy]: https://grafana.com/docs/alloy/latest/
+[River]: https://github.com/grafana/river
+[install]: ../../../setup/install/binary/#install-grafana-agent-in-flow-mode-as-a-standalone-binary
diff --git a/docs/sources/flow/tutorials/flow-by-example/logs-and-relabeling-basics/index.md b/docs/sources/tutorials/flow-by-example/logs-and-relabeling-basics/index.md
similarity index 64%
rename from docs/sources/flow/tutorials/flow-by-example/logs-and-relabeling-basics/index.md
rename to docs/sources/tutorials/flow-by-example/logs-and-relabeling-basics/index.md
index 02c7c3c138..f5d9c97820 100644
--- a/docs/sources/flow/tutorials/flow-by-example/logs-and-relabeling-basics/index.md
+++ b/docs/sources/tutorials/flow-by-example/logs-and-relabeling-basics/index.md
@@ -1,10 +1,5 @@
 ---
-aliases:
-- /docs/grafana-cloud/agent/flow/tutorials/flow-by-example/logs-and-relabeling-basics/
-- /docs/grafana-cloud/monitor-infrastructure/agent/flow/tutorials/flow-by-example/logs-and-relabeling-basics/
-- /docs/grafana-cloud/monitor-infrastructure/integrations/agent/flow/tutorials/flow-by-example/logs-and-relabeling-basics/
-- /docs/grafana-cloud/send-data/agent/flow/tutorials/logs-and-relabeling-basics/
-canonical: https://grafana.com/docs/agent/latest/flow/tutorials/flow-by-example/logs-and-relabeling-basics/
+canonical: https://grafana.com/docs/alloy/latest/tutorials/flow-by-example/logs-and-relabeling-basics/
 description: Learn how to relabel metrics and collect logs
 title: Logs and relabeling basics
 weight: 30
@@ -12,17 +7,17 @@ weight: 30
 
 # Logs and relabeling basics
 
-This tutorial assumes you have completed the [First components and introducing the standard library](https://grafana.com/docs/agent//flow/tutorials/flow-by-example/first-components-and-stdlib/) tutorial, or are at least familiar with the concepts of components, attributes, and expressions and how to use them.
You will cover some basic metric relabeling, followed by how to send logs to Loki. +This tutorial assumes you have completed the [First components and introducing the standard library][] tutorial, or are at least familiar with the concepts of components, attributes, and expressions and how to use them. +You will cover some basic metric relabeling, followed by how to send logs to Loki. ## Relabel metrics -[prometheus.relabel]: https://grafana.com/docs/agent//flow/reference/components/prometheus.relabel/ - **Recommended reading** - Optional: [prometheus.relabel][] -Before moving on to logs, let's look at how we can use the `prometheus.relabel` component to relabel metrics. The `prometheus.relabel` component allows you to perform Prometheus relabeling on metrics and is similar to the `relabel_configs` section of a Prometheus scrape config. +Before moving on to logs, let's look at how we can use the `prometheus.relabel` component to relabel metrics. +The `prometheus.relabel` component allows you to perform Prometheus relabeling on metrics and is similar to the `relabel_configs` section of a Prometheus scrape configuration. Let's add a `prometheus.relabel` component to a basic pipeline and see how to add labels. @@ -64,35 +59,37 @@ We have now created the following pipeline: This pipeline has a `prometheus.relabel` component that has a single rule. This rule has the `replace` action, which will replace the value of the `os` label with a special value: `constants.os`. This value is a special constant that is replaced with the OS of the host {{< param "PRODUCT_ROOT_NAME" >}} is running on. -You can see the other available constants in the [constants](https://grafana.com/docs/agent//flow/reference/stdlib/constants/) documentation. +You can see the other available constants in the [constants][] documentation. This example has one rule block, but you can have as many as you want. Each rule block is applied in order. -If you run {{< param "PRODUCT_ROOT_NAME" >}} and navigate to [localhost:3000/explore](http://localhost:3000/explore), you can see the `os` label on the metrics. Try querying for `node_context_switches_total` and look at the labels. +If you run {{< param "PRODUCT_ROOT_NAME" >}} and navigate to [localhost:3000/explore][], you can see the `os` label on the metrics. +Try querying for `node_context_switches_total` and look at the labels. -Relabeling uses the same rules as Prometheus. You can always refer to the [prometheus.relabel documentation](https://grafana.com/docs/agent//flow/reference/components/prometheus.relabel/#rule-block) for a full list of available options. +Relabeling uses the same rules as Prometheus. You can always refer to the [prometheus.relabel rule-block][] documentation for a full list of available options. {{< admonition type="note" >}} You can forward multiple components to one `prometheus.relabel` component. This allows you to apply the same relabeling rules to multiple pipelines. {{< /admonition >}} {{< admonition type="warning" >}} -There is an issue commonly faced when relabeling and using labels that start with `__` (double underscore). These labels are considered internal and are dropped before relabeling rules from a `prometheus.relabel` component are applied. If you would like to keep or act on these kinds of labels, use a [discovery.relabel](https://grafana.com/docs/agent//flow/reference/components/discovery.relabel/) component. +There is an issue commonly faced when relabeling and using labels that start with `__` (double underscore). 
+These labels are considered internal and are dropped before relabeling rules from a `prometheus.relabel` component are applied. +If you would like to keep or act on these kinds of labels, use a [discovery.relabel][] component. + +[discovery.relabel]: ../../../reference/components/discovery.relabel/ {{< /admonition >}} ## Send logs to Loki -[local.file_match]: https://grafana.com/docs/agent//flow/reference/components/local.file_match/ -[loki.source.file]: https://grafana.com/docs/agent//flow/reference/components/loki.source.file/ -[loki.write]: https://grafana.com/docs/agent//flow/reference/components/loki.write/ - **Recommended reading** - Optional: [local.file_match][] - Optional: [loki.source.file][] - Optional: [loki.write][] -Now that you're comfortable creating components and chaining them together, let's collect some logs and send them to Loki. We will use the `local.file_match` component to perform file discovery, the `loki.source.file` to collect the logs, and the `loki.write` component to send the logs to Loki. +Now that you're comfortable creating components and chaining them together, let's collect some logs and send them to Loki. +We will use the `local.file_match` component to perform file discovery, the `loki.source.file` to collect the logs, and the `loki.write` component to send the logs to Loki. Before doing this, we need to ensure we have a log file to scrape. We will use the `echo` command to create a file with some log content. @@ -124,7 +121,8 @@ The rough flow of this pipeline is: ![Diagram of pipeline that collects logs from /tmp/flow-logs and writes them to a local Loki instance](/media/docs/agent/diagram-flow-by-example-logs-0.svg) -If you navigate to [localhost:3000/explore](http://localhost:3000/explore) and switch the Datasource to `Loki`, you can query for `{filename="/tmp/flow-logs/log.log"}` and see the log line we created earlier. Try running the following command to add more logs to the file. +If you navigate to [localhost:3000/explore][] and switch the Datasource to `Loki`, you can query for `{filename="/tmp/flow-logs/log.log"}` and see the log line we created earlier. +Try running the following command to add more logs to the file. ```bash echo "This is another log line!" >> /tmp/flow-logs/log.log @@ -134,14 +132,11 @@ If you re-execute the query, you can see the new log lines. ![Grafana Explore view of example log lines](/media/docs/agent/screenshot-flow-by-example-log-lines.png) -If you are curious how {{< param "PRODUCT_ROOT_NAME" >}} keeps track of where it is in a log file, you can look at `data-agent/loki.source.file.local_files/positions.yml`. +If you are curious how {{< param "PRODUCT_ROOT_NAME" >}} keeps track of where it's in a log file, you can look at `data-agent/loki.source.file.local_files/positions.yml`. If you delete this file, {{< param "PRODUCT_ROOT_NAME" >}} starts reading from the beginning of the file again, which is why keeping the {{< param "PRODUCT_ROOT_NAME" >}}'s data directory in a persistent location is desirable. ## Exercise -[loki.relabel]: https://grafana.com/docs/agent//flow/reference/components/loki.relabel/ -[loki.process]: https://grafana.com/docs/agent//flow/reference/components/loki.process/ - **Recommended reading** - [loki.relabel][] @@ -149,7 +144,8 @@ If you delete this file, {{< param "PRODUCT_ROOT_NAME" >}} starts reading from t ### Add a Label to Logs -This exercise will have two parts, building on the previous example. 
Let's start by adding an `os` label (just like the Prometheus example) to all of the logs we collect. +This exercise will have two parts, building on the previous example. +Let's start by adding an `os` label (just like the Prometheus example) to all of the logs we collect. Modify the following snippet to add the label `os` with the value of the `os` constant. @@ -171,7 +167,10 @@ loki.write "local_loki" { ``` {{< admonition type="note" >}} -You can use the [loki.relabel](https://grafana.com/docs/agent//flow/reference/components/loki.relabel) component to relabel and add labels, just like you can with the [prometheus.relabel](https://grafana.com/docs/agent//flow/reference/components/prometheus.relabel) component. +You can use the [loki.relabel][] component to relabel and add labels, just like you can with the [prometheus.relabel][] component. + +[loki.relabel]: ../../../reference/components/loki.relabel +[prometheus.relabel]: ../../../reference/components/prometheus.relabel {{< /admonition >}} Once you have your completed configuration, run {{< param "PRODUCT_ROOT_NAME" >}} and execute the following: @@ -182,9 +181,11 @@ echo 'level=warn msg="WARN: This is a warn level log!"' >> /tmp/flow-logs/log.lo echo 'level=debug msg="DEBUG: This is a debug level log!"' >> /tmp/flow-logs/log.log ``` -Navigate to [localhost:3000/explore](http://localhost:3000/explore) and switch the Datasource to `Loki`. Try querying for `{filename="/tmp/flow-logs/log.log"}` and see if you can find the new label! +Navigate to [localhost:3000/explore][] and switch the Datasource to `Loki`. +Try querying for `{filename="/tmp/flow-logs/log.log"}` and see if you can find the new label! -Now that we have added new labels, we can also filter on them. Try querying for `{os!=""}`. You should only see the lines you added in the previous step. +Now that we have added new labels, we can also filter on them. Try querying for `{os!=""}`. +You should only see the lines you added in the previous step. {{< collapse title="Solution" >}} @@ -221,10 +222,12 @@ loki.write "local_loki" { ### Extract and add a Label from Logs {{< admonition type="note" >}} -This exercise is more challenging than the previous one. If you are having trouble, skip it and move to the next section, which will cover some of the concepts used here. You can always come back to this exercise later. +This exercise is more challenging than the previous one. +If you are having trouble, skip it and move to the next section, which will cover some of the concepts used here. +You can always come back to this exercise later. {{< /admonition >}} -This exercise will build on the previous one, though it's more involved. +This exercise will build on the previous one, though it's more involved. Let's say we want to extract the `level` from the logs and add it as a label. As a starting point, look at [loki.process][]. This component allows you to perform processing on logs, including extracting values from log contents. @@ -236,7 +239,7 @@ If needed, you can find a solution to the previous exercise at the end of the [p The `stage.logfmt` and `stage.labels` blocks for `loki.process` may be helpful. 
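For reference, a minimal sketch (assuming the log lines are logfmt and contain a `level` key, as in the example `echo` commands in this tutorial) of how those two stages can fit together inside a `loki.process` component:

```river
stage.logfmt {
    // Parse each line as logfmt and copy the value of the "level" key into
    // the extracted map. An empty string reuses the same key name.
    mapping = { "level" = "" }
}

stage.labels {
    // Promote the extracted "level" value to a label on the log entry.
    values = { "level" = "" }
}
```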
{{< /admonition >}} -Once you have your completed config, run {{< param "PRODUCT_ROOT_NAME" >}} and execute the following: +Once you have your completed configuration, run {{< param "PRODUCT_ROOT_NAME" >}} and execute the following: ```bash echo 'level=info msg="INFO: This is an info level log!"' >> /tmp/flow-logs/log.log @@ -244,7 +247,7 @@ echo 'level=warn msg="WARN: This is a warn level log!"' >> /tmp/flow-logs/log.lo echo 'level=debug msg="DEBUG: This is a debug level log!"' >> /tmp/flow-logs/log.log ``` -Navigate to [localhost:3000/explore](http://localhost:3000/explore) and switch the Datasource to `Loki`. Try querying for `{level!=""}` to see the new labels in action. +Navigate to [localhost:3000/explore][] and switch the Datasource to `Loki`. Try querying for `{level!=""}` to see the new labels in action. ![Grafana Explore view of example log lines, now with the extracted 'level' label](/media/docs/agent/screenshot-flow-by-example-log-line-levels.png) @@ -304,5 +307,16 @@ loki.write "local_loki" { ## Finishing up and next steps -You have learned the concepts of components, attributes, and expressions. You have also seen how to use some standard library components to collect metrics and logs. In the next tutorial, you will learn more about how to use the `loki.process` component to extract values from logs and use them. - +You have learned the concepts of components, attributes, and expressions. You have also seen how to use some standard library components to collect metrics and logs. +In the next tutorial, you will learn more about how to use the `loki.process` component to extract values from logs and use them. + +[First components and introducing the standard library]: ../first-components-and-stdlib/ +[prometheus.relabel]: ../../../reference/components/prometheus.relabel/ +[constants]: ../../../reference/stdlib/constants/ +[localhost:3000/explore]: http://localhost:3000/explore +[prometheus.relabel rule-block]: ../../../reference/components/prometheus.relabel/#rule-block +[local.file_match]: ../../../reference/components/local.file_match/ +[loki.source.file]: ../../../reference/components/loki.source.file/ +[loki.write]: ../../../reference/components/loki.write/ +[loki.relabel]: ../../../reference/components/loki.relabel/ +[loki.process]: ../../../reference/components/loki.process/ diff --git a/docs/sources/flow/tutorials/flow-by-example/processing-logs/index.md b/docs/sources/tutorials/flow-by-example/processing-logs/index.md similarity index 81% rename from docs/sources/flow/tutorials/flow-by-example/processing-logs/index.md rename to docs/sources/tutorials/flow-by-example/processing-logs/index.md index 327b40716c..22e52dc001 100644 --- a/docs/sources/flow/tutorials/flow-by-example/processing-logs/index.md +++ b/docs/sources/tutorials/flow-by-example/processing-logs/index.md @@ -1,10 +1,5 @@ --- -aliases: -- /docs/grafana-cloud/agent/flow/tutorials/flow-by-example/processing-logs/ -- /docs/grafana-cloud/monitor-infrastructure/agent/flow/tutorials/flow-by-example/processing-logs/ -- /docs/grafana-cloud/monitor-infrastructure/integrations/agent/flow/tutorials/flow-by-example/processing-logs/ -- /docs/grafana-cloud/send-data/agent/flow/tutorials/processing-logs/ -canonical: https://grafana.com/docs/agent/latest/flow/tutorials/flow-by-example/processing-logs/ +canonical: https://grafana.com/docs/alloy/latest/tutorials/flow-by-example/processing-logs/ description: Learn how to process logs title: Processing Logs weight: 40 @@ -19,7 +14,7 @@ It covers using `loki.source.api` to 
receive logs over HTTP, processing and filt **Recommended reading** -- Optional: [loki.source.api](https://grafana.com/docs/agent//flow/reference/components/loki.source.api/) +- Optional: [loki.source.api][] The `loki.source.api` component can receive logs over HTTP. It can be useful for receiving logs from other {{< param "PRODUCT_ROOT_NAME" >}}s or collectors, or directly from applications that can send logs over HTTP, and then processing them centrally. @@ -51,9 +46,9 @@ Next, you can configure the `loki.process` and `loki.write` components. **Recommended reading** -- [loki.process#stage.drop](https://grafana.com/docs/agent//flow/reference/components/loki.process/#stagedrop-block) -- [loki.process#stage.json](https://grafana.com/docs/agent//flow/reference/components/loki.process/#stagejson-block) -- [loki.process#stage.labels](https://grafana.com/docs/agent//flow/reference/components/loki.process/#stagelabels-block) +- [loki.process#stage.drop][] +- [loki.process#stage.json][] +- [loki.process#stage.labels][] ```river // Let's send and process more logs! @@ -142,7 +137,8 @@ In subsequent stages, you can use the extracted map to filter logs, add or remov `stage.*` blocks are executed in the order they appear in the component, top down. {{< /admonition >}} -Let's use an example log line to illustrate this, then go stage by stage, showing the contents of the extracted map. Here is our example log line: +Let's use an example log line to illustrate this, then go stage by stage, showing the contents of the extracted map. +Here is our example log line: ```json { @@ -166,10 +162,11 @@ stage.json { } ``` -This stage parses the log line as JSON, extracts two values from it, `log` and `timestamp`, and puts them into the extracted map with keys `log` and `ts`, respectively. +This stage parses the log line as JSON, extracts two values from it, `log` and `timestamp`, and puts them into the extracted map with keys `log` and `ts`, respectively. {{< admonition type="note" >}} -Supplying an empty string is shorthand for using the same key as in the input log line (so `log = ""` is the same as `log = "log"`). The _keys_ of the `expressions` object end up as the keys in the extracted map, and the _values_ are used as keys to look up in the parsed log line. +Supplying an empty string is shorthand for using the same key as in the input log line (so `log = ""` is the same as `log = "log"`). +The _keys_ of the `expressions` object end up as the keys in the extracted map, and the _values_ are used as keys to look up in the parsed log line. {{< /admonition >}} If this were Python, it would be roughly equivalent to: @@ -293,7 +290,7 @@ stage.drop { This stage acts on the `is_secret` value in the extracted map, which is a value that you extracted in the previous stage. This stage drops the log line if the value of `is_secret` is `"true"` and does not modify the extracted map. There are many other ways to filter logs, but this is a simple example. -Refer to the [loki.process#stage.drop](https://grafana.com/docs/agent//flow/reference/components/loki.process/#stagedrop-block) documentation for more information. +Refer to the [loki.process#stage.drop][] documentation for more information. ### Stage 5 @@ -320,12 +317,12 @@ stage.output { This stage uses the `log_line` value in the extracted map to set the actual log line that is forwarded to Loki. Rather than sending the entire JSON blob to Loki, you are only sending `original_log_line["log"]["message"]`, along with some labels that you attached. 
-This stage does not modify the extracted map. +This stage doesn't modify the extracted map. ## Putting it all together -Now that you have all of the pieces, let's run the {{< param "PRODUCT_ROOT_NAME" >}} and send some logs to it. -Modify `config.river` with the config from the previous example and start the {{< param "PRODUCT_ROOT_NAME" >}} with: +Now that you have all of the pieces, let's run {{< param "PRODUCT_ROOT_NAME" >}} and send some logs to it. +Modify `config.river` with the config from the previous example and start {{< param "PRODUCT_ROOT_NAME" >}} with: ```bash /path/to/agent run config.river @@ -344,7 +341,7 @@ curl localhost:9999/loki/api/v1/raw -XPOST -H "Content-Type: application/json" - ``` Now that you have sent some logs, let's see how they look in Grafana. -Navigate to [localhost:3000/explore](http://localhost:3000/explore) and switch the Datasource to `Loki`. +Navigate to [localhost:3000/explore][] and switch the Datasource to `Loki`. Try querying for `{source="demo-api"}` and see if you can find the logs you sent. Try playing around with the values of `"level"`, `"message"`, `"timestamp"`, and `"is_secret"` and see how the logs change. @@ -355,12 +352,12 @@ You can also try adding more stages to the `loki.process` component to extract m ## Exercise Since you are already using Docker and Docker exports logs, let's get those logs into Loki. -You can refer to the [discovery.docker](https://grafana.com/docs/agent//flow/reference/components/discovery.docker/) and [loki.source.docker](https://grafana.com/docs/agent//flow/reference/components/loki.source.docker/) documentation for more information. +You can refer to the [discovery.docker][] and [loki.source.docker][] documentation for more information. To ensure proper timestamps and other labels, make sure you use a `loki.process` component to process the logs before sending them to Loki. -Although you have not used it before, let's use a `discovery.relabel` component to attach the container name as a label to the logs. -You can refer to the [discovery.relabel](https://grafana.com/docs/agent//flow/reference/components/discovery.relabel/) documentation for more information. +Although you haven't used it before, let's use a `discovery.relabel` component to attach the container name as a label to the logs. +You can refer to the [discovery.relabel][] documentation for more information. The `discovery.relabel` component is very similar to the `prometheus.relabel` component, but is used to relabel discovered targets rather than metrics. {{< collapse title="Solution" >}} @@ -404,4 +401,13 @@ loki.write "local_loki" { } ``` -{{< /collapse >}} \ No newline at end of file +{{< /collapse >}} + +[loki.source.api]: ../../../reference/components/loki.source.api/ +[loki.process#stage.drop]: ../../../reference/components/loki.process/#stagedrop-block +[loki.process#stage.json]: ../../../reference/components/loki.process/#stagejson-block +[loki.process#stage.labels]: ../../../reference/components/loki.process/#stagelabels-block +[localhost:3000/explore]: http://localhost:3000/explore +[discovery.docker]: ../../../reference/components/discovery.docker/ +[loki.source.docker]: ../../../reference/components/loki.source.docker/ +[discovery.relabel]: ../../../reference/components/discovery.relabel/ diff --git a/docs/variables.mk b/docs/variables.mk index 82dba27a62..43189540d9 100644 --- a/docs/variables.mk +++ b/docs/variables.mk @@ -4,4 +4,4 @@ # This results in the content being served at /docs/agent/latest/. 
# The source of the content is the current repository which is determined by the name of the parent directory of the git root. # This overrides the default behavior of assuming the repository directory is the same as the project name. -PROJECTS := agent::$(notdir $(basename $(shell git rev-parse --show-toplevel))) +PROJECTS := alloy::$(notdir $(basename $(shell git rev-parse --show-toplevel))) diff --git a/internal/tools/docs_generator/docs_updated_test.go b/internal/tools/docs_generator/docs_updated_test.go index e21822d7c0..3c45abe4df 100644 --- a/internal/tools/docs_generator/docs_updated_test.go +++ b/internal/tools/docs_generator/docs_updated_test.go @@ -31,7 +31,7 @@ func TestLinksToTypesSectionsUpdated(t *testing.T) { } func TestCompatibleComponentsPageUpdated(t *testing.T) { - path := filepath.Join(moduleRoot, "docs/sources/flow/reference/compatibility/_index.md") + path := filepath.Join(moduleRoot, "docs/sources/reference/compatibility/_index.md") for _, typ := range metadata.AllTypes { t.Run(typ.Name, func(t *testing.T) { t.Run("exporters", func(t *testing.T) { diff --git a/internal/tools/docs_generator/links_to_types.go b/internal/tools/docs_generator/links_to_types.go index 4c92c0b23e..e6960fe83b 100644 --- a/internal/tools/docs_generator/links_to_types.go +++ b/internal/tools/docs_generator/links_to_types.go @@ -76,7 +76,7 @@ func (l *LinksToTypesGenerator) endMarker() string { } func (l *LinksToTypesGenerator) pathToComponentMarkdown() string { - return fmt.Sprintf("../../../docs/sources/flow/reference/components/%s.md", l.component) + return fmt.Sprintf("../../../docs/sources/reference/components/%s.md", l.component) } func outputComponentsSection(name string, meta metadata.Metadata) string { From 2ff16d2936abe794c2e4b2e0110e73c640f3456c Mon Sep 17 00:00:00 2001 From: Robert Fratto Date: Fri, 1 Mar 2024 11:53:01 -0500 Subject: [PATCH 006/136] misc: port github.com/grafana/river into syntax (#17) This commit brings the github.com/grafana/river code into the syntax package, which is intended to be a submodule. There are a few reasons to do this: 1. "River" is being sunset as a term in favor of the "Alloy configuration syntax," so it no longer makes sense for River to exist on its own. 2. With the transition to Alloy configuration syntax, error messages and alike should remove references to the word "River." 3. It is likely that the Alloy configuration syntax will be receiving a stream of updates soon after the 1.0 release, where having it in a separate repo would slow us down and risks desyncing documentation. 4. There are projects which depend on River, so it must be importable if we bring it into Alloy. However, since we don't want to mark the Go API as 1.0 yet, constraining the Alloy configuration syntax to a submodule allows us to version it separately. 
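
Concretely, the root go.mod gains a replace directive so that existing
imports of github.com/grafana/river resolve to the in-repo copy until the
import paths themselves are updated:

    replace github.com/grafana/river => ./syntax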
--- go.mod | 13 +- go.sum | 10 - .../configs/otel-metrics-gen/Dockerfile | 1 + .../configs/prom-gen/Dockerfile | 1 + syntax/ast/ast.go | 328 +++++++ syntax/ast/walk.go | 73 ++ syntax/cmd/riverfmt/main.go | 103 +++ syntax/diag/diag.go | 95 +++ syntax/diag/printer.go | 266 ++++++ syntax/diag/printer_test.go | 222 +++++ syntax/encoding/riverjson/riverjson.go | 313 +++++++ syntax/encoding/riverjson/riverjson_test.go | 363 ++++++++ syntax/encoding/riverjson/types.go | 41 + syntax/go.mod | 18 + syntax/go.sum | 22 + syntax/internal/reflectutil/walk.go | 89 ++ syntax/internal/reflectutil/walk_test.go | 72 ++ syntax/internal/rivertags/rivertags.go | 346 ++++++++ syntax/internal/rivertags/rivertags_test.go | 182 ++++ syntax/internal/stdlib/constants.go | 19 + syntax/internal/stdlib/stdlib.go | 132 +++ syntax/internal/value/capsule.go | 53 ++ syntax/internal/value/decode.go | 674 +++++++++++++++ .../internal/value/decode_benchmarks_test.go | 90 ++ syntax/internal/value/decode_test.go | 761 +++++++++++++++++ syntax/internal/value/errors.go | 107 +++ syntax/internal/value/number_value.go | 135 +++ syntax/internal/value/raw_function.go | 9 + syntax/internal/value/tag_cache.go | 121 +++ syntax/internal/value/type.go | 157 ++++ syntax/internal/value/type_test.go | 80 ++ syntax/internal/value/value.go | 556 ++++++++++++ syntax/internal/value/value_object.go | 119 +++ syntax/internal/value/value_object_test.go | 205 +++++ syntax/internal/value/value_test.go | 243 ++++++ syntax/parser/error_test.go | 148 ++++ syntax/parser/internal.go | 714 ++++++++++++++++ syntax/parser/internal_test.go | 22 + syntax/parser/parser.go | 43 + syntax/parser/parser_test.go | 123 +++ .../testdata/assign_block_to_attr.river | 32 + syntax/parser/testdata/attribute_names.river | 7 + syntax/parser/testdata/block_names.river | 25 + syntax/parser/testdata/commas.river | 13 + ...fad53537b46efdaa76e024a5ef4955d01a68bdac37 | 2 + ...f4c6c80f4ba9099c21ffa2b6869e75e99565dce037 | 2 + ...77c839a06204b55f2636597901d8d7878150d8580a | 2 + syntax/parser/testdata/invalid_exprs.river | 4 + .../parser/testdata/invalid_object_key.river | 9 + syntax/parser/testdata/valid/attribute.river | 1 + syntax/parser/testdata/valid/blocks.river | 36 + syntax/parser/testdata/valid/comments.river | 1 + syntax/parser/testdata/valid/empty.river | 0 .../parser/testdata/valid/expressions.river | 81 ++ syntax/printer/printer.go | 556 ++++++++++++ syntax/printer/printer_test.go | 77 ++ syntax/printer/testdata/.gitattributes | 1 + syntax/printer/testdata/array_comments.expect | 17 + syntax/printer/testdata/array_comments.in | 17 + syntax/printer/testdata/block_comments.expect | 62 ++ syntax/printer/testdata/block_comments.in | 64 ++ syntax/printer/testdata/example.expect | 60 ++ syntax/printer/testdata/example.in | 64 ++ syntax/printer/testdata/func_call.expect | 17 + syntax/printer/testdata/func_call.in | 17 + syntax/printer/testdata/mixed_list.expect | 16 + syntax/printer/testdata/mixed_list.in | 16 + syntax/printer/testdata/mixed_object.expect | 8 + syntax/printer/testdata/mixed_object.in | 7 + syntax/printer/testdata/object_align.expect | 11 + syntax/printer/testdata/object_align.in | 11 + syntax/printer/testdata/oneline_block.expect | 11 + syntax/printer/testdata/oneline_block.in | 14 + syntax/printer/testdata/raw_string.expect | 15 + syntax/printer/testdata/raw_string.in | 15 + .../testdata/raw_string_label_error.error | 1 + .../testdata/raw_string_label_error.in | 15 + syntax/printer/trimmer.go | 115 +++ syntax/printer/walker.go | 338 ++++++++ syntax/river.go | 
346 ++++++++ syntax/river_test.go | 152 ++++ syntax/rivertypes/optional_secret.go | 84 ++ syntax/rivertypes/optional_secret_test.go | 92 ++ syntax/rivertypes/secret.go | 65 ++ syntax/rivertypes/secret_test.go | 47 + syntax/scanner/identifier.go | 60 ++ syntax/scanner/identifier_test.go | 92 ++ syntax/scanner/scanner.go | 704 +++++++++++++++ syntax/scanner/scanner_test.go | 272 ++++++ syntax/token/builder/builder.go | 419 +++++++++ syntax/token/builder/builder_test.go | 411 +++++++++ syntax/token/builder/nested_defaults_test.go | 233 +++++ syntax/token/builder/token.go | 81 ++ syntax/token/builder/value_tokens.go | 95 +++ syntax/token/file.go | 142 ++++ syntax/token/token.go | 174 ++++ syntax/types.go | 97 +++ syntax/vm/constant.go | 64 ++ syntax/vm/error.go | 106 +++ syntax/vm/op_binary.go | 360 ++++++++ syntax/vm/op_binary_test.go | 94 ++ syntax/vm/op_unary.go | 33 + syntax/vm/struct_decoder.go | 323 +++++++ syntax/vm/tag_cache.go | 80 ++ syntax/vm/vm.go | 486 +++++++++++ syntax/vm/vm_benchmarks_test.go | 106 +++ syntax/vm/vm_block_test.go | 802 ++++++++++++++++++ syntax/vm/vm_errors_test.go | 80 ++ syntax/vm/vm_stdlib_test.go | 232 +++++ syntax/vm/vm_test.go | 277 ++++++ 110 files changed, 15423 insertions(+), 15 deletions(-) create mode 100644 syntax/ast/ast.go create mode 100644 syntax/ast/walk.go create mode 100644 syntax/cmd/riverfmt/main.go create mode 100644 syntax/diag/diag.go create mode 100644 syntax/diag/printer.go create mode 100644 syntax/diag/printer_test.go create mode 100644 syntax/encoding/riverjson/riverjson.go create mode 100644 syntax/encoding/riverjson/riverjson_test.go create mode 100644 syntax/encoding/riverjson/types.go create mode 100644 syntax/go.mod create mode 100644 syntax/go.sum create mode 100644 syntax/internal/reflectutil/walk.go create mode 100644 syntax/internal/reflectutil/walk_test.go create mode 100644 syntax/internal/rivertags/rivertags.go create mode 100644 syntax/internal/rivertags/rivertags_test.go create mode 100644 syntax/internal/stdlib/constants.go create mode 100644 syntax/internal/stdlib/stdlib.go create mode 100644 syntax/internal/value/capsule.go create mode 100644 syntax/internal/value/decode.go create mode 100644 syntax/internal/value/decode_benchmarks_test.go create mode 100644 syntax/internal/value/decode_test.go create mode 100644 syntax/internal/value/errors.go create mode 100644 syntax/internal/value/number_value.go create mode 100644 syntax/internal/value/raw_function.go create mode 100644 syntax/internal/value/tag_cache.go create mode 100644 syntax/internal/value/type.go create mode 100644 syntax/internal/value/type_test.go create mode 100644 syntax/internal/value/value.go create mode 100644 syntax/internal/value/value_object.go create mode 100644 syntax/internal/value/value_object_test.go create mode 100644 syntax/internal/value/value_test.go create mode 100644 syntax/parser/error_test.go create mode 100644 syntax/parser/internal.go create mode 100644 syntax/parser/internal_test.go create mode 100644 syntax/parser/parser.go create mode 100644 syntax/parser/parser_test.go create mode 100644 syntax/parser/testdata/assign_block_to_attr.river create mode 100644 syntax/parser/testdata/attribute_names.river create mode 100644 syntax/parser/testdata/block_names.river create mode 100644 syntax/parser/testdata/commas.river create mode 100644 syntax/parser/testdata/fuzz/FuzzParser/1a39f4e358facc21678b16fad53537b46efdaa76e024a5ef4955d01a68bdac37 create mode 100644 
syntax/parser/testdata/fuzz/FuzzParser/248cf4391f6c48550b7d2cf4c6c80f4ba9099c21ffa2b6869e75e99565dce037 create mode 100644 syntax/parser/testdata/fuzz/FuzzParser/b919fa00ebca318001778477c839a06204b55f2636597901d8d7878150d8580a create mode 100644 syntax/parser/testdata/invalid_exprs.river create mode 100644 syntax/parser/testdata/invalid_object_key.river create mode 100644 syntax/parser/testdata/valid/attribute.river create mode 100644 syntax/parser/testdata/valid/blocks.river create mode 100644 syntax/parser/testdata/valid/comments.river create mode 100644 syntax/parser/testdata/valid/empty.river create mode 100644 syntax/parser/testdata/valid/expressions.river create mode 100644 syntax/printer/printer.go create mode 100644 syntax/printer/printer_test.go create mode 100644 syntax/printer/testdata/.gitattributes create mode 100644 syntax/printer/testdata/array_comments.expect create mode 100644 syntax/printer/testdata/array_comments.in create mode 100644 syntax/printer/testdata/block_comments.expect create mode 100644 syntax/printer/testdata/block_comments.in create mode 100644 syntax/printer/testdata/example.expect create mode 100644 syntax/printer/testdata/example.in create mode 100644 syntax/printer/testdata/func_call.expect create mode 100644 syntax/printer/testdata/func_call.in create mode 100644 syntax/printer/testdata/mixed_list.expect create mode 100644 syntax/printer/testdata/mixed_list.in create mode 100644 syntax/printer/testdata/mixed_object.expect create mode 100644 syntax/printer/testdata/mixed_object.in create mode 100644 syntax/printer/testdata/object_align.expect create mode 100644 syntax/printer/testdata/object_align.in create mode 100644 syntax/printer/testdata/oneline_block.expect create mode 100644 syntax/printer/testdata/oneline_block.in create mode 100644 syntax/printer/testdata/raw_string.expect create mode 100644 syntax/printer/testdata/raw_string.in create mode 100644 syntax/printer/testdata/raw_string_label_error.error create mode 100644 syntax/printer/testdata/raw_string_label_error.in create mode 100644 syntax/printer/trimmer.go create mode 100644 syntax/printer/walker.go create mode 100644 syntax/river.go create mode 100644 syntax/river_test.go create mode 100644 syntax/rivertypes/optional_secret.go create mode 100644 syntax/rivertypes/optional_secret_test.go create mode 100644 syntax/rivertypes/secret.go create mode 100644 syntax/rivertypes/secret_test.go create mode 100644 syntax/scanner/identifier.go create mode 100644 syntax/scanner/identifier_test.go create mode 100644 syntax/scanner/scanner.go create mode 100644 syntax/scanner/scanner_test.go create mode 100644 syntax/token/builder/builder.go create mode 100644 syntax/token/builder/builder_test.go create mode 100644 syntax/token/builder/nested_defaults_test.go create mode 100644 syntax/token/builder/token.go create mode 100644 syntax/token/builder/value_tokens.go create mode 100644 syntax/token/file.go create mode 100644 syntax/token/token.go create mode 100644 syntax/types.go create mode 100644 syntax/vm/constant.go create mode 100644 syntax/vm/error.go create mode 100644 syntax/vm/op_binary.go create mode 100644 syntax/vm/op_binary_test.go create mode 100644 syntax/vm/op_unary.go create mode 100644 syntax/vm/struct_decoder.go create mode 100644 syntax/vm/tag_cache.go create mode 100644 syntax/vm/vm.go create mode 100644 syntax/vm/vm_benchmarks_test.go create mode 100644 syntax/vm/vm_block_test.go create mode 100644 syntax/vm/vm_errors_test.go create mode 100644 syntax/vm/vm_stdlib_test.go create mode 
100644 syntax/vm/vm_test.go diff --git a/go.mod b/go.mod index 81e677a27a..23368dd02b 100644 --- a/go.mod +++ b/go.mod @@ -29,14 +29,13 @@ require ( github.com/docker/go-connections v0.4.0 github.com/drone/envsubst/v2 v2.0.0-20210730161058-179042472c46 github.com/fatih/color v1.15.0 - github.com/fatih/structs v1.1.0 github.com/fortytw2/leaktest v1.3.0 github.com/fsnotify/fsnotify v1.6.0 github.com/github/smimesign v0.2.0 github.com/go-git/go-git/v5 v5.11.0 github.com/go-kit/log v0.2.1 github.com/go-logfmt/logfmt v0.6.0 - github.com/go-logr/logr v1.4.1 + github.com/go-logr/logr v1.4.1 // indirect github.com/go-sourcemap/sourcemap v2.1.3+incompatible github.com/go-sql-driver/mysql v1.7.1 github.com/gogo/protobuf v1.3.2 @@ -45,7 +44,6 @@ require ( github.com/google/cadvisor v0.47.0 github.com/google/dnsmasq_exporter v0.2.1-0.20230620100026-44b14480804a github.com/google/go-cmp v0.6.0 - github.com/google/go-jsonnet v0.18.0 github.com/google/pprof v0.0.0-20240117000934-35fc243c5815 github.com/google/renameio/v2 v2.0.0 github.com/google/uuid v1.4.0 @@ -162,7 +160,6 @@ require ( github.com/spf13/cobra v1.7.0 github.com/stretchr/testify v1.8.4 github.com/testcontainers/testcontainers-go v0.25.0 - github.com/testcontainers/testcontainers-go/modules/k3s v0.0.0-20230615142642-c175df34bd1d github.com/uber/jaeger-client-go v2.30.0+incompatible github.com/vincent-petithory/dataurl v1.0.0 github.com/webdevops/azure-metrics-exporter v0.0.0-20230717202958-8701afc2b013 @@ -226,7 +223,7 @@ require ( gopkg.in/yaml.v3 v3.0.1 gotest.tools v2.2.0+incompatible k8s.io/api v0.28.3 - k8s.io/apiextensions-apiserver v0.28.0 + k8s.io/apiextensions-apiserver v0.28.0 // indirect k8s.io/client-go v0.28.3 k8s.io/component-base v0.28.1 k8s.io/klog/v2 v2.100.1 @@ -766,3 +763,9 @@ exclude ( ) replace github.com/github/smimesign => github.com/grafana/smimesign v0.2.1-0.20220408144937-2a5adf3481d3 + +// Submodules. +// TODO(rfratto): Change all imports of github.com/grafana/river in favor of +// importing github.com/grafana/alloy/syntax and change module and package +// names to remove references of "river". 
+replace github.com/grafana/river => ./syntax diff --git a/go.sum b/go.sum index 95dd6c93ba..93cd5ab33b 100644 --- a/go.sum +++ b/go.sum @@ -666,12 +666,10 @@ github.com/fatih/camelcase v1.0.0 h1:hxNvNX/xYBp0ovncs8WyWZrOrpBNub/JfaMvbURyft8 github.com/fatih/camelcase v1.0.0/go.mod h1:yN2Sb0lFhZJUdVvtELVWefmrXpuZESvPmqwoZc+/fpc= github.com/fatih/color v1.7.0/go.mod h1:Zm6kSWBoL9eyXnKyktHP6abPY2pDugNf5KwzbycvMj4= github.com/fatih/color v1.9.0/go.mod h1:eQcE1qtQxscV5RaZvpXrrb8Drkc3/DdQ+uUYCNjL+zU= -github.com/fatih/color v1.10.0/go.mod h1:ELkj/draVOlAH/xkhN6mQ50Qd0MPOk5AAr3maGEBuJM= github.com/fatih/color v1.13.0/go.mod h1:kLAiJbzzSOZDVNGyDpeOxJ47H46qBXwg5ILebYFFOfk= github.com/fatih/color v1.15.0 h1:kOqh6YHBtK8aywxGerMG2Eq3H6Qgoqeo13Bk2Mv/nBs= github.com/fatih/color v1.15.0/go.mod h1:0h5ZqXfHYED7Bhv2ZJamyIOUej9KtShiJESRwBDUSsw= github.com/fatih/structs v0.0.0-20180123065059-ebf56d35bba7/go.mod h1:9NiDSp5zOcgEDl+j00MP/WkGVPOlPRLejGD8Ga6PJ7M= -github.com/fatih/structs v1.1.0 h1:Q7juDM0QtcnhCpeyLGQKyg4TOIghuNXrkL32pHAUMxo= github.com/fatih/structs v1.1.0/go.mod h1:9NiDSp5zOcgEDl+j00MP/WkGVPOlPRLejGD8Ga6PJ7M= github.com/felixge/fgprof v0.9.3 h1:VvyZxILNuCiUCSXtPtYmmtGvb65nqXh2QFWc0Wpf2/g= github.com/felixge/fgprof v0.9.3/go.mod h1:RdbpDgzqYVh/T9fPELJyV7EYJuHB55UTEULNun8eiPw= @@ -957,8 +955,6 @@ github.com/google/go-cmp v0.6.0 h1:ofyhxvXcZhMsU5ulbFiLKl/XBFqE1GSq7atu8tAmTRI= github.com/google/go-cmp v0.6.0/go.mod h1:17dUlkBOakJ0+DkrSSNjCkIjxS6bF9zb3elmeNGIjoY= github.com/google/go-github v17.0.0+incompatible/go.mod h1:zLgOLi98H3fifZn+44m+umXrS52loVEgC2AApnigrVQ= github.com/google/go-github/v32 v32.1.0/go.mod h1:rIEpZD9CTDQwDK9GDrtMTycQNA4JU3qBsCizh3q2WCI= -github.com/google/go-jsonnet v0.18.0 h1:/6pTy6g+Jh1a1I2UMoAODkqELFiVIdOxbNwv0DDzoOg= -github.com/google/go-jsonnet v0.18.0/go.mod h1:C3fTzyVJDslXdiTqw/bTFk7vSGyCtH3MGRbDfvEwGd0= github.com/google/go-querystring v0.0.0-20170111101155-53e6ce116135/go.mod h1:odCYkC5MyYFN7vkCjXpyrEuKhc/BUO6wN/zVPAxq5ck= github.com/google/go-querystring v1.0.0/go.mod h1:odCYkC5MyYFN7vkCjXpyrEuKhc/BUO6wN/zVPAxq5ck= github.com/google/go-querystring v1.1.0 h1:AnCroh3fv4ZBgVIf1Iwtovgjaw/GiKJo8M8yD/fhyJ8= @@ -1084,8 +1080,6 @@ github.com/grafana/pyroscope/ebpf v0.4.3 h1:gPfm2FKabdycRfFIej/s0awSzsbAaoSefaeh github.com/grafana/pyroscope/ebpf v0.4.3/go.mod h1:Iv66aj9WsDWR8bGMPQzCQPCgVgCru0KizGrbcR3YmLk= github.com/grafana/regexp v0.0.0-20221123153739-15dc172cd2db h1:7aN5cccjIqCLTzedH7MZzRZt5/lsAHch6Z3L2ZGn5FA= github.com/grafana/regexp v0.0.0-20221123153739-15dc172cd2db/go.mod h1:M5qHK+eWfAv8VR/265dIuEpL3fNfeC21tXXp9itM24A= -github.com/grafana/river v0.3.1-0.20240123144725-960753160cd1 h1:mCOKdWkLv8n9X0ORWrPR+W/zLOAa1o6iM+Dfy0ofQUs= -github.com/grafana/river v0.3.1-0.20240123144725-960753160cd1/go.mod h1:tAiNX2zt3HUsNyPNUDSvE6AgQ4+kqJvljBI+ACppMtM= github.com/grafana/smimesign v0.2.1-0.20220408144937-2a5adf3481d3 h1:UPkAxuhlAcRmJT3/qd34OMTl+ZU7BLLfOO2+NXBlJpY= github.com/grafana/smimesign v0.2.1-0.20220408144937-2a5adf3481d3/go.mod h1:iZiiwNT4HbtGRVqCQu7uJPEZCuEE5sfSSttcnePkDl4= github.com/grafana/snowflake-prometheus-exporter v0.0.0-20221213150626-862cad8e9538 h1:tkT0yha3JzB5S5VNjfY4lT0cJAe20pU8XGt3Nuq73rM= @@ -1529,7 +1523,6 @@ github.com/mattn/go-colorable v0.0.9/go.mod h1:9vuHe8Xs5qXnSaW/c/ABM9alt+Vo+STaO github.com/mattn/go-colorable v0.1.1/go.mod h1:FuOcm+DKB9mbwrcAfNl7/TZVBZ6rcnceauSikq3lYCQ= github.com/mattn/go-colorable v0.1.4/go.mod h1:U0ppj6V5qS13XJ6of8GYAs25YV2eR4EVcfRqFIhoBtE= github.com/mattn/go-colorable v0.1.6/go.mod 
h1:u6P/XSegPjTcexA+o6vUJrdnUu04hMope9wVRipJSqc= -github.com/mattn/go-colorable v0.1.8/go.mod h1:u6P/XSegPjTcexA+o6vUJrdnUu04hMope9wVRipJSqc= github.com/mattn/go-colorable v0.1.9/go.mod h1:u6P/XSegPjTcexA+o6vUJrdnUu04hMope9wVRipJSqc= github.com/mattn/go-colorable v0.1.12/go.mod h1:u5H1YNBxpqRaxsYJYSkiCWKzEfiAb1Gb520KVy5xxl4= github.com/mattn/go-colorable v0.1.13 h1:fFA4WZxdEF4tXPZVKMLwD8oUnCTTo08duU7wxecdEvA= @@ -2074,7 +2067,6 @@ github.com/segmentio/fasthash v1.0.3 h1:EI9+KE1EwvMLBWwjpRDc+fEM+prwxDYbslddQGtr github.com/segmentio/fasthash v1.0.3/go.mod h1:waKX8l2N8yckOgmSsXJi7x1ZfdKZ4x7KRMzBtS3oedY= github.com/sercand/kuberesolver/v5 v5.1.1 h1:CYH+d67G0sGBj7q5wLK61yzqJJ8gLLC8aeprPTHb6yY= github.com/sercand/kuberesolver/v5 v5.1.1/go.mod h1:Fs1KbKhVRnB2aDWN12NjKCB+RgYMWZJ294T3BtmVCpQ= -github.com/sergi/go-diff v1.1.0/go.mod h1:STckp+ISIX8hZLjrqAeVduY0gWCT9IjLuqbuNXdaHfM= github.com/sergi/go-diff v1.2.0 h1:XU+rvMAioB0UC3q1MFrIQy4Vo5/4VsRDQQXHsEya6xQ= github.com/sergi/go-diff v1.2.0/go.mod h1:STckp+ISIX8hZLjrqAeVduY0gWCT9IjLuqbuNXdaHfM= github.com/shirou/gopsutil v0.0.0-20181107111621-48177ef5f880/go.mod h1:5b4v6he4MtMOwMlS0TUMTu2PcXUg8+E1lC7eC3UO/RA= @@ -2196,8 +2188,6 @@ github.com/tencentcloud/tencentcloud-sdk-go v1.0.162/go.mod h1:asUz5BPXxgoPGaRgZ github.com/tent/http-link-go v0.0.0-20130702225549-ac974c61c2f9/go.mod h1:RHkNRtSLfOK7qBTHaeSX1D6BNpI3qw7NTxsmNr4RvN8= github.com/testcontainers/testcontainers-go v0.25.0 h1:erH6cQjsaJrH+rJDU9qIf89KFdhK0Bft0aEZHlYC3Vs= github.com/testcontainers/testcontainers-go v0.25.0/go.mod h1:4sC9SiJyzD1XFi59q8umTQYWxnkweEc5OjVtTUlJzqQ= -github.com/testcontainers/testcontainers-go/modules/k3s v0.0.0-20230615142642-c175df34bd1d h1:KyYCHo9iBoQYw5AzcozD/77uNbFlRjTmMTA7QjSxHOQ= -github.com/testcontainers/testcontainers-go/modules/k3s v0.0.0-20230615142642-c175df34bd1d/go.mod h1:Pa91ahCbzRB6d9FBi6UAjurTEm7WmyBVeuklLkwAKKs= github.com/tg123/go-htpasswd v1.2.1 h1:i4wfsX1KvvkyoMiHZzjS0VzbAPWfxzI8INcZAKtutoU= github.com/tg123/go-htpasswd v1.2.1/go.mod h1:erHp1B86KXdwQf1X5ZrLb7erXZnWueEQezb2dql4q58= github.com/tidwall/gjson v1.6.0/go.mod h1:P256ACg0Mn+j1RXIDXoss50DeIABTYK1PULOJHhxOls= diff --git a/internal/cmd/integration-tests/configs/otel-metrics-gen/Dockerfile b/internal/cmd/integration-tests/configs/otel-metrics-gen/Dockerfile index 0270edbd0b..bc0c2cf3a9 100644 --- a/internal/cmd/integration-tests/configs/otel-metrics-gen/Dockerfile +++ b/internal/cmd/integration-tests/configs/otel-metrics-gen/Dockerfile @@ -1,6 +1,7 @@ FROM golang:1.21 as build WORKDIR /app/ COPY go.mod go.sum ./ +COPY syntax/go.mod syntax/go.sum ./syntax/ RUN go mod download COPY ./internal/cmd/integration-tests/configs/otel-metrics-gen/ ./ RUN CGO_ENABLED=0 go build -o main main.go diff --git a/internal/cmd/integration-tests/configs/prom-gen/Dockerfile b/internal/cmd/integration-tests/configs/prom-gen/Dockerfile index d1e0bfdcaf..875b7bad7e 100644 --- a/internal/cmd/integration-tests/configs/prom-gen/Dockerfile +++ b/internal/cmd/integration-tests/configs/prom-gen/Dockerfile @@ -1,6 +1,7 @@ FROM golang:1.21 as build WORKDIR /app/ COPY go.mod go.sum ./ +COPY syntax/go.mod syntax/go.sum ./syntax/ RUN go mod download COPY ./internal/cmd/integration-tests/configs/prom-gen/ ./ RUN CGO_ENABLED=0 go build -o main main.go diff --git a/syntax/ast/ast.go b/syntax/ast/ast.go new file mode 100644 index 0000000000..992ee0c71a --- /dev/null +++ b/syntax/ast/ast.go @@ -0,0 +1,328 @@ +// Package ast exposes AST elements used by River. 
+// +// The various interfaces exposed by ast are all closed; only types within this +// package can satisfy an AST interface. +package ast + +import ( + "fmt" + "reflect" + "strings" + + "github.com/grafana/river/token" +) + +// Node represents any node in the AST. +type Node interface { + astNode() +} + +// Stmt is a type of statement within the body of a file or block. +type Stmt interface { + Node + astStmt() +} + +// Expr is an expression within the AST. +type Expr interface { + Node + astExpr() +} + +// File is a parsed file. +type File struct { + Name string // Filename provided to parser + Body Body // Content of File + Comments []CommentGroup // List of all comments in the File +} + +// Body is a list of statements. +type Body []Stmt + +// A CommentGroup represents a sequence of comments that are not separated by +// any empty lines or other non-comment tokens. +type CommentGroup []*Comment + +// A Comment represents a single line or block comment. +// +// The Text field contains the comment text without any carriage returns (\r) +// that may have been present in the source. Since carriage returns get +// removed, EndPos will not be accurate for any comment which contained +// carriage returns. +type Comment struct { + StartPos token.Pos // Starting position of comment + // Text of the comment. Text will not contain '\n' for line comments. + Text string +} + +// AttributeStmt is a key-value pair being set in a Body or BlockStmt. +type AttributeStmt struct { + Name *Ident + Value Expr +} + +// BlockStmt declares a block. +type BlockStmt struct { + Name []string + NamePos token.Pos + Label string + LabelPos token.Pos + Body Body + + LCurlyPos, RCurlyPos token.Pos +} + +// Ident holds an identifier with its position. +type Ident struct { + Name string + NamePos token.Pos +} + +// IdentifierExpr refers to a named value. +type IdentifierExpr struct { + Ident *Ident +} + +// LiteralExpr is a constant value of a specific token kind. +type LiteralExpr struct { + Kind token.Token + ValuePos token.Pos + + // Value holds the unparsed literal value. For example, if Kind == + // token.STRING, then Value would be wrapped in the original quotes (e.g., + // `"foobar"`). + Value string +} + +// ArrayExpr is an array of values. +type ArrayExpr struct { + Elements []Expr + LBrackPos, RBrackPos token.Pos +} + +// ObjectExpr declares an object of key-value pairs. +type ObjectExpr struct { + Fields []*ObjectField + LCurlyPos, RCurlyPos token.Pos +} + +// ObjectField defines an individual key-value pair within an object. +// ObjectField does not implement Node. +type ObjectField struct { + Name *Ident + Quoted bool // True if the name was wrapped in quotes + Value Expr +} + +// AccessExpr accesses a field in an object value by name. +type AccessExpr struct { + Value Expr + Name *Ident +} + +// IndexExpr accesses an index in an array value. +type IndexExpr struct { + Value, Index Expr + LBrackPos, RBrackPos token.Pos +} + +// CallExpr invokes a function value with a set of arguments. +type CallExpr struct { + Value Expr + Args []Expr + + LParenPos, RParenPos token.Pos +} + +// UnaryExpr performs a unary operation on a single value. +type UnaryExpr struct { + Kind token.Token + KindPos token.Pos + Value Expr +} + +// BinaryExpr performs a binary operation against two values. +type BinaryExpr struct { + Kind token.Token + KindPos token.Pos + Left, Right Expr +} + +// ParenExpr represents an expression wrapped in parentheses. 
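+//
+// For example, in the expression (1 + 2), the ParenExpr's Inner field holds
+// the BinaryExpr for 1 + 2, and LParenPos and RParenPos record the positions
+// of the two parentheses.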
+type ParenExpr struct { + Inner Expr + LParenPos, RParenPos token.Pos +} + +// Type assertions + +var ( + _ Node = (*File)(nil) + _ Node = (*Body)(nil) + _ Node = (*AttributeStmt)(nil) + _ Node = (*BlockStmt)(nil) + _ Node = (*Ident)(nil) + _ Node = (*IdentifierExpr)(nil) + _ Node = (*LiteralExpr)(nil) + _ Node = (*ArrayExpr)(nil) + _ Node = (*ObjectExpr)(nil) + _ Node = (*AccessExpr)(nil) + _ Node = (*IndexExpr)(nil) + _ Node = (*CallExpr)(nil) + _ Node = (*UnaryExpr)(nil) + _ Node = (*BinaryExpr)(nil) + _ Node = (*ParenExpr)(nil) + + _ Stmt = (*AttributeStmt)(nil) + _ Stmt = (*BlockStmt)(nil) + + _ Expr = (*IdentifierExpr)(nil) + _ Expr = (*LiteralExpr)(nil) + _ Expr = (*ArrayExpr)(nil) + _ Expr = (*ObjectExpr)(nil) + _ Expr = (*AccessExpr)(nil) + _ Expr = (*IndexExpr)(nil) + _ Expr = (*CallExpr)(nil) + _ Expr = (*UnaryExpr)(nil) + _ Expr = (*BinaryExpr)(nil) + _ Expr = (*ParenExpr)(nil) +) + +func (n *File) astNode() {} +func (n Body) astNode() {} +func (n CommentGroup) astNode() {} +func (n *Comment) astNode() {} +func (n *AttributeStmt) astNode() {} +func (n *BlockStmt) astNode() {} +func (n *Ident) astNode() {} +func (n *IdentifierExpr) astNode() {} +func (n *LiteralExpr) astNode() {} +func (n *ArrayExpr) astNode() {} +func (n *ObjectExpr) astNode() {} +func (n *AccessExpr) astNode() {} +func (n *IndexExpr) astNode() {} +func (n *CallExpr) astNode() {} +func (n *UnaryExpr) astNode() {} +func (n *BinaryExpr) astNode() {} +func (n *ParenExpr) astNode() {} + +func (n *AttributeStmt) astStmt() {} +func (n *BlockStmt) astStmt() {} + +func (n *IdentifierExpr) astExpr() {} +func (n *LiteralExpr) astExpr() {} +func (n *ArrayExpr) astExpr() {} +func (n *ObjectExpr) astExpr() {} +func (n *AccessExpr) astExpr() {} +func (n *IndexExpr) astExpr() {} +func (n *CallExpr) astExpr() {} +func (n *UnaryExpr) astExpr() {} +func (n *BinaryExpr) astExpr() {} +func (n *ParenExpr) astExpr() {} + +// StartPos returns the position of the first character belonging to a Node. +func StartPos(n Node) token.Pos { + if n == nil || reflect.ValueOf(n).IsZero() { + return token.NoPos + } + switch n := n.(type) { + case *File: + return StartPos(n.Body) + case Body: + if len(n) == 0 { + return token.NoPos + } + return StartPos(n[0]) + case CommentGroup: + if len(n) == 0 { + return token.NoPos + } + return StartPos(n[0]) + case *Comment: + return n.StartPos + case *AttributeStmt: + return StartPos(n.Name) + case *BlockStmt: + return n.NamePos + case *Ident: + return n.NamePos + case *IdentifierExpr: + return StartPos(n.Ident) + case *LiteralExpr: + return n.ValuePos + case *ArrayExpr: + return n.LBrackPos + case *ObjectExpr: + return n.LCurlyPos + case *AccessExpr: + return StartPos(n.Value) + case *IndexExpr: + return StartPos(n.Value) + case *CallExpr: + return StartPos(n.Value) + case *UnaryExpr: + return n.KindPos + case *BinaryExpr: + return StartPos(n.Left) + case *ParenExpr: + return n.LParenPos + default: + panic(fmt.Sprintf("Unhandled Node type %T", n)) + } +} + +// EndPos returns the position of the final character in a Node. 
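+// For a nil or zero Node it returns token.NoPos. For example, an *Ident with
+// Name "foo" and NamePos at offset 10 ends at offset 12, the position of the
+// final 'o'.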
+func EndPos(n Node) token.Pos { + if n == nil || reflect.ValueOf(n).IsZero() { + return token.NoPos + } + switch n := n.(type) { + case *File: + return EndPos(n.Body) + case Body: + if len(n) == 0 { + return token.NoPos + } + return EndPos(n[len(n)-1]) + case CommentGroup: + if len(n) == 0 { + return token.NoPos + } + return EndPos(n[len(n)-1]) + case *Comment: + return n.StartPos.Add(len(n.Text) - 1) + case *AttributeStmt: + return EndPos(n.Value) + case *BlockStmt: + return n.RCurlyPos + case *Ident: + return n.NamePos.Add(len(n.Name) - 1) + case *IdentifierExpr: + return EndPos(n.Ident) + case *LiteralExpr: + return n.ValuePos.Add(len(n.Value) - 1) + case *ArrayExpr: + return n.RBrackPos + case *ObjectExpr: + return n.RCurlyPos + case *AccessExpr: + return EndPos(n.Name) + case *IndexExpr: + return n.RBrackPos + case *CallExpr: + return n.RParenPos + case *UnaryExpr: + return EndPos(n.Value) + case *BinaryExpr: + return EndPos(n.Right) + case *ParenExpr: + return n.RParenPos + default: + panic(fmt.Sprintf("Unhandled Node type %T", n)) + } +} + +// GetBlockName retrieves the "." delimited block name. +func (block *BlockStmt) GetBlockName() string { + return strings.Join(block.Name, ".") +} diff --git a/syntax/ast/walk.go b/syntax/ast/walk.go new file mode 100644 index 0000000000..df3f82d9a3 --- /dev/null +++ b/syntax/ast/walk.go @@ -0,0 +1,73 @@ +package ast + +import "fmt" + +// A Visitor has its Visit method invoked for each node encountered by Walk. If +// the resulting visitor w is not nil, Walk visits each of the children of node +// with the visitor w, followed by a call of w.Visit(nil). +type Visitor interface { + Visit(node Node) (w Visitor) +} + +// Walk traverses an AST in depth-first order: it starts by calling +// v.Visit(node); node must not be nil. If the visitor w returned by +// v.Visit(node) is not nil, Walk is invoked recursively with visitor w for +// each of the non-nil children of node, followed by a call of w.Visit(nil). +func Walk(v Visitor, node Node) { + if v = v.Visit(node); v == nil { + return + } + + // Walk children. The order of the cases matches the declared order of nodes + // in ast.go. 
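+	// Leaf nodes such as *Ident and *LiteralExpr have no children, so their
+	// cases deliberately do nothing before the final v.Visit(nil) call below.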
+ switch n := node.(type) { + case *File: + Walk(v, n.Body) + case Body: + for _, s := range n { + Walk(v, s) + } + case *AttributeStmt: + Walk(v, n.Name) + Walk(v, n.Value) + case *BlockStmt: + Walk(v, n.Body) + case *Ident: + // Nothing to do + case *IdentifierExpr: + Walk(v, n.Ident) + case *LiteralExpr: + // Nothing to do + case *ArrayExpr: + for _, e := range n.Elements { + Walk(v, e) + } + case *ObjectExpr: + for _, f := range n.Fields { + Walk(v, f.Name) + Walk(v, f.Value) + } + case *AccessExpr: + Walk(v, n.Value) + Walk(v, n.Name) + case *IndexExpr: + Walk(v, n.Value) + Walk(v, n.Index) + case *CallExpr: + Walk(v, n.Value) + for _, a := range n.Args { + Walk(v, a) + } + case *UnaryExpr: + Walk(v, n.Value) + case *BinaryExpr: + Walk(v, n.Left) + Walk(v, n.Right) + case *ParenExpr: + Walk(v, n.Inner) + default: + panic(fmt.Sprintf("river/ast: unexpected node type %T", n)) + } + + v.Visit(nil) +} diff --git a/syntax/cmd/riverfmt/main.go b/syntax/cmd/riverfmt/main.go new file mode 100644 index 0000000000..d7b4433f5a --- /dev/null +++ b/syntax/cmd/riverfmt/main.go @@ -0,0 +1,103 @@ +package main + +import ( + "bytes" + "errors" + "flag" + "fmt" + "io" + "os" + + "github.com/grafana/river/diag" + "github.com/grafana/river/parser" + "github.com/grafana/river/printer" +) + +func main() { + err := run() + + var diags diag.Diagnostics + if errors.As(err, &diags) { + for _, diag := range diags { + fmt.Fprintln(os.Stderr, diag) + } + os.Exit(1) + } else if err != nil { + fmt.Fprintf(os.Stderr, "error: %s\n", err) + os.Exit(1) + } +} + +func run() error { + var ( + write bool + ) + + fs := flag.NewFlagSet("riverfmt", flag.ExitOnError) + fs.BoolVar(&write, "w", write, "write result to (source) file instead of stdout") + + if err := fs.Parse(os.Args[1:]); err != nil { + return err + } + + args := fs.Args() + switch len(args) { + case 0: + if write { + return fmt.Errorf("cannot use -w with standard input") + } + return format("", nil, os.Stdin, write) + + case 1: + fi, err := os.Stat(args[0]) + if err != nil { + return err + } + if fi.IsDir() { + return fmt.Errorf("cannot format a directory") + } + f, err := os.Open(args[0]) + if err != nil { + return err + } + defer f.Close() + return format(args[0], fi, f, write) + + default: + return fmt.Errorf("can only format one file") + } +} + +func format(filename string, fi os.FileInfo, r io.Reader, write bool) error { + bb, err := io.ReadAll(r) + if err != nil { + return err + } + + f, err := parser.ParseFile(filename, bb) + if err != nil { + return err + } + + var buf bytes.Buffer + if err := printer.Fprint(&buf, f); err != nil { + return err + } + + // Add a newline at the end + _, _ = buf.Write([]byte{'\n'}) + + if !write { + _, err := io.Copy(os.Stdout, &buf) + return err + } + + wf, err := os.OpenFile(filename, os.O_WRONLY|os.O_TRUNC|os.O_CREATE, fi.Mode().Perm()) + if err != nil { + return err + } + defer wf.Close() + + _, err = io.Copy(wf, &buf) + return err +} diff --git a/syntax/diag/diag.go b/syntax/diag/diag.go new file mode 100644 index 0000000000..a49487af61 --- /dev/null +++ b/syntax/diag/diag.go @@ -0,0 +1,95 @@ +// Package diag exposes error types used throughout River and a method to +// pretty-print them to the screen. +package diag + +import ( + "fmt" + + "github.com/grafana/river/token" +) + +// Severity denotes the severity level of a diagnostic. The zero value of +// severity is invalid. +type Severity int + +// Supported severity levels. 
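+// SeverityLevelWarn starts at iota + 1 so that the zero value of Severity
+// stays invalid, as documented on the type above.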
+const (
+ SeverityLevelWarn Severity = iota + 1
+ SeverityLevelError
+)
+
+// Diagnostic is an individual diagnostic message. Diagnostic messages can have
+// different severity levels.
+type Diagnostic struct {
+ // Severity holds the severity level of this Diagnostic.
+ Severity Severity
+
+ // StartPos refers to a position in a file where this Diagnostic starts.
+ StartPos token.Position
+
+ // EndPos refers to an optional position in a file where this Diagnostic
+ // ends. If EndPos is the zero value, the Diagnostic should be treated as
+ // only covering a single character (i.e., StartPos == EndPos).
+ //
+ // When defined, EndPos must have the same Filename value as the StartPos.
+ EndPos token.Position
+
+ Message string
+ Value string
+}
+
+// As allows d to be interpreted as a list of Diagnostics.
+func (d Diagnostic) As(v interface{}) bool {
+ switch v := v.(type) {
+ case *Diagnostics:
+ *v = Diagnostics{d}
+ return true
+ }
+
+ return false
+}
+
+// Error implements error.
+func (d Diagnostic) Error() string {
+ return fmt.Sprintf("%s: %s", d.StartPos, d.Message)
+}
+
+// Diagnostics is a collection of diagnostic messages.
+type Diagnostics []Diagnostic
+
+// Add adds an individual Diagnostic to the diagnostics list.
+func (ds *Diagnostics) Add(d Diagnostic) {
+ *ds = append(*ds, d)
+}
+
+// Error implements error.
+func (ds Diagnostics) Error() string {
+ switch len(ds) {
+ case 0:
+ return "no errors"
+ case 1:
+ return ds[0].Error()
+ default:
+ return fmt.Sprintf("%s (and %d more diagnostics)", ds[0], len(ds)-1)
+ }
+}
+
+// ErrorOrNil returns an error interface if the list of diagnostics is
+// non-empty, nil otherwise.
+func (ds Diagnostics) ErrorOrNil() error {
+ if len(ds) == 0 {
+ return nil
+ }
+ return ds
+}
+
+// HasErrors reports whether the list of Diagnostics contains any error-level
+// diagnostic.
+func (ds Diagnostics) HasErrors() bool {
+ for _, d := range ds {
+ if d.Severity == SeverityLevelError {
+ return true
+ }
+ }
+ return false
+}
diff --git a/syntax/diag/printer.go b/syntax/diag/printer.go
new file mode 100644
index 0000000000..03994d68cf
--- /dev/null
+++ b/syntax/diag/printer.go
@@ -0,0 +1,266 @@
+package diag
+
+import (
+ "bufio"
+ "fmt"
+ "io"
+ "strconv"
+ "strings"
+
+ "github.com/fatih/color"
+ "github.com/grafana/river/token"
+)
+
+const tabWidth = 4
+
+// PrinterConfig controls different settings for the Printer.
+type PrinterConfig struct {
+ // When Color is true, the printer will output with color and special
+ // formatting characters (such as underlines).
+ //
+ // This should be disabled when not printing to a terminal.
+ Color bool
+
+ // ContextLinesBefore and ContextLinesAfter control how many context lines
+ // before and after the range of the diagnostic are printed.
+ ContextLinesBefore, ContextLinesAfter int
+}
+
+// A Printer pretty-prints Diagnostics.
+type Printer struct {
+ cfg PrinterConfig
+}
+
+// NewPrinter creates a new diagnostics Printer with the provided config.
+func NewPrinter(cfg PrinterConfig) *Printer {
+ return &Printer{cfg: cfg}
+}
+
+// Fprint creates a Printer with default settings and prints diagnostics to the
+// provided writer. files is used to look up file contents by name for printing
+// diagnostics context. files may be set to nil to avoid printing context.
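+//
+// A minimal usage sketch (the file name and contents here are hypothetical):
+//
+// files := map[string][]byte{"example.river": src}
+// _ = Fprint(os.Stderr, files, diags)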
+func Fprint(w io.Writer, files map[string][]byte, diags Diagnostics) error {
+ p := NewPrinter(PrinterConfig{
+ Color: false,
+ ContextLinesBefore: 1,
+ ContextLinesAfter: 1,
+ })
+ return p.Fprint(w, files, diags)
+}
+
+// Fprint pretty-prints errors to a writer. files is used to look up file
+// contents by name when printing context. files may be nil to avoid printing
+// context.
+func (p *Printer) Fprint(w io.Writer, files map[string][]byte, diags Diagnostics) error {
+ // Create a buffered writer since we'll have many small calls to Write while
+ // we print errors.
+ //
+ // Buffered writers track the first write error received and will return it
+ // (if any) when flushing, so we can ignore write errors throughout the code
+ // until the very end.
+ bw := bufio.NewWriter(w)
+
+ for i, diag := range diags {
+ p.printDiagnosticHeader(bw, diag)
+
+ // If there's no ending position, set the ending position to be the same as
+ // the start.
+ if !diag.EndPos.Valid() {
+ diag.EndPos = diag.StartPos
+ }
+
+ // We can print the file context if it was found.
+ fileContents, foundFile := files[diag.StartPos.Filename]
+ if foundFile && diag.StartPos.Filename == diag.EndPos.Filename {
+ p.printRange(bw, fileContents, diag)
+ }
+
+ // Print a blank line to separate diagnostics.
+ if i+1 < len(diags) {
+ fmt.Fprintf(bw, "\n")
+ }
+ }
+
+ return bw.Flush()
+}
+
+func (p *Printer) printDiagnosticHeader(w io.Writer, diag Diagnostic) {
+ if p.cfg.Color {
+ switch diag.Severity {
+ case SeverityLevelError:
+ cw := color.New(color.FgRed, color.Bold)
+ _, _ = cw.Fprintf(w, "Error: ")
+ case SeverityLevelWarn:
+ cw := color.New(color.FgYellow, color.Bold)
+ _, _ = cw.Fprintf(w, "Warning: ")
+ }
+
+ cw := color.New(color.Bold)
+ _, _ = cw.Fprintf(w, "%s: %s\n", diag.StartPos, diag.Message)
+ return
+ }
+
+ switch diag.Severity {
+ case SeverityLevelError:
+ _, _ = fmt.Fprintf(w, "Error: ")
+ case SeverityLevelWarn:
+ _, _ = fmt.Fprintf(w, "Warning: ")
+ }
+ fmt.Fprintf(w, "%s: %s\n", diag.StartPos, diag.Message)
+}
+
+func (p *Printer) printRange(w io.Writer, file []byte, diag Diagnostic) {
+ var (
+ start = diag.StartPos
+ end = diag.EndPos
+ )
+
+ fmt.Fprintf(w, "\n")
+
+ var (
+ lines = strings.Split(string(file), "\n")
+
+ startLine = max(start.Line-p.cfg.ContextLinesBefore, 1)
+ endLine = min(end.Line+p.cfg.ContextLinesAfter, len(lines))
+
+ multiline = end.Line-start.Line > 0
+ )
+
+ prefixWidth := len(strconv.Itoa(endLine))
+
+ for lineNum := startLine; lineNum <= endLine; lineNum++ {
+ line := lines[lineNum-1]
+
+ // Print line number and margin.
+ printPaddedNumber(w, prefixWidth, lineNum)
+ fmt.Fprintf(w, " | ")
+
+ if multiline {
+ // Use 0 for the column number so we never consider the starting line for
+ // showing |.
+ if inRange(lineNum, 0, start, end) {
+ fmt.Fprint(w, "| ")
+ } else {
+ fmt.Fprint(w, "  ")
+ }
+ }
+
+ // Print the line, but filter out any \r and replace tabs with spaces.
+ for _, ch := range line {
+ if ch == '\r' {
+ continue
+ }
+ if ch == '\t' || ch == '\v' {
+ printCh(w, tabWidth, ' ')
+ continue
+ }
+ fmt.Fprintf(w, "%c", ch)
+ }
+
+ fmt.Fprintf(w, "\n")
+
+ // Print the focus indicator if we're on a line that needs it.
+ //
+ // The focus indicator line must preserve whitespace present in the line
+ // above it prior to the focus '^' characters. Tab characters are replaced
+ // with spaces for consistent printing.
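+ //
+ // For example (illustrative; see printer_test.go), a diagnostic covering
+ // columns 2-5 of a single line renders as:
+ //
+ // 2 | attr = 1
+ //   | ^^^^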
+ if lineNum == start.Line || (multiline && lineNum == end.Line) { + printCh(w, prefixWidth, ' ') // Add empty space where line number would be + + // Print the margin after the blank line number. On multi-line errors, + // the arrow is printed all the way to the margin, with straight + // lines going down in between the lines. + switch { + case multiline && lineNum == start.Line: + // |_ would look like an incorrect right angle, so the second bar + // is dropped. + fmt.Fprintf(w, " | _") + case multiline && lineNum == end.Line: + fmt.Fprintf(w, " | |_") + default: + fmt.Fprintf(w, " | ") + } + + p.printFocus(w, line, lineNum, diag) + fmt.Fprintf(w, "\n") + } + } +} + +// printFocus prints the focus indicator for the line number specified by line. +// The contents of the line should be represented by data so whitespace can be +// retained (injecting spaces where a tab should be, etc.). +func (p *Printer) printFocus(w io.Writer, data string, line int, diag Diagnostic) { + for i, ch := range data { + column := i + 1 + + if line == diag.EndPos.Line && column > diag.EndPos.Column { + // Stop printing the formatting line after printing all the ^. + break + } + + blank := byte(' ') + if diag.EndPos.Line-diag.StartPos.Line > 0 { + blank = byte('_') + } + + switch { + case ch == '\t' || ch == '\v': + printCh(w, tabWidth, blank) + case inRange(line, column, diag.StartPos, diag.EndPos): + fmt.Fprintf(w, "%c", '^') + default: + // Print a space. + fmt.Fprintf(w, "%c", blank) + } + } +} + +func inRange(line, col int, start, end token.Position) bool { + if line < start.Line || line > end.Line { + return false + } + + switch line { + case start.Line: + // If the current line is on the starting line, we have to be past the + // starting column. + return col >= start.Column + case end.Line: + // If the current line is on the ending line, we have to be before the + // final column. + return col <= end.Column + default: + // Otherwise, every column across all the lines in between + // is in the range. + return true + } +} + +func printPaddedNumber(w io.Writer, width int, num int) { + numStr := strconv.Itoa(num) + for i := 0; i < width-len(numStr); i++ { + _, _ = w.Write([]byte{' '}) + } + _, _ = w.Write([]byte(numStr)) +} + +func printCh(w io.Writer, count int, ch byte) { + for i := 0; i < count; i++ { + _, _ = w.Write([]byte{ch}) + } +} + +func min(a, b int) int { + if a < b { + return a + } + return b +} + +func max(a, b int) int { + if a > b { + return a + } + return b +} diff --git a/syntax/diag/printer_test.go b/syntax/diag/printer_test.go new file mode 100644 index 0000000000..4558e666ef --- /dev/null +++ b/syntax/diag/printer_test.go @@ -0,0 +1,222 @@ +package diag_test + +import ( + "bytes" + "fmt" + "testing" + + "github.com/grafana/river/diag" + "github.com/grafana/river/token" + "github.com/stretchr/testify/require" +) + +func TestFprint(t *testing.T) { + // In all tests below, the filename is "testfile" and the severity is an + // error. 
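+ //
+ // Each case renders a synthetic diagnostic over input through Fprint and
+ // compares the rendered output against expect verbatim.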
+ + tt := []struct { + name string + input string + start, end token.Position + diag diag.Diagnostic + expect string + }{ + { + name: "highlight on same line", + start: token.Position{Line: 2, Column: 2}, + end: token.Position{Line: 2, Column: 5}, + input: `test.block "label" { + attr = 1 + other_attr = 2 +}`, + expect: `Error: testfile:2:2: synthetic error + +1 | test.block "label" { +2 | attr = 1 + | ^^^^ +3 | other_attr = 2 +`, + }, + + { + name: "end positions should be optional", + start: token.Position{Line: 1, Column: 4}, + input: `foo,bar`, + expect: `Error: testfile:1:4: synthetic error + +1 | foo,bar + | ^ +`, + }, + + { + name: "padding should be inserted to fit line numbers of different lengths", + start: token.Position{Line: 9, Column: 1}, + end: token.Position{Line: 9, Column: 6}, + input: `LINE_1 +LINE_2 +LINE_3 +LINE_4 +LINE_5 +LINE_6 +LINE_7 +LINE_8 +LINE_9 +LINE_10 +LINE_11`, + expect: `Error: testfile:9:1: synthetic error + + 8 | LINE_8 + 9 | LINE_9 + | ^^^^^^ +10 | LINE_10 +`, + }, + + { + name: "errors which cross multiple lines can be printed from start of line", + start: token.Position{Line: 2, Column: 1}, + end: token.Position{Line: 6, Column: 7}, + input: `FILE_BEGIN +START +TEXT + TEXT + TEXT + DONE after +FILE_END`, + expect: `Error: testfile:2:1: synthetic error + +1 | FILE_BEGIN +2 | START + | _^^^^^ +3 | | TEXT +4 | | TEXT +5 | | TEXT +6 | | DONE after + | |_____________^^^^ +7 | FILE_END +`, + }, + + { + name: "errors which cross multiple lines can be printed from middle of line", + start: token.Position{Line: 2, Column: 8}, + end: token.Position{Line: 6, Column: 7}, + input: `FILE_BEGIN +before START +TEXT + TEXT + TEXT + DONE after +FILE_END`, + expect: `Error: testfile:2:8: synthetic error + +1 | FILE_BEGIN +2 | before START + | ________^^^^^ +3 | | TEXT +4 | | TEXT +5 | | TEXT +6 | | DONE after + | |_____________^^^^ +7 | FILE_END +`, + }, + } + + for _, tc := range tt { + t.Run(tc.name, func(t *testing.T) { + files := map[string][]byte{ + "testfile": []byte(tc.input), + } + + tc.start.Filename = "testfile" + tc.end.Filename = "testfile" + + diags := diag.Diagnostics{{ + Severity: diag.SeverityLevelError, + StartPos: tc.start, + EndPos: tc.end, + Message: "synthetic error", + }} + + var buf bytes.Buffer + _ = diag.Fprint(&buf, files, diags) + requireEqualStrings(t, tc.expect, buf.String()) + }) + } +} + +func TestFprint_MultipleDiagnostics(t *testing.T) { + fileA := `old_field = 15 +3 & 4` + fileB := `old_field = 22` + + files := map[string][]byte{ + "file_a": []byte(fileA), + "file_b": []byte(fileB), + } + + diags := diag.Diagnostics{ + { + Severity: diag.SeverityLevelWarn, + StartPos: token.Position{Filename: "file_a", Line: 1, Column: 1}, + EndPos: token.Position{Filename: "file_a", Line: 1, Column: 9}, + Message: "old_field is deprecated", + }, + { + Severity: diag.SeverityLevelError, + StartPos: token.Position{Filename: "file_a", Line: 2, Column: 3}, + Message: "unrecognized operator &", + }, + { + Severity: diag.SeverityLevelWarn, + StartPos: token.Position{Filename: "file_b", Line: 1, Column: 1}, + EndPos: token.Position{Filename: "file_b", Line: 1, Column: 9}, + Message: "old_field is deprecated", + }, + } + + expect := `Warning: file_a:1:1: old_field is deprecated + +1 | old_field = 15 + | ^^^^^^^^^ +2 | 3 & 4 + +Error: file_a:2:3: unrecognized operator & + +1 | old_field = 15 +2 | 3 & 4 + | ^ + +Warning: file_b:1:1: old_field is deprecated + +1 | old_field = 22 + | ^^^^^^^^^ +` + + var buf bytes.Buffer + _ = diag.Fprint(&buf, files, diags) + 
requireEqualStrings(t, expect, buf.String()) +} + +// requireEqualStrings is like require.Equal with two strings but it +// pretty-prints multiline strings to make it easier to compare. +func requireEqualStrings(t *testing.T, expected, actual string) { + if expected == actual { + return + } + + msg := fmt.Sprintf( + "Not equal:\n"+ + "raw expected: %#v\n"+ + "raw actual : %#v\n"+ + "\n"+ + "expected:\n%s\n"+ + "actual:\n%s\n", + expected, actual, + expected, actual, + ) + + require.Fail(t, msg) +} diff --git a/syntax/encoding/riverjson/riverjson.go b/syntax/encoding/riverjson/riverjson.go new file mode 100644 index 0000000000..fc69882918 --- /dev/null +++ b/syntax/encoding/riverjson/riverjson.go @@ -0,0 +1,313 @@ +// Package riverjson encodes River as JSON. +package riverjson + +import ( + "encoding/json" + "fmt" + "reflect" + "sort" + "strings" + + "github.com/grafana/river/internal/reflectutil" + "github.com/grafana/river/internal/rivertags" + "github.com/grafana/river/internal/value" + "github.com/grafana/river/token/builder" +) + +var goRiverDefaulter = reflect.TypeOf((*value.Defaulter)(nil)).Elem() + +// MarshalBody marshals the provided Go value to a JSON representation of +// River. MarshalBody panics if not given a struct with River tags or a map[string]any. +func MarshalBody(val interface{}) ([]byte, error) { + rv := reflect.ValueOf(val) + return json.Marshal(encodeStructAsBody(rv)) +} + +func encodeStructAsBody(rv reflect.Value) jsonBody { + for rv.Kind() == reflect.Pointer { + if rv.IsNil() { + return jsonBody{} + } + rv = rv.Elem() + } + + if rv.Kind() == reflect.Invalid { + return jsonBody{} + } + + body := jsonBody{} + + switch rv.Kind() { + case reflect.Struct: + fields := rivertags.Get(rv.Type()) + defaults := reflect.New(rv.Type()).Elem() + if defaults.CanAddr() && defaults.Addr().Type().Implements(goRiverDefaulter) { + defaults.Addr().Interface().(value.Defaulter).SetToDefault() + } + + for _, field := range fields { + fieldVal := reflectutil.Get(rv, field) + fieldValDefault := reflectutil.Get(defaults, field) + + isEqual := fieldVal.Comparable() && fieldVal.Equal(fieldValDefault) + isZero := fieldValDefault.IsZero() && fieldVal.IsZero() + + if field.IsOptional() && (isEqual || isZero) { + continue + } + + body = append(body, encodeFieldAsStatements(nil, field, fieldVal)...) + } + + case reflect.Map: + if rv.Type().Key().Kind() != reflect.String { + panic("river/encoding/riverjson: unsupported map type; expected map[string]T, got " + rv.Type().String()) + } + + iter := rv.MapRange() + for iter.Next() { + mapKey, mapValue := iter.Key(), iter.Value() + + body = append(body, jsonAttr{ + Name: mapKey.String(), + Type: "attr", + Value: buildJSONValue(value.FromRaw(mapValue)), + }) + } + + default: + panic(fmt.Sprintf("river/encoding/riverjson: can only encode struct or map[string]T values to bodies, got %s", rv.Kind())) + } + + return body +} + +// encodeFieldAsStatements encodes an individual field from a struct as a set +// of statements. One field may map to multiple statements in the case of a +// slice of blocks. 
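+//
+// For example (mirroring the package tests), a field tagged
+// `river:"inner_block,block,optional"` holding two struct elements encodes as
+// two jsonBlock statements, both named "inner_block".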
+func encodeFieldAsStatements(prefix []string, field rivertags.Field, fieldValue reflect.Value) []jsonStatement {
+ fieldName := strings.Join(field.Name, ".")
+
+ for fieldValue.Kind() == reflect.Pointer {
+ if fieldValue.IsNil() {
+ break
+ }
+ fieldValue = fieldValue.Elem()
+ }
+
+ switch {
+ case field.IsAttr():
+ return []jsonStatement{jsonAttr{
+ Name: fieldName,
+ Type: "attr",
+ Value: buildJSONValue(value.FromRaw(fieldValue)),
+ }}
+
+ case field.IsBlock():
+ fullName := mergeStringSlice(prefix, field.Name)
+
+ switch {
+ case fieldValue.Kind() == reflect.Map:
+ // Iterate over the map and add each element as an attribute in the
+ // block body.
+
+ if fieldValue.Type().Key().Kind() != reflect.String {
+ panic("river/encoding/riverjson: unsupported map type for block; expected map[string]T, got " + fieldValue.Type().String())
+ }
+
+ statements := []jsonStatement{}
+
+ iter := fieldValue.MapRange()
+ for iter.Next() {
+ mapKey, mapValue := iter.Key(), iter.Value()
+
+ statements = append(statements, jsonAttr{
+ Name: mapKey.String(),
+ Type: "attr",
+ Value: buildJSONValue(value.FromRaw(mapValue)),
+ })
+ }
+
+ return []jsonStatement{jsonBlock{
+ Name: strings.Join(fullName, "."),
+ Type: "block",
+ Body: statements,
+ }}
+
+ case fieldValue.Kind() == reflect.Slice, fieldValue.Kind() == reflect.Array:
+ statements := []jsonStatement{}
+
+ for i := 0; i < fieldValue.Len(); i++ {
+ elem := fieldValue.Index(i)
+
+ // Recursively call encodeFieldAsStatements for each element in the
+ // slice/array. The recursive call will hit the case below and add a
+ // new block for each field encountered.
+ statements = append(statements, encodeFieldAsStatements(prefix, field, elem)...)
+ }
+
+ return statements
+
+ case fieldValue.Kind() == reflect.Struct:
+ if fieldValue.IsZero() {
+ // It shouldn't be possible to have a required block which is unset, but
+ // we'll encode something anyway.
+ return []jsonStatement{jsonBlock{
+ Name: strings.Join(fullName, "."),
+ Type: "block",
+
+ // Never set this to nil, since the API contract always expects blocks
+ // to have an array value for the body.
+ Body: []jsonStatement{},
+ }}
+ }
+
+ return []jsonStatement{jsonBlock{
+ Name: strings.Join(fullName, "."),
+ Type: "block",
+ Label: getBlockLabel(fieldValue),
+ Body: encodeStructAsBody(fieldValue),
+ }}
+ }
+
+ case field.IsEnum():
+ // Blocks within an enum have a prefix set.
+ newPrefix := mergeStringSlice(prefix, field.Name)
+
+ switch {
+ case fieldValue.Kind() == reflect.Slice, fieldValue.Kind() == reflect.Array:
+ statements := []jsonStatement{}
+ for i := 0; i < fieldValue.Len(); i++ {
+ statements = append(statements, encodeEnumElementToStatements(newPrefix, fieldValue.Index(i))...)
+ }
+ return statements
+
+ default:
+ panic(fmt.Sprintf("river/encoding/riverjson: unrecognized enum kind %s", fieldValue.Kind()))
+ }
+ }
+
+ return nil
+}
+
+func mergeStringSlice(a, b []string) []string {
+ if len(a) == 0 {
+ return b
+ } else if len(b) == 0 {
+ return a
+ }
+
+ res := make([]string, 0, len(a)+len(b))
+ res = append(res, a...)
+ res = append(res, b...)
+ return res
+}
+
+// getBlockLabel returns the label for a given block.
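+//
+// For example (mirroring the package tests), a block struct with a field
+// tagged `river:",label"` holding "label_a" yields the label "label_a".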
+func getBlockLabel(rv reflect.Value) string { + tags := rivertags.Get(rv.Type()) + for _, tag := range tags { + if tag.Flags&rivertags.FlagLabel != 0 { + return reflectutil.Get(rv, tag).String() + } + } + + return "" +} + +func encodeEnumElementToStatements(prefix []string, enumElement reflect.Value) []jsonStatement { + for enumElement.Kind() == reflect.Pointer { + if enumElement.IsNil() { + return nil + } + enumElement = enumElement.Elem() + } + + fields := rivertags.Get(enumElement.Type()) + + statements := []jsonStatement{} + + // Find the first non-zero field and encode it. + for _, field := range fields { + fieldVal := reflectutil.Get(enumElement, field) + if !fieldVal.IsValid() || fieldVal.IsZero() { + continue + } + + statements = append(statements, encodeFieldAsStatements(prefix, field, fieldVal)...) + break + } + + return statements +} + +// MarshalValue marshals the provided Go value to a JSON representation of +// River. +func MarshalValue(val interface{}) ([]byte, error) { + riverValue := value.Encode(val) + return json.Marshal(buildJSONValue(riverValue)) +} + +func buildJSONValue(v value.Value) jsonValue { + if tk, ok := v.Interface().(builder.Tokenizer); ok { + return jsonValue{ + Type: "capsule", + Value: tk.RiverTokenize()[0].Lit, + } + } + + switch v.Type() { + case value.TypeNull: + return jsonValue{Type: "null"} + + case value.TypeNumber: + return jsonValue{Type: "number", Value: v.Number().Float()} + + case value.TypeString: + return jsonValue{Type: "string", Value: v.Text()} + + case value.TypeBool: + return jsonValue{Type: "bool", Value: v.Bool()} + + case value.TypeArray: + elements := []interface{}{} + + for i := 0; i < v.Len(); i++ { + element := v.Index(i) + + elements = append(elements, buildJSONValue(element)) + } + + return jsonValue{Type: "array", Value: elements} + + case value.TypeObject: + keys := v.Keys() + + // If v isn't an ordered object (i.e., a go map), sort the keys so they + // have a deterministic print order. + if !v.OrderedKeys() { + sort.Strings(keys) + } + + fields := []jsonObjectField{} + + for i := 0; i < len(keys); i++ { + field, _ := v.Key(keys[i]) + + fields = append(fields, jsonObjectField{ + Key: keys[i], + Value: buildJSONValue(field), + }) + } + + return jsonValue{Type: "object", Value: fields} + + case value.TypeFunction: + return jsonValue{Type: "function", Value: v.Describe()} + + case value.TypeCapsule: + return jsonValue{Type: "capsule", Value: v.Describe()} + + default: + panic(fmt.Sprintf("river/encoding/riverjson: unrecognized value type %q", v.Type())) + } +} diff --git a/syntax/encoding/riverjson/riverjson_test.go b/syntax/encoding/riverjson/riverjson_test.go new file mode 100644 index 0000000000..0eeb321b59 --- /dev/null +++ b/syntax/encoding/riverjson/riverjson_test.go @@ -0,0 +1,363 @@ +package riverjson_test + +import ( + "testing" + + river "github.com/grafana/river" + "github.com/grafana/river/encoding/riverjson" + "github.com/grafana/river/rivertypes" + "github.com/stretchr/testify/require" +) + +func TestValues(t *testing.T) { + tt := []struct { + name string + input interface{} + expectJSON string + }{ + { + name: "null", + input: nil, + expectJSON: `{ "type": "null", "value": null }`, + }, + { + name: "number", + input: 54, + expectJSON: `{ "type": "number", "value": 54 }`, + }, + { + name: "string", + input: "Hello, world!", + expectJSON: `{ "type": "string", "value": "Hello, world!" 
}`, + }, + { + name: "bool", + input: true, + expectJSON: `{ "type": "bool", "value": true }`, + }, + { + name: "simple array", + input: []int{0, 1, 2, 3, 4}, + expectJSON: `{ + "type": "array", + "value": [ + { "type": "number", "value": 0 }, + { "type": "number", "value": 1 }, + { "type": "number", "value": 2 }, + { "type": "number", "value": 3 }, + { "type": "number", "value": 4 } + ] + }`, + }, + { + name: "nested array", + input: []interface{}{"testing", []int{0, 1, 2}}, + expectJSON: `{ + "type": "array", + "value": [ + { "type": "string", "value": "testing" }, + { + "type": "array", + "value": [ + { "type": "number", "value": 0 }, + { "type": "number", "value": 1 }, + { "type": "number", "value": 2 } + ] + } + ] + }`, + }, + { + name: "object", + input: map[string]any{"foo": "bar", "fizz": "buzz", "year": 2023}, + expectJSON: `{ + "type": "object", + "value": [ + { "key": "fizz", "value": { "type": "string", "value": "buzz" }}, + { "key": "foo", "value": { "type": "string", "value": "bar" }}, + { "key": "year", "value": { "type": "number", "value": 2023 }} + ] + }`, + }, + { + name: "function", + input: func(i int) int { return i * 2 }, + expectJSON: `{ "type": "function", "value": "function" }`, + }, + { + name: "capsule", + input: rivertypes.Secret("foo"), + expectJSON: `{ "type": "capsule", "value": "(secret)" }`, + }, + { + // nil arrays and objects must always be [] instead of null as that's + // what the API definition says they should be. + name: "nil array", + input: ([]any)(nil), + expectJSON: `{ "type": "array", "value": [] }`, + }, + { + // nil arrays and objects must always be [] instead of null as that's + // what the API definition says they should be. + name: "nil object", + input: (map[string]any)(nil), + expectJSON: `{ "type": "object", "value": [] }`, + }, + } + + for _, tc := range tt { + t.Run(tc.name, func(t *testing.T) { + actual, err := riverjson.MarshalValue(tc.input) + require.NoError(t, err) + require.JSONEq(t, tc.expectJSON, string(actual)) + }) + } +} + +func TestBlock(t *testing.T) { + // Zero values should be omitted from result. 
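+ //
+ // String, Boolean, and Object are left at their zero values here, so they
+ // must not appear in the expected JSON below.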
+ + val := testBlock{ + Number: 5, + Array: []any{1, 2, 3}, + Labeled: []labeledBlock{ + { + TestBlock: testBlock{Boolean: true}, + Label: "label_a", + }, + { + TestBlock: testBlock{String: "foo"}, + Label: "label_b", + }, + }, + Blocks: []testBlock{ + {String: "hello"}, + {String: "world"}, + }, + } + + expect := `[ + { + "name": "number", + "type": "attr", + "value": { "type": "number", "value": 5 } + }, + { + "name": "array", + "type": "attr", + "value": { + "type": "array", + "value": [ + { "type": "number", "value": 1 }, + { "type": "number", "value": 2 }, + { "type": "number", "value": 3 } + ] + } + }, + { + "name": "labeled_block", + "type": "block", + "label": "label_a", + "body": [{ + "name": "boolean", + "type": "attr", + "value": { "type": "bool", "value": true } + }] + }, + { + "name": "labeled_block", + "type": "block", + "label": "label_b", + "body": [{ + "name": "string", + "type": "attr", + "value": { "type": "string", "value": "foo" } + }] + }, + { + "name": "inner_block", + "type": "block", + "body": [{ + "name": "string", + "type": "attr", + "value": { "type": "string", "value": "hello" } + }] + }, + { + "name": "inner_block", + "type": "block", + "body": [{ + "name": "string", + "type": "attr", + "value": { "type": "string", "value": "world" } + }] + } + ]` + + actual, err := riverjson.MarshalBody(val) + require.NoError(t, err) + require.JSONEq(t, expect, string(actual)) +} + +func TestBlock_Empty_Required_Block_Slice(t *testing.T) { + type wrapper struct { + Blocks []testBlock `river:"some_block,block"` + } + + tt := []struct { + name string + val any + }{ + {"nil block slice", wrapper{Blocks: nil}}, + {"empty block slice", wrapper{Blocks: []testBlock{}}}, + } + + for _, tc := range tt { + t.Run(tc.name, func(t *testing.T) { + expect := `[]` + + actual, err := riverjson.MarshalBody(tc.val) + require.NoError(t, err) + require.JSONEq(t, expect, string(actual)) + }) + } +} + +type testBlock struct { + Number int `river:"number,attr,optional"` + String string `river:"string,attr,optional"` + Boolean bool `river:"boolean,attr,optional"` + Array []any `river:"array,attr,optional"` + Object map[string]any `river:"object,attr,optional"` + + Labeled []labeledBlock `river:"labeled_block,block,optional"` + Blocks []testBlock `river:"inner_block,block,optional"` +} + +type labeledBlock struct { + TestBlock testBlock `river:",squash"` + Label string `river:",label"` +} + +func TestNilBody(t *testing.T) { + actual, err := riverjson.MarshalBody(nil) + require.NoError(t, err) + require.JSONEq(t, `[]`, string(actual)) +} + +func TestEmptyBody(t *testing.T) { + type block struct{} + + actual, err := riverjson.MarshalBody(block{}) + require.NoError(t, err) + require.JSONEq(t, `[]`, string(actual)) +} + +func TestHideDefaults(t *testing.T) { + tt := []struct { + name string + val defaultsBlock + expectJSON string + }{ + { + name: "no defaults", + val: defaultsBlock{ + Name: "Jane", + Age: 41, + }, + expectJSON: `[ + { "name": "name", "type": "attr", "value": { "type": "string", "value": "Jane" }}, + { "name": "age", "type": "attr", "value": { "type": "number", "value": 41 }} + ]`, + }, + { + name: "some defaults", + val: defaultsBlock{ + Name: "John Doe", + Age: 41, + }, + expectJSON: `[ + { "name": "age", "type": "attr", "value": { "type": "number", "value": 41 }} + ]`, + }, + { + name: "all defaults", + val: defaultsBlock{ + Name: "John Doe", + Age: 35, + }, + expectJSON: `[]`, + }, + } + + for _, tc := range tt { + t.Run(tc.name, func(t *testing.T) { + actual, err := 
riverjson.MarshalBody(tc.val) + require.NoError(t, err) + require.JSONEq(t, tc.expectJSON, string(actual)) + }) + } +} + +type defaultsBlock struct { + Name string `river:"name,attr,optional"` + Age int `river:"age,attr,optional"` +} + +var _ river.Defaulter = (*defaultsBlock)(nil) + +func (d *defaultsBlock) SetToDefault() { + *d = defaultsBlock{ + Name: "John Doe", + Age: 35, + } +} + +func TestMapBlocks(t *testing.T) { + type block struct { + Value map[string]any `river:"block,block,optional"` + } + val := block{Value: map[string]any{"field": "value"}} + + expect := `[{ + "name": "block", + "type": "block", + "body": [{ + "name": "field", + "type": "attr", + "value": { "type": "string", "value": "value" } + }] + }]` + + bb, err := riverjson.MarshalBody(val) + require.NoError(t, err) + require.JSONEq(t, expect, string(bb)) +} + +func TestRawMap(t *testing.T) { + val := map[string]any{"field": "value"} + + expect := `[{ + "name": "field", + "type": "attr", + "value": { "type": "string", "value": "value" } + }]` + + bb, err := riverjson.MarshalBody(val) + require.NoError(t, err) + require.JSONEq(t, expect, string(bb)) +} + +func TestRawMap_Capsule(t *testing.T) { + val := map[string]any{"capsule": rivertypes.Secret("foo")} + + expect := `[{ + "name": "capsule", + "type": "attr", + "value": { "type": "capsule", "value": "(secret)" } + }]` + + bb, err := riverjson.MarshalBody(val) + require.NoError(t, err) + require.JSONEq(t, expect, string(bb)) +} diff --git a/syntax/encoding/riverjson/types.go b/syntax/encoding/riverjson/types.go new file mode 100644 index 0000000000..3170331e46 --- /dev/null +++ b/syntax/encoding/riverjson/types.go @@ -0,0 +1,41 @@ +package riverjson + +// Various concrete types used to marshal River values. +type ( + // jsonStatement is a statement within a River body. + jsonStatement interface{ isStatement() } + + // A jsonBody is a collection of statements. + jsonBody = []jsonStatement + + // jsonBlock represents a River block as JSON. jsonBlock is a jsonStatement. + jsonBlock struct { + Name string `json:"name"` + Type string `json:"type"` // Always "block" + Label string `json:"label,omitempty"` + Body []jsonStatement `json:"body"` + } + + // jsonAttr represents a River attribute as JSON. jsonAttr is a + // jsonStatement. + jsonAttr struct { + Name string `json:"name"` + Type string `json:"type"` // Always "attr" + Value jsonValue `json:"value"` + } + + // jsonValue represents a single River value as JSON. + jsonValue struct { + Type string `json:"type"` + Value interface{} `json:"value"` + } + + // jsonObjectField represents a field within a River object. 
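+ //
+ // For example (illustrative, matching the encoding tests), the River
+ // object {"foo": "bar"} is represented as:
+ //
+ // { "type": "object", "value": [
+ // { "key": "foo", "value": { "type": "string", "value": "bar" } }
+ // ] }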
+ jsonObjectField struct { + Key string `json:"key"` + Value interface{} `json:"value"` + } +) + +func (jsonBlock) isStatement() {} +func (jsonAttr) isStatement() {} diff --git a/syntax/go.mod b/syntax/go.mod new file mode 100644 index 0000000000..6b85c2e5da --- /dev/null +++ b/syntax/go.mod @@ -0,0 +1,18 @@ +module github.com/grafana/river + +go 1.21.0 + +require ( + github.com/fatih/color v1.15.0 + github.com/ohler55/ojg v1.20.1 + github.com/stretchr/testify v1.8.4 +) + +require ( + github.com/davecgh/go-spew v1.1.1 // indirect + github.com/mattn/go-colorable v0.1.13 // indirect + github.com/mattn/go-isatty v0.0.17 // indirect + github.com/pmezard/go-difflib v1.0.0 // indirect + golang.org/x/sys v0.6.0 // indirect + gopkg.in/yaml.v3 v3.0.1 // indirect +) diff --git a/syntax/go.sum b/syntax/go.sum new file mode 100644 index 0000000000..a972b9838b --- /dev/null +++ b/syntax/go.sum @@ -0,0 +1,22 @@ +github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c= +github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= +github.com/fatih/color v1.15.0 h1:kOqh6YHBtK8aywxGerMG2Eq3H6Qgoqeo13Bk2Mv/nBs= +github.com/fatih/color v1.15.0/go.mod h1:0h5ZqXfHYED7Bhv2ZJamyIOUej9KtShiJESRwBDUSsw= +github.com/mattn/go-colorable v0.1.13 h1:fFA4WZxdEF4tXPZVKMLwD8oUnCTTo08duU7wxecdEvA= +github.com/mattn/go-colorable v0.1.13/go.mod h1:7S9/ev0klgBDR4GtXTXX8a3vIGJpMovkB8vQcUbaXHg= +github.com/mattn/go-isatty v0.0.16/go.mod h1:kYGgaQfpe5nmfYZH+SKPsOc2e4SrIfOl2e/yFXSvRLM= +github.com/mattn/go-isatty v0.0.17 h1:BTarxUcIeDqL27Mc+vyvdWYSL28zpIhv3RoTdsLMPng= +github.com/mattn/go-isatty v0.0.17/go.mod h1:kYGgaQfpe5nmfYZH+SKPsOc2e4SrIfOl2e/yFXSvRLM= +github.com/ohler55/ojg v1.20.1 h1:Io65sHjMjYPI7yuhUr8VdNmIQdYU6asKeFhOs8xgBnY= +github.com/ohler55/ojg v1.20.1/go.mod h1:uHcD1ErbErC27Zhb5Df2jUjbseLLcmOCo6oxSr3jZxo= +github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM= +github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= +github.com/stretchr/testify v1.8.4 h1:CcVxjf3Q8PM0mHUKJCdn+eZZtm5yQwehR5yeSVQQcUk= +github.com/stretchr/testify v1.8.4/go.mod h1:sz/lmYIOXD/1dqDmKjjqLyZ2RngseejIcXlSw2iwfAo= +golang.org/x/sys v0.0.0-20220811171246-fbc7d0a398ab/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.6.0 h1:MVltZSvRTcU2ljQOhs94SXPftV6DCNnZViHeQps87pQ= +golang.org/x/sys v0.6.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405 h1:yhCVgyC4o1eVCa2tZl7eS0r+SDo693bJlVdllGtEeKM= +gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= +gopkg.in/yaml.v3 v3.0.1 h1:fxVm/GzAzEWqLHuvctI91KS9hhNmmWOoWu0XTYJS7CA= +gopkg.in/yaml.v3 v3.0.1/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= diff --git a/syntax/internal/reflectutil/walk.go b/syntax/internal/reflectutil/walk.go new file mode 100644 index 0000000000..17cbb25d49 --- /dev/null +++ b/syntax/internal/reflectutil/walk.go @@ -0,0 +1,89 @@ +package reflectutil + +import ( + "reflect" + + "github.com/grafana/river/internal/rivertags" +) + +// GetOrAlloc returns the nested field of value corresponding to index. +// GetOrAlloc panics if not given a struct. +func GetOrAlloc(value reflect.Value, field rivertags.Field) reflect.Value { + return GetOrAllocIndex(value, field.Index) +} + +// GetOrAllocIndex returns the nested field of value corresponding to index. +// GetOrAllocIndex panics if not given a struct. 
+//
+// It is similar to [reflect.Value.FieldByIndex] but can handle traversing
+// through nil pointers. GetOrAllocIndex allocates any intermediate nil
+// pointers while traversing the struct.
+func GetOrAllocIndex(value reflect.Value, index []int) reflect.Value {
+ if len(index) == 1 {
+ return value.Field(index[0])
+ }
+
+ if value.Kind() != reflect.Struct {
+ panic("GetOrAlloc must be given a Struct, but found " + value.Kind().String())
+ }
+
+ for _, next := range index {
+ value = deferencePointer(value).Field(next)
+ }
+
+ return value
+}
+
+func deferencePointer(value reflect.Value) reflect.Value {
+ for value.Kind() == reflect.Pointer {
+ if value.IsNil() {
+ value.Set(reflect.New(value.Type().Elem()))
+ }
+ value = value.Elem()
+ }
+
+ return value
+}
+
+// Get returns the nested field of value corresponding to field.Index. Get
+// panics if not given a struct.
+//
+// It is similar to [reflect.Value.FieldByIndex] but can handle traversing
+// through nil pointers. If Get traverses through a nil pointer, a non-settable
+// zero value for the final field is returned.
+func Get(value reflect.Value, field rivertags.Field) reflect.Value {
+ if len(field.Index) == 1 {
+ return value.Field(field.Index[0])
+ }
+
+ if value.Kind() != reflect.Struct {
+ panic("Get must be given a Struct, but found " + value.Kind().String())
+ }
+
+ for i, next := range field.Index {
+ for value.Kind() == reflect.Pointer {
+ if value.IsNil() {
+ return getZero(value, field.Index[i:])
+ }
+ value = value.Elem()
+ }
+
+ value = value.Field(next)
+ }
+
+ return value
+}
+
+// getZero returns a non-settable zero value while walking value.
+func getZero(value reflect.Value, index []int) reflect.Value {
+ typ := value.Type()
+
+ for _, next := range index {
+ for typ.Kind() == reflect.Pointer {
+ typ = typ.Elem()
+ }
+ typ = typ.Field(next).Type
+ }
+
+ return reflect.Zero(typ)
+}
diff --git a/syntax/internal/reflectutil/walk_test.go b/syntax/internal/reflectutil/walk_test.go
new file mode 100644
index 0000000000..f536770e5a
--- /dev/null
+++ b/syntax/internal/reflectutil/walk_test.go
@@ -0,0 +1,72 @@
+package reflectutil_test
+
+import (
+ "reflect"
+ "testing"
+
+ "github.com/grafana/river/internal/reflectutil"
+ "github.com/grafana/river/internal/rivertags"
+ "github.com/stretchr/testify/assert"
+ "github.com/stretchr/testify/require"
+)
+
+func TestDeeplyNested_Access(t *testing.T) {
+ type Struct struct {
+ Field1 struct {
+ Field2 struct {
+ Field3 struct {
+ Value string
+ }
+ }
+ }
+ }
+
+ var s Struct
+ s.Field1.Field2.Field3.Value = "Hello, world!"
+ + rv := reflect.ValueOf(&s).Elem() + innerValue := reflectutil.GetOrAlloc(rv, rivertags.Field{Index: []int{0, 0, 0, 0}}) + assert.True(t, innerValue.CanSet()) + assert.Equal(t, reflect.String, innerValue.Kind()) +} + +func TestDeeplyNested_Allocate(t *testing.T) { + type Struct struct { + Field1 *struct { + Field2 *struct { + Field3 *struct { + Value string + } + } + } + } + + var s Struct + + rv := reflect.ValueOf(&s).Elem() + innerValue := reflectutil.GetOrAlloc(rv, rivertags.Field{Index: []int{0, 0, 0, 0}}) + require.True(t, innerValue.CanSet()) + require.Equal(t, reflect.String, innerValue.Kind()) + + innerValue.Set(reflect.ValueOf("Hello, world!")) + require.Equal(t, "Hello, world!", s.Field1.Field2.Field3.Value) +} + +func TestDeeplyNested_NoAllocate(t *testing.T) { + type Struct struct { + Field1 *struct { + Field2 *struct { + Field3 *struct { + Value string + } + } + } + } + + var s Struct + + rv := reflect.ValueOf(&s).Elem() + innerValue := reflectutil.Get(rv, rivertags.Field{Index: []int{0, 0, 0, 0}}) + assert.False(t, innerValue.CanSet()) + assert.Equal(t, reflect.String, innerValue.Kind()) +} diff --git a/syntax/internal/rivertags/rivertags.go b/syntax/internal/rivertags/rivertags.go new file mode 100644 index 0000000000..8186a644e0 --- /dev/null +++ b/syntax/internal/rivertags/rivertags.go @@ -0,0 +1,346 @@ +// Package rivertags decodes a struct type into river object +// and structural tags. +package rivertags + +import ( + "fmt" + "reflect" + "strings" +) + +// Flags is a bitmap of flags associated with a field on a struct. +type Flags uint + +// Valid flags. +const ( + FlagAttr Flags = 1 << iota // FlagAttr treats a field as attribute + FlagBlock // FlagBlock treats a field as a block + FlagEnum // FlagEnum treats a field as an enum of blocks + + FlagOptional // FlagOptional marks a field optional for decoding/encoding + FlagLabel // FlagLabel will store block labels in the field + FlagSquash // FlagSquash will expose inner fields from a struct as outer fields. +) + +// String returns the flags as a string. +func (f Flags) String() string { + attrs := make([]string, 0, 5) + + if f&FlagAttr != 0 { + attrs = append(attrs, "attr") + } + if f&FlagBlock != 0 { + attrs = append(attrs, "block") + } + if f&FlagEnum != 0 { + attrs = append(attrs, "enum") + } + if f&FlagOptional != 0 { + attrs = append(attrs, "optional") + } + if f&FlagLabel != 0 { + attrs = append(attrs, "label") + } + if f&FlagSquash != 0 { + attrs = append(attrs, "squash") + } + + return fmt.Sprintf("Flags(%s)", strings.Join(attrs, ",")) +} + +// GoString returns the %#v format of Flags. +func (f Flags) GoString() string { return f.String() } + +// Field is a tagged field within a struct. +type Field struct { + Name []string // Name of tagged field. + Index []int // Index into field. Use [reflectutil.GetOrAlloc] to retrieve a Value. + Flags Flags // Flags assigned to field. +} + +// Equals returns true if two fields are equal. +func (f Field) Equals(other Field) bool { + // Compare names + { + if len(f.Name) != len(other.Name) { + return false + } + + for i := 0; i < len(f.Name); i++ { + if f.Name[i] != other.Name[i] { + return false + } + } + } + + // Compare index. + { + if len(f.Index) != len(other.Index) { + return false + } + + for i := 0; i < len(f.Index); i++ { + if f.Index[i] != other.Index[i] { + return false + } + } + } + + // Finally, compare flags. + return f.Flags == other.Flags +} + +// IsAttr returns whether f is for an attribute. 
+func (f Field) IsAttr() bool { return f.Flags&FlagAttr != 0 }
+
+// IsBlock returns whether f is for a block.
+func (f Field) IsBlock() bool { return f.Flags&FlagBlock != 0 }
+
+// IsEnum returns whether f represents an enum of blocks, where only one block
+// is set at a time.
+func (f Field) IsEnum() bool { return f.Flags&FlagEnum != 0 }
+
+// IsOptional returns whether f is optional.
+func (f Field) IsOptional() bool { return f.Flags&FlagOptional != 0 }
+
+// IsLabel returns whether f is a label.
+func (f Field) IsLabel() bool { return f.Flags&FlagLabel != 0 }
+
+// Get returns the list of tagged fields for some struct type ty. Get panics if
+// ty is not a struct type.
+//
+// Get examines each tagged field in ty for a river key. The river key is then
+// parsed as containing a name for the field, followed by a required
+// comma-separated list of options. The name may be empty for fields which do
+// not require a name. Get will ignore any field that is not tagged with a
+// river key.
+//
+// Get will treat anonymous struct fields as if the inner fields were fields in
+// the outer struct.
+//
+// Examples of struct field tags and their meanings:
+//
+// // Field is used as a required block named "my_block".
+// Field struct{} `river:"my_block,block"`
+//
+// // Field is used as an optional block named "my_block".
+// Field struct{} `river:"my_block,block,optional"`
+//
+// // Field is used as a required attribute named "my_attr".
+// Field string `river:"my_attr,attr"`
+//
+// // Field is used as an optional attribute named "my_attr".
+// Field string `river:"my_attr,attr,optional"`
+//
+// // Field is used for storing the label of the block which the struct
+// // represents.
+// Field string `river:",label"`
+//
+// // Attributes and blocks inside of Field are exposed as top-level fields.
+// Field struct{} `river:",squash"`
+//
+// Blocks []struct{} `river:"my_block_prefix,enum"`
+//
+// With the exception of the `river:",label"` and `river:",squash"` tags, all
+// tagged fields must have a unique name.
+//
+// The type of tagged fields may be any Go type, with the exception of
+// `river:",label"` tags, which must be strings.
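+//
+// As a concrete sketch (mirroring the package tests), given:
+//
+// type Example struct {
+// Name string `river:"name,attr"`
+// Age int `river:"age,attr,optional"`
+// }
+//
+// Get(reflect.TypeOf(Example{})) returns two Fields: one with Name
+// []string{"name"}, Index []int{0}, and Flags FlagAttr, and one with Name
+// []string{"age"}, Index []int{1}, and Flags FlagAttr|FlagOptional.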
+func Get(ty reflect.Type) []Field { + if k := ty.Kind(); k != reflect.Struct { + panic(fmt.Sprintf("rivertags: Get requires struct kind, got %s", k)) + } + + var ( + fields []Field + + usedNames = make(map[string][]int) + usedLabelField = []int(nil) + ) + + for _, field := range reflect.VisibleFields(ty) { + // River does not support embedding of fields + if field.Anonymous { + panic(fmt.Sprintf("river: anonymous fields not supported %s", printPathToField(ty, field.Index))) + } + + tag, tagged := field.Tag.Lookup("river") + if !tagged { + continue + } + + if !field.IsExported() { + panic(fmt.Sprintf("river: river tag found on unexported field at %s", printPathToField(ty, field.Index))) + } + + options := strings.SplitN(tag, ",", 2) + if len(options) == 0 { + panic(fmt.Sprintf("river: unsupported empty tag at %s", printPathToField(ty, field.Index))) + } + if len(options) != 2 { + panic(fmt.Sprintf("river: field %s tag is missing options", printPathToField(ty, field.Index))) + } + + fullName := options[0] + + tf := Field{ + Name: strings.Split(fullName, "."), + Index: field.Index, + } + + if first, used := usedNames[fullName]; used && fullName != "" { + panic(fmt.Sprintf("river: field name %s already used by %s", fullName, printPathToField(ty, first))) + } + usedNames[fullName] = tf.Index + + flags, ok := parseFlags(options[1]) + if !ok { + panic(fmt.Sprintf("river: unrecognized river tag format %q at %s", tag, printPathToField(ty, tf.Index))) + } + tf.Flags = flags + + if len(tf.Name) > 1 && tf.Flags&(FlagBlock|FlagEnum) == 0 { + panic(fmt.Sprintf("river: field names with `.` may only be used by blocks or enums (found at %s)", printPathToField(ty, tf.Index))) + } + + if tf.Flags&FlagEnum != 0 { + if err := validateEnum(field); err != nil { + panic(err) + } + } + + if tf.Flags&FlagLabel != 0 { + if fullName != "" { + panic(fmt.Sprintf("river: label field at %s must not have a name", printPathToField(ty, tf.Index))) + } + if field.Type.Kind() != reflect.String { + panic(fmt.Sprintf("river: label field at %s must be a string", printPathToField(ty, tf.Index))) + } + + if usedLabelField != nil { + panic(fmt.Sprintf("river: label field already used by %s", printPathToField(ty, tf.Index))) + } + usedLabelField = tf.Index + } + + if tf.Flags&FlagSquash != 0 { + if fullName != "" { + panic(fmt.Sprintf("river: squash field at %s must not have a name", printPathToField(ty, tf.Index))) + } + + innerType := deferenceType(field.Type) + + switch { + case isStructType(innerType): // Squashed struct + // Get the inner fields from the squashed struct and append each of them. + // The index of the squashed field is prepended to the index of the inner + // struct. 
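+ // For example (see TestSquash in this package), a squashed field at index
+ // 1 whose inner struct declares fields at indexes 0 and 1 produces outer
+ // fields with indexes {1, 0} and {1, 1}.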
+ innerFields := Get(deferenceType(field.Type))
+ for _, innerField := range innerFields {
+ fields = append(fields, Field{
+ Name: innerField.Name,
+ Index: append(field.Index, innerField.Index...),
+ Flags: innerField.Flags,
+ })
+ }
+
+ default:
+ panic(fmt.Sprintf("rivertags: squash field requires struct, got %s", innerType))
+ }
+
+ continue
+ }
+
+ if fullName == "" && tf.Flags&(FlagLabel|FlagSquash) == 0 /* (e.g., *not* a label or squash) */ {
+ panic(fmt.Sprintf("river: non-empty field name required at %s", printPathToField(ty, tf.Index)))
+ }
+
+ fields = append(fields, tf)
+ }
+
+ return fields
+}
+
+func parseFlags(input string) (f Flags, ok bool) {
+ switch input {
+ case "attr":
+ f |= FlagAttr
+ case "attr,optional":
+ f |= FlagAttr | FlagOptional
+ case "block":
+ f |= FlagBlock
+ case "block,optional":
+ f |= FlagBlock | FlagOptional
+ case "enum":
+ f |= FlagEnum
+ case "enum,optional":
+ f |= FlagEnum | FlagOptional
+ case "label":
+ f |= FlagLabel
+ case "squash":
+ f |= FlagSquash
+ default:
+ return
+ }
+
+ return f, true
+}
+
+func printPathToField(structTy reflect.Type, path []int) string {
+ var sb strings.Builder
+
+ sb.WriteString(structTy.String())
+ sb.WriteString(".")
+
+ cur := structTy
+ for i, elem := range path {
+ sb.WriteString(cur.Field(elem).Name)
+
+ if i+1 < len(path) {
+ sb.WriteString(".")
+ }
+
+ cur = cur.Field(elem).Type
+ }
+
+ return sb.String()
+}
+
+func deferenceType(ty reflect.Type) reflect.Type {
+ for ty.Kind() == reflect.Pointer {
+ ty = ty.Elem()
+ }
+ return ty
+}
+
+func isStructType(ty reflect.Type) bool {
+ return ty.Kind() == reflect.Struct
+}
+
+// validateEnum ensures that an enum field is valid. Valid enum fields are
+// slices of structs containing nothing but non-slice blocks.
+func validateEnum(field reflect.StructField) error {
+ kind := field.Type.Kind()
+ if kind != reflect.Slice && kind != reflect.Array {
+ return fmt.Errorf("enum fields can only be slices or arrays")
+ }
+
+ elementType := deferenceType(field.Type.Elem())
+ if elementType.Kind() != reflect.Struct {
+ return fmt.Errorf("enum fields can only be a slice or array of structs")
+ }
+
+ enumElementFields := Get(elementType)
+ for _, field := range enumElementFields {
+ if !field.IsBlock() {
+ return fmt.Errorf("fields in an enum element may only be blocks, got " + field.Flags.String())
+ }
+
+ fieldType := deferenceType(elementType.FieldByIndex(field.Index).Type)
+ if fieldType.Kind() != reflect.Struct {
+ return fmt.Errorf("blocks in an enum element may only be structs, got " + fieldType.Kind().String())
+ }
+ }
+
+ return nil
+}
diff --git a/syntax/internal/rivertags/rivertags_test.go b/syntax/internal/rivertags/rivertags_test.go
new file mode 100644
index 0000000000..43370b33b4
--- /dev/null
+++ b/syntax/internal/rivertags/rivertags_test.go
@@ -0,0 +1,182 @@
+package rivertags_test
+
+import (
+ "reflect"
+ "testing"
+
+ "github.com/grafana/river/internal/rivertags"
+ "github.com/stretchr/testify/assert"
+ "github.com/stretchr/testify/require"
+)
+
+func Test_Get(t *testing.T) {
+ type Struct struct {
+ IgnoreMe bool
+
+ ReqAttr string `river:"req_attr,attr"`
+ OptAttr string `river:"opt_attr,attr,optional"`
+ ReqBlock struct{} `river:"req_block,block"`
+ OptBlock struct{} `river:"opt_block,block,optional"`
+ ReqEnum []struct{} `river:"req_enum,enum"`
+ OptEnum []struct{} `river:"opt_enum,enum,optional"`
+ Label string `river:",label"`
+ }
+
+ fs := rivertags.Get(reflect.TypeOf(Struct{}))
+
+ expect := []rivertags.Field{
+ {[]string{"req_attr"}, []int{1},
rivertags.FlagAttr}, + {[]string{"opt_attr"}, []int{2}, rivertags.FlagAttr | rivertags.FlagOptional}, + {[]string{"req_block"}, []int{3}, rivertags.FlagBlock}, + {[]string{"opt_block"}, []int{4}, rivertags.FlagBlock | rivertags.FlagOptional}, + {[]string{"req_enum"}, []int{5}, rivertags.FlagEnum}, + {[]string{"opt_enum"}, []int{6}, rivertags.FlagEnum | rivertags.FlagOptional}, + {[]string{""}, []int{7}, rivertags.FlagLabel}, + } + + require.Equal(t, expect, fs) +} + +func TestEmbedded(t *testing.T) { + type InnerStruct struct { + InnerField1 string `river:"inner_field_1,attr"` + InnerField2 string `river:"inner_field_2,attr"` + } + + type Struct struct { + Field1 string `river:"parent_field_1,attr"` + InnerStruct + Field2 string `river:"parent_field_2,attr"` + } + require.PanicsWithValue(t, "river: anonymous fields not supported rivertags_test.Struct.InnerStruct", func() { rivertags.Get(reflect.TypeOf(Struct{})) }) +} + +func TestSquash(t *testing.T) { + type InnerStruct struct { + InnerField1 string `river:"inner_field_1,attr"` + InnerField2 string `river:"inner_field_2,attr"` + } + + type Struct struct { + Field1 string `river:"parent_field_1,attr"` + Inner InnerStruct `river:",squash"` + Field2 string `river:"parent_field_2,attr"` + } + + type StructWithPointer struct { + Field1 string `river:"parent_field_1,attr"` + Inner *InnerStruct `river:",squash"` + Field2 string `river:"parent_field_2,attr"` + } + + expect := []rivertags.Field{ + { + Name: []string{"parent_field_1"}, + Index: []int{0}, + Flags: rivertags.FlagAttr, + }, + { + Name: []string{"inner_field_1"}, + Index: []int{1, 0}, + Flags: rivertags.FlagAttr, + }, + { + Name: []string{"inner_field_2"}, + Index: []int{1, 1}, + Flags: rivertags.FlagAttr, + }, + { + Name: []string{"parent_field_2"}, + Index: []int{2}, + Flags: rivertags.FlagAttr, + }, + } + + structActual := rivertags.Get(reflect.TypeOf(Struct{})) + assert.Equal(t, expect, structActual) + + structPointerActual := rivertags.Get(reflect.TypeOf(StructWithPointer{})) + assert.Equal(t, expect, structPointerActual) +} + +func TestDeepSquash(t *testing.T) { + type Inner2Struct struct { + InnerField1 string `river:"inner_field_1,attr"` + InnerField2 string `river:"inner_field_2,attr"` + } + + type InnerStruct struct { + Inner2Struct Inner2Struct `river:",squash"` + } + + type Struct struct { + Inner InnerStruct `river:",squash"` + } + + expect := []rivertags.Field{ + { + Name: []string{"inner_field_1"}, + Index: []int{0, 0, 0}, + Flags: rivertags.FlagAttr, + }, + { + Name: []string{"inner_field_2"}, + Index: []int{0, 0, 1}, + Flags: rivertags.FlagAttr, + }, + } + + structActual := rivertags.Get(reflect.TypeOf(Struct{})) + assert.Equal(t, expect, structActual) +} + +func Test_Get_Panics(t *testing.T) { + expectPanic := func(t *testing.T, expect string, v interface{}) { + t.Helper() + require.PanicsWithValue(t, expect, func() { + _ = rivertags.Get(reflect.TypeOf(v)) + }) + } + + t.Run("Tagged fields must be exported", func(t *testing.T) { + type Struct struct { + attr string `river:"field,attr"` // nolint:unused //nolint:rivertags + } + expect := `river: river tag found on unexported field at rivertags_test.Struct.attr` + expectPanic(t, expect, Struct{}) + }) + + t.Run("Options are required", func(t *testing.T) { + type Struct struct { + Attr string `river:"field"` //nolint:rivertags + } + expect := `river: field rivertags_test.Struct.Attr tag is missing options` + expectPanic(t, expect, Struct{}) + }) + + t.Run("Field names must be unique", func(t *testing.T) { + type Struct 
struct { + Attr string `river:"field1,attr"` + Block string `river:"field1,block,optional"` //nolint:rivertags + } + expect := `river: field name field1 already used by rivertags_test.Struct.Attr` + expectPanic(t, expect, Struct{}) + }) + + t.Run("Name is required for non-label field", func(t *testing.T) { + type Struct struct { + Attr string `river:",attr"` //nolint:rivertags + } + expect := `river: non-empty field name required at rivertags_test.Struct.Attr` + expectPanic(t, expect, Struct{}) + }) + + t.Run("Only one label field may exist", func(t *testing.T) { + type Struct struct { + Label1 string `river:",label"` + Label2 string `river:",label"` + } + expect := `river: label field already used by rivertags_test.Struct.Label2` + expectPanic(t, expect, Struct{}) + }) +} diff --git a/syntax/internal/stdlib/constants.go b/syntax/internal/stdlib/constants.go new file mode 100644 index 0000000000..89525f855f --- /dev/null +++ b/syntax/internal/stdlib/constants.go @@ -0,0 +1,19 @@ +package stdlib + +import ( + "os" + "runtime" +) + +var constants = map[string]string{ + "hostname": "", // Initialized via init function + "os": runtime.GOOS, + "arch": runtime.GOARCH, +} + +func init() { + hostname, err := os.Hostname() + if err == nil { + constants["hostname"] = hostname + } +} diff --git a/syntax/internal/stdlib/stdlib.go b/syntax/internal/stdlib/stdlib.go new file mode 100644 index 0000000000..e73b950af8 --- /dev/null +++ b/syntax/internal/stdlib/stdlib.go @@ -0,0 +1,132 @@ +// Package stdlib contains standard library functions exposed to River configs. +package stdlib + +import ( + "encoding/json" + "fmt" + "os" + "strings" + + "github.com/grafana/river/internal/value" + "github.com/grafana/river/rivertypes" + "github.com/ohler55/ojg/jp" + "github.com/ohler55/ojg/oj" +) + +// Identifiers holds a list of stdlib identifiers by name. All interface{} +// values are River-compatible values. +// +// Function identifiers are Go functions with exactly one non-error return +// value, with an optionally supported error return value as the second return +// value. +var Identifiers = map[string]interface{}{ + // See constants.go for the definition. + "constants": constants, + + "env": os.Getenv, + + "nonsensitive": func(secret rivertypes.Secret) string { + return string(secret) + }, + + // concat is implemented as a raw function so it can bypass allocations + // converting arguments into []interface{}. concat is optimized to allow it + // to perform well when it is in the hot path for combining targets from many + // other blocks. + "concat": value.RawFunction(func(funcValue value.Value, args ...value.Value) (value.Value, error) { + if len(args) == 0 { + return value.Array(), nil + } + + // finalSize is the final size of the resulting concatenated array. We type + // check our arguments while computing what finalSize will be. + var finalSize int + for i, arg := range args { + if arg.Type() != value.TypeArray { + return value.Null, value.ArgError{ + Function: funcValue, + Argument: arg, + Index: i, + Inner: value.TypeError{ + Value: arg, + Expected: value.TypeArray, + }, + } + } + + finalSize += arg.Len() + } + + // Optimization: if there's only one array, we can just return it directly. + // This is done *after* the previous loop to ensure that args[0] is a River + // array. 
+ if len(args) == 1 { + return args[0], nil + } + + raw := make([]value.Value, 0, finalSize) + for _, arg := range args { + for i := 0; i < arg.Len(); i++ { + raw = append(raw, arg.Index(i)) + } + } + + return value.Array(raw...), nil + }), + + "json_decode": func(in string) (interface{}, error) { + var res interface{} + err := json.Unmarshal([]byte(in), &res) + if err != nil { + return nil, err + } + return res, nil + }, + + "json_path": func(jsonString string, path string) (interface{}, error) { + jsonPathExpr, err := jp.ParseString(path) + if err != nil { + return nil, err + } + + jsonExpr, err := oj.ParseString(jsonString) + if err != nil { + return nil, err + } + + return jsonPathExpr.Get(jsonExpr), nil + }, + + "coalesce": value.RawFunction(func(funcValue value.Value, args ...value.Value) (value.Value, error) { + if len(args) == 0 { + return value.Null, nil + } + + for _, arg := range args { + if arg.Type() == value.TypeNull { + continue + } + + if !arg.Reflect().IsZero() { + if argType := value.RiverType(arg.Reflect().Type()); (argType == value.TypeArray || argType == value.TypeObject) && arg.Len() == 0 { + continue + } + + return arg, nil + } + } + + return args[len(args)-1], nil + }), + + "format": fmt.Sprintf, + "join": strings.Join, + "replace": strings.ReplaceAll, + "split": strings.Split, + "to_lower": strings.ToLower, + "to_upper": strings.ToUpper, + "trim": strings.Trim, + "trim_prefix": strings.TrimPrefix, + "trim_suffix": strings.TrimSuffix, + "trim_space": strings.TrimSpace, +} diff --git a/syntax/internal/value/capsule.go b/syntax/internal/value/capsule.go new file mode 100644 index 0000000000..7a522ff337 --- /dev/null +++ b/syntax/internal/value/capsule.go @@ -0,0 +1,53 @@ +package value + +import ( + "fmt" +) + +// Capsule is a marker interface for Go values which forces a type to be +// represented as a River capsule. This is useful for types whose underlying +// value is not a capsule, such as: +// +// // Secret is a secret value. It would normally be a River string since the +// // underlying Go type is string, but it's a capsule since it implements +// // the Capsule interface. +// type Secret string +// +// func (s Secret) RiverCapsule() {} +// +// Extension interfaces are used to describe additional behaviors for Capsules. +// ConvertibleCapsule allows defining custom conversion rules to convert +// between other Go values. +type Capsule interface { + RiverCapsule() +} + +// ErrNoConversion is returned by implementations of ConvertibleCapsule to +// denote that a custom conversion from or to a specific type is unavailable. +var ErrNoConversion = fmt.Errorf("no custom capsule conversion available") + +// ConvertibleFromCapsule is a Capsule which supports custom conversion rules +// from any Go type which is not the same as the capsule type. +type ConvertibleFromCapsule interface { + Capsule + + // ConvertFrom should modify the ConvertibleCapsule value based on the value + // of src. + // + // ConvertFrom should return ErrNoConversion if no conversion is available + // from src. + ConvertFrom(src interface{}) error +} + +// ConvertibleIntoCapsule is a Capsule which supports custom conversion rules +// into any Go type which is not the same as the capsule type. +type ConvertibleIntoCapsule interface { + Capsule + + // ConvertInto should convert its value and store it into dst. dst will be a + // pointer to a value which ConvertInto is expected to update. + // + // ConvertInto should return ErrNoConversion if no conversion into dst is + // available. 
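+ //
+ // A minimal sketch of an implementation (hypothetical Secret type):
+ //
+ // func (s Secret) ConvertInto(dst interface{}) error {
+ // if p, ok := dst.(*string); ok {
+ // *p = string(s)
+ // return nil
+ // }
+ // return ErrNoConversion
+ // }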
+	ConvertInto(dst interface{}) error
+}
diff --git a/syntax/internal/value/decode.go b/syntax/internal/value/decode.go
new file mode 100644
index 0000000000..20df78eb6a
--- /dev/null
+++ b/syntax/internal/value/decode.go
@@ -0,0 +1,674 @@
+package value
+
+import (
+	"encoding"
+	"errors"
+	"fmt"
+	"math"
+	"reflect"
+	"time"
+
+	"github.com/grafana/river/internal/reflectutil"
+)
+
+// The Defaulter interface allows a type to implement default functionality
+// in River evaluation.
+//
+// Defaulter will be called only on block and body river types.
+//
+// When using nested blocks, the wrapping type must also implement
+// Defaulter to propagate the defaults of the wrapped type. Otherwise,
+// defaults used for the wrapped type become inconsistent:
+//
+// - If the wrapped block is NOT defined in the River config, the wrapping
+//   type's defaults are used.
+// - If the wrapped block IS defined in the River config, the wrapped type's
+//   defaults are used.
+type Defaulter interface {
+	// SetToDefault is called when evaluating a block or body to set the value
+	// to its defaults.
+	SetToDefault()
+}
+
+// Unmarshaler is a custom type which can be used to hook into the decoder.
+type Unmarshaler interface {
+	// UnmarshalRiver is called when decoding a value. f should be invoked to
+	// continue decoding with a value to decode into.
+	UnmarshalRiver(f func(v interface{}) error) error
+}
+
+// The Validator interface allows a type to implement validation functionality
+// in River evaluation.
+type Validator interface {
+	// Validate is called when evaluating a block or body to enforce that the
+	// value is valid.
+	Validate() error
+}
+
+// Decode assigns a Value val to a Go pointer target. Pointers will be
+// allocated as necessary when decoding.
+//
+// As a performance optimization, the underlying Go value of val will be
+// assigned directly to target if the Go types match. This means that pointers,
+// slices, and maps will be passed by reference. Callers should take care not
+// to modify any Values after decoding, unless it is expected by the contract
+// of the type (i.e., when the type exposes a goroutine-safe API). In other
+// cases, new maps and slices will be allocated as necessary. Call DecodeCopy
+// to make a copy of val instead.
+//
+// When a direct assignment is not done, Decode first checks to see if target
+// implements the Unmarshaler or encoding.TextUnmarshaler interface, invoking
+// methods as appropriate. It will also use time.ParseDuration if target is
+// *time.Duration.
+//
+// Next, Decode will attempt to convert val to the type expected by target for
+// assignment. If val or target implement ConvertibleCapsule, conversion
+// between values will be attempted by calling ConvertFrom and ConvertInto as
+// appropriate. If val cannot be converted, an error is returned.
+//
+// River null values will decode into a nil Go pointer or the zero value for
+// the non-pointer type.
+//
+// Decode will panic if target is not a pointer.
+func Decode(val Value, target interface{}) error {
+	rt := reflect.ValueOf(target)
+	if rt.Kind() != reflect.Pointer {
+		panic("river/value: Decode called with non-pointer value")
+	}
+
+	var d decoder
+	return d.decode(val, rt)
+}
+
+// DecodeCopy is like Decode but a deep copy of val is always made.
+//
+// Unlike Decode, DecodeCopy will always invoke Unmarshaler and
+// encoding.TextUnmarshaler interfaces (if implemented by target).
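+//
+// A minimal usage sketch (hypothetical values):
+//
+//	orig := []int{1, 2, 3}
+//	var dst []int
+//	_ = DecodeCopy(Encode(orig), &dst)
+//	// dst is a deep copy; mutating dst[0] leaves orig untouched.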
+func DecodeCopy(val Value, target interface{}) error {
+	rt := reflect.ValueOf(target)
+	if rt.Kind() != reflect.Pointer {
+		panic("river/value: DecodeCopy called with non-pointer value")
+	}
+
+	d := decoder{makeCopy: true}
+	return d.decode(val, rt)
+}
+
+type decoder struct {
+	makeCopy bool
+}
+
+func (d *decoder) decode(val Value, into reflect.Value) (err error) {
+	// If everything has decoded successfully, run Validate if implemented.
+	defer func() {
+		if err == nil {
+			if into.CanAddr() && into.Addr().Type().Implements(goRiverValidator) {
+				err = into.Addr().Interface().(Validator).Validate()
+			} else if into.Type().Implements(goRiverValidator) {
+				err = into.Interface().(Validator).Validate()
+			}
+		}
+	}()
+
+	// Store the raw value from val and try to address it so we can do underlying
+	// type match assignment.
+	rawValue := val.rv
+	if rawValue.CanAddr() {
+		rawValue = rawValue.Addr()
+	}
+
+	// Fully dereference into and allocate pointers as necessary.
+	for into.Kind() == reflect.Pointer {
+		// Check for direct assignments before allocating pointers and dereferencing.
+		// This preserves pointer addresses when decoding an *int into an *int.
+		switch {
+		case into.CanSet() && val.Type() == TypeNull:
+			into.Set(reflect.Zero(into.Type()))
+			return nil
+		case into.CanSet() && d.canDirectlyAssign(rawValue.Type(), into.Type()):
+			into.Set(rawValue)
+			return nil
+		case into.CanSet() && d.canDirectlyAssign(val.rv.Type(), into.Type()):
+			into.Set(val.rv)
+			return nil
+		}
+
+		if into.IsNil() {
+			into.Set(reflect.New(into.Type().Elem()))
+		}
+		into = into.Elem()
+	}
+
+	// We need to perform the same switch statement as above after the loop to
+	// check for direct assignment one more time on the fully dereferenced types.
+	//
+	// NOTE(rfratto): we skip the rawValue assignment check since that's meant
+	// for assigning pointers, and into is never a pointer when we reach here.
+	switch {
+	case into.CanSet() && val.Type() == TypeNull:
+		into.Set(reflect.Zero(into.Type()))
+		return nil
+	case into.CanSet() && d.canDirectlyAssign(val.rv.Type(), into.Type()):
+		into.Set(val.rv)
+		return nil
+	}
+
+	// Special decoding rules:
+	//
+	// 1. If into is an interface{}, go through decodeAny so it gets assigned
+	//    predictable types.
+	// 2. If into implements a supported interface, use the interface for
+	//    decoding instead.
+	if into.Type() == goAny {
+		return d.decodeAny(val, into)
+	} else if ok, err := d.decodeFromInterface(val, into); ok {
+		return err
+	}
+
+	if into.CanAddr() && into.Addr().Type().Implements(goRiverDefaulter) {
+		into.Addr().Interface().(Defaulter).SetToDefault()
+	} else if into.Type().Implements(goRiverDefaulter) {
+		into.Interface().(Defaulter).SetToDefault()
+	}
+
+	targetType := RiverType(into.Type())
+
+	// Track a value to use for decoding. This value will be updated if
+	// conversion is necessary.
+	//
+	// NOTE(rfratto): we don't reassign to val here, since Go 1.18 thinks that
+	// means it escapes the heap. We need to create a local variable to avoid
+	// extra allocations.
+	convVal := val
+
+	// Convert the value.
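+	//
+	// For example (illustrative): decoding the River string "15" into a Go int
+	// goes through convertValue below, while []byte <-> string conversions are
+	// special-cased first.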
+	switch {
+	case val.rv.Type() == goByteSlice && into.Type() == goString: // []byte -> string
+		into.Set(val.rv.Convert(goString))
+		return nil
+	case val.rv.Type() == goString && into.Type() == goByteSlice: // string -> []byte
+		into.Set(val.rv.Convert(goByteSlice))
+		return nil
+	case convVal.Type() != targetType:
+		converted, err := tryCapsuleConvert(convVal, into, targetType)
+		if err != nil {
+			return err
+		} else if converted {
+			return nil
+		}
+
+		convVal, err = convertValue(convVal, targetType)
+		if err != nil {
+			return err
+		}
+	}
+
+	// Slowest case: recursive decoding. Once we've reached this point, we know
+	// that convVal.rv and into are compatible Go types.
+	switch convVal.Type() {
+	case TypeNumber:
+		into.Set(convertGoNumber(convVal.Number(), into.Type()))
+		return nil
+	case TypeString:
+		// Call convVal.Text() to get the final string value, since convVal.rv
+		// might not be a string.
+		into.Set(reflect.ValueOf(convVal.Text()))
+		return nil
+	case TypeBool:
+		into.Set(reflect.ValueOf(convVal.Bool()))
+		return nil
+	case TypeArray:
+		return d.decodeArray(convVal, into)
+	case TypeObject:
+		return d.decodeObject(convVal, into)
+	case TypeFunction:
+		// The Go types for two functions must be the same.
+		//
+		// TODO(rfratto): we may want to consider being more lax here, potentially
+		// creating an adapter between the two functions.
+		if convVal.rv.Type() == into.Type() {
+			into.Set(convVal.rv)
+			return nil
+		}
+
+		return Error{
+			Value: val,
+			Inner: fmt.Errorf("expected function(%s), got function(%s)", into.Type(), convVal.rv.Type()),
+		}
+	case TypeCapsule:
+		// The Go types for the capsules must be the same or able to be converted.
+		if convVal.rv.Type() == into.Type() {
+			into.Set(convVal.rv)
+			return nil
+		}
+
+		converted, err := tryCapsuleConvert(convVal, into, targetType)
+		if err != nil {
+			return err
+		} else if converted {
+			return nil
+		}
+
+		// TODO(rfratto): return a TypeError for this instead. TypeError isn't
+		// appropriate at the moment because it would just print "capsule", which
+		// doesn't contain all the information the user would want to know (e.g., a
+		// capsule of what inner type?).
+		return Error{
+			Value: val,
+			Inner: fmt.Errorf("expected capsule(%q), got %s", into.Type(), convVal.Describe()),
+		}
+	default:
+		panic("river/value: unexpected kind " + convVal.Type().String())
+	}
+}
+
+// canDirectlyAssign returns true if the `from` type can be directly assigned
+// to the `into` type. This always returns false if the decoder is set to make
+// copies or if into contains an interface{} type anywhere in its type
+// definition, to allow for decoding interface{} values into a set of known
+// types.
+func (d *decoder) canDirectlyAssign(from reflect.Type, into reflect.Type) bool {
+	if d.makeCopy {
+		return false
+	}
+	if from != into {
+		return false
+	}
+	return !containsAny(into)
+}
+
+// containsAny recursively traverses through into, returning true if it
+// contains an interface{} value anywhere in its structure.
+func containsAny(into reflect.Type) bool {
+	// TODO(rfratto): cache result of this function?
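+	//
+	// For example (illustrative): containsAny reports true for
+	// map[string]interface{} and []map[string]interface{}, but false for
+	// map[string]string.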
+ + if into == goAny { + return true + } + + switch into.Kind() { + case reflect.Array, reflect.Pointer, reflect.Slice: + return containsAny(into.Elem()) + case reflect.Map: + if into.Key() == goString { + return containsAny(into.Elem()) + } + return false + + case reflect.Struct: + for i := 0; i < into.NumField(); i++ { + if containsAny(into.Field(i).Type) { + return true + } + } + return false + + default: + // Other kinds are not River types where the decodeAny check applies. + return false + } +} + +func (d *decoder) decodeFromInterface(val Value, into reflect.Value) (ok bool, err error) { + // into may only implement interface types for a pointer receiver, so we want + // to address into if possible. + if into.CanAddr() { + into = into.Addr() + } + + switch { + case into.Type() == goDurationPtr: + var s string + err := d.decode(val, reflect.ValueOf(&s)) + if err != nil { + return true, err + } + dur, err := time.ParseDuration(s) + if err != nil { + return true, Error{Value: val, Inner: err} + } + *into.Interface().(*time.Duration) = dur + return true, nil + + case into.Type().Implements(goRiverDecoder): + err := into.Interface().(Unmarshaler).UnmarshalRiver(func(v interface{}) error { + return d.decode(val, reflect.ValueOf(v)) + }) + if err != nil { + // TODO(rfratto): we need to detect if error is one of the error types + // from this package and only wrap it in an Error if it isn't. + return true, Error{Value: val, Inner: err} + } + return true, nil + + case into.Type().Implements(goTextUnmarshaler): + var s string + err := d.decode(val, reflect.ValueOf(&s)) + if err != nil { + return true, err + } + err = into.Interface().(encoding.TextUnmarshaler).UnmarshalText([]byte(s)) + if err != nil { + return true, Error{Value: val, Inner: err} + } + return true, nil + } + + return false, nil +} + +func tryCapsuleConvert(from Value, into reflect.Value, intoType Type) (ok bool, err error) { + // Check to see if we can use capsule conversion. + if from.Type() == TypeCapsule { + cc, ok := from.Interface().(ConvertibleIntoCapsule) + if ok { + // It's always possible to Addr the reflect.Value below since we expect + // it to be a settable non-pointer value. + err := cc.ConvertInto(into.Addr().Interface()) + if err == nil { + return true, nil + } else if err != nil && !errors.Is(err, ErrNoConversion) { + return false, Error{Value: from, Inner: err} + } + } + } + + if intoType == TypeCapsule { + cc, ok := into.Addr().Interface().(ConvertibleFromCapsule) + if ok { + err := cc.ConvertFrom(from.Interface()) + if err == nil { + return true, nil + } else if err != nil && !errors.Is(err, ErrNoConversion) { + return false, Error{Value: from, Inner: err} + } + } + } + + // Last attempt: allow converting two capsules if the Go types are compatible + // and the into kind is an interface. + // + // TODO(rfratto): we may consider expanding this to allowing conversion to + // any compatible Go type in the future (not just interfaces). + if from.Type() == TypeCapsule && intoType == TypeCapsule && into.Kind() == reflect.Interface { + // We try to convert a pointer to from first to avoid making unnecessary + // copies. + if from.Reflect().CanAddr() && from.Reflect().Addr().CanConvert(into.Type()) { + val := from.Reflect().Addr().Convert(into.Type()) + into.Set(val) + return true, nil + } else if from.Reflect().CanConvert(into.Type()) { + val := from.Reflect().Convert(into.Type()) + into.Set(val) + return true, nil + } + } + + return false, nil +} + +// decodeAny is invoked by decode when into is an interface{}. 
We assign the +// interface{} a known type based on the River value being decoded: +// +// Null values: nil +// Number values: float64, int, int64, or uint64. +// If the underlying type is a float, always decode to a float64. +// For non-floats the order of preference is int -> int64 -> uint64. +// Arrays: []interface{} +// Objects: map[string]interface{} +// Bool: bool +// String: string +// Function: Passthrough of the underlying function value +// Capsule: Passthrough of the underlying capsule value +// +// In the cases where we do not pass through the underlying value, we create a +// value of that type, recursively call decode to populate that new value, and +// then store that value into the interface{}. +func (d *decoder) decodeAny(val Value, into reflect.Value) error { + var ptr reflect.Value + + switch val.Type() { + case TypeNull: + into.Set(reflect.Zero(into.Type())) + return nil + + case TypeNumber: + + switch val.Number().Kind() { + case NumberKindFloat: + var v float64 + ptr = reflect.ValueOf(&v) + case NumberKindUint: + uint64Val := val.Uint() + if uint64Val <= math.MaxInt { + var v int + ptr = reflect.ValueOf(&v) + } else if uint64Val <= math.MaxInt64 { + var v int64 + ptr = reflect.ValueOf(&v) + } else { + var v uint64 + ptr = reflect.ValueOf(&v) + } + case NumberKindInt: + int64Val := val.Int() + if math.MinInt <= int64Val && int64Val <= math.MaxInt { + var v int + ptr = reflect.ValueOf(&v) + } else { + var v int64 + ptr = reflect.ValueOf(&v) + } + + default: + panic("river/value: unreachable") + } + + case TypeArray: + var v []interface{} + ptr = reflect.ValueOf(&v) + + case TypeObject: + var v map[string]interface{} + ptr = reflect.ValueOf(&v) + + case TypeBool: + var v bool + ptr = reflect.ValueOf(&v) + + case TypeString: + var v string + ptr = reflect.ValueOf(&v) + + case TypeFunction, TypeCapsule: + // Functions and capsules must be directly assigned since there's no + // "generic" representation for either. + // + // We retain the pointer if we were given a pointer. + + if val.rv.CanAddr() { + into.Set(val.rv.Addr()) + return nil + } + + into.Set(val.rv) + return nil + + default: + panic("river/value: unreachable") + } + + if err := d.decode(val, ptr); err != nil { + return err + } + into.Set(ptr.Elem()) + return nil +} + +func (d *decoder) decodeArray(val Value, rt reflect.Value) error { + switch rt.Kind() { + case reflect.Slice: + res := reflect.MakeSlice(rt.Type(), val.Len(), val.Len()) + for i := 0; i < val.Len(); i++ { + // Decode the original elements into the new elements. 
+ if err := d.decode(val.Index(i), res.Index(i)); err != nil { + return ElementError{Value: val, Index: i, Inner: err} + } + } + rt.Set(res) + + case reflect.Array: + res := reflect.New(rt.Type()).Elem() + + if val.Len() != res.Len() { + return Error{ + Value: val, + Inner: fmt.Errorf("array must have exactly %d elements, got %d", res.Len(), val.Len()), + } + } + + for i := 0; i < val.Len(); i++ { + if err := d.decode(val.Index(i), res.Index(i)); err != nil { + return ElementError{Value: val, Index: i, Inner: err} + } + } + rt.Set(res) + + default: + panic(fmt.Sprintf("river/value: unexpected array type %s", val.rv.Kind())) + } + + return nil +} + +func (d *decoder) decodeObject(val Value, rt reflect.Value) error { + switch rt.Kind() { + case reflect.Struct: + targetTags := getCachedTags(rt.Type()) + return d.decodeObjectToStruct(val, rt, targetTags, false) + + case reflect.Slice, reflect.Array: // Slice of labeled blocks + keys := val.Keys() + + var res reflect.Value + + if rt.Kind() == reflect.Slice { + res = reflect.MakeSlice(rt.Type(), len(keys), len(keys)) + } else { // Array + res = reflect.New(rt.Type()).Elem() + + if res.Len() != len(keys) { + return Error{ + Value: val, + Inner: fmt.Errorf("object must have exactly %d keys, got %d", res.Len(), len(keys)), + } + } + } + + fields := getCachedTags(rt.Type().Elem()) + labelField, _ := fields.LabelField() + + for i, key := range keys { + // First decode the key into the label. + elem := res.Index(i) + reflectutil.GetOrAlloc(elem, labelField).Set(reflect.ValueOf(key)) + + // Now decode the inner object. + value, _ := val.Key(key) + if err := d.decodeObjectToStruct(value, elem, fields, true); err != nil { + return FieldError{Value: val, Field: key, Inner: err} + } + } + rt.Set(res) + + case reflect.Map: + if rt.Type().Key() != goString { + // Maps with non-string types are treated as capsules and can't be + // decoded from maps. + return TypeError{Value: val, Expected: RiverType(rt.Type())} + } + + res := reflect.MakeMapWithSize(rt.Type(), val.Len()) + + // Create a shared value to decode each element into. This will be zeroed + // out for each key, and then copied when setting the map index. + into := reflect.New(rt.Type().Elem()).Elem() + intoZero := reflect.Zero(into.Type()) + + for i, key := range val.Keys() { + // We ignore the ok value because we know it exists. + value, _ := val.Key(key) + + // Zero out the value if it was decoded in the previous loop. + if i > 0 { + into.Set(intoZero) + } + // Decode into our element. + if err := d.decode(value, into); err != nil { + return FieldError{Value: val, Field: key, Inner: err} + } + + // Then set the map index. + res.SetMapIndex(reflect.ValueOf(key), into) + } + + rt.Set(res) + + default: + panic(fmt.Sprintf("river/value: unexpected target type %s", rt.Kind())) + } + + return nil +} + +func (d *decoder) decodeObjectToStruct(val Value, rt reflect.Value, fields *objectFields, decodedLabel bool) error { + // TODO(rfratto): this needs to check for required keys being set + + for _, key := range val.Keys() { + // We ignore the ok value because we know it exists. + value, _ := val.Key(key) + + // Struct labels should be decoded first, since objects are wrapped in + // labels. If we have yet to decode the label, decode it now. + if lf, ok := fields.LabelField(); ok && !decodedLabel { + // Safety check: if the inner field isn't an object, there's something + // wrong here. It's unclear if a user can craft an expression that hits + // this case, but it's left in for safety. 
+			if value.Type() != TypeObject {
+				return FieldError{
+					Value: val,
+					Field: key,
+					Inner: TypeError{Value: value, Expected: TypeObject},
+				}
+			}
+
+			// Decode the key into the label.
+			reflectutil.GetOrAlloc(rt, lf).Set(reflect.ValueOf(key))
+
+			// ...and then decode the rest of the object.
+			if err := d.decodeObjectToStruct(value, rt, fields, true); err != nil {
+				return err
+			}
+			continue
+		}
+
+		switch fields.Has(key) {
+		case objectKeyTypeInvalid:
+			return MissingKeyError{Value: value, Missing: key}
+		case objectKeyTypeNestedField: // Block with multiple name fragments
+			next, _ := fields.NestedField(key)
+			// Recurse the call with the inner value.
+			if err := d.decodeObjectToStruct(value, rt, next, decodedLabel); err != nil {
+				return err
+			}
+		case objectKeyTypeField: // Single-name fragment
+			targetField, _ := fields.Field(key)
+			targetValue := reflectutil.GetOrAlloc(rt, targetField)
+
+			if err := d.decode(value, targetValue); err != nil {
+				return FieldError{Value: val, Field: key, Inner: err}
+			}
+		}
+	}
+
+	return nil
+}
diff --git a/syntax/internal/value/decode_benchmarks_test.go b/syntax/internal/value/decode_benchmarks_test.go
new file mode 100644
index 0000000000..9a33239329
--- /dev/null
+++ b/syntax/internal/value/decode_benchmarks_test.go
@@ -0,0 +1,90 @@
+package value_test
+
+import (
+	"fmt"
+	"testing"
+
+	"github.com/grafana/river/internal/value"
+)
+
+func BenchmarkObjectDecode(b *testing.B) {
+	b.StopTimer()
+
+	// Create a value with 20 keys.
+	source := make(map[string]string, 20)
+	for i := 0; i < 20; i++ {
+		var (
+			key   = fmt.Sprintf("key_%d", i+1)
+			value = fmt.Sprintf("value_%d", i+1)
+		)
+		source[key] = value
+	}
+
+	sourceVal := value.Encode(source)
+
+	b.StartTimer()
+	for i := 0; i < b.N; i++ {
+		var dst map[string]string
+		_ = value.Decode(sourceVal, &dst)
+	}
+}
+
+func BenchmarkObject(b *testing.B) {
+	b.Run("Non-capsule", func(b *testing.B) {
+		b.StopTimer()
+
+		vals := make(map[string]value.Value)
+		for i := 0; i < 20; i++ {
+			vals[fmt.Sprintf("%d", i)] = value.Int(int64(i))
+		}
+
+		b.StartTimer()
+		for i := 0; i < b.N; i++ {
+			_ = value.Object(vals)
+		}
+	})
+
+	b.Run("Capsule", func(b *testing.B) {
+		b.StopTimer()
+
+		vals := make(map[string]value.Value)
+		for i := 0; i < 20; i++ {
+			vals[fmt.Sprintf("%d", i)] = value.Encapsulate(make(chan int))
+		}
+
+		b.StartTimer()
+		for i := 0; i < b.N; i++ {
+			_ = value.Object(vals)
+		}
+	})
+}
+
+func BenchmarkArray(b *testing.B) {
+	b.Run("Non-capsule", func(b *testing.B) {
+		b.StopTimer()
+
+		var vals []value.Value
+		for i := 0; i < 20; i++ {
+			vals = append(vals, value.Int(int64(i)))
+		}
+
+		b.StartTimer()
+		for i := 0; i < b.N; i++ {
+			_ = value.Array(vals...)
+		}
+	})
+
+	b.Run("Capsule", func(b *testing.B) {
+		b.StopTimer()
+
+		var vals []value.Value
+		for i := 0; i < 20; i++ {
+			vals = append(vals, value.Encapsulate(make(chan int)))
+		}
+
+		b.StartTimer()
+		for i := 0; i < b.N; i++ {
+			_ = value.Array(vals...)
+ } + }) +} diff --git a/syntax/internal/value/decode_test.go b/syntax/internal/value/decode_test.go new file mode 100644 index 0000000000..5b84838bd0 --- /dev/null +++ b/syntax/internal/value/decode_test.go @@ -0,0 +1,761 @@ +package value_test + +import ( + "fmt" + "math" + "reflect" + "testing" + "time" + "unsafe" + + "github.com/grafana/river/internal/value" + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" +) + +func TestDecode_Numbers(t *testing.T) { + // There's a lot of values that can represent numbers, so we construct a + // matrix dynamically of all the combinations here. + vals := []interface{}{ + int(15), int8(15), int16(15), int32(15), int64(15), + uint(15), uint8(15), uint16(15), uint32(15), uint64(15), + float32(15), float64(15), + string("15"), // string holding a valid number (which can be converted to a number) + } + + for _, input := range vals { + for _, expect := range vals { + val := value.Encode(input) + + name := fmt.Sprintf( + "%s to %s", + reflect.TypeOf(input), + reflect.TypeOf(expect), + ) + + t.Run(name, func(t *testing.T) { + vPtr := reflect.New(reflect.TypeOf(expect)).Interface() + require.NoError(t, value.Decode(val, vPtr)) + + actual := reflect.ValueOf(vPtr).Elem().Interface() + require.Equal(t, expect, actual) + }) + } + } +} + +func TestDecode(t *testing.T) { + // Declare some types to use for testing. Person2 is used as a struct + // equivalent to Person, but with a different Go type to force casting. + type Person struct { + Name string `river:"name,attr"` + } + + type Person2 struct { + Name string `river:"name,attr"` + } + + tt := []struct { + input, expect interface{} + }{ + {nil, (*int)(nil)}, + + // Non-number primitives. + {string("Hello!"), string("Hello!")}, + {bool(true), bool(true)}, + + // Arrays + {[]int{1, 2, 3}, []int{1, 2, 3}}, + {[]int{1, 2, 3}, [...]int{1, 2, 3}}, + {[...]int{1, 2, 3}, []int{1, 2, 3}}, + {[...]int{1, 2, 3}, [...]int{1, 2, 3}}, + + // Maps + {map[string]int{"year": 2022}, map[string]uint{"year": 2022}}, + {map[string]string{"name": "John"}, map[string]string{"name": "John"}}, + {map[string]string{"name": "John"}, Person{Name: "John"}}, + {Person{Name: "John"}, map[string]string{"name": "John"}}, + {Person{Name: "John"}, Person{Name: "John"}}, + {Person{Name: "John"}, Person2{Name: "John"}}, + {Person2{Name: "John"}, Person{Name: "John"}}, + + // NOTE(rfratto): we don't test capsules or functions here because they're + // not comparable in the same way as we do the other tests. + // + // See TestDecode_Functions and TestDecode_Capsules for specific decoding + // tests of those types. + } + + for _, tc := range tt { + val := value.Encode(tc.input) + + name := fmt.Sprintf( + "%s (%s) to %s", + val.Type(), + reflect.TypeOf(tc.input), + reflect.TypeOf(tc.expect), + ) + + t.Run(name, func(t *testing.T) { + vPtr := reflect.New(reflect.TypeOf(tc.expect)).Interface() + require.NoError(t, value.Decode(val, vPtr)) + + actual := reflect.ValueOf(vPtr).Elem().Interface() + + require.Equal(t, tc.expect, actual) + }) + } +} + +// TestDecode_PreservePointer ensures that pointer addresses can be preserved +// when decoding. +func TestDecode_PreservePointer(t *testing.T) { + num := 5 + val := value.Encode(&num) + + var nump *int + require.NoError(t, value.Decode(val, &nump)) + require.Equal(t, unsafe.Pointer(nump), unsafe.Pointer(&num)) +} + +// TestDecode_PreserveMapReference ensures that map references can be preserved +// when decoding. 
+func TestDecode_PreserveMapReference(t *testing.T) {
+	m := make(map[string]string)
+	val := value.Encode(m)
+
+	var actual map[string]string
+	require.NoError(t, value.Decode(val, &actual))
+
+	// We can't check to see if the pointers of m and actual match, but we can
+	// modify m to see if actual is also modified.
+	m["foo"] = "bar"
+	require.Equal(t, "bar", actual["foo"])
+}
+
+// TestDecode_PreserveSliceReference ensures that slice references can be
+// preserved when decoding.
+func TestDecode_PreserveSliceReference(t *testing.T) {
+	s := make([]string, 3)
+	val := value.Encode(s)
+
+	var actual []string
+	require.NoError(t, value.Decode(val, &actual))
+
+	// We can't check to see if the pointers of s and actual match, but we can
+	// modify s to see if actual is also modified.
+	s[0] = "Hello, world!"
+	require.Equal(t, "Hello, world!", actual[0])
+}
+
+func TestDecode_Functions(t *testing.T) {
+	val := value.Encode(func() int { return 15 })
+
+	var f func() int
+	require.NoError(t, value.Decode(val, &f))
+	require.Equal(t, 15, f())
+}
+
+func TestDecode_Capsules(t *testing.T) {
+	expect := make(chan int, 5)
+
+	var actual chan int
+	require.NoError(t, value.Decode(value.Encode(expect), &actual))
+	require.Equal(t, expect, actual)
+}
+
+type ValueInterface interface{ SomeMethod() }
+
+type Value1 struct{ test string }
+
+func (c Value1) SomeMethod() {}
+
+// TestDecode_CapsuleInterface tests that we are able to decode when
+// the target `into` is an interface.
+func TestDecode_CapsuleInterface(t *testing.T) {
+	tt := []struct {
+		name     string
+		value    ValueInterface
+		expected ValueInterface
+	}{
+		{
+			name:     "Capsule to Capsule",
+			value:    Value1{test: "true"},
+			expected: Value1{test: "true"},
+		},
+		{
+			name:     "Capsule Pointer to Capsule",
+			value:    &Value1{test: "true"},
+			expected: &Value1{test: "true"},
+		},
+	}
+
+	for _, tc := range tt {
+		t.Run(tc.name, func(t *testing.T) {
+			var actual ValueInterface
+			require.NoError(t, value.Decode(value.Encode(tc.value), &actual))
+
+			// require.Same validates the memory address matches after Decode.
+			if reflect.TypeOf(tc.value).Kind() == reflect.Pointer {
+				require.Same(t, tc.value, actual)
+			}
+
+			// We use tc.expected to validate the properties of actual match the
+			// original tc.value properties (nothing has mutated them during the test).
+			require.Equal(t, tc.expected, actual)
+		})
+	}
+}
+
+// TestDecode_CapsulesError tests that we are unable to decode when
+// the target `into` is not an interface.
+func TestDecode_CapsulesError(t *testing.T) {
+	type Capsule1 struct{ test string }
+	type Capsule2 Capsule1
+
+	v := Capsule1{test: "true"}
+	actual := Capsule2{}
+
+	require.EqualError(t, value.Decode(value.Encode(v), &actual), `expected capsule("value_test.Capsule2"), got capsule("value_test.Capsule1")`)
+}
+
+// TestDecodeCopy_SliceCopy ensures that copies are made during decoding
+// instead of setting values directly.
+func TestDecodeCopy_SliceCopy(t *testing.T) {
+	orig := []int{1, 2, 3}
+
+	var res []int
+	require.NoError(t, value.DecodeCopy(value.Encode(orig), &res))
+
+	res[0] = 10
+	require.Equal(t, []int{1, 2, 3}, orig, "Original slice should not have been modified")
+}
+
+// TestDecode_ArrayCopy ensures that copies are made during decoding
+// instead of setting values directly.
+func TestDecode_ArrayCopy(t *testing.T) { + orig := [...]int{1, 2, 3} + + var res [3]int + require.NoError(t, value.DecodeCopy(value.Encode(orig), &res)) + + res[0] = 10 + require.Equal(t, [3]int{1, 2, 3}, orig, "Original array should not have been modified") +} + +func TestDecode_CustomTypes(t *testing.T) { + t.Run("object to Unmarshaler", func(t *testing.T) { + var actual customUnmarshaler + require.NoError(t, value.Decode(value.Object(nil), &actual)) + require.True(t, actual.UnmarshalCalled, "UnmarshalRiver was not invoked") + require.True(t, actual.DefaultCalled, "SetToDefault was not invoked") + require.True(t, actual.ValidateCalled, "Validate was not invoked") + }) + + t.Run("TextMarshaler to TextUnmarshaler", func(t *testing.T) { + now := time.Now() + + var actual time.Time + require.NoError(t, value.Decode(value.Encode(now), &actual)) + require.True(t, now.Equal(actual)) + }) + + t.Run("time.Duration to time.Duration", func(t *testing.T) { + dur := 15 * time.Second + + var actual time.Duration + require.NoError(t, value.Decode(value.Encode(dur), &actual)) + require.Equal(t, dur, actual) + }) + + t.Run("string to TextUnmarshaler", func(t *testing.T) { + now := time.Now() + nowBytes, _ := now.MarshalText() + + var actual time.Time + require.NoError(t, value.Decode(value.String(string(nowBytes)), &actual)) + + actualBytes, _ := actual.MarshalText() + require.Equal(t, nowBytes, actualBytes) + }) + + t.Run("string to time.Duration", func(t *testing.T) { + dur := 15 * time.Second + + var actual time.Duration + require.NoError(t, value.Decode(value.String(dur.String()), &actual)) + require.Equal(t, dur.String(), actual.String()) + }) +} + +type customUnmarshaler struct { + UnmarshalCalled bool `river:"unmarshal_called,attr,optional"` + DefaultCalled bool `river:"default_called,attr,optional"` + ValidateCalled bool `river:"validate_called,attr,optional"` +} + +func (cu *customUnmarshaler) UnmarshalRiver(f func(interface{}) error) error { + cu.UnmarshalCalled = true + return f((*customUnmarshalerTarget)(cu)) +} + +type customUnmarshalerTarget customUnmarshaler + +func (s *customUnmarshalerTarget) SetToDefault() { + s.DefaultCalled = true +} + +func (s *customUnmarshalerTarget) Validate() error { + s.ValidateCalled = true + return nil +} + +type textEnumType bool + +func (et *textEnumType) UnmarshalText(text []byte) error { + *et = false + + switch string(text) { + case "accepted_value": + *et = true + return nil + default: + return fmt.Errorf("unrecognized value %q", string(text)) + } +} + +func TestDecode_TextUnmarshaler(t *testing.T) { + t.Run("valid type and value", func(t *testing.T) { + var et textEnumType + require.NoError(t, value.Decode(value.String("accepted_value"), &et)) + require.Equal(t, textEnumType(true), et) + }) + + t.Run("invalid type", func(t *testing.T) { + var et textEnumType + err := value.Decode(value.Bool(true), &et) + require.EqualError(t, err, "expected string, got bool") + }) + + t.Run("invalid value", func(t *testing.T) { + var et textEnumType + err := value.Decode(value.String("bad_value"), &et) + require.EqualError(t, err, `unrecognized value "bad_value"`) + }) + + t.Run("unmarshaler nested in other value", func(t *testing.T) { + input := value.Array( + value.String("accepted_value"), + value.String("accepted_value"), + value.String("accepted_value"), + ) + + var ett []textEnumType + require.NoError(t, value.Decode(input, &ett)) + require.Equal(t, []textEnumType{true, true, true}, ett) + }) +} + +func TestDecode_ErrorChain(t *testing.T) { + type Target struct 
{ + Key struct { + Object struct { + Field1 []int `river:"field1,attr"` + } `river:"object,attr"` + } `river:"key,attr"` + } + + val := value.Object(map[string]value.Value{ + "key": value.Object(map[string]value.Value{ + "object": value.Object(map[string]value.Value{ + "field1": value.Array( + value.Int(15), + value.Int(30), + value.String("Hello, world!"), + ), + }), + }), + }) + + // NOTE(rfratto): strings of errors from the value package are fairly limited + // in the amount of information they show, since the value package doesn't + // have a great way to pretty-print the chain of errors. + // + // For example, with the error below, the message doesn't explain where the + // string is coming from, even though the error values hold that context. + // + // Callers consuming errors should print the error chain with extra context + // so it's more useful to users. + err := value.Decode(val, &Target{}) + expectErr := `expected number, got string` + require.EqualError(t, err, expectErr) +} + +type boolish int + +var _ value.ConvertibleFromCapsule = (*boolish)(nil) +var _ value.ConvertibleIntoCapsule = (boolish)(0) + +func (b boolish) RiverCapsule() {} + +func (b *boolish) ConvertFrom(src interface{}) error { + switch v := src.(type) { + case bool: + if v { + *b = 1 + } else { + *b = 0 + } + return nil + } + + return value.ErrNoConversion +} + +func (b boolish) ConvertInto(dst interface{}) error { + switch d := dst.(type) { + case *bool: + if b == 0 { + *d = false + } else { + *d = true + } + return nil + } + + return value.ErrNoConversion +} + +func TestDecode_CustomConvert(t *testing.T) { + t.Run("compatible type to custom", func(t *testing.T) { + var b boolish + err := value.Decode(value.Bool(true), &b) + require.NoError(t, err) + require.Equal(t, boolish(1), b) + }) + + t.Run("custom to compatible type", func(t *testing.T) { + var b bool + err := value.Decode(value.Encapsulate(boolish(10)), &b) + require.NoError(t, err) + require.Equal(t, true, b) + }) + + t.Run("incompatible type to custom", func(t *testing.T) { + var b boolish + err := value.Decode(value.String("true"), &b) + require.EqualError(t, err, "expected capsule, got string") + }) + + t.Run("custom to incompatible type", func(t *testing.T) { + src := boolish(10) + + var s string + err := value.Decode(value.Encapsulate(&src), &s) + require.EqualError(t, err, "expected string, got capsule") + }) +} + +func TestDecode_SquashedFields(t *testing.T) { + type InnerStruct struct { + InnerField1 string `river:"inner_field_1,attr,optional"` + InnerField2 string `river:"inner_field_2,attr,optional"` + } + + type OuterStruct struct { + OuterField1 string `river:"outer_field_1,attr,optional"` + Inner InnerStruct `river:",squash"` + OuterField2 string `river:"outer_field_2,attr,optional"` + } + + var ( + in = map[string]string{ + "outer_field_1": "value1", + "outer_field_2": "value2", + "inner_field_1": "value3", + "inner_field_2": "value4", + } + expect = OuterStruct{ + OuterField1: "value1", + Inner: InnerStruct{ + InnerField1: "value3", + InnerField2: "value4", + }, + OuterField2: "value2", + } + ) + + var out OuterStruct + err := value.Decode(value.Encode(in), &out) + require.NoError(t, err) + require.Equal(t, expect, out) +} + +func TestDecode_SquashedFields_Pointer(t *testing.T) { + type InnerStruct struct { + InnerField1 string `river:"inner_field_1,attr,optional"` + InnerField2 string `river:"inner_field_2,attr,optional"` + } + + type OuterStruct struct { + OuterField1 string `river:"outer_field_1,attr,optional"` + Inner *InnerStruct 
`river:",squash"` + OuterField2 string `river:"outer_field_2,attr,optional"` + } + + var ( + in = map[string]string{ + "outer_field_1": "value1", + "outer_field_2": "value2", + "inner_field_1": "value3", + "inner_field_2": "value4", + } + expect = OuterStruct{ + OuterField1: "value1", + Inner: &InnerStruct{ + InnerField1: "value3", + InnerField2: "value4", + }, + OuterField2: "value2", + } + ) + + var out OuterStruct + err := value.Decode(value.Encode(in), &out) + require.NoError(t, err) + require.Equal(t, expect, out) +} + +func TestDecode_Slice(t *testing.T) { + type Block struct { + Attr int `river:"attr,attr"` + } + + type Struct struct { + Blocks []Block `river:"block.a,block,optional"` + } + + var ( + in = map[string]interface{}{ + "block": map[string]interface{}{ + "a": []map[string]interface{}{ + {"attr": 1}, + {"attr": 2}, + {"attr": 3}, + {"attr": 4}, + }, + }, + } + expect = Struct{ + Blocks: []Block{ + {Attr: 1}, + {Attr: 2}, + {Attr: 3}, + {Attr: 4}, + }, + } + ) + + var out Struct + err := value.Decode(value.Encode(in), &out) + require.NoError(t, err) + require.Equal(t, expect, out) +} + +func TestDecode_SquashedSlice(t *testing.T) { + type Block struct { + Attr int `river:"attr,attr"` + } + + type InnerStruct struct { + BlockA Block `river:"a,block,optional"` + BlockB Block `river:"b,block,optional"` + BlockC Block `river:"c,block,optional"` + } + + type OuterStruct struct { + OuterField1 string `river:"outer_field_1,attr,optional"` + Inner []InnerStruct `river:"block,enum"` + OuterField2 string `river:"outer_field_2,attr,optional"` + } + + var ( + in = map[string]interface{}{ + "outer_field_1": "value1", + "outer_field_2": "value2", + + "block": []map[string]interface{}{ + {"a": map[string]interface{}{"attr": 1}}, + {"b": map[string]interface{}{"attr": 2}}, + {"c": map[string]interface{}{"attr": 3}}, + {"a": map[string]interface{}{"attr": 4}}, + }, + } + expect = OuterStruct{ + OuterField1: "value1", + OuterField2: "value2", + + Inner: []InnerStruct{ + {BlockA: Block{Attr: 1}}, + {BlockB: Block{Attr: 2}}, + {BlockC: Block{Attr: 3}}, + {BlockA: Block{Attr: 4}}, + }, + } + ) + + var out OuterStruct + err := value.Decode(value.Encode(in), &out) + require.NoError(t, err) + require.Equal(t, expect, out) +} + +func TestDecode_SquashedSlice_Pointer(t *testing.T) { + type Block struct { + Attr int `river:"attr,attr"` + } + + type InnerStruct struct { + BlockA *Block `river:"a,block,optional"` + BlockB *Block `river:"b,block,optional"` + BlockC *Block `river:"c,block,optional"` + } + + type OuterStruct struct { + OuterField1 string `river:"outer_field_1,attr,optional"` + Inner []InnerStruct `river:"block,enum"` + OuterField2 string `river:"outer_field_2,attr,optional"` + } + + var ( + in = map[string]interface{}{ + "outer_field_1": "value1", + "outer_field_2": "value2", + + "block": []map[string]interface{}{ + {"a": map[string]interface{}{"attr": 1}}, + {"b": map[string]interface{}{"attr": 2}}, + {"c": map[string]interface{}{"attr": 3}}, + {"a": map[string]interface{}{"attr": 4}}, + }, + } + expect = OuterStruct{ + OuterField1: "value1", + OuterField2: "value2", + + Inner: []InnerStruct{ + {BlockA: &Block{Attr: 1}}, + {BlockB: &Block{Attr: 2}}, + {BlockC: &Block{Attr: 3}}, + {BlockA: &Block{Attr: 4}}, + }, + } + ) + + var out OuterStruct + err := value.Decode(value.Encode(in), &out) + require.NoError(t, err) + require.Equal(t, expect, out) +} + +// TestDecode_KnownTypes_Any asserts that decoding River values into an +// any/interface{} results in known types. 
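+//
+// For example (illustrative): value.Encode(uint8(5)) decodes into an any as
+// int(5), while a uint64 larger than math.MaxInt64 stays a uint64.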
+func TestDecode_KnownTypes_Any(t *testing.T) { + tt := []struct { + input any + expect any + }{ + // expect "int" + {int(0), 0}, + {int(-1), -1}, + {int(15), 15}, + {int8(15), 15}, + {int16(15), 15}, + {int32(15), 15}, + {int64(15), 15}, + {uint(0), 0}, + {uint(15), 15}, + {uint8(15), 15}, + {uint16(15), 15}, + {uint32(15), 15}, + {uint64(15), 15}, + {int64(math.MinInt64), math.MinInt64}, + {int64(math.MaxInt64), math.MaxInt64}, + // expect "uint" + {uint64(math.MaxInt64 + 1), uint64(math.MaxInt64 + 1)}, + {uint64(math.MaxUint64), uint64(math.MaxUint64)}, + // expect "float" + {float32(2.5), float64(2.5)}, + {float64(2.5), float64(2.5)}, + {float64(math.MinInt64) - 10, float64(math.MinInt64) - 10}, + {float64(math.MaxInt64) + 10, float64(math.MaxInt64) + 10}, + + {bool(true), bool(true)}, + {string("Hello"), string("Hello")}, + + { + input: []int{1, 2, 3}, + expect: []any{1, 2, 3}, + }, + + { + input: map[string]int{"number": 15}, + expect: map[string]any{"number": 15}, + }, + { + input: struct { + Name string `river:"name,attr"` + }{Name: "John"}, + + expect: map[string]any{"name": "John"}, + }, + } + + t.Run("basic types", func(t *testing.T) { + for _, tc := range tt { + var actual any + err := value.Decode(value.Encode(tc.input), &actual) + + if assert.NoError(t, err) { + assert.Equal(t, tc.expect, actual, + "Expected %[1]v (%[1]T) to transcode to %[2]v (%[2]T)", tc.input, tc.expect) + } + } + }) + + t.Run("inside maps", func(t *testing.T) { + for _, tc := range tt { + input := map[string]any{ + "key": tc.input, + } + + var actual map[string]any + err := value.Decode(value.Encode(input), &actual) + + if assert.NoError(t, err) { + assert.Equal(t, tc.expect, actual["key"], + "Expected %[1]v (%[1]T) to transcode to %[2]v (%[2]T) inside a map", tc.input, tc.expect) + } + } + }) +} + +func TestRetainCapsulePointer(t *testing.T) { + capsuleVal := &capsule{} + + in := map[string]any{ + "foo": capsuleVal, + } + + var actual map[string]any + err := value.Decode(value.Encode(in), &actual) + require.NoError(t, err) + + expect := map[string]any{ + "foo": capsuleVal, + } + require.Equal(t, expect, actual) +} + +type capsule struct{} + +func (*capsule) RiverCapsule() {} diff --git a/syntax/internal/value/errors.go b/syntax/internal/value/errors.go new file mode 100644 index 0000000000..79f22378b3 --- /dev/null +++ b/syntax/internal/value/errors.go @@ -0,0 +1,107 @@ +package value + +import "fmt" + +// Error is used for reporting on a value-level error. It is the most general +// type of error for a value. +type Error struct { + Value Value + Inner error +} + +// TypeError is used for reporting on a value having an unexpected type. +type TypeError struct { + // Value which caused the error. + Value Value + Expected Type +} + +// Error returns the string form of the TypeError. +func (te TypeError) Error() string { + return fmt.Sprintf("expected %s, got %s", te.Expected, te.Value.Type()) +} + +// Error returns the message of the decode error. +func (de Error) Error() string { return de.Inner.Error() } + +// MissingKeyError is used for reporting that a value is missing a key. +type MissingKeyError struct { + Value Value + Missing string +} + +// Error returns the string form of the MissingKeyError. +func (mke MissingKeyError) Error() string { + return fmt.Sprintf("key %q does not exist", mke.Missing) +} + +// ElementError is used to report on an error inside of an array. 
+type ElementError struct { + Value Value // The Array value + Index int // The index of the element with the issue + Inner error // The error from the element +} + +// Error returns the text of the inner error. +func (ee ElementError) Error() string { return ee.Inner.Error() } + +// FieldError is used to report on an invalid field inside an object. +type FieldError struct { + Value Value // The Object value + Field string // The field name with the issue + Inner error // The error from the field +} + +// Error returns the text of the inner error. +func (fe FieldError) Error() string { return fe.Inner.Error() } + +// ArgError is used to report on an invalid argument to a function. +type ArgError struct { + Function Value + Argument Value + Index int + Inner error +} + +// Error returns the text of the inner error. +func (ae ArgError) Error() string { return ae.Inner.Error() } + +// WalkError walks err for all value-related errors in this package. +// WalkError returns false if err is not an error from this package. +func WalkError(err error, f func(err error)) bool { + var foundOne bool + + nextError := err + for nextError != nil { + switch ne := nextError.(type) { + case Error: + f(nextError) + nextError = ne.Inner + foundOne = true + case TypeError: + f(nextError) + nextError = nil + foundOne = true + case MissingKeyError: + f(nextError) + nextError = nil + foundOne = true + case ElementError: + f(nextError) + nextError = ne.Inner + foundOne = true + case FieldError: + f(nextError) + nextError = ne.Inner + foundOne = true + case ArgError: + f(nextError) + nextError = ne.Inner + foundOne = true + default: + nextError = nil + } + } + + return foundOne +} diff --git a/syntax/internal/value/number_value.go b/syntax/internal/value/number_value.go new file mode 100644 index 0000000000..c40fbbc802 --- /dev/null +++ b/syntax/internal/value/number_value.go @@ -0,0 +1,135 @@ +package value + +import ( + "math" + "reflect" + "strconv" +) + +var ( + nativeIntBits = reflect.TypeOf(int(0)).Bits() + nativeUintBits = reflect.TypeOf(uint(0)).Bits() +) + +// NumberKind categorizes a type of Go number. +type NumberKind uint8 + +const ( + // NumberKindInt represents an int-like type (e.g., int, int8, etc.). + NumberKindInt NumberKind = iota + // NumberKindUint represents a uint-like type (e.g., uint, uint8, etc.). + NumberKindUint + // NumberKindFloat represents both float32 and float64. + NumberKindFloat +) + +// makeNumberKind converts a Go kind to a River kind. +func makeNumberKind(k reflect.Kind) NumberKind { + switch k { + case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64: + return NumberKindInt + case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64: + return NumberKindUint + case reflect.Float32, reflect.Float64: + return NumberKindFloat + default: + panic("river/value: makeNumberKind called with unsupported Kind value") + } +} + +// Number is a generic representation of Go numbers. It is intended to be +// created on the fly for numerical operations when the real number type is not +// known. +type Number struct { + // Value holds the raw data for the number. Note that for numberKindFloat, + // value is the raw bits of the float64 and must be converted back to a + // float64 before it can be used. 
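+	//
+	// For example (illustrative): a Number built from the float 1.5 stores
+	// math.Float64bits(1.5) here, and Float() recovers it with
+	// math.Float64frombits.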
+ value uint64 + + bits uint8 // 8, 16, 32, 64, used for overflow checking + k NumberKind // int, uint, float +} + +func newNumberValue(v reflect.Value) Number { + var ( + val uint64 + bits int + nk NumberKind + ) + + switch v.Kind() { + case reflect.Int: + val, bits, nk = uint64(v.Int()), nativeIntBits, NumberKindInt + case reflect.Int8: + val, bits, nk = uint64(v.Int()), 8, NumberKindInt + case reflect.Int16: + val, bits, nk = uint64(v.Int()), 16, NumberKindInt + case reflect.Int32: + val, bits, nk = uint64(v.Int()), 32, NumberKindInt + case reflect.Int64: + val, bits, nk = uint64(v.Int()), 64, NumberKindInt + case reflect.Uint: + val, bits, nk = v.Uint(), nativeUintBits, NumberKindUint + case reflect.Uint8: + val, bits, nk = v.Uint(), 8, NumberKindUint + case reflect.Uint16: + val, bits, nk = v.Uint(), 16, NumberKindUint + case reflect.Uint32: + val, bits, nk = v.Uint(), 32, NumberKindUint + case reflect.Uint64: + val, bits, nk = v.Uint(), 64, NumberKindUint + case reflect.Float32: + val, bits, nk = math.Float64bits(v.Float()), 32, NumberKindFloat + case reflect.Float64: + val, bits, nk = math.Float64bits(v.Float()), 64, NumberKindFloat + default: + panic("river/value: unrecognized Go number type " + v.Kind().String()) + } + + return Number{val, uint8(bits), nk} +} + +// Kind returns the Number's NumberKind. +func (nv Number) Kind() NumberKind { return nv.k } + +// Int converts the Number into an int64. +func (nv Number) Int() int64 { + if nv.k == NumberKindFloat { + return int64(math.Float64frombits(nv.value)) + } + return int64(nv.value) +} + +// Uint converts the Number into a uint64. +func (nv Number) Uint() uint64 { + if nv.k == NumberKindFloat { + return uint64(math.Float64frombits(nv.value)) + } + return nv.value +} + +// Float converts the Number into a float64. +func (nv Number) Float() float64 { + switch nv.k { + case NumberKindInt: + // Convert nv.value to an int64 before converting to a float64 so the sign + // flag gets handled correctly. + return float64(int64(nv.value)) + case NumberKindFloat: + return math.Float64frombits(nv.value) + } + return float64(nv.value) +} + +// ToString converts the Number to a string. +func (nv Number) ToString() string { + switch nv.k { + case NumberKindUint: + return strconv.FormatUint(nv.value, 10) + case NumberKindInt: + return strconv.FormatInt(int64(nv.value), 10) + case NumberKindFloat: + return strconv.FormatFloat(math.Float64frombits(nv.value), 'f', -1, 64) + } + panic("river/value: unreachable") +} diff --git a/syntax/internal/value/raw_function.go b/syntax/internal/value/raw_function.go new file mode 100644 index 0000000000..bf25da916d --- /dev/null +++ b/syntax/internal/value/raw_function.go @@ -0,0 +1,9 @@ +package value + +// RawFunction allows creating function implementations using raw River values. +// This is useful for functions which wish to operate over dynamic types while +// avoiding decoding to interface{} for performance reasons. +// +// The func value itself is provided as an argument so error types can be +// filled. +type RawFunction func(funcValue Value, args ...Value) (Value, error) diff --git a/syntax/internal/value/tag_cache.go b/syntax/internal/value/tag_cache.go new file mode 100644 index 0000000000..2bce16209d --- /dev/null +++ b/syntax/internal/value/tag_cache.go @@ -0,0 +1,121 @@ +package value + +import ( + "reflect" + + "github.com/grafana/river/internal/rivertags" +) + +// tagsCache caches the river tags for a struct type. 
This is never cleared,
+// but since most structs will be statically created throughout the lifetime
+// of the process, this will consume a negligible amount of memory.
+var tagsCache = make(map[reflect.Type]*objectFields)
+
+func getCachedTags(t reflect.Type) *objectFields {
+	if t.Kind() != reflect.Struct {
+		panic("getCachedTags called with non-struct type")
+	}
+
+	if entry, ok := tagsCache[t]; ok {
+		return entry
+	}
+
+	ff := rivertags.Get(t)
+
+	// Build a tree of keys.
+	tree := &objectFields{
+		fields:       make(map[string]rivertags.Field),
+		nestedFields: make(map[string]*objectFields),
+		keys:         []string{},
+	}
+
+	for _, f := range ff {
+		if f.Flags&rivertags.FlagLabel != 0 {
+			// Skip over label tags.
+			tree.labelField = f
+			continue
+		}
+
+		node := tree
+		for i, name := range f.Name {
+			// Add to the list of keys if this is a new key.
+			if node.Has(name) == objectKeyTypeInvalid {
+				node.keys = append(node.keys, name)
+			}
+
+			if i+1 == len(f.Name) {
+				// Last fragment, add as a field.
+				node.fields[name] = f
+				continue
+			}
+
+			inner, ok := node.nestedFields[name]
+			if !ok {
+				inner = &objectFields{
+					fields:       make(map[string]rivertags.Field),
+					nestedFields: make(map[string]*objectFields),
+					keys:         []string{},
+				}
+				node.nestedFields[name] = inner
+			}
+			node = inner
+		}
+	}
+
+	tagsCache[t] = tree
+	return tree
+}
+
+// objectFields is a parsed tree of fields in rivertags. It forms a tree where
+// interior nodes are nested field groups (e.g., for block names that have
+// multiple name fragments) and leaves are the fields themselves.
+type objectFields struct {
+	fields       map[string]rivertags.Field
+	nestedFields map[string]*objectFields
+	keys         []string // Combination of fields + nestedFields
+	labelField   rivertags.Field
+}
+
+type objectKeyType int
+
+const (
+	objectKeyTypeInvalid objectKeyType = iota
+	objectKeyTypeField
+	objectKeyTypeNestedField
+)
+
+// Has returns whether name exists as a field or a nested key inside keys.
+// Returns objectKeyTypeInvalid if name does not exist as either.
+func (of *objectFields) Has(name string) objectKeyType {
+	if _, ok := of.fields[name]; ok {
+		return objectKeyTypeField
+	}
+	if _, ok := of.nestedFields[name]; ok {
+		return objectKeyTypeNestedField
+	}
+	return objectKeyTypeInvalid
+}
+
+// Len returns the number of named keys.
+func (of *objectFields) Len() int { return len(of.keys) }
+
+// Keys returns all named keys (fields and nested fields).
+func (of *objectFields) Keys() []string { return of.keys }
+
+// Field gets a non-nested field. Returns false if name is a nested field.
+func (of *objectFields) Field(name string) (rivertags.Field, bool) {
+	f, ok := of.fields[name]
+	return f, ok
+}
+
+// NestedField gets a named nested field entry. Returns false if name is not a
+// nested field.
+func (of *objectFields) NestedField(name string) (*objectFields, bool) {
+	nk, ok := of.nestedFields[name]
+	return nk, ok
+}
+
+// LabelField returns the field used for the label (if any).
+func (of *objectFields) LabelField() (rivertags.Field, bool) {
+	return of.labelField, of.labelField.Index != nil
+}
diff --git a/syntax/internal/value/type.go b/syntax/internal/value/type.go
new file mode 100644
index 0000000000..e79715cbbf
--- /dev/null
+++ b/syntax/internal/value/type.go
@@ -0,0 +1,157 @@
+package value
+
+import (
+	"fmt"
+	"reflect"
+)
+
+// Type loosely represents the type of a River value. For example, a Value may
+// be TypeArray, but this does not imply anything about the type of that
+// array's elements (all of which may be any type).
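+//
+// For example (illustrative): the River value [1, "two", []] is TypeArray
+// even though its elements are a number, a string, and another array.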
+//
+// TypeCapsule is a special type which encapsulates arbitrary Go values.
+type Type uint8
+
+// Supported Type values.
+const (
+	TypeNull Type = iota
+	TypeNumber
+	TypeString
+	TypeBool
+	TypeArray
+	TypeObject
+	TypeFunction
+	TypeCapsule
+)
+
+var typeStrings = [...]string{
+	TypeNull:     "null",
+	TypeNumber:   "number",
+	TypeString:   "string",
+	TypeBool:     "bool",
+	TypeArray:    "array",
+	TypeObject:   "object",
+	TypeFunction: "function",
+	TypeCapsule:  "capsule",
+}
+
+// String returns the name of t.
+func (t Type) String() string {
+	if int(t) < len(typeStrings) {
+		return typeStrings[t]
+	}
+	return fmt.Sprintf("Type(%d)", t)
+}
+
+// GoString returns the name of t.
+func (t Type) GoString() string { return t.String() }
+
+// RiverType returns the River type from the Go type.
+//
+// Go types map to River types using the following rules:
+//
+// 1. Go numbers (ints, uints, floats) map to a River number.
+// 2. Go strings map to a River string.
+// 3. Go bools map to a River bool.
+// 4. Go arrays and slices map to a River array.
+// 5. Go map[string]T maps to a River object.
+// 6. Go structs map to a River object, provided they have at least one field
+//    with a river tag.
+// 7. Valid Go functions map to a River function.
+// 8. Go interfaces map to a River capsule.
+// 9. All other Go values map to a River capsule.
+//
+// Go functions are only valid for River if they have one non-error return type
+// (the first return type) and one optional error return type (the second
+// return type). Other function types are treated as capsules.
+//
+// As an exception, any type which implements the Capsule interface is forced
+// to be a capsule.
+func RiverType(t reflect.Type) Type {
+	// We don't know if the RiverCapsule interface is implemented for a pointer
+	// or non-pointer type, so we have to check before and after dereferencing.
+
+	for t.Kind() == reflect.Pointer {
+		switch {
+		case t.Implements(goCapsule):
+			return TypeCapsule
+		case t.Implements(goTextMarshaler):
+			return TypeString
+		}
+
+		t = t.Elem()
+	}
+
+	switch {
+	case t.Implements(goCapsule):
+		return TypeCapsule
+	case t.Implements(goTextMarshaler):
+		return TypeString
+	case t == goDuration:
+		return TypeString
+	}
+
+	switch t.Kind() {
+	case reflect.Invalid:
+		return TypeNull
+
+	case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64:
+		return TypeNumber
+	case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64:
+		return TypeNumber
+	case reflect.Float32, reflect.Float64:
+		return TypeNumber
+
+	case reflect.String:
+		return TypeString
+
+	case reflect.Bool:
+		return TypeBool
+
+	case reflect.Array, reflect.Slice:
+		if inner := t.Elem(); inner.Kind() == reflect.Struct {
+			if _, labeled := getCachedTags(inner).LabelField(); labeled {
+				// A slice/array of labeled blocks is an object, where each label is a
+				// top-level key.
+				return TypeObject
+			}
+		}
+		return TypeArray
+
+	case reflect.Map:
+		if t.Key() != goString {
+			// Objects must be keyed by string. Anything else is forced to be a
+			// Capsule.
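+			// For example (illustrative): map[int]string maps to a capsule,
+			// while map[string]string maps to an object.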
+ return TypeCapsule + } + return TypeObject + + case reflect.Struct: + if getCachedTags(t).Len() == 0 { + return TypeCapsule + } + return TypeObject + + case reflect.Func: + switch t.NumOut() { + case 1: + if t.Out(0) == goError { + return TypeCapsule + } + return TypeFunction + case 2: + if t.Out(0) == goError || t.Out(1) != goError { + return TypeCapsule + } + return TypeFunction + default: + return TypeCapsule + } + + case reflect.Interface: + return TypeCapsule + + default: + return TypeCapsule + } +} diff --git a/syntax/internal/value/type_test.go b/syntax/internal/value/type_test.go new file mode 100644 index 0000000000..10ee04bc75 --- /dev/null +++ b/syntax/internal/value/type_test.go @@ -0,0 +1,80 @@ +package value_test + +import ( + "reflect" + "testing" + + "github.com/grafana/river/internal/value" + "github.com/stretchr/testify/require" +) + +type customCapsule bool + +var _ value.Capsule = (customCapsule)(false) + +func (customCapsule) RiverCapsule() {} + +var typeTests = []struct { + input interface{} + expect value.Type +}{ + {int(0), value.TypeNumber}, + {int8(0), value.TypeNumber}, + {int16(0), value.TypeNumber}, + {int32(0), value.TypeNumber}, + {int64(0), value.TypeNumber}, + {uint(0), value.TypeNumber}, + {uint8(0), value.TypeNumber}, + {uint16(0), value.TypeNumber}, + {uint32(0), value.TypeNumber}, + {uint64(0), value.TypeNumber}, + {float32(0), value.TypeNumber}, + {float64(0), value.TypeNumber}, + + {string(""), value.TypeString}, + + {bool(false), value.TypeBool}, + + {[...]int{0, 1, 2}, value.TypeArray}, + {[]int{0, 1, 2}, value.TypeArray}, + + // Struct with no River tags is a capsule. + {struct{}{}, value.TypeCapsule}, + + // A slice of labeled blocks should be an object. + {[]struct { + Label string `river:",label"` + }{}, value.TypeObject}, + + {map[string]interface{}{}, value.TypeObject}, + + // Go functions must have one non-error return type and one optional error + // return type to be River functions. Everything else is a capsule. + {(func() int)(nil), value.TypeFunction}, + {(func() (int, error))(nil), value.TypeFunction}, + {(func())(nil), value.TypeCapsule}, // Must have non-error return type + {(func() error)(nil), value.TypeCapsule}, // First return type must be non-error + {(func() (error, int))(nil), value.TypeCapsule}, // First return type must be non-error + {(func() (error, error))(nil), value.TypeCapsule}, // First return type must be non-error + {(func() (int, int))(nil), value.TypeCapsule}, // Second return type must be error + {(func() (int, int, int))(nil), value.TypeCapsule}, // Can only have 1 or 2 return types + + {make(chan struct{}), value.TypeCapsule}, + {map[bool]interface{}{}, value.TypeCapsule}, // Maps with non-string types are capsules + + // Types with capsule markers should be capsules. + {customCapsule(false), value.TypeCapsule}, + {(*customCapsule)(nil), value.TypeCapsule}, + {(**customCapsule)(nil), value.TypeCapsule}, +} + +func Test_RiverType(t *testing.T) { + for _, tc := range typeTests { + rt := reflect.TypeOf(tc.input) + + t.Run(rt.String(), func(t *testing.T) { + actual := value.RiverType(rt) + require.Equal(t, tc.expect, actual, "Unexpected type for %#v", tc.input) + }) + } +} diff --git a/syntax/internal/value/value.go b/syntax/internal/value/value.go new file mode 100644 index 0000000000..bdd8492c09 --- /dev/null +++ b/syntax/internal/value/value.go @@ -0,0 +1,556 @@ +// Package value holds the internal representation for River values. River +// values act as a lightweight wrapper around reflect.Value. 
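+//
+// For example (illustrative): Encode(15) produces a TypeNumber Value whose
+// underlying reflect.Value wraps the Go int 15.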
+package value + +import ( + "encoding" + "fmt" + "reflect" + "strconv" + "strings" + "time" + + "github.com/grafana/river/internal/reflectutil" +) + +// Go types used throughout the package. +var ( + goAny = reflect.TypeOf((*interface{})(nil)).Elem() + goString = reflect.TypeOf(string("")) + goByteSlice = reflect.TypeOf([]byte(nil)) + goError = reflect.TypeOf((*error)(nil)).Elem() + goTextMarshaler = reflect.TypeOf((*encoding.TextMarshaler)(nil)).Elem() + goTextUnmarshaler = reflect.TypeOf((*encoding.TextUnmarshaler)(nil)).Elem() + goStructWrapper = reflect.TypeOf(structWrapper{}) + goCapsule = reflect.TypeOf((*Capsule)(nil)).Elem() + goDuration = reflect.TypeOf((time.Duration)(0)) + goDurationPtr = reflect.TypeOf((*time.Duration)(nil)) + goRiverDefaulter = reflect.TypeOf((*Defaulter)(nil)).Elem() + goRiverDecoder = reflect.TypeOf((*Unmarshaler)(nil)).Elem() + goRiverValidator = reflect.TypeOf((*Validator)(nil)).Elem() + goRawRiverFunc = reflect.TypeOf((RawFunction)(nil)) + goRiverValue = reflect.TypeOf(Null) +) + +// NOTE(rfratto): This package is extremely sensitive to performance, so +// changes should be made with caution; run benchmarks when changing things. +// +// Value is optimized to be as small as possible and exist fully on the stack. +// This allows many values to avoid allocations, with the exception of creating +// arrays and objects. + +// Value represents a River value. +type Value struct { + rv reflect.Value + ty Type +} + +// Null is the null value. +var Null = Value{} + +// Uint returns a Value from a uint64. +func Uint(u uint64) Value { return Value{rv: reflect.ValueOf(u), ty: TypeNumber} } + +// Int returns a Value from an int64. +func Int(i int64) Value { return Value{rv: reflect.ValueOf(i), ty: TypeNumber} } + +// Float returns a Value from a float64. +func Float(f float64) Value { return Value{rv: reflect.ValueOf(f), ty: TypeNumber} } + +// String returns a Value from a string. +func String(s string) Value { return Value{rv: reflect.ValueOf(s), ty: TypeString} } + +// Bool returns a Value from a bool. +func Bool(b bool) Value { return Value{rv: reflect.ValueOf(b), ty: TypeBool} } + +// Object returns a new value from m. A copy of m is made for producing the +// Value. +func Object(m map[string]Value) Value { + return Value{ + rv: reflect.ValueOf(m), + ty: TypeObject, + } +} + +// Array creates an array from the given values. A copy of the vv slice is made +// for producing the Value. +func Array(vv ...Value) Value { + return Value{ + rv: reflect.ValueOf(vv), + ty: TypeArray, + } +} + +// Func makes a new function Value from f. Func panics if f does not map to a +// River function. +func Func(f interface{}) Value { + rv := reflect.ValueOf(f) + if RiverType(rv.Type()) != TypeFunction { + panic("river/value: Func called with non-function type") + } + return Value{rv: rv, ty: TypeFunction} +} + +// Encapsulate creates a new Capsule value from v. Encapsulate panics if v does +// not map to a River capsule. +func Encapsulate(v interface{}) Value { + rv := reflect.ValueOf(v) + if RiverType(rv.Type()) != TypeCapsule { + panic("river/value: Capsule called with non-capsule type") + } + return Value{rv: rv, ty: TypeCapsule} +} + +// Encode creates a new Value from v. If v is a pointer, v must be considered +// immutable and not change while the Value is used. +func Encode(v interface{}) Value { + if v == nil { + return Null + } + return makeValue(reflect.ValueOf(v)) +} + +// FromRaw converts a reflect.Value into a River Value. 
It is useful to prevent
+// downcasting an interface into an any.
+func FromRaw(v reflect.Value) Value {
+	return makeValue(v)
+}
+
+// Type returns the River type for the value.
+func (v Value) Type() Type { return v.ty }
+
+// Describe returns a descriptive type name for the value. For capsule values,
+// this prints the underlying Go type name. For other values, it prints the
+// normal River type.
+func (v Value) Describe() string {
+	if v.ty != TypeCapsule {
+		return v.ty.String()
+	}
+	return fmt.Sprintf("capsule(%q)", v.rv.Type())
+}
+
+// Bool returns the boolean value for v. It panics if v is not a bool.
+func (v Value) Bool() bool {
+	if v.ty != TypeBool {
+		panic("river/value: Bool called on non-bool type")
+	}
+	return v.rv.Bool()
+}
+
+// Number returns a Number value for v. It panics if v is not a number.
+func (v Value) Number() Number {
+	if v.ty != TypeNumber {
+		panic("river/value: Number called on non-number type")
+	}
+	return newNumberValue(v.rv)
+}
+
+// Int returns an int value for v. It panics if v is not a number.
+func (v Value) Int() int64 {
+	if v.ty != TypeNumber {
+		panic("river/value: Int called on non-number type")
+	}
+	switch makeNumberKind(v.rv.Kind()) {
+	case NumberKindInt:
+		return v.rv.Int()
+	case NumberKindUint:
+		return int64(v.rv.Uint())
+	case NumberKindFloat:
+		return int64(v.rv.Float())
+	}
+	panic("river/value: unreachable")
+}
+
+// Uint returns a uint value for v. It panics if v is not a number.
+func (v Value) Uint() uint64 {
+	if v.ty != TypeNumber {
+		panic("river/value: Uint called on non-number type")
+	}
+	switch makeNumberKind(v.rv.Kind()) {
+	case NumberKindInt:
+		return uint64(v.rv.Int())
+	case NumberKindUint:
+		return v.rv.Uint()
+	case NumberKindFloat:
+		return uint64(v.rv.Float())
+	}
+	panic("river/value: unreachable")
+}
+
+// Float returns a float value for v. It panics if v is not a number.
+func (v Value) Float() float64 {
+	if v.ty != TypeNumber {
+		panic("river/value: Float called on non-number type")
+	}
+	switch makeNumberKind(v.rv.Kind()) {
+	case NumberKindInt:
+		return float64(v.rv.Int())
+	case NumberKindUint:
+		return float64(v.rv.Uint())
+	case NumberKindFloat:
+		return v.rv.Float()
+	}
+	panic("river/value: unreachable")
+}
+
+// Text returns a string value of v. It panics if v is not a string.
+func (v Value) Text() string {
+	if v.ty != TypeString {
+		panic("river/value: Text called on non-string type")
+	}
+
+	// Attempt to get the address of v.rv for interface checking.
+	//
+	// The normal v.rv value is used for other checks.
+	addrRV := v.rv
+	if addrRV.CanAddr() {
+		addrRV = addrRV.Addr()
+	}
+	switch {
+	case addrRV.Type().Implements(goTextMarshaler):
+		// TODO(rfratto): what should we do if this fails?
+		text, _ := addrRV.Interface().(encoding.TextMarshaler).MarshalText()
+		return string(text)
+
+	case v.rv.Type() == goDuration:
+		// Special case: v.rv is a duration and its String method should be used.
+		return v.rv.Interface().(time.Duration).String()
+
+	default:
+		return v.rv.String()
+	}
+}
+
+// Len returns the length of v. Panics if v is not an array or object.
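+// For example, an encoded []int{1, 2, 3} has a Len of 3, and an encoded
+// map[string]int with two entries has a Len of 2.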
+func (v Value) Len() int {
+	switch v.ty {
+	case TypeArray:
+		return v.rv.Len()
+	case TypeObject:
+		switch {
+		case v.rv.Type() == goStructWrapper:
+			return v.rv.Interface().(structWrapper).Len()
+		case v.rv.Kind() == reflect.Array, v.rv.Kind() == reflect.Slice: // Array of labeled blocks
+			return v.rv.Len()
+		case v.rv.Kind() == reflect.Struct:
+			return getCachedTags(v.rv.Type()).Len()
+		case v.rv.Kind() == reflect.Map:
+			return v.rv.Len()
+		}
+	}
+	panic("river/value: Len called on non-array and non-object value")
+}
+
+// Index returns index i of the Value. Panics if the value is not an array or
+// if it is out of bounds of the array's size.
+func (v Value) Index(i int) Value {
+	if v.ty != TypeArray {
+		panic("river/value: Index called on non-array value")
+	}
+	return makeValue(v.rv.Index(i))
+}
+
+// Interface returns the underlying Go value for the Value.
+func (v Value) Interface() interface{} {
+	if v.ty == TypeNull {
+		return nil
+	}
+	return v.rv.Interface()
+}
+
+// Reflect returns the raw reflection value backing v.
+func (v Value) Reflect() reflect.Value { return v.rv }
+
+// makeValue converts a reflect value into a Value, dereferencing any pointers or
+// interface{} values.
+func makeValue(v reflect.Value) Value {
+	// Early check: if v is interface{}, we need to dereference it to get the
+	// concrete value.
+	if v.IsValid() && v.Type() == goAny {
+		v = v.Elem()
+	}
+
+	// Special case: a reflect.Value may be a value.Value when it's coming from a
+	// River array or object. We can unwrap the inner value here before
+	// continuing.
+	if v.IsValid() && v.Type() == goRiverValue {
+		// Unwrap the inner value.
+		v = v.Interface().(Value).rv
+	}
+
+	// Before we get the River type of the Value, we need to see if it's possible
+	// to get a pointer to v. This ensures that if v is a non-pointer field of an
+	// addressable struct, we still detect the type of v as if it were a pointer.
+	if v.CanAddr() {
+		v = v.Addr()
+	}
+
+	if !v.IsValid() {
+		return Null
+	}
+	riverType := RiverType(v.Type())
+
+	// Finally, dereference the pointer fully and use the type we detected.
+	for v.Kind() == reflect.Pointer {
+		if v.IsNil() {
+			return Null
+		}
+		v = v.Elem()
+	}
+	return Value{rv: v, ty: riverType}
+}
+
+// OrderedKeys reports if v represents an object with consistently ordered
+// keys. It panics if v is not an object.
+func (v Value) OrderedKeys() bool {
+	if v.ty != TypeObject {
+		panic("river/value: OrderedKeys called on non-object value")
+	}
+
+	// Maps are the only type of unordered River object, since their keys can't
+	// be iterated over in a deterministic order. Every other type of River
+	// object comes from a struct or a slice where the order of keys stays the
+	// same.
+	return v.rv.Kind() != reflect.Map
+}
+
+// Keys returns the keys in v in unspecified order. It panics if v is not an
+// object.
+func (v Value) Keys() []string {
+	if v.ty != TypeObject {
+		panic("river/value: Keys called on non-object value")
+	}
+
+	switch {
+	case v.rv.Type() == goStructWrapper:
+		return v.rv.Interface().(structWrapper).Keys()
+
+	case v.rv.Kind() == reflect.Struct:
+		return wrapStruct(v.rv, true).Keys()
+
+	case v.rv.Kind() == reflect.Array, v.rv.Kind() == reflect.Slice:
+		// List of labeled blocks.
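+		// Each element's label becomes a top-level key; a slice of two blocks
+		// labeled "a" and "b" yields the keys "a" and "b".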
+ labelField, _ := getCachedTags(v.rv.Type().Elem()).LabelField() + + keys := make([]string, v.rv.Len()) + for i := range keys { + keys[i] = reflectutil.Get(v.rv.Index(i), labelField).String() + } + return keys + + case v.rv.Kind() == reflect.Map: + reflectKeys := v.rv.MapKeys() + res := make([]string, len(reflectKeys)) + for i, rk := range reflectKeys { + res[i] = rk.String() + } + return res + } + + panic("river/value: unreachable") +} + +// Key returns the value for a key in v. It panics if v is not an object. ok +// will be false if the key did not exist in the object. +func (v Value) Key(key string) (index Value, ok bool) { + if v.ty != TypeObject { + panic("river/value: Key called on non-object value") + } + + switch { + case v.rv.Type() == goStructWrapper: + return v.rv.Interface().(structWrapper).Key(key) + case v.rv.Kind() == reflect.Struct: + // We return the struct with the label intact. + return wrapStruct(v.rv, true).Key(key) + case v.rv.Kind() == reflect.Map: + val := v.rv.MapIndex(reflect.ValueOf(key)) + if !val.IsValid() { + return Null, false + } + return makeValue(val), true + + case v.rv.Kind() == reflect.Slice, v.rv.Kind() == reflect.Array: + // List of labeled blocks. + labelField, _ := getCachedTags(v.rv.Type().Elem()).LabelField() + + for i := 0; i < v.rv.Len(); i++ { + elem := v.rv.Index(i) + + label := reflectutil.Get(elem, labelField).String() + if label == key { + // We discard the label since the key here represents the label value. + ws := wrapStruct(elem, false) + return ws.Value(), true + } + } + default: + panic("river/value: unreachable") + } + + return +} + +// Call invokes a function value with the provided arguments. It panics if v is +// not a function. If v is a variadic function, args should be the full flat +// list of arguments. +// +// An ArgError will be returned if one of the arguments is invalid. An Error +// will be returned if the function call returns an error or if the number of +// arguments doesn't match. +func (v Value) Call(args ...Value) (Value, error) { + if v.ty != TypeFunction { + panic("river/value: Call called on non-function type") + } + + if v.rv.Type() == goRawRiverFunc { + return v.rv.Interface().(RawFunction)(v, args...) + } + + var ( + variadic = v.rv.Type().IsVariadic() + expectedArgs = v.rv.Type().NumIn() + ) + + if variadic && len(args) < expectedArgs-1 { + return Null, Error{ + Value: v, + Inner: fmt.Errorf("expected at least %d args, got %d", expectedArgs-1, len(args)), + } + } else if !variadic && len(args) != expectedArgs { + return Null, Error{ + Value: v, + Inner: fmt.Errorf("expected %d args, got %d", expectedArgs, len(args)), + } + } + + reflectArgs := make([]reflect.Value, len(args)) + for i, arg := range args { + var argVal reflect.Value + if variadic && i >= expectedArgs-1 { + argType := v.rv.Type().In(expectedArgs - 1).Elem() + argVal = reflect.New(argType).Elem() + } else { + argType := v.rv.Type().In(i) + argVal = reflect.New(argType).Elem() + } + + var d decoder + if err := d.decode(arg, argVal); err != nil { + return Null, ArgError{ + Function: v, + Argument: arg, + Index: i, + Inner: err, + } + } + + reflectArgs[i] = argVal + } + + outs := v.rv.Call(reflectArgs) + switch len(outs) { + case 1: + return makeValue(outs[0]), nil + case 2: + // When there's 2 return values, the second is always an error. 
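+		// A nil error passes the first return value through; a non-nil error
+		// is wrapped in an Error below.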
+ err, _ := outs[1].Interface().(error) + if err != nil { + return Null, Error{Value: v, Inner: err} + } + return makeValue(outs[0]), nil + + default: + // It's not possible to reach here; we enforce that function values always + // have 1 or 2 return values. + panic("river/value: unreachable") + } +} + +func convertValue(val Value, toType Type) (Value, error) { + // TODO(rfratto): Use vm benchmarks to see if making this a method on Value + // changes anything. + + fromType := val.Type() + + if fromType == toType { + // no-op: val is already the right kind. + return val, nil + } + + switch fromType { + case TypeNumber: + switch toType { + case TypeString: // number -> string + strVal := newNumberValue(val.rv).ToString() + return makeValue(reflect.ValueOf(strVal)), nil + } + + case TypeString: + sourceStr := val.rv.String() + + switch toType { + case TypeNumber: // string -> number + switch { + case sourceStr == "": + return Null, TypeError{Value: val, Expected: toType} + + case sourceStr[0] == '-': + // String starts with a -; parse as a signed int. + parsed, err := strconv.ParseInt(sourceStr, 10, 64) + if err != nil { + return Null, TypeError{Value: val, Expected: toType} + } + return Int(parsed), nil + case strings.ContainsAny(sourceStr, ".eE"): + // String contains something that a floating-point number would use; + // convert. + parsed, err := strconv.ParseFloat(sourceStr, 64) + if err != nil { + return Null, TypeError{Value: val, Expected: toType} + } + return Float(parsed), nil + default: + // Otherwise, treat the number as an unsigned int. + parsed, err := strconv.ParseUint(sourceStr, 10, 64) + if err != nil { + return Null, TypeError{Value: val, Expected: toType} + } + return Uint(parsed), nil + } + } + } + + return Null, TypeError{Value: val, Expected: toType} +} + +func convertGoNumber(nval Number, target reflect.Type) reflect.Value { + switch target.Kind() { + case reflect.Int: + return reflect.ValueOf(int(nval.Int())) + case reflect.Int8: + return reflect.ValueOf(int8(nval.Int())) + case reflect.Int16: + return reflect.ValueOf(int16(nval.Int())) + case reflect.Int32: + return reflect.ValueOf(int32(nval.Int())) + case reflect.Int64: + return reflect.ValueOf(nval.Int()) + case reflect.Uint: + return reflect.ValueOf(uint(nval.Uint())) + case reflect.Uint8: + return reflect.ValueOf(uint8(nval.Uint())) + case reflect.Uint16: + return reflect.ValueOf(uint16(nval.Uint())) + case reflect.Uint32: + return reflect.ValueOf(uint32(nval.Uint())) + case reflect.Uint64: + return reflect.ValueOf(nval.Uint()) + case reflect.Float32: + return reflect.ValueOf(float32(nval.Float())) + case reflect.Float64: + return reflect.ValueOf(nval.Float()) + } + + panic("unsupported number conversion") +} diff --git a/syntax/internal/value/value_object.go b/syntax/internal/value/value_object.go new file mode 100644 index 0000000000..6a642cb22f --- /dev/null +++ b/syntax/internal/value/value_object.go @@ -0,0 +1,119 @@ +package value + +import ( + "reflect" + + "github.com/grafana/river/internal/reflectutil" +) + +// structWrapper allows for partially traversing structs which contain fields +// representing blocks. This is required due to how block names and labels +// change the object representation. +// +// If a block name is a.b.c, then it is represented as three nested objects: +// +// { +// a = { +// b = { +// c = { /* block contents */ }, +// }, +// } +// } +// +// Similarly, if a block name is labeled (a.b.c "label"), then the label is the +// top-level key after c. 
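+//
+// For example, the label "foo" on block a.b.c is exposed as:
+//
+//	{ a = { b = { c = { foo = { /* block contents */ } } } } }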
+// +// structWrapper exposes Len, Keys, and Key methods similar to Value to allow +// traversing through the synthetic object. The values it returns are +// structWrappers. +// +// Code in value.go MUST check to see if a struct is a structWrapper *before* +// checking the value kind to ensure the appropriate methods are invoked. +type structWrapper struct { + structVal reflect.Value + fields *objectFields + label string // Non-empty string if this struct is wrapped in a label. +} + +func wrapStruct(val reflect.Value, keepLabel bool) structWrapper { + if val.Kind() != reflect.Struct { + panic("river/value: wrapStruct called on non-struct value") + } + + fields := getCachedTags(val.Type()) + + var label string + if f, ok := fields.LabelField(); ok && keepLabel { + label = reflectutil.Get(val, f).String() + } + + return structWrapper{ + structVal: val, + fields: fields, + label: label, + } +} + +// Value turns sw into a value. +func (sw structWrapper) Value() Value { + return Value{ + rv: reflect.ValueOf(sw), + ty: TypeObject, + } +} + +func (sw structWrapper) Len() int { + if len(sw.label) > 0 { + return 1 + } + return sw.fields.Len() +} + +func (sw structWrapper) Keys() []string { + if len(sw.label) > 0 { + return []string{sw.label} + } + return sw.fields.Keys() +} + +func (sw structWrapper) Key(key string) (index Value, ok bool) { + if len(sw.label) > 0 { + if key != sw.label { + return + } + next := reflect.ValueOf(structWrapper{ + structVal: sw.structVal, + fields: sw.fields, + // Unset the label now that we've traversed it + }) + return Value{rv: next, ty: TypeObject}, true + } + + keyType := sw.fields.Has(key) + + switch keyType { + case objectKeyTypeInvalid: + return // No such key + + case objectKeyTypeNestedField: + // Continue traversing. + nextNode, _ := sw.fields.NestedField(key) + return Value{ + rv: reflect.ValueOf(structWrapper{ + structVal: sw.structVal, + fields: nextNode, + }), + ty: TypeObject, + }, true + + case objectKeyTypeField: + f, _ := sw.fields.Field(key) + val, err := sw.structVal.FieldByIndexErr(f.Index) + if err != nil { + return Null, true + } + return makeValue(val), true + } + + panic("river/value: unreachable") +} diff --git a/syntax/internal/value/value_object_test.go b/syntax/internal/value/value_object_test.go new file mode 100644 index 0000000000..56d72a6102 --- /dev/null +++ b/syntax/internal/value/value_object_test.go @@ -0,0 +1,205 @@ +package value_test + +import ( + "testing" + + "github.com/grafana/river/internal/value" + "github.com/stretchr/testify/require" +) + +// TestBlockRepresentation ensures that the struct tags for blocks are +// represented correctly. 
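+// For example, a field tagged river:"unlabeled.a,block" must appear in the
+// decoded map as m["unlabeled"]["a"].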
+func TestBlockRepresentation(t *testing.T) {
+	type UnlabeledBlock struct {
+		Value int `river:"value,attr"`
+	}
+	type LabeledBlock struct {
+		Value int    `river:"value,attr"`
+		Label string `river:",label"`
+	}
+	type OuterBlock struct {
+		Attr1 string `river:"attr_1,attr"`
+		Attr2 string `river:"attr_2,attr"`
+
+		UnlabeledBlock1 UnlabeledBlock `river:"unlabeled.a,block"`
+		UnlabeledBlock2 UnlabeledBlock `river:"unlabeled.b,block"`
+		UnlabeledBlock3 UnlabeledBlock `river:"other_unlabeled,block"`
+
+		LabeledBlock1 LabeledBlock `river:"labeled.a,block"`
+		LabeledBlock2 LabeledBlock `river:"labeled.b,block"`
+		LabeledBlock3 LabeledBlock `river:"other_labeled,block"`
+	}
+
+	val := OuterBlock{
+		Attr1: "value_1",
+		Attr2: "value_2",
+		UnlabeledBlock1: UnlabeledBlock{
+			Value: 1,
+		},
+		UnlabeledBlock2: UnlabeledBlock{
+			Value: 2,
+		},
+		UnlabeledBlock3: UnlabeledBlock{
+			Value: 3,
+		},
+		LabeledBlock1: LabeledBlock{
+			Value: 4,
+			Label: "label_a",
+		},
+		LabeledBlock2: LabeledBlock{
+			Value: 5,
+			Label: "label_b",
+		},
+		LabeledBlock3: LabeledBlock{
+			Value: 6,
+			Label: "label_c",
+		},
+	}
+
+	t.Run("Map decode", func(t *testing.T) {
+		var m map[string]interface{}
+		require.NoError(t, value.Decode(value.Encode(val), &m))
+
+		type object = map[string]interface{}
+
+		expect := object{
+			"attr_1": "value_1",
+			"attr_2": "value_2",
+			"unlabeled": object{
+				"a": object{"value": 1},
+				"b": object{"value": 2},
+			},
+			"other_unlabeled": object{"value": 3},
+			"labeled": object{
+				"a": object{
+					"label_a": object{"value": 4},
+				},
+				"b": object{
+					"label_b": object{"value": 5},
+				},
+			},
+			"other_labeled": object{
+				"label_c": object{"value": 6},
+			},
+		}
+
+		require.Equal(t, expect, m)
+	})
+
+	t.Run("Object decode from other object", func(t *testing.T) {
+		// Decode into a separate type which is structurally identical but not
+		// literally the same.
+		type OuterBlock2 OuterBlock
+
+		var actualVal OuterBlock2
+		require.NoError(t, value.Decode(value.Encode(val), &actualVal))
+		require.Equal(t, val, OuterBlock(actualVal))
+	})
+}
+
+// TestSquashedBlockRepresentation ensures that the struct tags for squashed
+// blocks are represented correctly.
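+// Squashed fields are promoted into the parent object, so the inner
+// attributes below appear at the top level rather than under a nested key.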
+func TestSquashedBlockRepresentation(t *testing.T) {
+	type InnerStruct struct {
+		InnerField1 string `river:"inner_field_1,attr,optional"`
+		InnerField2 string `river:"inner_field_2,attr,optional"`
+	}
+
+	type OuterStruct struct {
+		OuterField1 string      `river:"outer_field_1,attr,optional"`
+		Inner       InnerStruct `river:",squash"`
+		OuterField2 string      `river:"outer_field_2,attr,optional"`
+	}
+
+	val := OuterStruct{
+		OuterField1: "value1",
+		Inner: InnerStruct{
+			InnerField1: "value3",
+			InnerField2: "value4",
+		},
+		OuterField2: "value2",
+	}
+
+	t.Run("Map decode", func(t *testing.T) {
+		var m map[string]interface{}
+		require.NoError(t, value.Decode(value.Encode(val), &m))
+
+		type object = map[string]interface{}
+
+		expect := object{
+			"outer_field_1": "value1",
+			"inner_field_1": "value3",
+			"inner_field_2": "value4",
+			"outer_field_2": "value2",
+		}
+
+		require.Equal(t, expect, m)
+	})
+}
+
+func TestSliceOfBlocks(t *testing.T) {
+	type UnlabeledBlock struct {
+		Value int `river:"value,attr"`
+	}
+	type LabeledBlock struct {
+		Value int    `river:"value,attr"`
+		Label string `river:",label"`
+	}
+	type OuterBlock struct {
+		Attr1 string `river:"attr_1,attr"`
+		Attr2 string `river:"attr_2,attr"`
+
+		Unlabeled []UnlabeledBlock `river:"unlabeled,block"`
+		Labeled   []LabeledBlock   `river:"labeled,block"`
+	}
+
+	val := OuterBlock{
+		Attr1: "value_1",
+		Attr2: "value_2",
+		Unlabeled: []UnlabeledBlock{
+			{Value: 1},
+			{Value: 2},
+			{Value: 3},
+		},
+		Labeled: []LabeledBlock{
+			{Label: "label_a", Value: 4},
+			{Label: "label_b", Value: 5},
+			{Label: "label_c", Value: 6},
+		},
+	}
+
+	t.Run("Map decode", func(t *testing.T) {
+		var m map[string]interface{}
+		require.NoError(t, value.Decode(value.Encode(val), &m))
+
+		type object = map[string]interface{}
+		type list = []interface{}
+
+		expect := object{
+			"attr_1": "value_1",
+			"attr_2": "value_2",
+			"unlabeled": list{
+				object{"value": 1},
+				object{"value": 2},
+				object{"value": 3},
+			},
+			"labeled": object{
+				"label_a": object{"value": 4},
+				"label_b": object{"value": 5},
+				"label_c": object{"value": 6},
+			},
+		}
+
+		require.Equal(t, expect, m)
+	})
+
+	t.Run("Object decode from other object", func(t *testing.T) {
+		// Decode into a separate type which is structurally identical but not
+		// literally the same.
+		type OuterBlock2 OuterBlock
+
+		var actualVal OuterBlock2
+		require.NoError(t, value.Decode(value.Encode(val), &actualVal))
+		require.Equal(t, val, OuterBlock(actualVal))
+	})
+}
diff --git a/syntax/internal/value/value_test.go b/syntax/internal/value/value_test.go
new file mode 100644
index 0000000000..4583e5196d
--- /dev/null
+++ b/syntax/internal/value/value_test.go
@@ -0,0 +1,243 @@
+package value_test
+
+import (
+	"fmt"
+	"io"
+	"testing"
+
+	"github.com/grafana/river/internal/value"
+	"github.com/stretchr/testify/require"
+)
+
+// TestEncodeKeyLookup tests that Go values are retained correctly
+// throughout values when a key lookup is used.
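+// For example, encoding &Body{} below and looking up "data" must yield the
+// string produced by pointerMarshaler's MarshalText.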
+func TestEncodeKeyLookup(t *testing.T) {
+	type Body struct {
+		Data pointerMarshaler `river:"data,attr"`
+	}
+
+	tt := []struct {
+		name         string
+		encodeTarget any
+		key          string
+
+		expectBodyType  value.Type
+		expectKeyExists bool
+		expectKeyValue  value.Value
+		expectKeyType   value.Type
+	}{
+		{
+			name:            "Struct Encode data Key",
+			encodeTarget:    &Body{},
+			key:             "data",
+			expectBodyType:  value.TypeObject,
+			expectKeyExists: true,
+			expectKeyValue:  value.String("Hello, world!"),
+			expectKeyType:   value.TypeString,
+		},
+		{
+			name:            "Struct Encode Missing Key",
+			encodeTarget:    &Body{},
+			key:             "missing",
+			expectBodyType:  value.TypeObject,
+			expectKeyExists: false,
+			expectKeyValue:  value.Null,
+			expectKeyType:   value.TypeNull,
+		},
+		{
+			name:            "Map Encode data Key",
+			encodeTarget:    map[string]string{"data": "Hello, world!"},
+			key:             "data",
+			expectBodyType:  value.TypeObject,
+			expectKeyExists: true,
+			expectKeyValue:  value.String("Hello, world!"),
+			expectKeyType:   value.TypeString,
+		},
+		{
+			name:            "Map Encode Missing Key",
+			encodeTarget:    map[string]string{"data": "Hello, world!"},
+			key:             "missing",
+			expectBodyType:  value.TypeObject,
+			expectKeyExists: false,
+			expectKeyValue:  value.Null,
+			expectKeyType:   value.TypeNull,
+		},
+		{
+			name:            "Map Encode empty value Key",
+			encodeTarget:    map[string]string{"data": ""},
+			key:             "data",
+			expectBodyType:  value.TypeObject,
+			expectKeyExists: true,
+			expectKeyValue:  value.String(""),
+			expectKeyType:   value.TypeString,
+		},
+	}
+
+	for _, tc := range tt {
+		t.Run(tc.name, func(t *testing.T) {
+			bodyVal := value.Encode(tc.encodeTarget)
+			require.Equal(t, tc.expectBodyType, bodyVal.Type())
+
+			val, ok := bodyVal.Key(tc.key)
+			require.Equal(t, tc.expectKeyExists, ok)
+			require.Equal(t, tc.expectKeyType, val.Type())
+			switch val.Type() {
+			case value.TypeString:
+				require.Equal(t, tc.expectKeyValue.Text(), val.Text())
+			case value.TypeNull:
+				require.Equal(t, tc.expectKeyValue, val)
+			default:
+				require.Fail(t, "unexpected value type (this switch can be expanded)")
+			}
+		})
+	}
+}
+
+// TestEncodeNoKeyLookup tests that Go values are retained correctly
+// throughout values when no key lookup is used.
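+// For example, value.Encode(&pointerMarshaler{}) is TypeString, because the
+// pointer type implements encoding.TextMarshaler.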
+func TestEncodeNoKeyLookup(t *testing.T) { + tt := []struct { + name string + encodeTarget any + key string + + expectBodyType value.Type + expectBodyText string + }{ + { + name: "Encode", + encodeTarget: &pointerMarshaler{}, + expectBodyType: value.TypeString, + expectBodyText: "Hello, world!", + }, + } + + for _, tc := range tt { + t.Run(tc.name, func(t *testing.T) { + bodyVal := value.Encode(tc.encodeTarget) + require.Equal(t, tc.expectBodyType, bodyVal.Type()) + require.Equal(t, "Hello, world!", bodyVal.Text()) + }) + } +} + +type pointerMarshaler struct{} + +func (*pointerMarshaler) MarshalText() ([]byte, error) { + return []byte("Hello, world!"), nil +} + +func TestValue_Call(t *testing.T) { + t.Run("simple", func(t *testing.T) { + add := func(a, b int) int { return a + b } + addVal := value.Encode(add) + + res, err := addVal.Call( + value.Int(15), + value.Int(43), + ) + require.NoError(t, err) + require.Equal(t, int64(15+43), res.Int()) + }) + + t.Run("fully variadic", func(t *testing.T) { + add := func(nums ...int) int { + var sum int + for _, num := range nums { + sum += num + } + return sum + } + addVal := value.Encode(add) + + t.Run("no args", func(t *testing.T) { + res, err := addVal.Call() + require.NoError(t, err) + require.Equal(t, int64(0), res.Int()) + }) + + t.Run("one arg", func(t *testing.T) { + res, err := addVal.Call(value.Int(32)) + require.NoError(t, err) + require.Equal(t, int64(32), res.Int()) + }) + + t.Run("many args", func(t *testing.T) { + res, err := addVal.Call( + value.Int(32), + value.Int(59), + value.Int(12), + ) + require.NoError(t, err) + require.Equal(t, int64(32+59+12), res.Int()) + }) + }) + + t.Run("partially variadic", func(t *testing.T) { + add := func(firstNum int, nums ...int) int { + sum := firstNum + for _, num := range nums { + sum += num + } + return sum + } + addVal := value.Encode(add) + + t.Run("no variadic args", func(t *testing.T) { + res, err := addVal.Call(value.Int(52)) + require.NoError(t, err) + require.Equal(t, int64(52), res.Int()) + }) + + t.Run("one variadic arg", func(t *testing.T) { + res, err := addVal.Call(value.Int(52), value.Int(32)) + require.NoError(t, err) + require.Equal(t, int64(52+32), res.Int()) + }) + + t.Run("many variadic args", func(t *testing.T) { + res, err := addVal.Call( + value.Int(32), + value.Int(59), + value.Int(12), + ) + require.NoError(t, err) + require.Equal(t, int64(32+59+12), res.Int()) + }) + }) + + t.Run("returns error", func(t *testing.T) { + failWhenTrue := func(val bool) (int, error) { + if val { + return 0, fmt.Errorf("function failed for a very good reason") + } + return 0, nil + } + funcVal := value.Encode(failWhenTrue) + + t.Run("no error", func(t *testing.T) { + res, err := funcVal.Call(value.Bool(false)) + require.NoError(t, err) + require.Equal(t, int64(0), res.Int()) + }) + + t.Run("error", func(t *testing.T) { + _, err := funcVal.Call(value.Bool(true)) + require.EqualError(t, err, "function failed for a very good reason") + }) + }) +} + +func TestValue_Interface_In_Array(t *testing.T) { + type Container struct { + Field io.Closer `river:"field,attr"` + } + + val := value.Encode(Container{Field: io.NopCloser(nil)}) + fieldVal, ok := val.Key("field") + require.True(t, ok, "field not found in object") + require.Equal(t, value.TypeCapsule, fieldVal.Type()) + + arrVal := value.Array(fieldVal) + require.Equal(t, value.TypeCapsule, arrVal.Index(0).Type()) +} diff --git a/syntax/parser/error_test.go b/syntax/parser/error_test.go new file mode 100644 index 0000000000..feb8602e31 --- 
/dev/null +++ b/syntax/parser/error_test.go @@ -0,0 +1,148 @@ +package parser + +import ( + "os" + "path/filepath" + "regexp" + "strings" + "testing" + + "github.com/grafana/river/diag" + "github.com/grafana/river/scanner" + "github.com/grafana/river/token" + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" +) + +// This file implements a parser test harness. The files in the testdata +// directory are parsed and the errors reported are compared against the error +// messages expected in the test files. +// +// Expected errors are indicated in the test files by putting a comment of the +// form /* ERROR "rx" */ immediately following an offending token. The harness +// will verify that an error matching the regular expression rx is reported at +// that source position. + +// ERROR comments must be of the form /* ERROR "rx" */ and rx is a regular +// expression that matches the expected error message. The special form +// /* ERROR HERE "rx" */ must be used for error messages that appear immediately +// after a token rather than at a token's position. +var errRx = regexp.MustCompile(`^/\* *ERROR *(HERE)? *"([^"]*)" *\*/$`) + +// expectedErrors collects the regular expressions of ERROR comments found in +// files and returns them as a map of error positions to error messages. +func expectedErrors(file *token.File, src []byte) map[token.Pos]string { + errors := make(map[token.Pos]string) + + s := scanner.New(file, src, nil, scanner.IncludeComments) + + var ( + prev token.Pos // Position of last non-comment, non-terminator token + here token.Pos // Position following after token at prev + ) + + for { + pos, tok, lit := s.Scan() + switch tok { + case token.EOF: + return errors + case token.COMMENT: + s := errRx.FindStringSubmatch(lit) + if len(s) == 3 { + pos := prev + if s[1] == "HERE" { + pos = here + } + errors[pos] = s[2] + } + case token.TERMINATOR: + if lit == "\n" { + break + } + fallthrough + default: + prev = pos + var l int // Token length + if isLiteral(tok) { + l = len(lit) + } else { + l = len(tok.String()) + } + here = prev.Add(l) + } + } +} + +func isLiteral(t token.Token) bool { + switch t { + case token.IDENT, token.NUMBER, token.FLOAT, token.STRING: + return true + } + return false +} + +// compareErrors compares the map of expected error messages with the list of +// found errors and reports mismatches. 
+func compareErrors(t *testing.T, file *token.File, expected map[token.Pos]string, found diag.Diagnostics) { + t.Helper() + + for _, checkError := range found { + pos := file.Pos(checkError.StartPos.Offset) + + if msg, found := expected[pos]; found { + // We expect a message at pos; check if it matches + rx, err := regexp.Compile(msg) + if !assert.NoError(t, err) { + continue + } + assert.True(t, + rx.MatchString(checkError.Message), + "%s: %q does not match %q", + checkError.StartPos, checkError.Message, msg, + ) + delete(expected, pos) // Eliminate consumed error + } else { + assert.Fail(t, + "Unexpected error", + "unexpected error: %s: %s", checkError.StartPos.String(), checkError.Message, + ) + } + } + + // There should be no expected errors left + if len(expected) > 0 { + t.Errorf("%d errors not reported:", len(expected)) + for pos, msg := range expected { + t.Errorf("%s: %s\n", file.PositionFor(pos), msg) + } + } +} + +func TestErrors(t *testing.T) { + list, err := os.ReadDir("testdata") + require.NoError(t, err) + + for _, d := range list { + name := d.Name() + if d.IsDir() || !strings.HasSuffix(name, ".river") { + continue + } + + t.Run(name, func(t *testing.T) { + checkErrors(t, filepath.Join("testdata", name)) + }) + } +} + +func checkErrors(t *testing.T, filename string) { + t.Helper() + + src, err := os.ReadFile(filename) + require.NoError(t, err) + + p := newParser(filename, src) + _ = p.ParseFile() + + expected := expectedErrors(p.file, src) + compareErrors(t, p.file, expected, p.diags) +} diff --git a/syntax/parser/internal.go b/syntax/parser/internal.go new file mode 100644 index 0000000000..1a8b7b7467 --- /dev/null +++ b/syntax/parser/internal.go @@ -0,0 +1,714 @@ +package parser + +import ( + "fmt" + "strings" + + "github.com/grafana/river/ast" + "github.com/grafana/river/diag" + "github.com/grafana/river/scanner" + "github.com/grafana/river/token" +) + +// parser implements the River parser. +// +// It is only safe for callers to use exported methods as entrypoints for +// parsing. +// +// Each Parse* and parse* method will describe the EBNF grammar being used for +// parsing that non-terminal. The EBNF grammar will be written as LL(1) and +// should directly represent the code. +// +// The parser will continue on encountering errors to allow a more complete +// list of errors to be returned to the user. The resulting AST should be +// discarded if errors were encountered during parsing. +type parser struct { + file *token.File + diags diag.Diagnostics + scanner *scanner.Scanner + comments []ast.CommentGroup + + pos token.Pos // Current token position + tok token.Token // Current token + lit string // Current token literal + + // Position of the last error written. Two parse errors on the same line are + // ignored. + lastError token.Position +} + +// newParser creates a new parser which will parse the provided src. +func newParser(filename string, src []byte) *parser { + file := token.NewFile(filename) + + p := &parser{ + file: file, + } + + p.scanner = scanner.New(file, src, func(pos token.Pos, msg string) { + p.diags.Add(diag.Diagnostic{ + Severity: diag.SeverityLevelError, + StartPos: file.PositionFor(pos), + Message: msg, + }) + }, scanner.IncludeComments) + + p.next() + return p +} + +// next advances the parser to the next non-comment token. +func (p *parser) next() { + p.next0() + + for p.tok == token.COMMENT { + p.consumeCommentGroup() + } +} + +// next0 advances the parser to the next token. 
next0 should not be used +// directly by parse methods; call next instead. +func (p *parser) next0() { p.pos, p.tok, p.lit = p.scanner.Scan() } + +// consumeCommentGroup consumes a group of adjacent comments, adding it to p's +// comment list. +func (p *parser) consumeCommentGroup() { + var list []*ast.Comment + + endline := p.pos.Position().Line + for p.tok == token.COMMENT && p.pos.Position().Line <= endline+1 { + var comment *ast.Comment + comment, endline = p.consumeComment() + list = append(list, comment) + } + + p.comments = append(p.comments, ast.CommentGroup(list)) +} + +// consumeComment consumes a comment and returns it with the line number it +// ends on. +func (p *parser) consumeComment() (comment *ast.Comment, endline int) { + endline = p.pos.Position().Line + + if p.lit[1] == '*' { + // Block comments may end on a different line than where they start. Scan + // the comment for newlines and adjust endline accordingly. + // + // NOTE: don't use range here, since range will unnecessarily decode + // Unicode code points and slow down the parser. + for i := 0; i < len(p.lit); i++ { + if p.lit[i] == '\n' { + endline++ + } + } + } + + comment = &ast.Comment{StartPos: p.pos, Text: p.lit} + p.next0() + return +} + +// advance consumes tokens up to (but not including) the specified token. +// advance will stop consuming tokens if EOF is reached before to. +func (p *parser) advance(to token.Token) { + for p.tok != token.EOF { + if p.tok == to { + return + } + p.next() + } +} + +// advanceAny consumes tokens up to (but not including) any of the tokens in +// the to set. +func (p *parser) advanceAny(to map[token.Token]struct{}) { + for p.tok != token.EOF { + if _, inSet := to[p.tok]; inSet { + return + } + p.next() + } +} + +// expect consumes the next token. It records an error if the consumed token +// was not t. +func (p *parser) expect(t token.Token) (pos token.Pos, tok token.Token, lit string) { + pos, tok, lit = p.pos, p.tok, p.lit + if tok != t { + p.addErrorf("expected %s, got %s", t, p.tok) + } + p.next() + return +} + +func (p *parser) addErrorf(format string, args ...interface{}) { + pos := p.file.PositionFor(p.pos) + + // Ignore errors which occur on the same line. + if p.lastError.Line == pos.Line { + return + } + p.lastError = pos + + p.diags.Add(diag.Diagnostic{ + Severity: diag.SeverityLevelError, + StartPos: pos, + Message: fmt.Sprintf(format, args...), + }) +} + +// ParseFile parses an entire file. +// +// File = Body +func (p *parser) ParseFile() *ast.File { + body := p.parseBody(token.EOF) + + return &ast.File{ + Name: p.file.Name(), + Body: body, + Comments: p.comments, + } +} + +// parseBody parses a series of statements up to and including the "until" +// token, which terminates the body. +// +// Body = [ Statement { terminator Statement } ] +func (p *parser) parseBody(until token.Token) ast.Body { + var body ast.Body + + for p.tok != until && p.tok != token.EOF { + stmt := p.parseStatement() + if stmt != nil { + body = append(body, stmt) + } + + if p.tok == until { + break + } + + if p.tok != token.TERMINATOR { + p.addErrorf("expected %s, got %s", token.TERMINATOR, p.tok) + p.consumeStatement() + } + p.next() + } + + return body +} + +// consumeStatement consumes tokens for the remainder of a statement (i.e., up +// to but not including a terminator). consumeStatement will keep track of the +// number of {}, [], and () pairs, only returning after the count of pairs is +// <= 0. 
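+//
+// For example, when recovering from an error inside a multiline "[ ... ]"
+// literal, terminators inside the brackets are skipped until the pairs
+// balance out.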
+func (p *parser) consumeStatement() { + var curlyPairs, brackPairs, parenPairs int + + for p.tok != token.EOF { + switch p.tok { + case token.LCURLY: + curlyPairs++ + case token.RCURLY: + curlyPairs-- + case token.LBRACK: + brackPairs++ + case token.RBRACK: + brackPairs-- + case token.LPAREN: + parenPairs++ + case token.RPAREN: + parenPairs-- + } + + if p.tok == token.TERMINATOR { + // Only return after we've consumed all pairs. It's possible for pairs to + // be less than zero if our statement started in a surrounding pair. + if curlyPairs <= 0 && brackPairs <= 0 && parenPairs <= 0 { + return + } + } + + p.next() + } +} + +// parseStatement parses an individual statement within a body. +// +// Statement = Attribute | Block +// Attribute = identifier "=" Expression +// Block = BlockName "{" Body "}" +func (p *parser) parseStatement() ast.Stmt { + blockName := p.parseBlockName() + if blockName == nil { + // parseBlockName failed; skip to the next identifier which would start a + // new Statement. + p.advance(token.IDENT) + return nil + } + + // p.tok is now the first token after the identifier in the attribute or + // block name. + switch p.tok { + case token.ASSIGN: // Attribute + p.next() // Consume "=" + + if len(blockName.Fragments) != 1 { + attrName := strings.Join(blockName.Fragments, ".") + p.diags.Add(diag.Diagnostic{ + Severity: diag.SeverityLevelError, + StartPos: blockName.Start.Position(), + EndPos: blockName.Start.Add(len(attrName) - 1).Position(), + Message: `attribute names may only consist of a single identifier with no "."`, + }) + } else if blockName.LabelPos != token.NoPos { + p.diags.Add(diag.Diagnostic{ + Severity: diag.SeverityLevelError, + StartPos: blockName.LabelPos.Position(), + // Add 1 to the end position to add in the end quote, which is stripped from the label value. + EndPos: blockName.LabelPos.Add(len(blockName.Label) + 1).Position(), + Message: `attribute names may not have labels`, + }) + } + + return &ast.AttributeStmt{ + Name: &ast.Ident{ + Name: blockName.Fragments[0], + NamePos: blockName.Start, + }, + Value: p.ParseExpression(), + } + + case token.LCURLY: // Block + block := &ast.BlockStmt{ + Name: blockName.Fragments, + NamePos: blockName.Start, + Label: blockName.Label, + LabelPos: blockName.LabelPos, + } + + block.LCurlyPos, _, _ = p.expect(token.LCURLY) + block.Body = p.parseBody(token.RCURLY) + block.RCurlyPos, _, _ = p.expect(token.RCURLY) + + return block + + default: + if blockName.ValidAttribute() { + // The blockname could be used for an attribute or a block (no label, + // only one name fragment), so inform the user of both cases. + p.addErrorf("expected attribute assignment or block body, got %s", p.tok) + } else { + p.addErrorf("expected block body, got %s", p.tok) + } + + // Give up on this statement and skip to the next identifier. + p.advance(token.IDENT) + return nil + } +} + +// parseBlockName parses the name used for a block. +// +// BlockName = identifier { "." identifier } [ string ] +func (p *parser) parseBlockName() *blockName { + if p.tok != token.IDENT { + p.addErrorf("expected identifier, got %s", p.tok) + return nil + } + + var bn blockName + + bn.Fragments = append(bn.Fragments, p.lit) // Append first identifier + bn.Start = p.pos + p.next() + + // { "." identifier } + for p.tok == token.DOT { + p.next() // consume "." + + if p.tok != token.IDENT { + p.addErrorf("expected identifier, got %s", p.tok) + + // Continue here to parse as much as possible, even though the block name + // will be malformed. 
+ } + + bn.Fragments = append(bn.Fragments, p.lit) + p.next() + } + + // [ string ] + if p.tok != token.ASSIGN && p.tok != token.LCURLY { + if p.tok == token.STRING { + // Only allow double-quoted strings for block labels. + if p.lit[0] != '"' { + p.addErrorf("expected block label to be a double quoted string, but got %q", p.lit) + } + + // Strip the quotes if it's non-empty. We then require any non-empty + // label to be a valid identifier. + if len(p.lit) > 2 { + bn.Label = p.lit[1 : len(p.lit)-1] + if !scanner.IsValidIdentifier(bn.Label) { + p.addErrorf("expected block label to be a valid identifier, but got %q", bn.Label) + } + } + bn.LabelPos = p.pos + } else { + p.addErrorf("expected block label, got %s", p.tok) + } + p.next() + } + + return &bn +} + +type blockName struct { + Fragments []string // Name fragments (i.e., `a.b.c`) + Label string // Optional user label + + Start token.Pos + LabelPos token.Pos +} + +// ValidAttribute returns true if the blockName can be used as an attribute +// name. +func (n blockName) ValidAttribute() bool { + return len(n.Fragments) == 1 && n.Label == "" +} + +// ParseExpression parses a single expression. +// +// Expression = BinOpExpr +func (p *parser) ParseExpression() ast.Expr { + return p.parseBinOp(1) +} + +// parseBinOp is the entrypoint for binary expressions. If there is no binary +// expressions in the current state, a single operand will be returned instead. +// +// BinOpExpr = OrExpr +// OrExpr = AndExpr { "||" AndExpr } +// AndExpr = CmpExpr { "&&" CmpExpr } +// CmpExpr = AddExpr { cmp_op AddExpr } +// AddExpr = MulExpr { add_op MulExpr } +// MulExpr = PowExpr { mul_op PowExpr } +// +// parseBinOp avoids the need for multiple non-terminal functions by providing +// context for operator precedence in recursive calls. inPrec specifies the +// incoming operator precedence. On the first call to parseBinOp, inPrec should +// be 1. +// +// parseBinOp can only handle left-associative operators, so PowExpr is handled +// by parsePowExpr. +func (p *parser) parseBinOp(inPrec int) ast.Expr { + // The EBNF documented by the function can be generalized into: + // + // CurPrecExpr = NextPrecExpr { cur_prec_ops NextPrecExpr } + // + // The code below implements this specific grammar, continually collecting + // everything at the same precedence level into the LHS of the expression + // while recursively calling parseBinOp for higher-precedence operations. + + lhs := p.parsePowExpr() + + for { + tok, pos, prec := p.tok, p.pos, p.tok.BinaryPrecedence() + if prec < inPrec { + // The next operator is lower precedence; drop up a level in our call + // stack. + return lhs + } + p.next() // Consume the operator + + // Recurse with a higher precedence level, which ensures that operators at + // the same precedence level don't get handled in the recursive call. + rhs := p.parseBinOp(prec + 1) + + lhs = &ast.BinaryExpr{ + Left: lhs, + Kind: tok, + KindPos: pos, + Right: rhs, + } + } +} + +// parsePowExpr is like parseBinOp but handles the right-associative pow +// operator. +// +// PowExpr = UnaryExpr [ "^" PowExpr ] +func (p *parser) parsePowExpr() ast.Expr { + lhs := p.parseUnaryExpr() + + if p.tok == token.POW { + pos := p.pos + p.next() // Consume ^ + + return &ast.BinaryExpr{ + Left: lhs, + Kind: token.POW, + KindPos: pos, + Right: p.parsePowExpr(), + } + } + + return lhs +} + +// parseUnaryExpr parses a unary expression. 
+// +// UnaryExpr = OperExpr | unary_op UnaryExpr +// +// OperExpr = PrimaryExpr { AccessExpr | IndexExpr | CallExpr } +// AccessExpr = "." identifier +// IndexExpr = "[" Expression "]" +// CallExpr = "(" [ ExpressionList ] ")" +func (p *parser) parseUnaryExpr() ast.Expr { + if isUnaryOp(p.tok) { + op, pos := p.tok, p.pos + p.next() // Consume op + + return &ast.UnaryExpr{ + Kind: op, + KindPos: pos, + Value: p.parseUnaryExpr(), + } + } + + primary := p.parsePrimaryExpr() + +NextOper: + for { + switch p.tok { + case token.DOT: // AccessExpr + p.next() + namePos, _, name := p.expect(token.IDENT) + + primary = &ast.AccessExpr{ + Value: primary, + Name: &ast.Ident{ + Name: name, + NamePos: namePos, + }, + } + + case token.LBRACK: // IndexExpr + lBrack, _, _ := p.expect(token.LBRACK) + index := p.ParseExpression() + rBrack, _, _ := p.expect(token.RBRACK) + + primary = &ast.IndexExpr{ + Value: primary, + LBrackPos: lBrack, + Index: index, + RBrackPos: rBrack, + } + + case token.LPAREN: // CallExpr + var args []ast.Expr + + lParen, _, _ := p.expect(token.LPAREN) + if p.tok != token.RPAREN { + args = p.parseExpressionList(token.RPAREN) + } + rParen, _, _ := p.expect(token.RPAREN) + + primary = &ast.CallExpr{ + Value: primary, + LParenPos: lParen, + Args: args, + RParenPos: rParen, + } + + case token.STRING, token.LCURLY: + // A user might be trying to assign a block to an attribute. let's + // attempt to parse the remainder as a block to tell them something is + // wrong. + // + // If we can't parse the remainder of the expression as a block, we give + // up and parse the remainder of the entire statement. + if p.tok == token.STRING { + p.next() + } + if _, tok, _ := p.expect(token.LCURLY); tok != token.LCURLY { + p.consumeStatement() + return primary + } + p.parseBody(token.RCURLY) + + end, tok, _ := p.expect(token.RCURLY) + if tok != token.RCURLY { + p.consumeStatement() + return primary + } + + p.diags.Add(diag.Diagnostic{ + Severity: diag.SeverityLevelError, + StartPos: ast.StartPos(primary).Position(), + EndPos: end.Position(), + Message: "cannot use a block as an expression", + }) + + default: + break NextOper + } + } + + return primary +} + +func isUnaryOp(tok token.Token) bool { + switch tok { + case token.NOT, token.SUB: + return true + default: + return false + } +} + +// parsePrimaryExpr parses a primary expression. 
+//
+// PrimaryExpr = LiteralValue | ArrayExpr | ObjectExpr
+//
+// LiteralValue = identifier | string | number | float | bool | null |
+//                "(" Expression ")"
+//
+// ArrayExpr = "[" [ ExpressionList ] "]"
+// ObjectExpr = "{" [ FieldList ] "}"
+func (p *parser) parsePrimaryExpr() ast.Expr {
+	switch p.tok {
+	case token.IDENT:
+		res := &ast.IdentifierExpr{
+			Ident: &ast.Ident{
+				Name:    p.lit,
+				NamePos: p.pos,
+			},
+		}
+		p.next()
+		return res
+
+	case token.STRING, token.NUMBER, token.FLOAT, token.BOOL, token.NULL:
+		res := &ast.LiteralExpr{
+			Kind:     p.tok,
+			Value:    p.lit,
+			ValuePos: p.pos,
+		}
+		p.next()
+		return res
+
+	case token.LPAREN:
+		lParen, _, _ := p.expect(token.LPAREN)
+		expr := p.ParseExpression()
+		rParen, _, _ := p.expect(token.RPAREN)
+
+		return &ast.ParenExpr{
+			LParenPos: lParen,
+			Inner:     expr,
+			RParenPos: rParen,
+		}
+
+	case token.LBRACK:
+		var res ast.ArrayExpr
+
+		res.LBrackPos, _, _ = p.expect(token.LBRACK)
+		if p.tok != token.RBRACK {
+			res.Elements = p.parseExpressionList(token.RBRACK)
+		}
+		res.RBrackPos, _, _ = p.expect(token.RBRACK)
+		return &res
+
+	case token.LCURLY:
+		var res ast.ObjectExpr
+
+		res.LCurlyPos, _, _ = p.expect(token.LCURLY)
+		// Check against RCURLY here: an empty object is "{}" and must not
+		// attempt to parse a field list.
+		if p.tok != token.RCURLY {
+			res.Fields = p.parseFieldList(token.RCURLY)
+		}
+		res.RCurlyPos, _, _ = p.expect(token.RCURLY)
+		return &res
+	}
+
+	p.addErrorf("expected expression, got %s", p.tok)
+	res := &ast.LiteralExpr{Kind: token.NULL, Value: "null", ValuePos: p.pos}
+	p.advanceAny(statementEnd) // Eat up the rest of the line
+	return res
+}
+
+var statementEnd = map[token.Token]struct{}{
+	token.TERMINATOR: {},
+	token.RPAREN:     {},
+	token.RCURLY:     {},
+	token.RBRACK:     {},
+	token.COMMA:      {},
+}
+
+// parseExpressionList parses a list of expressions.
+//
+// ExpressionList = Expression { "," Expression } [ "," ]
+func (p *parser) parseExpressionList(until token.Token) []ast.Expr {
+	var exprs []ast.Expr
+
+	for p.tok != until && p.tok != token.EOF {
+		exprs = append(exprs, p.ParseExpression())
+
+		if p.tok == until {
+			break
+		}
+		if p.tok != token.COMMA {
+			p.addErrorf("missing ',' in expression list")
+		}
+		p.next()
+	}
+
+	return exprs
+}
+
+// parseFieldList parses a list of fields in an object.
+//
+// FieldList = Field { "," Field } [ "," ]
+func (p *parser) parseFieldList(until token.Token) []*ast.ObjectField {
+	var fields []*ast.ObjectField
+
+	for p.tok != until && p.tok != token.EOF {
+		fields = append(fields, p.parseField())
+
+		if p.tok == until {
+			break
+		}
+		if p.tok != token.COMMA {
+			p.addErrorf("missing ',' in field list")
+		}
+		p.next()
+	}
+
+	return fields
+}
+
+// parseField parses a field in an object.
+//
+// Field = ( string | identifier ) "=" Expression
+func (p *parser) parseField() *ast.ObjectField {
+	var field ast.ObjectField
+
+	if p.tok == token.STRING || p.tok == token.IDENT {
+		field.Name = &ast.Ident{
+			Name:    p.lit,
+			NamePos: p.pos,
+		}
+		if p.tok == token.STRING && len(p.lit) > 2 {
+			// The field name is a string literal; unwrap the quotes.
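+			// For example, both field_a = 5 and "field_a" = 5 yield the field
+			// name field_a; Quoted records which form was used.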
+			field.Name.Name = p.lit[1 : len(p.lit)-1]
+			field.Quoted = true
+		}
+		p.next() // Consume field name
+	} else {
+		p.addErrorf("expected field name (string or identifier), got %s", p.tok)
+		p.advance(token.ASSIGN)
+	}
+
+	p.expect(token.ASSIGN)
+
+	field.Value = p.ParseExpression()
+	return &field
+}
diff --git a/syntax/parser/internal_test.go b/syntax/parser/internal_test.go
new file mode 100644
index 0000000000..c3be1e7581
--- /dev/null
+++ b/syntax/parser/internal_test.go
@@ -0,0 +1,22 @@
+package parser
+
+import (
+	"testing"
+
+	"github.com/stretchr/testify/assert"
+)
+
+func TestObjectFieldName(t *testing.T) {
+	tt := []string{
+		`field_a = 5`,
+		`"field_a" = 5`, // Quotes should be removed from the field name
+	}
+
+	for _, tc := range tt {
+		p := newParser(t.Name(), []byte(tc))
+
+		res := p.parseField()
+
+		assert.Equal(t, "field_a", res.Name.Name)
+	}
+}
diff --git a/syntax/parser/parser.go b/syntax/parser/parser.go
new file mode 100644
index 0000000000..66d2199d4b
--- /dev/null
+++ b/syntax/parser/parser.go
@@ -0,0 +1,43 @@
+// Package parser implements utilities for parsing River configuration files.
+package parser
+
+import (
+	"github.com/grafana/river/ast"
+	"github.com/grafana/river/token"
+)
+
+// ParseFile parses an entire River configuration file. The data parameter
+// should hold the file contents to parse, while the filename parameter is used
+// for reporting errors.
+//
+// If an error was encountered during parsing, the returned AST will be nil and
+// err will be a diag.Diagnostics containing all the errors encountered during
+// parsing.
+func ParseFile(filename string, data []byte) (*ast.File, error) {
+	p := newParser(filename, data)
+
+	f := p.ParseFile()
+	if len(p.diags) > 0 {
+		return nil, p.diags
+	}
+	return f, nil
+}
+
+// ParseExpression parses a single River expression from expr.
+//
+// If an error was encountered during parsing, the returned expression will be
+// nil and err will be a diag.Diagnostics with all the errors encountered
+// during parsing.
+func ParseExpression(expr string) (ast.Expr, error) {
+	p := newParser("", []byte(expr))
+
+	e := p.ParseExpression()
+
+	// If the current token is not a TERMINATOR then the parsing did not complete
+	// in full and there are still parts of the string left unparsed.
+	p.expect(token.TERMINATOR)
+
+	if len(p.diags) > 0 {
+		return nil, p.diags
+	}
+	return e, nil
+}
diff --git a/syntax/parser/parser_test.go b/syntax/parser/parser_test.go
new file mode 100644
index 0000000000..f567c4650a
--- /dev/null
+++ b/syntax/parser/parser_test.go
@@ -0,0 +1,123 @@
+package parser
+
+import (
+	"io/fs"
+	"os"
+	"path/filepath"
+	"testing"
+
+	"github.com/stretchr/testify/require"
+)
+
+func FuzzParser(f *testing.F) {
+	filepath.WalkDir("./testdata/valid", func(path string, d fs.DirEntry, _ error) error {
+		if d.IsDir() {
+			return nil
+		}
+
+		bb, err := os.ReadFile(path)
+		require.NoError(f, err)
+		f.Add(bb)
+		return nil
+	})
+
+	f.Fuzz(func(t *testing.T, input []byte) {
+		p := newParser(t.Name(), input)
+
+		_ = p.ParseFile()
+		if len(p.diags) > 0 {
+			t.SkipNow()
+		}
+	})
+}
+
+// TestValid parses every *.river file in testdata, which is expected to be
+// valid.
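+// Each file must produce a non-nil AST and zero diagnostics.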
+func TestValid(t *testing.T) { + filepath.WalkDir("./testdata/valid", func(path string, d fs.DirEntry, _ error) error { + if d.IsDir() { + return nil + } + + t.Run(filepath.Base(path), func(t *testing.T) { + bb, err := os.ReadFile(path) + require.NoError(t, err) + + p := newParser(path, bb) + + res := p.ParseFile() + require.NotNil(t, res) + require.Len(t, p.diags, 0) + }) + + return nil + }) +} + +func TestParseExpressions(t *testing.T) { + tt := map[string]string{ + "literal number": `10`, + "literal float": `15.0`, + "literal string": `"Hello, world!"`, + "literal ident": `some_ident`, + "literal null": `null`, + "literal true": `true`, + "literal false": `false`, + + "empty array": `[]`, + "array one element": `[1]`, + "array many elements": `[0, 1, 2, 3]`, + "array trailing comma": `[0, 1, 2, 3,]`, + "nested array": `[[0, 1, 2], [3, 4, 5]]`, + "array multiline": `[ + 0, + 1, + 2, + ]`, + + "empty object": `{}`, + "object one field": `{ field_a = 5 }`, + "object multiple fields": `{ field_a = 5, field_b = 10 }`, + "object trailing comma": `{ field_a = 5, field_b = 10, }`, + "nested objects": `{ field_a = { nested_field = 100 } }`, + "object multiline": `{ + field_a = 5, + field_b = 10, + }`, + + "unary not": `!true`, + "unary neg": `-5`, + + "math": `1 + 2 - 3 * 4 / 5 % 6`, + "compare ops": `1 == 2 != 3 < 4 > 5 <= 6 >= 7`, + "logical ops": `true || false && true`, + "pow operator": "1 ^ 2 ^ 3", + + "field access": `a.b.c.d`, + "element access": `a[0][1][2]`, + + "call no args": `a()`, + "call one arg": `a(1)`, + "call multiple args": `a(1,2,3)`, + "call with trailing comma": `a(1,2,3,)`, + "call multiline": `a( + 1, + 2, + 3, + )`, + + "parens": `(1 + 5) * 100`, + + "mixed expression": `(a.b.c)(1, 3 * some_list[magic_index * 2]).resulting_field`, + } + + for name, input := range tt { + t.Run(name, func(t *testing.T) { + p := newParser(name, []byte(input)) + + res := p.ParseExpression() + require.NotNil(t, res) + require.Len(t, p.diags, 0) + }) + } +} diff --git a/syntax/parser/testdata/assign_block_to_attr.river b/syntax/parser/testdata/assign_block_to_attr.river new file mode 100644 index 0000000000..e291308599 --- /dev/null +++ b/syntax/parser/testdata/assign_block_to_attr.river @@ -0,0 +1,32 @@ +rw = prometheus/* ERROR "cannot use a block as an expression" */.remote_write "default" { + endpoint { + url = "some_url" + basic_auth { + username = "username" + password = "password" + } + } +} + +attr_1 = 15 +attr_2 = 51 + +block { + rw_2 = prometheus/* ERROR "cannot use a block as an expression" */.remote_write "other" { + endpoint { + url = "other_url" + basic_auth { + username = "username_2" + password = "password_2" + } + } + } +} + +other_block { + // This is an expression which looks like it might be a block at first, but + // then isn't. + rw_3 = prometheus.remote_write "other" "other" /* ERROR "expected {, got STRING" */ 12345 +} + +attr_3 = 15 diff --git a/syntax/parser/testdata/attribute_names.river b/syntax/parser/testdata/attribute_names.river new file mode 100644 index 0000000000..1e18c60850 --- /dev/null +++ b/syntax/parser/testdata/attribute_names.river @@ -0,0 +1,7 @@ +valid_attr = 15 + +// The parser parses block names for both blocks and attributes, and later +// validates that the attribute name is just a single identifier with no label. 
+ +invalid/* ERROR "attribute names may only consist of a single identifier" */.attr = 20 +invalid "label" /* ERROR "attribute names may not have labels" */ = 20 diff --git a/syntax/parser/testdata/block_names.river b/syntax/parser/testdata/block_names.river new file mode 100644 index 0000000000..cc0f2040f3 --- /dev/null +++ b/syntax/parser/testdata/block_names.river @@ -0,0 +1,25 @@ +valid_block { + +} + +valid_block "labeled" { + +} + +invalid_block bad_label_name /* ERROR "expected block label, got IDENT" */ { + +} + +other_valid_block { + nested_block { + + } + + nested_block "labeled" { + + } +} + +invalid_block "with space" /* ERROR "expected block label to be a valid identifier" */ { + +} diff --git a/syntax/parser/testdata/commas.river b/syntax/parser/testdata/commas.river new file mode 100644 index 0000000000..a43bb5c873 --- /dev/null +++ b/syntax/parser/testdata/commas.river @@ -0,0 +1,13 @@ +// Test that missing trailing commas for multiline expressions get reported. + +field = [ + 0, + 1, + 2/* ERROR HERE "missing ',' in expression list" */ +] + +obj = { + field_a = 0, + field_b = 1, + field_c = 2/* ERROR HERE "missing ',' in field list" */ +} diff --git a/syntax/parser/testdata/fuzz/FuzzParser/1a39f4e358facc21678b16fad53537b46efdaa76e024a5ef4955d01a68bdac37 b/syntax/parser/testdata/fuzz/FuzzParser/1a39f4e358facc21678b16fad53537b46efdaa76e024a5ef4955d01a68bdac37 new file mode 100644 index 0000000000..6151b52921 --- /dev/null +++ b/syntax/parser/testdata/fuzz/FuzzParser/1a39f4e358facc21678b16fad53537b46efdaa76e024a5ef4955d01a68bdac37 @@ -0,0 +1,2 @@ +go test fuzz v1 +[]byte("A0000000000000000") diff --git a/syntax/parser/testdata/fuzz/FuzzParser/248cf4391f6c48550b7d2cf4c6c80f4ba9099c21ffa2b6869e75e99565dce037 b/syntax/parser/testdata/fuzz/FuzzParser/248cf4391f6c48550b7d2cf4c6c80f4ba9099c21ffa2b6869e75e99565dce037 new file mode 100644 index 0000000000..cc252cd81b --- /dev/null +++ b/syntax/parser/testdata/fuzz/FuzzParser/248cf4391f6c48550b7d2cf4c6c80f4ba9099c21ffa2b6869e75e99565dce037 @@ -0,0 +1,2 @@ +go test fuzz v1 +[]byte("A={A!0A\"") diff --git a/syntax/parser/testdata/fuzz/FuzzParser/b919fa00ebca318001778477c839a06204b55f2636597901d8d7878150d8580a b/syntax/parser/testdata/fuzz/FuzzParser/b919fa00ebca318001778477c839a06204b55f2636597901d8d7878150d8580a new file mode 100644 index 0000000000..ff4ab488f8 --- /dev/null +++ b/syntax/parser/testdata/fuzz/FuzzParser/b919fa00ebca318001778477c839a06204b55f2636597901d8d7878150d8580a @@ -0,0 +1,2 @@ +go test fuzz v1 +[]byte("A\"") diff --git a/syntax/parser/testdata/invalid_exprs.river b/syntax/parser/testdata/invalid_exprs.river new file mode 100644 index 0000000000..c7ea3e4385 --- /dev/null +++ b/syntax/parser/testdata/invalid_exprs.river @@ -0,0 +1,4 @@ +attr = 1 + + /* ERROR "expected expression, got +" */ 2 + +invalid_func_call = a(() /* ERROR "expected expression, got \)" */) +invalid_access = a.true /* ERROR "expected IDENT, got BOOL" */ diff --git a/syntax/parser/testdata/invalid_object_key.river b/syntax/parser/testdata/invalid_object_key.river new file mode 100644 index 0000000000..1dd5a4ba8c --- /dev/null +++ b/syntax/parser/testdata/invalid_object_key.river @@ -0,0 +1,9 @@ +obj { + map = { + "string_field" = "foo", + identifier_string = "bar", + 1337 /* ERROR "expected field name \(string or identifier\), got NUMBER" */ = "baz", + "another_field" = "qux", + } +} + diff --git a/syntax/parser/testdata/valid/attribute.river b/syntax/parser/testdata/valid/attribute.river new file mode 100644 index 0000000000..9cc731b27f --- 
/dev/null +++ b/syntax/parser/testdata/valid/attribute.river @@ -0,0 +1 @@ +number_field = 1 diff --git a/syntax/parser/testdata/valid/blocks.river b/syntax/parser/testdata/valid/blocks.river new file mode 100644 index 0000000000..74d79bfe67 --- /dev/null +++ b/syntax/parser/testdata/valid/blocks.river @@ -0,0 +1,36 @@ +one_ident { + number_field = 1 +} + +one_ident "labeled" { + number_field = 1 +} + +multiple.idents { + number_field = 1 +} + +multiple.idents "labeled" { + number_field = 1 +} + +chain.of.idents { + number_field = 1 +} + +chain.of.idents "labeled" { + number_field = 1 +} + +one_ident_inline { number_field = 1 } +one_ident_inline "labeled" { number_field = 1 } +multiple.idents_inline { number_field = 1 } +multiple.idents_inline "labeled" { number_field = 1 } +chain.of.idents { number_field = 1 } +chain.of.idents "labeled" { number_field = 1 } + +nested_block { + inner_block { + some_field = true + } +} diff --git a/syntax/parser/testdata/valid/comments.river b/syntax/parser/testdata/valid/comments.river new file mode 100644 index 0000000000..9dafbd7346 --- /dev/null +++ b/syntax/parser/testdata/valid/comments.river @@ -0,0 +1 @@ +// Hello, world! diff --git a/syntax/parser/testdata/valid/empty.river b/syntax/parser/testdata/valid/empty.river new file mode 100644 index 0000000000..e69de29bb2 diff --git a/syntax/parser/testdata/valid/expressions.river b/syntax/parser/testdata/valid/expressions.river new file mode 100644 index 0000000000..78af48cc22 --- /dev/null +++ b/syntax/parser/testdata/valid/expressions.river @@ -0,0 +1,81 @@ +// Literals +lit_number = 10 +lit_float = 15.0 +lit_string = "Hello, world!" +lit_ident = other_ident +lit_null = null +lit_true = true +lit_false = false + +// Arrays +array_expr_empty = [] +array_expr_one_element = [0] +array_expr = [0, 1, 2, 3] +array_expr_trailing = [0, 1, 2, 3,] +array_expr_multiline = [ + 0, + 1, + 2, + 3, +] +array_expr_nested = [[1]] + +// Objects +object_expr_empty = {} +object_expr_one_field = { field_a = 1 } +object_expr = { field_a = 1, field_b = 2 } +object_expr_trailing = { field_a = 1, field_b = 2, } +object_expr_multiline = { + field_a = 1, + field_b = 2, +} +object_expr_nested = { field_a = { nested_field_a = 1 } } + +// Unary ops +not_something = !true +neg_number = -5 + +// Math binops +binop_sum = 1 + 2 +binop_sub = 1 - 2 +binop_mul = 1 * 2 +binop_div = 1 / 2 +binop_mod = 1 % 2 +binop_pow = 1 ^ 2 ^ 3 + +// Compare binops +binop_eq = 1 == 2 +binop_neq = 1 != 2 +binop_lt = 1 < 2 +binop_lte = 1 <= 2 +binop_gt = 1 > 2 +binop_gte = 1 >= 2 + +// Logical binops +binop_or = true || false +binop_and = true && false + + +// Mixed math operations +math = 1 + 2 - 3 * 4 / 5 % 6 +compare_ops = 1 == 2 != 3 < 4 > 5 <= 6 >= 7 +logical_ops = true || false && true +mixed_assoc = 1 * 3 + 5 ^ 3 - 2 % 1 // Test with both left- and right- associative operators +expr_parens = (5 * 2) + 5 + +// Accessors +field_access = a.b.c.d +element_access = a[0][1][2] + +// Function calls +call_no_args = a() +call_one_arg = a(1) +call_multiple_args = a(1,2,3) +call_trailing_comma = a(1,2,3,) +call_multiline = a( + 1, + 2, + 3, +) + +mixed_expr = (a.b.c)(1, 3 * some_list[magic_index * 2]).resulting_field diff --git a/syntax/printer/printer.go b/syntax/printer/printer.go new file mode 100644 index 0000000000..8faeb22006 --- /dev/null +++ b/syntax/printer/printer.go @@ -0,0 +1,556 @@ +// Package printer contains utilities for pretty-printing River ASTs. 
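+//
+// A minimal usage sketch, assuming f is an *ast.File previously returned by
+// the parser package:
+//
+//	var buf bytes.Buffer
+//	if err := printer.Fprint(&buf, f); err != nil {
+//		// handle the error
+//	}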
+package printer
+
+import (
+	"fmt"
+	"io"
+	"math"
+	"text/tabwriter"
+
+	"github.com/grafana/river/ast"
+	"github.com/grafana/river/token"
+)
+
+// Config configures behavior of the printer.
+type Config struct {
+	Indent int // Indentation to apply to all emitted code. Default 0.
+}
+
+// Fprint pretty-prints the specified node to w. The Node type must be an
+// *ast.File, ast.Body, or a type that implements ast.Stmt or ast.Expr.
+func (c *Config) Fprint(w io.Writer, node ast.Node) (err error) {
+	var p printer
+	p.Init(c)
+
+	// Pass all of our text through a trimmer to ignore trailing whitespace.
+	w = &trimmer{next: w}
+
+	if err = (&walker{p: &p}).Walk(node); err != nil {
+		return
+	}
+
+	// Call flush one more time to write trailing comments.
+	p.flush(token.Position{
+		Offset: math.MaxInt,
+		Line:   math.MaxInt,
+		Column: math.MaxInt,
+	}, token.EOF)
+
+	w = tabwriter.NewWriter(w, 0, 8, 1, ' ', tabwriter.DiscardEmptyColumns|tabwriter.TabIndent)
+
+	if _, err = w.Write(p.output); err != nil {
+		return
+	}
+	if tw, _ := w.(*tabwriter.Writer); tw != nil {
+		// Flush tabwriter if defined
+		err = tw.Flush()
+	}
+
+	return
+}
+
+// Fprint pretty-prints the specified node to w. The Node type must be an
+// *ast.File, ast.Body, or a type that implements ast.Stmt or ast.Expr.
+func Fprint(w io.Writer, node ast.Node) error {
+	c := &Config{}
+	return c.Fprint(w, node)
+}
+
+// The printer writes lexical tokens and whitespace to an internal buffer.
+// Comments are written by the printer itself, while all other tokens and
+// formatting characters are sent through calls to Write.
+//
+// Internally, printer depends on a tabwriter for formatting text and aligning
+// runs of characters. Horizontal '\t' and vertical '\v' tab characters are
+// used to introduce new columns in the row. Runs of characters are stopped
+// by either introducing a linefeed '\f' or by having a line with a different
+// number of columns from the previous line. See the text/tabwriter package for
+// more information on the elastic tabstop algorithm it uses for formatting
+// text.
+type printer struct {
+	cfg Config
+
+	// State variables
+
+	output  []byte
+	indent  int         // Current indentation level
+	lastTok token.Token // Last token printed (token.LITERAL if it's whitespace)
+
+	// Whitespace holds a buffer of whitespace characters to print prior to the
+	// next non-whitespace token. Whitespace is held in a buffer to avoid
+	// printing unnecessary whitespace at the end of a file.
+	whitespace []whitespace
+
+	// comments stores comments to be processed as elements are printed.
+	comments commentInfo
+
+	// pos is an approximation of the current position in AST space, and is used
+	// to determine space between AST elements (e.g., if a comment should come
+	// before a token). pos updates automatically as elements are written and can be
+	// manually set to guarantee an accurate position by passing a token.Pos to Write.
+	pos  token.Position
+	last token.Position // Last pos written to output (through writeString)
+
+	// out is an accurate representation of the current position in output space,
+	// used to inject extra formatting like indentation based on the output
+	// position.
+	//
+	// out may differ from pos in terms of whitespace.
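+	// For example, pos may jump backwards or forwards when a token.Pos is
+	// written, while out only ever advances as text is emitted.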
+	out token.Position
+}
+
+type commentInfo struct {
+	list []ast.CommentGroup
+	idx  int
+	cur  ast.CommentGroup
+	pos  token.Pos
+}
+
+func (ci *commentInfo) commentBefore(next token.Position) bool {
+	return ci.pos != token.NoPos && ci.pos.Offset() <= next.Offset
+}
+
+// nextComment preloads the next comment.
+func (ci *commentInfo) nextComment() {
+	for ci.idx < len(ci.list) {
+		c := ci.list[ci.idx]
+		ci.idx++
+		if len(c) > 0 {
+			ci.cur = c
+			ci.pos = ast.StartPos(c[0])
+			return
+		}
+	}
+	ci.pos = token.NoPos
+}
+
+// Init initializes the printer for printing. Init is intended to be called
+// once per printer and doesn't fully reset its state.
+func (p *printer) Init(cfg *Config) {
+	p.cfg = *cfg
+	p.pos = token.Position{Line: 1, Column: 1}
+	p.out = token.Position{Line: 1, Column: 1}
+	// Capacity is set low since most whitespace sequences are short.
+	p.whitespace = make([]whitespace, 0, 16)
+}
+
+// SetComments sets the comments to use.
+func (p *printer) SetComments(comments []ast.CommentGroup) {
+	p.comments = commentInfo{
+		list: comments,
+		idx:  0,
+		pos:  token.NoPos,
+	}
+	p.comments.nextComment()
+}
+
+// Write writes a list of writable arguments to the printer.
+//
+// Arguments can be one of the types described below:
+//
+// If arg is a whitespace value, it is accumulated into a buffer and flushed
+// only after a non-whitespace value is processed. The whitespace buffer will
+// be forcibly flushed if the buffer becomes full without writing a
+// non-whitespace token.
+//
+// If arg is an *ast.Ident, *ast.LiteralExpr, or a token.Token, the
+// human-readable representation of that value will be written.
+//
+// When writing text, comments which need to appear before that text in
+// AST-space are written first, followed by leftover whitespace and then the
+// text to write. The written text will update the AST-space position.
+//
+// If arg is a token.Pos, the AST-space position of the printer is updated to
+// the provided Pos. Writing token.Pos values can help make sure the printer's
+// AST-space position is accurate, as AST-space position is otherwise an
+// estimation based on written data.
+func (p *printer) Write(args ...interface{}) {
+	for _, arg := range args {
+		var (
+			data  string
+			isLit bool
+		)
+
+		switch arg := arg.(type) {
+		case whitespace:
+			// Whitespace token; add it to our token buffer. Note that a whitespace
+			// token is different than the actual whitespace which will get written
+			// (e.g., wsIndent increases indentation level by one instead of setting
+			// it to one.)
+			if arg == wsIgnore {
+				continue
+			}
+			i := len(p.whitespace)
+			if i == cap(p.whitespace) {
+				// We built up too much whitespace; this can happen if too many calls
+				// to Write happen without appending a non-comment token. We will
+				// force-flush the existing whitespace to avoid a panic.
+				//
+				// Ideally this line is never hit based on how we walk the AST, but
+				// it's kept for safety.
+				p.writeWritespace(i)
+				i = 0
+			}
+			p.whitespace = p.whitespace[0 : i+1]
+			p.whitespace[i] = arg
+			p.lastTok = token.LITERAL
+			continue
+
+		case *ast.Ident:
+			data = arg.Name
+			p.lastTok = token.IDENT
+
+		case *ast.LiteralExpr:
+			data = arg.Value
+			p.lastTok = arg.Kind
+
+		case token.Pos:
+			if arg.Valid() {
+				p.pos = arg.Position()
+			}
+			// Don't write anything; token.Pos is an instruction and doesn't include
+			// any text to write.
+			continue
+
+		case token.Token:
+			s := arg.String()
+			data = s
+
+			// We will need to inject whitespace if the previous token and the
+			// current token would combine into a single token when re-scanned. This
+			// ensures that the sequence of tokens emitted by the output of the
+			// printer matches the sequence of tokens from the input.
+			if mayCombine(p.lastTok, s[0]) {
+				if len(p.whitespace) != 0 {
+					// It shouldn't be possible for the whitespace buffer to be non-empty
+					// here; p.lastTok would've had to have been a non-whitespace token and
+					// so whitespace would've been flushed when it was written to the output
+					// buffer.
+					panic("whitespace buffer not empty")
+				}
+				p.whitespace = p.whitespace[0:1]
+				p.whitespace[0] = ' '
+			}
+			p.lastTok = arg
+
+		default:
+			panic(fmt.Sprintf("printer: unsupported argument %v (%T)\n", arg, arg))
+		}
+
+		next := p.pos
+
+		p.flush(next, p.lastTok)
+		p.writeString(next, data, isLit)
+	}
+}
+
+// mayCombine returns true if two tokens must not be combined, because combining
+// them would result in a different token sequence being generated.
+func mayCombine(prev token.Token, next byte) (b bool) {
+	switch prev {
+	case token.NUMBER:
+		return next == '.' // 1.
+	case token.DIV:
+		return next == '*' // /*
+	default:
+		return false
+	}
+}
+
+// flush prints any pending comments and whitespace occurring textually before
+// the position of the next token tok. If no comments are pending before next,
+// the buffered whitespace is flushed directly to the output instead, unless
+// tok is the EOF token.
+func (p *printer) flush(next token.Position, tok token.Token) {
+	if p.comments.commentBefore(next) {
+		p.injectComments(next, tok)
+	} else if tok != token.EOF {
+		// Write all remaining whitespace.
+		p.writeWritespace(len(p.whitespace))
+	}
+}
+
+func (p *printer) injectComments(next token.Position, tok token.Token) {
+	var lastComment *ast.Comment
+
+	for p.comments.commentBefore(next) {
+		for _, c := range p.comments.cur {
+			p.writeCommentPrefix(next, c)
+			p.writeComment(next, c)
+			lastComment = c
+		}
+		p.comments.nextComment()
+	}
+
+	p.writeCommentSuffix(next, tok, lastComment)
+}
+
+// writeCommentPrefix writes whitespace that should appear before c.
+func (p *printer) writeCommentPrefix(next token.Position, c *ast.Comment) {
+	if len(p.output) == 0 {
+		// The comment is the first thing written to the output. Don't write any
+		// whitespace before it.
+		return
+	}
+
+	cPos := c.StartPos.Position()
+
+	if cPos.Line == p.last.Line {
+		// Our comment is on the same line as the last token. Write a separator
+		// between the last token and the comment.
+		separator := byte('\t')
+		if cPos.Line == next.Line {
+			// The comment is on the same line as the next token, which means it has
+			// to be a block comment (since line comments run to the end of the
+			// line.) Use a space as the separator instead since a tab in the middle
+			// of a line between comments would look weird.
+			separator = byte(' ')
+		}
+		p.writeByte(separator, 1)
+	} else {
+		// Our comment is on a different line from the last token. First write
+		// pending whitespace from the last token up to the first newline.
+		var wsCount int
+
+		for i, ws := range p.whitespace {
+			switch ws {
+			case wsBlank, wsVTab:
+				// Drop any whitespace before the comment.
+				p.whitespace[i] = wsIgnore
+			case wsIndent, wsUnindent:
+				// Allow indentation to be applied.
+				continue
+			case wsNewline, wsFormfeed:
+				// Drop the whitespace since we're about to write our own.
+				p.whitespace[i] = wsIgnore
+			}
+			wsCount = i
+			break
+		}
+		p.writeWritespace(wsCount)
+
+		var newlines int
+		if cPos.Valid() && p.last.Valid() {
+			newlines = cPos.Line - p.last.Line
+		}
+		if newlines > 0 {
+			p.writeByte('\f', newlineLimit(newlines))
+		}
+	}
+}
+
+func (p *printer) writeComment(_ token.Position, c *ast.Comment) {
+	p.writeString(c.StartPos.Position(), c.Text, true)
+}
+
+// writeCommentSuffix writes any whitespace necessary between the last comment
+// and next. lastComment should be the final comment written.
+func (p *printer) writeCommentSuffix(next token.Position, tok token.Token, lastComment *ast.Comment) {
+	if tok == token.EOF {
+		// We don't want to add any blank newlines before the end of the file;
+		// return early.
+		return
+	}
+
+	var droppedFF bool
+
+	// If our final comment is a block comment and is on the same line as the
+	// next token, add a space as a suffix to separate them.
+	lastCommentPos := ast.EndPos(lastComment).Position()
+	if lastComment.Text[1] == '*' && next.Line == lastCommentPos.Line {
+		p.writeByte(' ', 1)
+	}
+
+	newlines := next.Line - p.last.Line
+
+	for i, ws := range p.whitespace {
+		switch ws {
+		case wsBlank, wsVTab:
+			p.whitespace[i] = wsIgnore
+		case wsIndent, wsUnindent:
+			continue
+		case wsNewline, wsFormfeed:
+			if ws == wsFormfeed {
+				droppedFF = true
+			}
+			p.whitespace[i] = wsIgnore
+		}
+	}
+
+	p.writeWritespace(len(p.whitespace))
+
+	// Write newlines as long as the next token isn't EOF (so that there are no
+	// blank newlines at the end of the file).
+	if newlines > 0 {
+		ch := byte('\n')
+		if droppedFF {
+			// If we dropped a formfeed while writing comments, we should emit a new
+			// one.
+			ch = byte('\f')
+		}
+		p.writeByte(ch, newlineLimit(newlines))
+	}
+}
+
+// writeString writes the literal string s into the printer's output.
+// Formatting characters in s such as '\t' and '\n' will be interpreted by
+// the underlying tabwriter unless isLit is set.
+func (p *printer) writeString(pos token.Position, s string, isLit bool) {
+	if p.out.Column == 1 {
+		// We haven't written any text to this line yet; prepend our indentation
+		// for the line.
+		p.writeIndent()
+	}
+
+	if pos.Valid() {
+		// Update p.pos if pos is valid. This is done *after* handling indentation
+		// since we want to interpret pos as the literal position for s (and
+		// writeIndent will update p.pos).
+		p.pos = pos
+	}
+
+	if isLit {
+		// Wrap our literal string in tabwriter.Escape if it's meant to be written
+		// without interpretation by the tabwriter.
+		p.output = append(p.output, tabwriter.Escape)
+
+		defer func() {
+			p.output = append(p.output, tabwriter.Escape)
+		}()
+	}
+
+	p.output = append(p.output, s...)
+
+	var (
+		newlines       int
+		lastNewlineIdx int
+	)
+
+	for i := 0; i < len(s); i++ {
+		if ch := s[i]; ch == '\n' || ch == '\f' {
+			newlines++
+			lastNewlineIdx = i
+		}
+	}
+
+	p.pos.Offset += len(s)
+
+	if newlines > 0 {
+		p.pos.Line += newlines
+		p.out.Line += newlines
+
+		newColumn := len(s) - lastNewlineIdx
+		p.pos.Column = newColumn
+		p.out.Column = newColumn
+	} else {
+		p.pos.Column += len(s)
+		p.out.Column += len(s)
+	}
+
+	p.last = p.pos
+}
+
+func (p *printer) writeIndent() {
+	depth := p.cfg.Indent + p.indent
+	for i := 0; i < depth; i++ {
+		p.output = append(p.output, '\t')
+	}
+
+	p.pos.Offset += depth
+	p.pos.Column += depth
+	p.out.Column += depth
+}
+
+// writeByte writes ch n times to the output, updating the position of the
+// printer. writeByte is only used for writing whitespace characters.
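+//
+// For example, writeByte('\f', 1) emits a single formfeed and resets the
+// tracked output position to the start of a new line.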
+func (p *printer) writeByte(ch byte, n int) {
+	if p.out.Column == 1 {
+		p.writeIndent()
+	}
+
+	for i := 0; i < n; i++ {
+		p.output = append(p.output, ch)
+	}
+
+	// Update positions.
+	p.pos.Offset += n
+	if ch == '\n' || ch == '\f' {
+		p.pos.Line += n
+		p.out.Line += n
+		p.pos.Column = 1
+		p.out.Column = 1
+		return
+	}
+	p.pos.Column += n
+	p.out.Column += n
+}
+
+// writeWritespace writes the first n whitespace entries in the whitespace
+// buffer.
+//
+// writeWritespace is only safe to be called when len(p.whitespace) >= n.
+func (p *printer) writeWritespace(n int) {
+	for i := 0; i < n; i++ {
+		switch ch := p.whitespace[i]; ch {
+		case wsIgnore: // no-op
+		case wsIndent:
+			p.indent++
+		case wsUnindent:
+			p.indent--
+			if p.indent < 0 {
+				panic("printer: negative indentation")
+			}
+		default:
+			p.writeByte(byte(ch), 1)
+		}
+	}
+
+	// Shift remaining entries down
+	l := copy(p.whitespace, p.whitespace[n:])
+	p.whitespace = p.whitespace[:l]
+}
+
+const maxNewlines = 2
+
+// newlineLimit limits a newline count to maxNewlines.
+func newlineLimit(count int) int {
+	if count > maxNewlines {
+		count = maxNewlines
+	}
+	return count
+}
+
+// whitespace represents a whitespace token to write to the printer's internal
+// buffer.
+type whitespace byte
+
+const (
+	wsIgnore   = whitespace(0)
+	wsBlank    = whitespace(' ')
+	wsVTab     = whitespace('\v')
+	wsNewline  = whitespace('\n')
+	wsFormfeed = whitespace('\f')
+	wsIndent   = whitespace('>')
+	wsUnindent = whitespace('<')
+)
+
+func (ws whitespace) String() string {
+	switch ws {
+	case wsIgnore:
+		return "wsIgnore"
+	case wsBlank:
+		return "wsBlank"
+	case wsVTab:
+		return "wsVTab"
+	case wsNewline:
+		return "wsNewline"
+	case wsFormfeed:
+		return "wsFormfeed"
+	case wsIndent:
+		return "wsIndent"
+	case wsUnindent:
+		return "wsUnindent"
+	default:
+		return fmt.Sprintf("whitespace(%d)", ws)
+	}
+}
diff --git a/syntax/printer/printer_test.go b/syntax/printer/printer_test.go
new file mode 100644
index 0000000000..38f69217b1
--- /dev/null
+++ b/syntax/printer/printer_test.go
@@ -0,0 +1,77 @@
+package printer_test
+
+import (
+	"bytes"
+	"io/fs"
+	"os"
+	"path/filepath"
+	"strings"
+	"testing"
+	"unicode"
+
+	"github.com/grafana/river/parser"
+	"github.com/grafana/river/printer"
+	"github.com/stretchr/testify/require"
+)
+
+func TestPrinter(t *testing.T) {
+	filepath.WalkDir("testdata", func(path string, d fs.DirEntry, _ error) error {
+		if d.IsDir() {
+			return nil
+		}
+
+		if strings.HasSuffix(path, ".in") {
+			inputFile := path
+			expectFile := strings.TrimSuffix(path, ".in") + ".expect"
+			expectErrorFile := strings.TrimSuffix(path, ".in") + ".error"
+
+			caseName := filepath.Base(path)
+			caseName = strings.TrimSuffix(caseName, ".in")
+
+			t.Run(caseName, func(t *testing.T) {
+				testPrinter(t, inputFile, expectFile, expectErrorFile)
+			})
+		}
+
+		return nil
+	})
+}
+
+func testPrinter(t *testing.T, inputFile string, expectFile string, expectErrorFile string) {
+	inputBB, err := os.ReadFile(inputFile)
+	require.NoError(t, err)
+
+	f, err := parser.ParseFile(t.Name()+".rvr", inputBB)
+	if expectedError := getExpectedErrorMessage(t, expectErrorFile); expectedError != "" {
+		require.Error(t, err)
+		require.Contains(t, err.Error(), expectedError)
+		return
+	}
+
+	expectBB, err := os.ReadFile(expectFile)
+	require.NoError(t, err)
+
+	var buf bytes.Buffer
+	require.NoError(t, printer.Fprint(&buf, f))
+
+	trimmed := strings.TrimRightFunc(string(expectBB), unicode.IsSpace)
+	require.Equal(t, trimmed, buf.String(), "%s", buf.String())
+}
+
+// getExpectedErrorMessage
will retrieve an optional expected error message for the test. +func getExpectedErrorMessage(t *testing.T, errorFile string) string { + if _, err := os.Stat(errorFile); err == nil { + errorBytes, err := os.ReadFile(errorFile) + require.NoError(t, err) + errorsString := string(normalizeLineEndings(errorBytes)) + return errorsString + } + + return "" +} + +// normalizeLineEndings will replace '\r\n' with '\n'. +func normalizeLineEndings(data []byte) []byte { + normalized := bytes.ReplaceAll(data, []byte{'\r', '\n'}, []byte{'\n'}) + return normalized +} diff --git a/syntax/printer/testdata/.gitattributes b/syntax/printer/testdata/.gitattributes new file mode 100644 index 0000000000..7949f2b32c --- /dev/null +++ b/syntax/printer/testdata/.gitattributes @@ -0,0 +1 @@ +* -text eol=lf diff --git a/syntax/printer/testdata/array_comments.expect b/syntax/printer/testdata/array_comments.expect new file mode 100644 index 0000000000..a9e921b6dc --- /dev/null +++ b/syntax/printer/testdata/array_comments.expect @@ -0,0 +1,17 @@ +// array_comments.in expects that comments in arrays are formatted to +// retain the indentation level of elements within the arrays. + +attr = [ // Inline comment + 0, 1, 2, // Inline comment + 3, 4, 5, // Inline comment + // Trailing comment +] + +attr = [ + 0, + // Element-level comment + 1, + // Element-level comment + 2, + // Trailing comment +] diff --git a/syntax/printer/testdata/array_comments.in b/syntax/printer/testdata/array_comments.in new file mode 100644 index 0000000000..e088e83749 --- /dev/null +++ b/syntax/printer/testdata/array_comments.in @@ -0,0 +1,17 @@ +// array_comments.in expects that comments in arrays are formatted to +// retain the indentation level of elements within the arrays. + +attr = [ // Inline comment + 0, 1, 2, // Inline comment + 3, 4, 5, // Inline comment + // Trailing comment +] + +attr = [ + 0, + // Element-level comment + 1, + // Element-level comment + 2, + // Trailing comment +] diff --git a/syntax/printer/testdata/block_comments.expect b/syntax/printer/testdata/block_comments.expect new file mode 100644 index 0000000000..ca60e0796c --- /dev/null +++ b/syntax/printer/testdata/block_comments.expect @@ -0,0 +1,62 @@ +// block_comments.in expects that comments within blocks are formatted to +// remain within the block with the proper indentation. + +// +// Unlabeled blocks +// + +// Comment is on same line as empty block header. +block { // comment +} + +// Comment is on same line as non-empty block header. +block { // comment + attr = 5 +} + +// Comment is alone in block body. +block { + // comment +} + +// Comment is before a statement. +block { + // comment + attr = 5 +} + +// Comment is after a statement. +block { + attr = 5 + // comment +} + +// +// Labeled blocks +// + +// Comment is on same line as empty block header. +block "label" { // comment +} + +// Comment is on same line as non-empty block header. +block "label" { // comment + attr = 5 +} + +// Comment is alone in block body. +block "label" { + // comment +} + +// Comment is before a statement. +block "label" { + // comment + attr = 5 +} + +// Comment is after a statement. +block "label" { + attr = 5 + // comment +} diff --git a/syntax/printer/testdata/block_comments.in b/syntax/printer/testdata/block_comments.in new file mode 100644 index 0000000000..d60512c9a3 --- /dev/null +++ b/syntax/printer/testdata/block_comments.in @@ -0,0 +1,64 @@ +// block_comments.in expects that comments within blocks are formatted to +// remain within the block with the proper indentation. 
+ +// +// Unlabeled blocks +// + +// Comment is on same line as empty block header. +block { // comment +} + +// Comment is on same line as non-empty block header. +block { // comment + attr = 5 +} + +// Comment is alone in block body. +block { +// comment +} + +// Comment is before a statement. +block { +// comment + attr = 5 +} + +// Comment is after a statement. +block { + attr = 5 +// comment +} + +// +// Labeled blocks +// + +// Comment is on same line as empty block header. +block "label" { // comment +} + +// Comment is on same line as non-empty block header. +block "label" { // comment + attr = 5 +} + +// Comment is alone in block body. +block "label" { +// comment +} + +// Comment is before a statement. +block "label" { +// comment + attr = 5 +} + +// Comment is after a statement. +block "label" { + attr = 5 +// comment +} + + diff --git a/syntax/printer/testdata/example.expect b/syntax/printer/testdata/example.expect new file mode 100644 index 0000000000..dd6ab8e1f2 --- /dev/null +++ b/syntax/printer/testdata/example.expect @@ -0,0 +1,60 @@ +// This file tests a little bit of everything that the formatter should do. For +// example, this block of comments itself ensures that the output retains +// comments found in the source file. + +// +// Whitespace tests +// + +// Attributes should be given whitespace +attr_1 = 15 +attr_2 = 30 * 2 + 5 +attr_3 = field.access * 2 + +// Blocks with nothing inside of them should be truncated. +empty.block { } + +empty.block "labeled" { } + +// +// Alignment tests +// + +// Sequences of attributes which aren't separated by a blank line should have +// the equal sign aligned. +short_name = true +really_long_name = true + +extremely_long_name = true + +// Sequences of comments on aligned lines should also be aligned. +short_name = "short value" // Align me +really_long_name = "really long value" // Align me + +extremely_long_name = true // Unaligned + +// +// Indentation tests +// + +// Array literals, object literals, and blocks should all be indented properly. +multiline_array = [ + 0, + 1, +] + +mulitiline_object = { + foo = "bar", +} + +some_block { + attr = 15 + + inner_block { + attr = 20 + } +} + +// Trailing comments should be retained in the output. If this comment gets +// trimmed out, it usually indicates that a final flush is missing after +// traversing the AST. diff --git a/syntax/printer/testdata/example.in b/syntax/printer/testdata/example.in new file mode 100644 index 0000000000..efce00a8a3 --- /dev/null +++ b/syntax/printer/testdata/example.in @@ -0,0 +1,64 @@ +// This file tests a little bit of everything that the formatter should do. For +// example, this block of comments itself ensures that the output retains +// comments found in the source file. + +// +// Whitespace tests +// + +// Attributes should be given whitespace +attr_1=15 +attr_2=30*2+5 +attr_3=field.access*2 + +// Blocks with nothing inside of them should be truncated. +empty.block { + +} + +empty.block "labeled" { + +} + +// +// Alignment tests +// + +// Sequences of attributes which aren't separated by a blank line should have +// the equal sign aligned. +short_name = true +really_long_name = true + +extremely_long_name = true + +// Sequences of comments on aligned lines should also be aligned. +short_name = "short value" // Align me +really_long_name = "really long value" // Align me + +extremely_long_name = true // Unaligned + +// +// Indentation tests +// + +// Array literals, object literals, and blocks should all be indented properly. 
+multiline_array = [ +0, +1, +] + +mulitiline_object = { +foo = "bar", +} + +some_block { +attr = 15 + +inner_block { +attr = 20 +} +} + +// Trailing comments should be retained in the output. If this comment gets +// trimmed out, it usually indicates that a final flush is missing after +// traversing the AST. diff --git a/syntax/printer/testdata/func_call.expect b/syntax/printer/testdata/func_call.expect new file mode 100644 index 0000000000..e42c7acaf7 --- /dev/null +++ b/syntax/printer/testdata/func_call.expect @@ -0,0 +1,17 @@ +one_line = some_func(1, 2, 3, 4) + +multi_line = some_func(1, + 2, 3, + 4) + +multi_line_pretty = some_func( + 1, + 2, + 3, + 4, +) + +func_with_obj = some_func({ + key1 = "value1", + key2 = "value2", +}) diff --git a/syntax/printer/testdata/func_call.in b/syntax/printer/testdata/func_call.in new file mode 100644 index 0000000000..141a6057cf --- /dev/null +++ b/syntax/printer/testdata/func_call.in @@ -0,0 +1,17 @@ +one_line = some_func(1, 2, 3, 4) + +multi_line = some_func(1, +2, 3, +4) + +multi_line_pretty = some_func( +1, +2, +3, +4, +) + +func_with_obj = some_func({ + key1 = "value1", + key2 = "value2", +}) diff --git a/syntax/printer/testdata/mixed_list.expect b/syntax/printer/testdata/mixed_list.expect new file mode 100644 index 0000000000..7ce6f28dfb --- /dev/null +++ b/syntax/printer/testdata/mixed_list.expect @@ -0,0 +1,16 @@ +mixed_list = [0, true, { + key_1 = true, + key_2 = true, + key_3 = true, +}, "Hello!"] + +mixed_list_2 = [ + 0, + true, + { + key_1 = true, + key_2 = true, + key_3 = true, + }, + "Hello!", +] diff --git a/syntax/printer/testdata/mixed_list.in b/syntax/printer/testdata/mixed_list.in new file mode 100644 index 0000000000..7ce6f28dfb --- /dev/null +++ b/syntax/printer/testdata/mixed_list.in @@ -0,0 +1,16 @@ +mixed_list = [0, true, { + key_1 = true, + key_2 = true, + key_3 = true, +}, "Hello!"] + +mixed_list_2 = [ + 0, + true, + { + key_1 = true, + key_2 = true, + key_3 = true, + }, + "Hello!", +] diff --git a/syntax/printer/testdata/mixed_object.expect b/syntax/printer/testdata/mixed_object.expect new file mode 100644 index 0000000000..8d301e1f0e --- /dev/null +++ b/syntax/printer/testdata/mixed_object.expect @@ -0,0 +1,8 @@ +mixed_object = { + key_1 = true, + key_2 = [0, true, { + inner_1 = true, + inner_2 = true, + }], +} + diff --git a/syntax/printer/testdata/mixed_object.in b/syntax/printer/testdata/mixed_object.in new file mode 100644 index 0000000000..9334cfafa1 --- /dev/null +++ b/syntax/printer/testdata/mixed_object.in @@ -0,0 +1,7 @@ +mixed_object = { + key_1 = true, + key_2 = [0, true, { + inner_1 = true, + inner_2 = true, + }], +} diff --git a/syntax/printer/testdata/object_align.expect b/syntax/printer/testdata/object_align.expect new file mode 100644 index 0000000000..631536a437 --- /dev/null +++ b/syntax/printer/testdata/object_align.expect @@ -0,0 +1,11 @@ +block { + some_object = { + key_1 = 5, + long_key = 10, + longer_key = { + inner_key = true, + inner_key_2 = false, + }, + other_key = [0, 1, 2], + } +} diff --git a/syntax/printer/testdata/object_align.in b/syntax/printer/testdata/object_align.in new file mode 100644 index 0000000000..b618f61dfc --- /dev/null +++ b/syntax/printer/testdata/object_align.in @@ -0,0 +1,11 @@ +block { + some_object = { + key_1 = 5, + long_key = 10, + longer_key = { + inner_key = true, + inner_key_2 = false, + }, + other_key = [0, 1, 2], + } +} diff --git a/syntax/printer/testdata/oneline_block.expect b/syntax/printer/testdata/oneline_block.expect new file mode 100644 index 
0000000000..8cd2c69d25
--- /dev/null
+++ b/syntax/printer/testdata/oneline_block.expect
@@ -0,0 +1,11 @@
+block { }
+
+block { }
+
+block { }
+
+block { }
+
+block {
+	// Comments should be kept.
+}
diff --git a/syntax/printer/testdata/oneline_block.in b/syntax/printer/testdata/oneline_block.in
new file mode 100644
index 0000000000..2c1f74363a
--- /dev/null
+++ b/syntax/printer/testdata/oneline_block.in
@@ -0,0 +1,14 @@
+block {}
+
+block { }
+
+block {
+}
+
+block {
+
+}
+
+block {
+	// Comments should be kept.
+}
diff --git a/syntax/printer/testdata/raw_string.expect b/syntax/printer/testdata/raw_string.expect
new file mode 100644
index 0000000000..5837439569
--- /dev/null
+++ b/syntax/printer/testdata/raw_string.expect
@@ -0,0 +1,15 @@
+block "label" {
+	attr = `'\"attr`
+}
+
+block "multi_line" {
+	attr = `'\"this
+is
+a
+multi_line
+attr'\"`
+}
+
+block "json" {
+	attr = `{ "key": "value" }`
+}
\ No newline at end of file
diff --git a/syntax/printer/testdata/raw_string.in b/syntax/printer/testdata/raw_string.in
new file mode 100644
index 0000000000..5837439569
--- /dev/null
+++ b/syntax/printer/testdata/raw_string.in
@@ -0,0 +1,15 @@
+block "label" {
+	attr = `'\"attr`
+}
+
+block "multi_line" {
+	attr = `'\"this
+is
+a
+multi_line
+attr'\"`
+}
+
+block "json" {
+	attr = `{ "key": "value" }`
+}
\ No newline at end of file
diff --git a/syntax/printer/testdata/raw_string_label_error.error b/syntax/printer/testdata/raw_string_label_error.error
new file mode 100644
index 0000000000..dd3f7f7c8b
--- /dev/null
+++ b/syntax/printer/testdata/raw_string_label_error.error
@@ -0,0 +1 @@
+expected block label to be a double quoted string, but got "`multi_line`"
\ No newline at end of file
diff --git a/syntax/printer/testdata/raw_string_label_error.in b/syntax/printer/testdata/raw_string_label_error.in
new file mode 100644
index 0000000000..1e16f9ae07
--- /dev/null
+++ b/syntax/printer/testdata/raw_string_label_error.in
@@ -0,0 +1,15 @@
+block "label" {
+	attr = `'\"attr`
+}
+
+block `multi_line` {
+	attr = `'\"this
+is
+a
+multi_line
+attr'\"`
+}
+
+block `json` {
+	attr = `{ "key": "value" }`
+}
\ No newline at end of file
diff --git a/syntax/printer/trimmer.go b/syntax/printer/trimmer.go
new file mode 100644
index 0000000000..5a76c0c79d
--- /dev/null
+++ b/syntax/printer/trimmer.go
@@ -0,0 +1,115 @@
+package printer
+
+import (
+	"io"
+	"text/tabwriter"
+)
+
+// A trimmer is an io.Writer which filters tabwriter.Escape characters,
+// trailing blanks and tabs from lines, and converts \f and \v characters
+// into \n and \t (if no text/tabwriter is used when printing).
+//
+// Text wrapped by tabwriter.Escape characters is written to the underlying
+// io.Writer unmodified.
+type trimmer struct {
+	next  io.Writer
+	state int
+	space []byte
+}
+
+const (
+	trimStateSpace  = iota // Trimmer is reading space characters
+	trimStateEscape        // Trimmer is reading escaped characters
+	trimStateText          // Trimmer is reading text
+)
+
+func (t *trimmer) discardWhitespace() {
+	t.state = trimStateSpace
+	t.space = t.space[0:0]
+}
+
+func (t *trimmer) Write(data []byte) (n int, err error) {
+	// textStart holds the index of the start of a chunk of text not containing
+	// whitespace. It is reset every time a new chunk of text is encountered.
+	var textStart int
+
+	for off, b := range data {
+		// Convert \v to \t
+		if b == '\v' {
+			b = '\t'
+		}
+
+		switch t.state {
+		case trimStateSpace:
+			// Accumulate tabs and spaces in t.space until finding a non-tab or
+			// non-space character.
+ // + // If we find a newline, we write it directly and discard our pending + // whitespace (so that trailing whitespace up to the newline is ignored). + // + // If we find a tabwriter.Escape or text character we transition states. + switch b { + case '\t', ' ': + t.space = append(t.space, b) + case '\n', '\f': + // Discard all unwritten whitespace before the end of the line and write + // a newline. + t.discardWhitespace() + _, err = t.next.Write([]byte("\n")) + case tabwriter.Escape: + _, err = t.next.Write(t.space) + t.state = trimStateEscape + textStart = off + 1 // Skip escape character + default: + // Non-space character. Write our pending whitespace + // and then move to text state. + _, err = t.next.Write(t.space) + t.state = trimStateText + textStart = off + } + + case trimStateText: + // We're reading a chunk of text. Accumulate characters in the chunk + // until we find a whitespace character or a tabwriter.Escape. + switch b { + case '\t', ' ': + _, err = t.next.Write(data[textStart:off]) + t.discardWhitespace() + t.space = append(t.space, b) + case '\n', '\f': + _, err = t.next.Write(data[textStart:off]) + t.discardWhitespace() + if err == nil { + _, err = t.next.Write([]byte("\n")) + } + case tabwriter.Escape: + _, err = t.next.Write(data[textStart:off]) + t.state = trimStateEscape + textStart = off + 1 // +1: skip tabwriter.Escape + } + + case trimStateEscape: + // Accumulate everything until finding the closing tabwriter.Escape. + if b == tabwriter.Escape { + _, err = t.next.Write(data[textStart:off]) + t.discardWhitespace() + } + + default: + panic("unreachable") + } + if err != nil { + return off, err + } + } + n = len(data) + + // Flush the remainder of the text (as long as it's not whitespace). + switch t.state { + case trimStateEscape, trimStateText: + _, err = t.next.Write(data[textStart:n]) + t.discardWhitespace() + } + + return +} diff --git a/syntax/printer/walker.go b/syntax/printer/walker.go new file mode 100644 index 0000000000..01f71b21bd --- /dev/null +++ b/syntax/printer/walker.go @@ -0,0 +1,338 @@ +package printer + +import ( + "fmt" + "strings" + + "github.com/grafana/river/ast" + "github.com/grafana/river/token" +) + +// A walker walks an AST and sends lexical tokens and formatting information to +// a printer. +type walker struct { + p *printer +} + +func (w *walker) Walk(node ast.Node) error { + switch node := node.(type) { + case *ast.File: + w.walkFile(node) + case ast.Body: + w.walkStmts(node) + case ast.Stmt: + w.walkStmt(node) + case ast.Expr: + w.walkExpr(node) + default: + return fmt.Errorf("unsupported node type %T", node) + } + + return nil +} + +func (w *walker) walkFile(f *ast.File) { + w.p.SetComments(f.Comments) + w.walkStmts(f.Body) +} + +func (w *walker) walkStmts(ss []ast.Stmt) { + for i, s := range ss { + var addedSpacing bool + + // Two blocks should always be separated by a blank line. + if _, isBlock := s.(*ast.BlockStmt); i > 0 && isBlock { + w.p.Write(wsFormfeed) + addedSpacing = true + } + + // A blank line should always be added if there is a blank line in the + // source between two statements. + if i > 0 && !addedSpacing { + var ( + prevLine = ast.EndPos(ss[i-1]).Position().Line + curLine = ast.StartPos(ss[i-0]).Position().Line + + lineDiff = curLine - prevLine + ) + + if lineDiff > 1 { + w.p.Write(wsFormfeed) + } + } + + w.walkStmt(s) + + // Statements which cross multiple lines don't belong to the same row run. 
+		// Add a formfeed to start a new row run if the node crossed more than one
+		// line, otherwise add the normal newline.
+		if nodeLines(s) > 1 {
+			w.p.Write(wsFormfeed)
+		} else {
+			w.p.Write(wsNewline)
+		}
+	}
+}
+
+func nodeLines(n ast.Node) int {
+	var (
+		startLine = ast.StartPos(n).Position().Line
+		endLine   = ast.EndPos(n).Position().Line
+	)
+
+	return endLine - startLine + 1
+}
+
+func (w *walker) walkStmt(s ast.Stmt) {
+	switch s := s.(type) {
+	case *ast.AttributeStmt:
+		w.walkAttributeStmt(s)
+	case *ast.BlockStmt:
+		w.walkBlockStmt(s)
+	}
+}
+
+func (w *walker) walkAttributeStmt(s *ast.AttributeStmt) {
+	w.p.Write(s.Name.NamePos, s.Name, wsVTab, token.ASSIGN, wsBlank)
+	w.walkExpr(s.Value)
+}
+
+func (w *walker) walkBlockStmt(s *ast.BlockStmt) {
+	joined := strings.Join(s.Name, ".")
+
+	w.p.Write(
+		s.NamePos,
+		&ast.Ident{Name: joined, NamePos: s.NamePos},
+	)
+
+	if s.Label != "" {
+		label := fmt.Sprintf("%q", s.Label)
+
+		w.p.Write(
+			wsBlank,
+			s.LabelPos,
+			&ast.LiteralExpr{Kind: token.STRING, Value: label},
+		)
+	}
+
+	w.p.Write(
+		wsBlank,
+		s.LCurlyPos, token.LCURLY, wsIndent,
+	)
+
+	if len(s.Body) > 0 {
+		// Add a formfeed to start a new row run before writing any statements.
+		w.p.Write(wsFormfeed)
+		w.walkStmts(s.Body)
+	} else {
+		// There are no statements, but add a blank line between the left and right
+		// curly anyway.
+		w.p.Write(wsBlank)
+	}
+
+	w.p.Write(wsUnindent, s.RCurlyPos, token.RCURLY)
+}
+
+func (w *walker) walkExpr(e ast.Expr) {
+	switch e := e.(type) {
+	case *ast.LiteralExpr:
+		w.p.Write(e.ValuePos, e)
+
+	case *ast.ArrayExpr:
+		w.walkArrayExpr(e)
+
+	case *ast.ObjectExpr:
+		w.walkObjectExpr(e)
+
+	case *ast.IdentifierExpr:
+		w.p.Write(e.Ident.NamePos, e.Ident)
+
+	case *ast.AccessExpr:
+		w.walkExpr(e.Value)
+		w.p.Write(token.DOT, e.Name)
+
+	case *ast.IndexExpr:
+		w.walkExpr(e.Value)
+		w.p.Write(e.LBrackPos, token.LBRACK)
+		w.walkExpr(e.Index)
+		w.p.Write(e.RBrackPos, token.RBRACK)
+
+	case *ast.CallExpr:
+		w.walkCallExpr(e)
+
+	case *ast.UnaryExpr:
+		w.p.Write(e.KindPos, e.Kind)
+		w.walkExpr(e.Value)
+
+	case *ast.BinaryExpr:
+		// TODO(rfratto):
+		//
+		// 1. allow RHS to be on a new line
+		//
+		// 2. remove spacing between some operators to make precedence
+		//    clearer like Go does
+		w.walkExpr(e.Left)
+		w.p.Write(wsBlank, e.KindPos, e.Kind, wsBlank)
+		w.walkExpr(e.Right)
+
+	case *ast.ParenExpr:
+		w.p.Write(token.LPAREN)
+		w.walkExpr(e.Inner)
+		w.p.Write(token.RPAREN)
+	}
+}
+
+func (w *walker) walkArrayExpr(e *ast.ArrayExpr) {
+	w.p.Write(e.LBrackPos, token.LBRACK)
+	prevPos := e.LBrackPos
+
+	for i := 0; i < len(e.Elements); i++ {
+		var addedNewline bool
+
+		elementPos := ast.StartPos(e.Elements[i])
+
+		// Add a newline if this element starts on a different line than the last
+		// element ended.
+		if differentLines(prevPos, elementPos) {
+			// Indent elements inside the array on different lines. The indent is
+			// done *before* the newline to make sure comments written before the
+			// newline are indented properly.
+			w.p.Write(wsIndent, wsFormfeed)
+			addedNewline = true
+		} else if i > 0 {
+			// Make sure a space is injected before the next element if two
+			// successive elements are on the same line.
+			w.p.Write(wsBlank)
+		}
+		prevPos = ast.EndPos(e.Elements[i])
+
+		// Write the expression.
+		w.walkExpr(e.Elements[i])
+
+		// Always add commas in between successive elements.
+ if i+1 < len(e.Elements) { + w.p.Write(token.COMMA) + } + + if addedNewline { + w.p.Write(wsUnindent) + } + } + + var addedSuffixNewline bool + + // If the closing bracket is on a different line than the final element, + // we need to add a trailing comma. + if len(e.Elements) > 0 && differentLines(prevPos, e.RBrackPos) { + // We add an indentation here so comments after the final element are + // indented. + w.p.Write(token.COMMA, wsIndent, wsFormfeed) + addedSuffixNewline = true + } + + if addedSuffixNewline { + w.p.Write(wsUnindent) + } + w.p.Write(e.RBrackPos, token.RBRACK) +} + +func (w *walker) walkObjectExpr(e *ast.ObjectExpr) { + w.p.Write(e.LCurlyPos, token.LCURLY, wsIndent) + + prevPos := e.LCurlyPos + + for i := 0; i < len(e.Fields); i++ { + field := e.Fields[i] + elementPos := ast.StartPos(field.Name) + + // Add a newline if this element starts on a different line than the last + // element ended. + if differentLines(prevPos, elementPos) { + // We want to align the equal sign for object attributes if the previous + // field only crossed one line. + if i > 0 && nodeLines(e.Fields[i-1].Value) == 1 { + w.p.Write(wsNewline) + } else { + w.p.Write(wsFormfeed) + } + } else if i > 0 { + // Make sure a space is injected before the next element if two successive + // elements are on the same line. + w.p.Write(wsBlank) + } + prevPos = ast.EndPos(field.Name) + + w.p.Write(field.Name.NamePos) + + // Write the field. + if field.Quoted { + w.p.Write(&ast.LiteralExpr{ + Kind: token.STRING, + ValuePos: field.Name.NamePos, + Value: fmt.Sprintf("%q", field.Name.Name), + }) + } else { + w.p.Write(field.Name) + } + + w.p.Write(wsVTab, token.ASSIGN, wsBlank) + w.walkExpr(field.Value) + + // Always add commas in between successive elements. + if i+1 < len(e.Fields) { + w.p.Write(token.COMMA) + } + } + + // If the closing bracket is on a different line than the final element, + // we need to add a trailing comma. + if len(e.Fields) > 0 && differentLines(prevPos, e.RCurlyPos) { + w.p.Write(token.COMMA, wsFormfeed) + } + + w.p.Write(wsUnindent, e.RCurlyPos, token.RCURLY) +} + +func (w *walker) walkCallExpr(e *ast.CallExpr) { + w.walkExpr(e.Value) + w.p.Write(token.LPAREN) + + prevPos := e.LParenPos + + for i, arg := range e.Args { + var addedNewline bool + + argPos := ast.StartPos(arg) + + // Add a newline if this element starts on a different line than the last + // element ended. + if differentLines(prevPos, argPos) { + w.p.Write(wsFormfeed, wsIndent) + addedNewline = true + } + + w.walkExpr(arg) + prevPos = ast.EndPos(arg) + + if i+1 < len(e.Args) { + w.p.Write(token.COMMA, wsBlank) + } + + if addedNewline { + w.p.Write(wsUnindent) + } + } + + // Add a final comma if the final argument is on a different line than the + // right parenthesis. + if differentLines(prevPos, e.RParenPos) { + w.p.Write(token.COMMA, wsFormfeed) + } + + w.p.Write(token.RPAREN) +} + +// differentLines returns true if a and b are on different lines. +func differentLines(a, b token.Pos) bool { + return a.Position().Line != b.Position().Line +} diff --git a/syntax/river.go b/syntax/river.go new file mode 100644 index 0000000000..0944a9e3be --- /dev/null +++ b/syntax/river.go @@ -0,0 +1,346 @@ +// Package river implements a high-level API for decoding and encoding River +// configuration files. The mapping between River and Go values is described in +// the documentation for the Unmarshal and Marshal functions. 
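+//
+// As a quick orientation, a minimal round trip looks like this (the Config
+// type is illustrative):
+//
+//	var cfg Config
+//	bb, err := river.Marshal(cfg)   // Go struct -> River text
+//	err = river.Unmarshal(bb, &cfg) // River text -> Go struct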
+// +// Lower-level APIs which give more control over configuration evaluation are +// available in the inner packages. The implementation of this package is +// minimal and serves as a reference for how to consume the lower-level +// packages. +package river + +import ( + "bytes" + "io" + + "github.com/grafana/river/parser" + "github.com/grafana/river/token/builder" + "github.com/grafana/river/vm" +) + +// Marshal returns the pretty-printed encoding of v as a River configuration +// file. v must be a Go struct with river struct tags which determine the +// structure of the resulting file. +// +// Marshal traverses the value v recursively, encoding each struct field as a +// River block or River attribute, based on the flags provided to the river +// struct tag. +// +// When a struct field represents a River block, Marshal creates a new block +// and recursively encodes the value as the body of the block. The name of the +// created block is taken from the name specified by the river struct tag. +// +// Struct fields which represent River blocks must be either a Go struct or a +// slice of Go structs. When the field is a Go struct, its value is encoded as +// a single block. When the field is a slice of Go structs, a block is created +// for each element in the slice. +// +// When encoding a block, if the inner Go struct has a struct field +// representing a River block label, the value of that field is used as the +// label name for the created block. Fields used for River block labels must be +// the string type. When specified, there must not be more than one struct +// field which represents a block label. +// +// The river tag specifies a name, possibly followed by a comma-separated list +// of options. The name must be empty if the provided options do not support a +// name being defined. The following provides examples for all supported struct +// field tags with their meanings: +// +// // Field appears as a block named "example". It will always appear in the +// // resulting encoding. When decoding, "example" is treated as a required +// // block and must be present in the source text. +// Field struct{...} `river:"example,block"` +// +// // Field appears as a set of blocks named "example." It will appear in the +// // resulting encoding if there is at least one element in the slice. When +// // decoding, "example" is treated as a required block and at least one +// // "example" block must be present in the source text. +// Field []struct{...} `river:"example,block"` +// +// // Field appears as block named "example." It will always appear in the +// // resulting encoding. When decoding, "example" is treated as an optional +// // block and can be omitted from the source text. +// Field struct{...} `river:"example,block,optional"` +// +// // Field appears as a set of blocks named "example." It will appear in the +// // resulting encoding if there is at least one element in the slice. When +// // decoding, "example" is treated as an optional block and can be omitted +// // from the source text. +// Field []struct{...} `river:"example,block,optional"` +// +// // Field appears as an attribute named "example." It will always appear in +// // the resulting encoding. When decoding, "example" is treated as a +// // required attribute and must be present in the source text. +// Field bool `river:"example,attr"` +// +// // Field appears as an attribute named "example." If the field's value is +// // the Go zero value, "example" is omitted from the resulting encoding. 
+// // When decoding, "example" is treated as an optional attribute and can be +// // omitted from the source text. +// Field bool `river:"example,attr,optional"` +// +// // The value of Field appears as the block label for the struct being +// // converted into a block. When decoding, a block label must be provided. +// Field string `river:",label"` +// +// // The inner attributes and blocks of Field are exposed as top-level +// // attributes and blocks of the outer struct. +// Field struct{...} `river:",squash"` +// +// // Field appears as a set of blocks starting with "example.". Only the +// // first set element in the struct will be encoded. Each field in struct +// // must be a block. The name of the block is prepended to the enum name. +// // When decoding, enum blocks are treated as optional blocks and can be +// // omitted from the source text. +// Field []struct{...} `river:"example,enum"` +// +// // Field is equivalent to `river:"example,enum"`. +// Field []struct{...} `river:"example,enum,optional"` +// +// If a river tag specifies a required or optional block, the name is permitted +// to contain period `.` characters. +// +// Marshal will panic if it encounters a struct with invalid river tags. +// +// When a struct field represents a River attribute, Marshal encodes the struct +// value as a River value. The attribute name will be taken from the name +// specified by the river struct tag. See MarshalValue for the rules used to +// convert a Go value into a River value. +func Marshal(v interface{}) ([]byte, error) { + var buf bytes.Buffer + if err := NewEncoder(&buf).Encode(v); err != nil { + return nil, err + } + return buf.Bytes(), nil +} + +// MarshalValue returns the pretty-printed encoding of v as a River value. +// +// MarshalValue traverses the value v recursively. If an encountered value +// implements the encoding.TextMarshaler interface, MarshalValue calls its +// MarshalText method and encodes the result as a River string. If a value +// implements the Capsule interface, it always encodes as a River capsule +// value. +// +// Otherwise, MarshalValue uses the following type-dependent default encodings: +// +// Boolean values encode to River bools. +// +// Floating point, integer, and Number values encode to River numbers. +// +// String values encode to River strings. +// +// Array and slice values encode to River arrays, except that []byte is +// converted into a River string. Nil slices encode as an empty array and nil +// []byte slices encode as an empty string. +// +// Structs encode to River objects, using Go struct field tags to determine the +// resulting structure of the River object. Each exported struct field with a +// river tag becomes an object field, using the tag name as the field name. +// Other struct fields are ignored. If no struct field has a river tag, the +// struct encodes to a River capsule instead. +// +// Function values encode to River functions, which appear in the resulting +// text as strings formatted as "function(GO_TYPE)". +// +// All other Go values encode to River capsules, which appear in the resulting +// text as strings formatted as "capsule(GO_TYPE)". +// +// The river tag specifies the field name, possibly followed by a +// comma-separated list of options. The following provides examples for all +// supported struct field tags with their meanings: +// +// // Field appears as an object field named "my_name". It will always +// // appear in the resulting encoding. 
When decoding, "my_name" is treated
+//	// as a required attribute and must be present in the source text.
+//	Field bool `river:"my_name,attr"`
+//
+//	// Field appears as an object field named "my_name". If the field's value
+//	// is the Go zero value, "my_name" is omitted from the resulting encoding.
+//	// When decoding, "my_name" is treated as an optional attribute and can be
+//	// omitted from the source text.
+//	Field bool `river:"my_name,attr,optional"`
+func MarshalValue(v interface{}) ([]byte, error) {
+	var buf bytes.Buffer
+	if err := NewEncoder(&buf).EncodeValue(v); err != nil {
+		return nil, err
+	}
+	return buf.Bytes(), nil
+}
+
+// Encoder writes River configuration to an output stream. Call NewEncoder to
+// create instances of Encoder.
+type Encoder struct {
+	w io.Writer
+}
+
+// NewEncoder returns a new Encoder which writes configuration to w.
+func NewEncoder(w io.Writer) *Encoder {
+	return &Encoder{w: w}
+}
+
+// Encode converts the value pointed to by v into a River configuration file
+// and writes the result to the Encoder's output stream.
+//
+// See the documentation for Marshal for details about the conversion of Go
+// values into River configuration.
+func (enc *Encoder) Encode(v interface{}) error {
+	f := builder.NewFile()
+	f.Body().AppendFrom(v)
+
+	_, err := f.WriteTo(enc.w)
+	return err
+}
+
+// EncodeValue converts the value pointed to by v into a River value and writes
+// the result to the Encoder's output stream.
+//
+// See the documentation for MarshalValue for details about the conversion of
+// Go values into River values.
+func (enc *Encoder) EncodeValue(v interface{}) error {
+	expr := builder.NewExpr()
+	expr.SetValue(v)
+
+	_, err := expr.WriteTo(enc.w)
+	return err
+}
+
+// Unmarshal converts the River configuration file specified by in and stores
+// it in the struct value pointed to by v. If v is nil or not a pointer,
+// Unmarshal panics. The configuration specified by in may use expressions to
+// compute values while unmarshaling. Refer to the River language documentation
+// for the list of valid formatting and expression rules.
+//
+// Unmarshal uses the inverse of the encoding rules that Marshal uses,
+// allocating maps, slices, and pointers as necessary.
+//
+// To unmarshal a River body into a map[string]T, Unmarshal assigns each
+// attribute to a key in the map, and decodes the attribute's value as the
+// value for the map entry. Only attribute statements are allowed when
+// unmarshaling into a map.
+//
+// To unmarshal a River body into a struct, Unmarshal matches incoming
+// attributes and blocks to the river struct tags specified by v. Incoming
+// attributes and blocks which do not match a river struct tag cause a
+// decoding error. Additionally, any attribute or block marked as required by
+// the river struct tag that is not present in the source text will generate a
+// decoding error.
+//
+// To unmarshal a list of River blocks into a slice, Unmarshal resets the slice
+// length to zero and then appends each element to the slice.
+//
+// To unmarshal a list of River blocks into a Go array, Unmarshal decodes each
+// block into the corresponding Go array element. If the number of River blocks
+// does not match the length of the Go array, a decoding error is returned.
+//
+// Unmarshal follows the rules specified by UnmarshalValue when unmarshaling
+// the value of an attribute.
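+//
+// A minimal sketch (the Config type and its field are illustrative):
+//
+//	type Config struct {
+//		LogLevel string `river:"log_level,attr,optional"`
+//	}
+//
+//	var c Config
+//	err := Unmarshal([]byte(`log_level = "debug"`), &c)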
+func Unmarshal(in []byte, v interface{}) error {
+	dec := NewDecoder(bytes.NewReader(in))
+	return dec.Decode(v)
+}
+
+// UnmarshalValue converts the River expression specified by in and stores it
+// in the value pointed to by v. If v is nil or not a pointer, UnmarshalValue
+// panics. The input may use expressions to compute values while unmarshaling.
+// Refer to the River language documentation for the list of valid formatting
+// and expression rules.
+//
+// UnmarshalValue uses the inverse of the encoding rules that MarshalValue
+// uses, allocating maps, slices, and pointers as necessary, with the
+// following additional rules:
+//
+// After converting a River value into its Go value counterpart, the Go value
+// may be converted into a capsule if the capsule type implements
+// ConvertibleIntoCapsule.
+//
+// To unmarshal a River object into a struct, UnmarshalValue matches incoming
+// object fields to the river struct tags specified by v. Incoming object
+// fields which do not match a river struct tag cause a decoding error.
+// Additionally, any object field marked as required by the river struct tag
+// that is not present in the source text generates a decoding error.
+//
+// To unmarshal River into an interface value, UnmarshalValue stores one of
+// the following:
+//
+//   - bool, for River bools
+//   - float64, for floating point River numbers and integers which are too
+//     big to fit in either of int/int64/uint64
+//   - int/int64/uint64, in this order of preference, for signed and unsigned
+//     River integer numbers, depending on how big they are
+//   - string, for River strings
+//   - []interface{}, for River arrays
+//   - map[string]interface{}, for River objects
+//
+// Capsule and function types will retain their original type when decoding
+// into an interface value.
+//
+// To unmarshal a River array into a slice, UnmarshalValue resets the slice
+// length to zero and then appends each element to the slice.
+//
+// To unmarshal a River array into a Go array, UnmarshalValue decodes River
+// array elements into the corresponding Go array element. If the number of
+// River elements does not match the length of the Go array, a decoding error
+// is returned.
+//
+// To unmarshal a River object into a map, UnmarshalValue establishes a map to
+// use. If the map is nil, UnmarshalValue allocates a new map. Otherwise,
+// UnmarshalValue reuses the existing map, keeping existing entries.
+// UnmarshalValue then stores key-value pairs from the River object into the
+// map. The map's key type must be string.
+func UnmarshalValue(in []byte, v interface{}) error {
+	dec := NewDecoder(bytes.NewReader(in))
+	return dec.DecodeValue(v)
+}
+
+// Decoder reads River configuration from an input stream. Call NewDecoder to
+// create instances of Decoder.
+type Decoder struct {
+	r io.Reader
+}
+
+// NewDecoder returns a new Decoder which reads configuration from r.
+func NewDecoder(r io.Reader) *Decoder {
+	return &Decoder{r: r}
+}
+
+// Decode reads the River-encoded file from the Decoder's input and stores it
+// in the value pointed to by v. Data will be read from the Decoder's input
+// until EOF is reached.
+//
+// See the documentation for Unmarshal for details about the conversion of River
+// configuration into Go values.
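+//
+// A typical use (a minimal sketch; Config and r stand in for a caller-defined
+// struct type and an io.Reader):
+//
+//	var cfg Config
+//	if err := NewDecoder(r).Decode(&cfg); err != nil {
+//		// handle the error
+//	}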
+func (dec *Decoder) Decode(v interface{}) error {
+	bb, err := io.ReadAll(dec.r)
+	if err != nil {
+		return err
+	}
+
+	f, err := parser.ParseFile("", bb)
+	if err != nil {
+		return err
+	}
+
+	eval := vm.New(f)
+	return eval.Evaluate(nil, v)
+}
+
+// DecodeValue reads the River-encoded expression from the Decoder's input and
+// stores it in the value pointed to by v. Data will be read from the Decoder's
+// input until EOF is reached.
+//
+// See the documentation for UnmarshalValue for details about the conversion of
+// River values into Go values.
+func (dec *Decoder) DecodeValue(v interface{}) error {
+	bb, err := io.ReadAll(dec.r)
+	if err != nil {
+		return err
+	}
+
+	f, err := parser.ParseExpression(string(bb))
+	if err != nil {
+		return err
+	}
+
+	eval := vm.New(f)
+	return eval.Evaluate(nil, v)
+}
diff --git a/syntax/river_test.go b/syntax/river_test.go
new file mode 100644
index 0000000000..99247f54da
--- /dev/null
+++ b/syntax/river_test.go
@@ -0,0 +1,152 @@
+package river_test
+
+import (
+	"fmt"
+	"os"
+
+	river "github.com/grafana/river"
+)
+
+func ExampleUnmarshal() {
+	// Character is our block type which holds an individual character from a
+	// book.
+	type Character struct {
+		// Name of the character. The name is decoded from the block label.
+		Name string `river:",label"`
+		// Age of the character. The age is a required attribute within the block,
+		// and must be set in the config.
+		Age int `river:"age,attr"`
+		// Location the character lives in. The location is an optional attribute
+		// within the block. Optional attributes do not have to be set.
+		Location string `river:"location,attr,optional"`
+	}
+
+	// Book is the overall type that we decode the River file into.
+	type Book struct {
+		// Title of the book (required attribute).
+		Title string `river:"title,attr"`
+		// List of characters. Each character is a labeled block. The optional tag
+		// means that it is valid not to provide a character block. Decoding into a
+		// slice permits there to be multiple specified character blocks.
+		Characters []*Character `river:"character,block,optional"`
+	}
+
+	// Create our book with two characters.
+	input := `
+		title = "Wheel of Time"
+
+		character "Rand" {
+			age = 19
+			location = "Two Rivers"
+		}
+
+		character "Perrin" {
+			age = 19
+			location = "Two Rivers"
+		}
+	`
+
+	// Unmarshal the config into our Book type and print out the data.
+	var b Book
+	if err := river.Unmarshal([]byte(input), &b); err != nil {
+		panic(err)
+	}
+
+	fmt.Printf("%s characters:\n", b.Title)
+
+	for _, c := range b.Characters {
+		if c.Location != "" {
+			fmt.Printf("\t%s (age %d, location %s)\n", c.Name, c.Age, c.Location)
+		} else {
+			fmt.Printf("\t%s (age %d)\n", c.Name, c.Age)
+		}
+	}
+
+	// Output:
+	// Wheel of Time characters:
+	// Rand (age 19, location Two Rivers)
+	// Perrin (age 19, location Two Rivers)
+}
+
+// This example shows how functions may be called within user configurations.
+// We focus on the `env` function from the standard library, which retrieves a
+// value from an environment variable.
+func ExampleUnmarshal_functions() {
+	// Set an environment variable to use in the test.
+ _ = os.Setenv("EXAMPLE", "Jane Doe") + + type Data struct { + String string `river:"string,attr"` + } + + input := ` + string = env("EXAMPLE") + ` + + var d Data + if err := river.Unmarshal([]byte(input), &d); err != nil { + panic(err) + } + + fmt.Println(d.String) + // Output: Jane Doe +} + +func ExampleUnmarshalValue() { + input := `3 + 5` + + var num int + if err := river.UnmarshalValue([]byte(input), &num); err != nil { + panic(err) + } + + fmt.Println(num) + // Output: 8 +} + +func ExampleMarshal() { + type Person struct { + Name string `river:"name,attr"` + Age int `river:"age,attr"` + Location string `river:"location,attr,optional"` + } + + p := Person{ + Name: "John Doe", + Age: 43, + } + + bb, err := river.Marshal(p) + if err != nil { + panic(err) + } + + fmt.Println(string(bb)) + // Output: + // name = "John Doe" + // age = 43 +} + +func ExampleMarshalValue() { + type Person struct { + Name string `river:"name,attr"` + Age int `river:"age,attr"` + } + + p := Person{ + Name: "John Doe", + Age: 43, + } + + bb, err := river.MarshalValue(p) + if err != nil { + panic(err) + } + + fmt.Println(string(bb)) + // Output: + // { + // name = "John Doe", + // age = 43, + // } +} diff --git a/syntax/rivertypes/optional_secret.go b/syntax/rivertypes/optional_secret.go new file mode 100644 index 0000000000..75648af046 --- /dev/null +++ b/syntax/rivertypes/optional_secret.go @@ -0,0 +1,84 @@ +package rivertypes + +import ( + "fmt" + + "github.com/grafana/river/internal/value" + "github.com/grafana/river/token" + "github.com/grafana/river/token/builder" +) + +// OptionalSecret holds a potentially sensitive value. When IsSecret is true, +// the OptionalSecret's Value will be treated as sensitive and will be hidden +// from users when rendering River. +// +// OptionalSecrets may be converted from river strings and the Secret type, +// which will set IsSecret accordingly. +// +// Additionally, OptionalSecrets may be converted into the Secret type +// regardless of the value of IsSecret. OptionalSecret can be converted into a +// string as long as IsSecret is false. +type OptionalSecret struct { + IsSecret bool + Value string +} + +var ( + _ value.Capsule = OptionalSecret{} + _ value.ConvertibleIntoCapsule = OptionalSecret{} + _ value.ConvertibleFromCapsule = (*OptionalSecret)(nil) + + _ builder.Tokenizer = OptionalSecret{} +) + +// RiverCapsule marks OptionalSecret as a RiverCapsule. +func (s OptionalSecret) RiverCapsule() {} + +// ConvertInto converts the OptionalSecret and stores it into the Go value +// pointed at by dst. OptionalSecrets can always be converted into *Secret. +// OptionalSecrets can only be converted into *string if IsSecret is false. In +// other cases, this method will return an explicit error or +// river.ErrNoConversion. +func (s OptionalSecret) ConvertInto(dst interface{}) error { + switch dst := dst.(type) { + case *Secret: + *dst = Secret(s.Value) + return nil + case *string: + if s.IsSecret { + return fmt.Errorf("secrets may not be converted into strings") + } + *dst = s.Value + return nil + } + + return value.ErrNoConversion +} + +// ConvertFrom converts the src value and stores it into the OptionalSecret s. +// Secrets and strings can be converted into an OptionalSecret. In other +// cases, this method will return river.ErrNoConversion. 
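+//
+// For example (an illustrative sketch; the values shown are placeholders):
+//
+//	var v OptionalSecret
+//	_ = v.ConvertFrom("visible")        // v.IsSecret == false
+//	_ = v.ConvertFrom(Secret("hidden")) // v.IsSecret == true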
+func (s *OptionalSecret) ConvertFrom(src interface{}) error { + switch src := src.(type) { + case Secret: + *s = OptionalSecret{IsSecret: true, Value: string(src)} + return nil + case string: + *s = OptionalSecret{Value: src} + return nil + } + + return value.ErrNoConversion +} + +// RiverTokenize returns a set of custom tokens to represent this value in +// River text. +func (s OptionalSecret) RiverTokenize() []builder.Token { + if s.IsSecret { + return []builder.Token{{Tok: token.LITERAL, Lit: "(secret)"}} + } + return []builder.Token{{ + Tok: token.STRING, + Lit: fmt.Sprintf("%q", s.Value), + }} +} diff --git a/syntax/rivertypes/optional_secret_test.go b/syntax/rivertypes/optional_secret_test.go new file mode 100644 index 0000000000..bd8a0baeea --- /dev/null +++ b/syntax/rivertypes/optional_secret_test.go @@ -0,0 +1,92 @@ +package rivertypes_test + +import ( + "testing" + + "github.com/grafana/river/rivertypes" + "github.com/grafana/river/token/builder" + "github.com/stretchr/testify/require" +) + +func TestOptionalSecret(t *testing.T) { + t.Run("non-sensitive conversion to string is allowed", func(t *testing.T) { + input := rivertypes.OptionalSecret{IsSecret: false, Value: "testval"} + + var s string + err := decodeTo(t, input, &s) + require.NoError(t, err) + require.Equal(t, "testval", s) + }) + + t.Run("sensitive conversion to string is disallowed", func(t *testing.T) { + input := rivertypes.OptionalSecret{IsSecret: true, Value: "testval"} + + var s string + err := decodeTo(t, input, &s) + require.NotNil(t, err) + require.Contains(t, err.Error(), "secrets may not be converted into strings") + }) + + t.Run("non-sensitive conversion to secret is allowed", func(t *testing.T) { + input := rivertypes.OptionalSecret{IsSecret: false, Value: "testval"} + + var s rivertypes.Secret + err := decodeTo(t, input, &s) + require.NoError(t, err) + require.Equal(t, rivertypes.Secret("testval"), s) + }) + + t.Run("sensitive conversion to secret is allowed", func(t *testing.T) { + input := rivertypes.OptionalSecret{IsSecret: true, Value: "testval"} + + var s rivertypes.Secret + err := decodeTo(t, input, &s) + require.NoError(t, err) + require.Equal(t, rivertypes.Secret("testval"), s) + }) + + t.Run("conversion from string is allowed", func(t *testing.T) { + var s rivertypes.OptionalSecret + err := decodeTo(t, string("Hello, world!"), &s) + require.NoError(t, err) + + expect := rivertypes.OptionalSecret{ + IsSecret: false, + Value: "Hello, world!", + } + require.Equal(t, expect, s) + }) + + t.Run("conversion from secret is allowed", func(t *testing.T) { + var s rivertypes.OptionalSecret + err := decodeTo(t, rivertypes.Secret("Hello, world!"), &s) + require.NoError(t, err) + + expect := rivertypes.OptionalSecret{ + IsSecret: true, + Value: "Hello, world!", + } + require.Equal(t, expect, s) + }) +} + +func TestOptionalSecret_Write(t *testing.T) { + tt := []struct { + name string + value interface{} + expect string + }{ + {"non-sensitive", rivertypes.OptionalSecret{Value: "foobar"}, `"foobar"`}, + {"sensitive", rivertypes.OptionalSecret{IsSecret: true, Value: "foobar"}, `(secret)`}, + {"non-sensitive pointer", &rivertypes.OptionalSecret{Value: "foobar"}, `"foobar"`}, + {"sensitive pointer", &rivertypes.OptionalSecret{IsSecret: true, Value: "foobar"}, `(secret)`}, + } + + for _, tc := range tt { + t.Run(tc.name, func(t *testing.T) { + be := builder.NewExpr() + be.SetValue(tc.value) + require.Equal(t, tc.expect, string(be.Bytes())) + }) + } +} diff --git a/syntax/rivertypes/secret.go 
b/syntax/rivertypes/secret.go new file mode 100644 index 0000000000..c2eb357d03 --- /dev/null +++ b/syntax/rivertypes/secret.go @@ -0,0 +1,65 @@ +package rivertypes + +import ( + "fmt" + + "github.com/grafana/river/internal/value" + "github.com/grafana/river/token" + "github.com/grafana/river/token/builder" +) + +// Secret is a River capsule holding a sensitive string. The contents of a +// Secret are never displayed to the user when rendering River. +// +// Secret allows itself to be converted from a string River value, but never +// the inverse. This ensures that a user can't accidentally leak a sensitive +// value. +type Secret string + +var ( + _ value.Capsule = Secret("") + _ value.ConvertibleIntoCapsule = Secret("") + _ value.ConvertibleFromCapsule = (*Secret)(nil) + + _ builder.Tokenizer = Secret("") +) + +// RiverCapsule marks Secret as a RiverCapsule. +func (s Secret) RiverCapsule() {} + +// ConvertInto converts the Secret and stores it into the Go value pointed at +// by dst. Secrets can be converted into *OptionalSecret. In other cases, this +// method will return an explicit error or river.ErrNoConversion. +func (s Secret) ConvertInto(dst interface{}) error { + switch dst := dst.(type) { + case *OptionalSecret: + *dst = OptionalSecret{IsSecret: true, Value: string(s)} + return nil + case *string: + return fmt.Errorf("secrets may not be converted into strings") + } + + return value.ErrNoConversion +} + +// ConvertFrom converts the src value and stores it into the Secret s. +// OptionalSecrets and strings can be converted into a Secret. In other cases, +// this method will return river.ErrNoConversion. +func (s *Secret) ConvertFrom(src interface{}) error { + switch src := src.(type) { + case OptionalSecret: + *s = Secret(src.Value) + return nil + case string: + *s = Secret(src) + return nil + } + + return value.ErrNoConversion +} + +// RiverTokenize returns a set of custom tokens to represent this value in +// River text. 
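+//
+// A Secret always renders as the opaque literal below, regardless of its
+// contents (the attribute name is illustrative):
+//
+//	password = (secret)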
+func (s Secret) RiverTokenize() []builder.Token {
+	return []builder.Token{{Tok: token.LITERAL, Lit: "(secret)"}}
+}
diff --git a/syntax/rivertypes/secret_test.go b/syntax/rivertypes/secret_test.go
new file mode 100644
index 0000000000..cade74647b
--- /dev/null
+++ b/syntax/rivertypes/secret_test.go
@@ -0,0 +1,47 @@
+package rivertypes_test
+
+import (
+	"testing"
+
+	"github.com/grafana/river/parser"
+	"github.com/grafana/river/rivertypes"
+	"github.com/grafana/river/vm"
+	"github.com/stretchr/testify/require"
+)
+
+func TestSecret(t *testing.T) {
+	t.Run("strings can be converted to secret", func(t *testing.T) {
+		var s rivertypes.Secret
+		err := decodeTo(t, string("Hello, world!"), &s)
+		require.NoError(t, err)
+		require.Equal(t, rivertypes.Secret("Hello, world!"), s)
+	})
+
+	t.Run("secrets cannot be converted to strings", func(t *testing.T) {
+		var s string
+		err := decodeTo(t, rivertypes.Secret("Hello, world!"), &s)
+		require.NotNil(t, err)
+		require.Contains(t, err.Error(), "secrets may not be converted into strings")
+	})
+
+	t.Run("secrets can be passed to secrets", func(t *testing.T) {
+		var s rivertypes.Secret
+		err := decodeTo(t, rivertypes.Secret("Hello, world!"), &s)
+		require.NoError(t, err)
+		require.Equal(t, rivertypes.Secret("Hello, world!"), s)
+	})
+}
+
+func decodeTo(t *testing.T, input interface{}, target interface{}) error {
+	t.Helper()
+
+	expr, err := parser.ParseExpression("val")
+	require.NoError(t, err)
+
+	eval := vm.New(expr)
+	return eval.Evaluate(&vm.Scope{
+		Variables: map[string]interface{}{
+			"val": input,
+		},
+	}, target)
+}
diff --git a/syntax/scanner/identifier.go b/syntax/scanner/identifier.go
new file mode 100644
index 0000000000..ed2239e060
--- /dev/null
+++ b/syntax/scanner/identifier.go
@@ -0,0 +1,60 @@
+package scanner
+
+import (
+	"fmt"
+
+	"github.com/grafana/river/token"
+)
+
+// IsValidIdentifier returns true if the given string is a valid river
+// identifier.
+func IsValidIdentifier(in string) bool {
+	s := New(token.NewFile(""), []byte(in), nil, 0)
+	_, tok, lit := s.Scan()
+	return tok == token.IDENT && lit == in
+}
+
+// SanitizeIdentifier will return the given string mutated into a valid river
+// identifier. If the given string is already a valid identifier, it will be
+// returned unchanged.
+//
+// This should be used with caution since different inputs can result in
+// identical outputs.
+func SanitizeIdentifier(in string) (string, error) {
+	if in == "" {
+		return "", fmt.Errorf("cannot generate a new identifier for an empty string")
+	}
+
+	if IsValidIdentifier(in) {
+		return in, nil
+	}
+
+	newValue := generateNewIdentifier(in)
+	if !IsValidIdentifier(newValue) {
+		panic(fmt.Errorf("invalid identifier %q generated for `%q`", newValue, in))
+	}
+
+	return newValue, nil
+}
+
+// generateNewIdentifier returns a valid river identifier based on the given
+// input, replacing each invalid character with an underscore and prefixing
+// the result with an underscore when the input starts with a digit.
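+//
+// For example (illustrative inputs):
+//
+//	generateNewIdentifier("0name") // "_0name"
+//	generateNewIdentifier("a b!")  // "a_b_"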
+func generateNewIdentifier(in string) string { + newValue := "" + for i, c := range in { + if i == 0 { + if isDigit(c) { + newValue = "_" + } + } + + if !(isLetter(c) || isDigit(c)) { + newValue += "_" + continue + } + + newValue += string(c) + } + + return newValue +} diff --git a/syntax/scanner/identifier_test.go b/syntax/scanner/identifier_test.go new file mode 100644 index 0000000000..e1dfead833 --- /dev/null +++ b/syntax/scanner/identifier_test.go @@ -0,0 +1,92 @@ +package scanner_test + +import ( + "testing" + + "github.com/grafana/river/scanner" + "github.com/stretchr/testify/require" +) + +var validTestCases = []struct { + name string + identifier string + expect bool +}{ + {"empty", "", false}, + {"start_number", "0identifier_1", false}, + {"start_char", "identifier_1", true}, + {"start_underscore", "_identifier_1", true}, + {"special_chars", "!@#$%^&*()", false}, + {"special_char", "identifier_1!", false}, + {"spaces", "identifier _ 1", false}, +} + +func TestIsValidIdentifier(t *testing.T) { + for _, tc := range validTestCases { + t.Run(tc.name, func(t *testing.T) { + require.Equal(t, tc.expect, scanner.IsValidIdentifier(tc.identifier)) + }) + } +} + +func BenchmarkIsValidIdentifier(b *testing.B) { + for i := 0; i < b.N; i++ { + for _, tc := range validTestCases { + _ = scanner.IsValidIdentifier(tc.identifier) + } + } +} + +var sanitizeTestCases = []struct { + name string + identifier string + expectIdentifier string + expectErr string +}{ + {"empty", "", "", "cannot generate a new identifier for an empty string"}, + {"start_number", "0identifier_1", "_0identifier_1", ""}, + {"start_char", "identifier_1", "identifier_1", ""}, + {"start_underscore", "_identifier_1", "_identifier_1", ""}, + {"special_chars", "!@#$%^&*()", "__________", ""}, + {"special_char", "identifier_1!", "identifier_1_", ""}, + {"spaces", "identifier _ 1", "identifier___1", ""}, +} + +func TestSanitizeIdentifier(t *testing.T) { + for _, tc := range sanitizeTestCases { + t.Run(tc.name, func(t *testing.T) { + newIdentifier, err := scanner.SanitizeIdentifier(tc.identifier) + if tc.expectErr != "" { + require.EqualError(t, err, tc.expectErr) + return + } + + require.NoError(t, err) + require.Equal(t, tc.expectIdentifier, newIdentifier) + }) + } +} + +func BenchmarkSanitizeIdentifier(b *testing.B) { + for i := 0; i < b.N; i++ { + for _, tc := range sanitizeTestCases { + _, _ = scanner.SanitizeIdentifier(tc.identifier) + } + } +} + +func FuzzSanitizeIdentifier(f *testing.F) { + for _, tc := range sanitizeTestCases { + f.Add(tc.identifier) + } + + f.Fuzz(func(t *testing.T, input string) { + newIdentifier, err := scanner.SanitizeIdentifier(input) + if input == "" { + require.EqualError(t, err, "cannot generate a new identifier for an empty string") + return + } + require.NoError(t, err) + require.True(t, scanner.IsValidIdentifier(newIdentifier)) + }) +} diff --git a/syntax/scanner/scanner.go b/syntax/scanner/scanner.go new file mode 100644 index 0000000000..e637a785b9 --- /dev/null +++ b/syntax/scanner/scanner.go @@ -0,0 +1,704 @@ +// Package scanner implements a lexical scanner for River source files. 
+package scanner + +import ( + "fmt" + "unicode" + "unicode/utf8" + + "github.com/grafana/river/token" +) + +// EBNF for the scanner: +// +// letter = /* any unicode letter class character */ | "_" +// number = /* any unicode number class character */ +// digit = /* ASCII characters 0 through 9 */ +// digits = digit { digit } +// string_character = /* any unicode character that isn't '"' */ +// +// COMMENT = line_comment | block_comment +// line_comment = "//" { character } +// block_comment = "/*" { character | newline } "*/" +// +// IDENT = letter { letter | number } +// NULL = "null" +// BOOL = "true" | "false" +// NUMBER = digits +// FLOAT = ( digits | "." digits ) [ "e" [ "+" | "-" ] digits ] +// STRING = '"' { string_character | escape_sequence } '"' +// OR = "||" +// AND = "&&" +// NOT = "!" +// NEQ = "!=" +// ASSIGN = "=" +// EQ = "==" +// LT = "<" +// LTE = "<=" +// GT = ">" +// GTE = ">=" +// ADD = "+" +// SUB = "-" +// MUL = "*" +// DIV = "/" +// MOD = "%" +// POW = "^" +// LCURLY = "{" +// RCURLY = "}" +// LPAREN = "(" +// RPAREN = ")" +// LBRACK = "[" +// RBRACK = "]" +// COMMA = "," +// DOT = "." +// +// The EBNF for escape_sequence is currently undocumented; see scanEscape for +// details. The escape sequences supported by River are the same as the escape +// sequences supported by Go, except that it is always valid to use \' in +// strings (which in Go, is only valid to use in character literals). + +// ErrorHandler is invoked whenever there is an error. +type ErrorHandler func(pos token.Pos, msg string) + +// Mode is a set of bitwise flags which control scanner behavior. +type Mode uint + +const ( + // IncludeComments will cause comments to be returned as comment tokens. + // Otherwise, comments are ignored. + IncludeComments Mode = 1 << iota + + // Avoids automatic insertion of terminators (for testing only). + dontInsertTerms +) + +const ( + bom = 0xFEFF // byte order mark, permitted as very first character + eof = -1 // end of file +) + +// Scanner holds the internal state for the tokenizer while processing configs. +type Scanner struct { + file *token.File // Config file handle for tracking line offsets + input []byte // Input config + err ErrorHandler // Error reporting (may be nil) + mode Mode + + // scanning state variables: + + ch rune // Current character + offset int // Byte offset of ch + readOffset int // Byte offset of first character *after* ch + insertTerm bool // Insert a newline before the next newline + numErrors int // Number of errors encountered during scanning +} + +// New creates a new scanner to tokenize the provided input config. The scanner +// uses the provided file for adding line information for each token. The mode +// parameter customizes scanner behavior. +// +// Calls to Scan will invoke the error handler eh when a lexical error is found +// if eh is not nil. +func New(file *token.File, input []byte, eh ErrorHandler, mode Mode) *Scanner { + s := &Scanner{ + file: file, + input: input, + err: eh, + mode: mode, + } + + // Preload first character. + s.next() + if s.ch == bom { + s.next() // Ignore BOM if it's the first character. + } + return s +} + +// peek gets the next byte after the current character without advancing the +// scanner. Returns 0 if the scanner is at EOF. +func (s *Scanner) peek() byte { + if s.readOffset < len(s.input) { + return s.input[s.readOffset] + } + return 0 +} + +// next advances the scanner and reads the next Unicode character into s.ch. +// s.ch == eof indicates end of file. 
+func (s *Scanner) next() {
+	if s.readOffset >= len(s.input) {
+		s.offset = len(s.input)
+		if s.ch == '\n' {
+			// Make sure we track final newlines at the end of the file
+			s.file.AddLine(s.offset)
+		}
+		s.ch = eof
+		return
+	}
+
+	s.offset = s.readOffset
+	if s.ch == '\n' {
+		s.file.AddLine(s.offset)
+	}
+
+	r, width := rune(s.input[s.readOffset]), 1
+	switch {
+	case r == 0:
+		s.onError(s.offset, "illegal character NUL")
+	case r >= utf8.RuneSelf:
+		r, width = utf8.DecodeRune(s.input[s.readOffset:])
+		if r == utf8.RuneError && width == 1 {
+			s.onError(s.offset, "illegal UTF-8 encoding")
+		} else if r == bom && s.offset > 0 {
+			s.onError(s.offset, "illegal byte order mark")
+		}
+	}
+	s.readOffset += width
+	s.ch = r
+}
+
+func (s *Scanner) onError(offset int, msg string) {
+	if s.err != nil {
+		s.err(s.file.Pos(offset), msg)
+	}
+	s.numErrors++
+}
+
+// NumErrors returns the current number of errors encountered during scanning.
+// This is useful as a fallback to detect errors when no ErrorHandler was
+// provided to the scanner.
+func (s *Scanner) NumErrors() int { return s.numErrors }
+
+// Scan scans the next token and returns the token's position, the token
+// itself, and the token's literal string (when applicable). The end of the
+// input is indicated by token.EOF.
+//
+// If the returned token is a literal (such as token.STRING), then lit contains
+// the corresponding literal text (including surrounding quotes).
+//
+// If the returned token is a keyword, lit is the keyword text that was
+// scanned.
+//
+// If the returned token is token.TERMINATOR, lit will contain "\n".
+//
+// If the returned token is token.ILLEGAL, lit contains the offending
+// character.
+//
+// In all other cases, lit will be an empty string.
+//
+// For more tolerant parsing, Scan returns a valid token whenever possible,
+// even when a syntax error was encountered. Callers must check NumErrors or
+// the number of times the provided ErrorHandler was invoked to ensure there
+// were no errors found during scanning.
+//
+// Scan will inject line information into the file provided to New. Returned
+// token positions are relative to that file.
+func (s *Scanner) Scan() (pos token.Pos, tok token.Token, lit string) {
+scanAgain:
+	s.skipWhitespace()
+
+	// Start of current token.
+	pos = s.file.Pos(s.offset)
+
+	var insertTerm bool
+
+	// Determine token value
+	switch ch := s.ch; {
+	case isLetter(ch):
+		lit = s.scanIdentifier()
+		if len(lit) > 1 { // Keywords are always > 1 char
+			tok = token.Lookup(lit)
+			switch tok {
+			case token.IDENT, token.NULL, token.BOOL:
+				insertTerm = true
+			}
+		} else {
+			insertTerm = true
+			tok = token.IDENT
+		}
+
+	case isDecimal(ch) || (ch == '.' && isDecimal(rune(s.peek()))):
+		insertTerm = true
+		tok, lit = s.scanNumber()
+
+	default:
+		s.next() // Make progress
+
+		// ch is now the first character in a sequence and s.ch is the second
+		// character.
+
+		switch ch {
+		case eof:
+			if s.insertTerm {
+				s.insertTerm = false // Consumed EOF
+				return pos, token.TERMINATOR, "\n"
+			}
+			tok = token.EOF
+
+		case '\n':
+			// This case is only reachable when s.insertTerm is true, since otherwise
+			// skipWhitespace consumes all other newlines.
+			s.insertTerm = false // Consumed newline
+			return pos, token.TERMINATOR, "\n"
+
+		case '\'':
+			s.onError(pos.Offset(), "illegal single-quoted string; use double quotes")
+			insertTerm = true
+			tok = token.ILLEGAL
+			lit = s.scanString('\'', true, false)
+
+		case '"':
+			insertTerm = true
+			tok = token.STRING
+			lit = s.scanString('"', true, false)
+
+		case '`':
+			insertTerm = true
+			tok = token.STRING
+			lit = s.scanString('`', false, true)
+
+		case '|':
+			if s.ch != '|' {
+				s.onError(s.offset, "missing second | in ||")
+			} else {
+				s.next() // consume second '|'
+			}
+			tok = token.OR
+		case '&':
+			if s.ch != '&' {
+				s.onError(s.offset, "missing second & in &&")
+			} else {
+				s.next() // consume second '&'
+			}
+			tok = token.AND
+
+		case '!': // !, !=
+			tok = s.switch2(token.NOT, token.NEQ, '=')
+		case '=': // =, ==
+			tok = s.switch2(token.ASSIGN, token.EQ, '=')
+		case '<': // <, <=
+			tok = s.switch2(token.LT, token.LTE, '=')
+		case '>': // >, >=
+			tok = s.switch2(token.GT, token.GTE, '=')
+		case '+':
+			tok = token.ADD
+		case '-':
+			tok = token.SUB
+		case '*':
+			tok = token.MUL
+		case '/':
+			if s.ch == '/' || s.ch == '*' {
+				// //- or /*-style comment.
+				//
+				// If we're expected to inject a terminator, we can only do so if our
+				// comment goes to the end of the line. Otherwise, the terminator will
+				// have to be injected after the comment token.
+				if s.insertTerm && s.findLineEnd() {
+					// Reset position to the beginning of the comment.
+					s.ch = '/'
+					s.offset = pos.Offset()
+					s.readOffset = s.offset + 1
+					s.insertTerm = false // Consumed newline
+					return pos, token.TERMINATOR, "\n"
+				}
+				comment := s.scanComment()
+				if s.mode&IncludeComments == 0 {
+					// Skip over comment
+					s.insertTerm = false // Consumed newline
+					goto scanAgain
+				}
+				tok = token.COMMENT
+				lit = comment
+			} else {
+				tok = token.DIV
+			}
+
+		case '%':
+			tok = token.MOD
+		case '^':
+			tok = token.POW
+		case '{':
+			tok = token.LCURLY
+		case '}':
+			insertTerm = true
+			tok = token.RCURLY
+		case '(':
+			tok = token.LPAREN
+		case ')':
+			insertTerm = true
+			tok = token.RPAREN
+		case '[':
+			tok = token.LBRACK
+		case ']':
+			insertTerm = true
+			tok = token.RBRACK
+		case ',':
+			tok = token.COMMA
+		case '.':
+			// NOTE: Fractions starting with '.' are handled by outer switch
+			tok = token.DOT
+
+		default:
+			// s.next() reports invalid BOMs so we don't need to repeat the error.
+			if ch != bom {
+				s.onError(pos.Offset(), fmt.Sprintf("illegal character %#U", ch))
+			}
+			insertTerm = s.insertTerm // Preserve previous s.insertTerm state
+			tok = token.ILLEGAL
+			lit = string(ch)
+		}
+	}
+
+	if s.mode&dontInsertTerms == 0 {
+		s.insertTerm = insertTerm
+	}
+	return
+}
+
+func (s *Scanner) skipWhitespace() {
+	for s.ch == ' ' || s.ch == '\t' || s.ch == '\r' || (s.ch == '\n' && !s.insertTerm) {
+		s.next()
+	}
+}
+
+func isLetter(ch rune) bool {
+	// We check for ASCII first as an optimization, and leave checking unicode
+	// (the slowest) to the very end.
+	return (lower(ch) >= 'a' && lower(ch) <= 'z') ||
+		ch == '_' ||
+		(ch >= utf8.RuneSelf && unicode.IsLetter(ch))
+}
+
+func lower(ch rune) rune     { return ('a' - 'A') | ch }
+func isDecimal(ch rune) bool { return '0' <= ch && ch <= '9' }
+func isDigit(ch rune) bool {
+	return isDecimal(ch) || (ch >= utf8.RuneSelf && unicode.IsDigit(ch))
+}
+
+// scanIdentifier reads the string of valid identifier characters starting at
+// s.offset. It must only be called when s.ch is a valid character which starts
+// an identifier.
+//
+// scanIdentifier is highly optimized for identifiers, so modifications must be
+// made carefully.
+func (s *Scanner) scanIdentifier() string {
+	off := s.offset
+
+	// Optimize for common case of ASCII identifiers.
+	//
+	// Ranging over s.input[s.readOffset:] avoids bounds checks and avoids
+	// conversions to runes.
+	//
+	// We'll fall back to the slower path if we find a non-ASCII character.
+	for readOffset, b := range s.input[s.readOffset:] {
+		if (b >= 'a' && b <= 'z') || (b >= 'A' && b <= 'Z') || b == '_' || (b >= '0' && b <= '9') {
+			// Common case: ASCII character; don't assign a rune.
+			continue
+		}
+		s.readOffset += readOffset
+		if b > 0 && b < utf8.RuneSelf {
+			// Optimization: ASCII character that isn't a letter or number; we've
+			// reached the end of the identifier sequence and can terminate. We avoid
+			// the call to s.next() and the corresponding setup.
+			//
+			// This optimization only works because we know that s.ch (the current
+			// character when scanIdentifier was called) is never '\n' since '\n'
+			// cannot start an identifier.
+			s.ch = rune(b)
+			s.offset = s.readOffset
+			s.readOffset++
+			goto exit
+		}
+
+		// The preceding character is valid for an identifier because
+		// scanIdentifier is only called when s.ch is a letter; calling s.next() at
+		// s.readOffset will reset the scanner state.
+		s.next()
+		for isLetter(s.ch) || isDigit(s.ch) {
+			s.next()
+		}
+
+		// No more valid characters for the identifier; terminate.
+		goto exit
+	}
+
+	s.offset = len(s.input)
+	s.readOffset = len(s.input)
+	s.ch = eof
+
+exit:
+	return string(s.input[off:s.offset])
+}
+
+func (s *Scanner) scanNumber() (tok token.Token, lit string) {
+	tok = token.NUMBER
+	off := s.offset
+
+	// Integer part of number
+	if s.ch != '.' {
+		s.digits()
+	}
+
+	// Fractional part of number
+	if s.ch == '.' {
+		tok = token.FLOAT
+
+		s.next()
+		s.digits()
+	}
+
+	// Exponent
+	if lower(s.ch) == 'e' {
+		tok = token.FLOAT
+
+		s.next()
+		if s.ch == '+' || s.ch == '-' {
+			s.next()
+		}
+
+		if s.digits() == 0 {
+			s.onError(off, "exponent has no digits")
+		}
+	}
+
+	return tok, string(s.input[off:s.offset])
+}
+
+// digits scans a sequence of digits.
+func (s *Scanner) digits() (count int) {
+	for isDecimal(s.ch) {
+		s.next()
+		count++
+	}
+	return
+}
+
+func (s *Scanner) scanString(until rune, escape bool, multiline bool) string {
+	// Subtract 1 to account for the opening quote, which was already consumed
+	// by the scanner forcing progress.
+	off := s.offset - 1
+
+	for {
+		ch := s.ch
+		if (!multiline && ch == '\n') || ch == eof {
+			s.onError(off, "string literal not terminated")
+			break
+		}
+		s.next()
+		if ch == until {
+			break
+		}
+		if escape && ch == '\\' {
+			s.scanEscape()
+		}
+	}
+
+	return string(s.input[off:s.offset])
+}
+
+// scanEscape parses an escape sequence. In case of a syntax error, scanEscape
+// stops at the offending character without consuming it.
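+//
+// For example, all of the following escapes are accepted inside a
+// double-quoted River string (illustrative):
+//
+//	"\n" "\t" "\\" "\x41" "\u00e9" "\U0001F600"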
+func (s *Scanner) scanEscape() { + off := s.offset + + var ( + n int + base, max uint32 + ) + + switch s.ch { + case 'a', 'b', 'f', 'n', 'r', 't', 'v', '\\', '"': + s.next() + return + case '0', '1', '2', '3', '4', '5', '6', '7': + n, base, max = 3, 8, 255 + case 'x': + s.next() + n, base, max = 2, 16, 255 + case 'u': + s.next() + n, base, max = 4, 16, unicode.MaxRune + case 'U': + s.next() + n, base, max = 8, 16, unicode.MaxRune + default: + msg := "unknown escape sequence" + if s.ch == eof { + msg = "escape sequence not terminated" + } + s.onError(off, msg) + return + } + + var x uint32 + for n > 0 { + d := uint32(digitVal(s.ch)) + if d >= base { + msg := fmt.Sprintf("illegal character %#U in escape sequence", s.ch) + if s.ch == eof { + msg = "escape sequence not terminated" + } + s.onError(off, msg) + return + } + x = x*base + d + s.next() + n-- + } + + if x > max || x >= 0xD800 && x < 0xE000 { + s.onError(off, "escape sequence is invalid Unicode code point") + } +} + +func digitVal(ch rune) int { + switch { + case ch >= '0' && ch <= '9': + return int(ch - '0') + case lower(ch) >= 'a' && lower(ch) <= 'f': + return int(lower(ch) - 'a' + 10) + } + return 16 // Larger than any legal digit val +} + +func (s *Scanner) scanComment() string { + // The initial character in the comment was already consumed from the scanner + // forcing progress. + // + // slashComment will be true when the comment is a //- or /*-style comment. + + var ( + off = s.offset - 1 // Offset of initial character + numCR = 0 + + blockComment = false + ) + + if s.ch == '/' { // NOTE: s.ch is second character in comment sequence + // //-style comment. + // + // The final '\n' is not considered to be part of the comment. + if s.ch == '/' { + s.next() // Consume second '/' + } + + for s.ch != '\n' && s.ch != eof { + if s.ch == '\r' { + numCR++ + } + s.next() + } + + goto exit + } + + // /*-style comment. + blockComment = true + s.next() + for s.ch != eof { + ch := s.ch + if ch == '\r' { + numCR++ + } + s.next() + if ch == '*' && s.ch == '/' { + s.next() + goto exit + } + } + + s.onError(off, "block comment not terminated") + +exit: + lit := s.input[off:s.offset] + + // On Windows, a single comment line may end in "\r\n". We want to remove the + // final \r. + if numCR > 0 && len(lit) >= 1 && lit[len(lit)-1] == '\r' { + lit = lit[:len(lit)-1] + numCR-- + } + + if numCR > 0 { + lit = stripCR(lit, blockComment) + } + + return string(lit) +} + +func stripCR(b []byte, blockComment bool) []byte { + c := make([]byte, len(b)) + i := 0 + + for j, ch := range b { + if ch != '\r' || blockComment && i > len("/*") && c[i-1] == '*' && j+1 < len(b) && b[j+1] == '/' { + c[i] = ch + i++ + } + } + + return c[:i] +} + +// findLineEnd checks to see if a comment runs to the end of the line. +func (s *Scanner) findLineEnd() bool { + // NOTE: initial '/' is already consumed by forcing the scanner to progress. + + defer func(off int) { + // Reset scanner state to where it was upon calling findLineEnd. + s.ch = '/' + s.offset = off + s.readOffset = off + 1 + s.next() // Consume initial starting '/' again + }(s.offset - 1) + + // Read ahead until a newline, EOF, or non-comment token is found. + // We loop to consume multiple sequences of comment tokens. + for s.ch == '/' || s.ch == '*' { + if s.ch == '/' { + // //-style comments always contain newlines. + return true + } + + // We're looking at a /*-style comment; look for its newline. 
+		s.next()
+		for s.ch != eof {
+			ch := s.ch
+			if ch == '\n' {
+				return true
+			}
+			s.next()
+			if ch == '*' && s.ch == '/' { // End of block comment
+				s.next()
+				break
+			}
+		}
+
+		// Check to see if there's a newline after the block comment.
+		s.skipWhitespace() // s.insertTerm is set
+		if s.ch == eof || s.ch == '\n' {
+			return true
+		}
+		if s.ch != '/' {
+			// Non-comment token
+			return false
+		}
+		s.next() // Consume '/' at the end of the /*-style comment
+	}
+
+	return false
+}
+
+// switch2 returns b if s.ch matches next, and a otherwise. The scanner is
+// advanced only when b is returned.
+//
+// This is used for tokens which can be either a single character or the start
+// of a two-character token (i.e., = and ==).
+func (s *Scanner) switch2(a, b token.Token, next rune) token.Token { //nolint:unparam
+	if s.ch == next {
+		s.next()
+		return b
+	}
+	return a
+}
diff --git a/syntax/scanner/scanner_test.go b/syntax/scanner/scanner_test.go
new file mode 100644
index 0000000000..38ddcf58ca
--- /dev/null
+++ b/syntax/scanner/scanner_test.go
@@ -0,0 +1,272 @@
+package scanner
+
+import (
+	"path/filepath"
+	"testing"
+
+	"github.com/grafana/river/token"
+	"github.com/stretchr/testify/assert"
+)
+
+type tokenExample struct {
+	tok token.Token
+	lit string
+}
+
+var tokens = []tokenExample{
+	// Special tokens
+	{token.COMMENT, "/* a comment */"},
+	{token.COMMENT, "// a comment \n"},
+	{token.COMMENT, "/*\r*/"},
+	{token.COMMENT, "/**\r/*/"}, // golang/go#11151
+	{token.COMMENT, "/**\r\r/*/"},
+	{token.COMMENT, "//\r\n"},
+
+	// Identifiers and basic type literals
+	{token.IDENT, "foobar"},
+	{token.IDENT, "a۰۱۸"},
+	{token.IDENT, "foo६४"},
+	{token.IDENT, "bar9876"},
+	{token.IDENT, "ŝ"},    // golang/go#4000
+	{token.IDENT, "ŝfoo"}, // golang/go#4000
+	{token.NUMBER, "0"},
+	{token.NUMBER, "1"},
+	{token.NUMBER, "123456789012345678890"},
+	{token.NUMBER, "01234567"},
+	{token.FLOAT, "0."},
+	{token.FLOAT, ".0"},
+	{token.FLOAT, "3.14159265"},
+	{token.FLOAT, "1e0"},
+	{token.FLOAT, "1e+100"},
+	{token.FLOAT, "1e-100"},
+	{token.FLOAT, "2.71828e-1000"},
+	{token.STRING, `"Hello, world!"`},
+	{token.STRING, "`Hello, world!\\\\`"},
+
+	// Operators and delimiters
+	{token.ADD, "+"},
+	{token.SUB, "-"},
+	{token.MUL, "*"},
+	{token.DIV, "/"},
+	{token.MOD, "%"},
+	{token.POW, "^"},
+
+	{token.AND, "&&"},
+	{token.OR, "||"},
+
+	{token.EQ, "=="},
+	{token.LT, "<"},
+	{token.GT, ">"},
+	{token.ASSIGN, "="},
+	{token.NOT, "!"},
+
+	{token.NEQ, "!="},
+	{token.LTE, "<="},
+	{token.GTE, ">="},
+
+	{token.LPAREN, "("},
+	{token.LBRACK, "["},
+	{token.LCURLY, "{"},
+	{token.COMMA, ","},
+	{token.DOT, "."},
+
+	{token.RPAREN, ")"},
+	{token.RBRACK, "]"},
+	{token.RCURLY, "}"},
+
+	// Keywords
+	{token.NULL, "null"},
+	{token.BOOL, "true"},
+	{token.BOOL, "false"},
+}
+
+const whitespace = " \t \n\n\n" // Various whitespace to separate tokens
+
+var source = func() []byte {
+	var src []byte
+	for _, t := range tokens {
+		src = append(src, t.lit...)
+		src = append(src, whitespace...)
+	}
+	return src
+}()
+
+// FuzzScanner ensures that the scanner will always be able to reach EOF
+// regardless of input.
+func FuzzScanner(f *testing.F) { + // Add each token into the corpus + for _, t := range tokens { + f.Add([]byte(t.lit)) + } + // Then add the entire source + f.Add(source) + + f.Fuzz(func(t *testing.T, input []byte) { + f := token.NewFile(t.Name()) + + s := New(f, input, nil, IncludeComments) + + for { + _, tok, _ := s.Scan() + if tok == token.EOF { + break + } + } + }) +} + +func TestScanner_Scan(t *testing.T) { + whitespaceLinecount := newlineCount(whitespace) + + var eh ErrorHandler = func(_ token.Pos, msg string) { + t.Errorf("ErrorHandler called (msg = %s)", msg) + } + + f := token.NewFile(t.Name()) + s := New(f, source, eh, IncludeComments|dontInsertTerms) + + // Configure expected position + expectPos := token.Position{ + Filename: t.Name(), + Offset: 0, + Line: 1, + Column: 1, + } + + index := 0 + for { + pos, tok, lit := s.Scan() + + // Check position + checkPos(t, lit, tok, pos, expectPos) + + // Check token + e := tokenExample{token.EOF, ""} + if index < len(tokens) { + e = tokens[index] + index++ + } + assert.Equal(t, e.tok, tok) + + // Check literal + expectLit := "" + switch e.tok { + case token.COMMENT: + // no CRs in comments + expectLit = string(stripCR([]byte(e.lit), e.lit[1] == '*')) + if expectLit[1] == '/' { + // Line comment literals doesn't contain newline + expectLit = expectLit[0 : len(expectLit)-1] + } + case token.IDENT: + expectLit = e.lit + case token.NUMBER, token.FLOAT, token.STRING, token.NULL, token.BOOL: + expectLit = e.lit + } + assert.Equal(t, expectLit, lit) + + if tok == token.EOF { + break + } + + // Update position + expectPos.Offset += len(e.lit) + len(whitespace) + expectPos.Line += newlineCount(e.lit) + whitespaceLinecount + } + + if s.NumErrors() != 0 { + assert.Zero(t, s.NumErrors(), "expected number of scanning errors to be 0") + } +} + +func newlineCount(s string) int { + var n int + for i := 0; i < len(s); i++ { + if s[i] == '\n' { + n++ + } + } + return n +} + +func checkPos(t *testing.T, lit string, tok token.Token, p token.Pos, expected token.Position) { + t.Helper() + + pos := p.Position() + + // Check cleaned filenames so that we don't have to worry about different + // os.PathSeparator values. 
+ if pos.Filename != expected.Filename && filepath.Clean(pos.Filename) != filepath.Clean(expected.Filename) { + assert.Equal(t, expected.Filename, pos.Filename, "Bad filename for %s (%q)", tok, lit) + } + + assert.Equal(t, expected.Offset, pos.Offset, "Bad offset for %s (%q)", tok, lit) + assert.Equal(t, expected.Line, pos.Line, "Bad line for %s (%q)", tok, lit) + assert.Equal(t, expected.Column, pos.Column, "Bad column for %s (%q)", tok, lit) +} + +var errorTests = []struct { + input string + tok token.Token + pos int + lit string + err string +}{ + {"\a", token.ILLEGAL, 0, "", "illegal character U+0007"}, + {`…`, token.ILLEGAL, 0, "", "illegal character U+2026 '…'"}, + {"..", token.DOT, 0, "", ""}, // two periods, not invalid token (golang/go#28112) + {`'illegal string'`, token.ILLEGAL, 0, "", "illegal single-quoted string; use double quotes"}, + {`""`, token.STRING, 0, `""`, ""}, + {`"abc`, token.STRING, 0, `"abc`, "string literal not terminated"}, + {"\"abc\n", token.STRING, 0, `"abc`, "string literal not terminated"}, + {"\"abc\n ", token.STRING, 0, `"abc`, "string literal not terminated"}, + {"\"abc\x00def\"", token.STRING, 4, "\"abc\x00def\"", "illegal character NUL"}, + {"\"abc\x80def\"", token.STRING, 4, "\"abc\x80def\"", "illegal UTF-8 encoding"}, + {"\ufeff\ufeff", token.ILLEGAL, 3, "\ufeff\ufeff", "illegal byte order mark"}, // only first BOM is ignored + {"//\ufeff", token.COMMENT, 2, "//\ufeff", "illegal byte order mark"}, // only first BOM is ignored + {`"` + "abc\ufeffdef" + `"`, token.STRING, 4, `"` + "abc\ufeffdef" + `"`, "illegal byte order mark"}, // only first BOM is ignored + {"abc\x00def", token.IDENT, 3, "abc", "illegal character NUL"}, + {"abc\x00", token.IDENT, 3, "abc", "illegal character NUL"}, + {"10E", token.FLOAT, 0, "10E", "exponent has no digits"}, +} + +func TestScanner_Scan_Errors(t *testing.T) { + for _, e := range errorTests { + checkError(t, e.input, e.tok, e.pos, e.lit, e.err) + } +} + +func checkError(t *testing.T, src string, tok token.Token, pos int, lit, err string) { + t.Helper() + + var ( + actualErrors int + latestError string + latestPos token.Pos + ) + + eh := func(pos token.Pos, msg string) { + actualErrors++ + latestError = msg + latestPos = pos + } + + f := token.NewFile(t.Name()) + s := New(f, []byte(src), eh, IncludeComments|dontInsertTerms) + + _, actualTok, actualLit := s.Scan() + + assert.Equal(t, tok, actualTok) + if actualTok != token.ILLEGAL { + assert.Equal(t, lit, actualLit) + } + + expectErrors := 0 + if err != "" { + expectErrors = 1 + } + + assert.Equal(t, expectErrors, actualErrors, "Unexpected error count in src %q", src) + assert.Equal(t, err, latestError, "Unexpected error message in src %q", src) + assert.Equal(t, pos, latestPos.Offset(), "Unexpected offset in src %q", src) +} diff --git a/syntax/token/builder/builder.go b/syntax/token/builder/builder.go new file mode 100644 index 0000000000..1dc9b5d62b --- /dev/null +++ b/syntax/token/builder/builder.go @@ -0,0 +1,419 @@ +// Package builder exposes an API to create a River configuration file by +// constructing a set of tokens. +package builder + +import ( + "bytes" + "fmt" + "io" + "reflect" + "strings" + + "github.com/grafana/river/internal/reflectutil" + "github.com/grafana/river/internal/rivertags" + "github.com/grafana/river/internal/value" + "github.com/grafana/river/token" +) + +var goRiverDefaulter = reflect.TypeOf((*value.Defaulter)(nil)).Elem() + +// An Expr represents a single River expression. 
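+//
+// A minimal sketch of typical use (the printed output is illustrative):
+//
+//	e := NewExpr()
+//	e.SetValue([]int{0, 1, 2})
+//	_ = e.Bytes() // renders as: [0, 1, 2]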
+type Expr struct { + rawTokens []Token +} + +// NewExpr creates a new Expr. +func NewExpr() *Expr { return &Expr{} } + +// Tokens returns the Expr as a set of Tokens. +func (e *Expr) Tokens() []Token { return e.rawTokens } + +// SetValue sets the Expr to a River value converted from a Go value. The Go +// value is encoded using the normal Go to River encoding rules. If any value +// reachable from goValue implements Tokenizer, the printed tokens will instead +// be retrieved by calling the RiverTokenize method. +func (e *Expr) SetValue(goValue interface{}) { + e.rawTokens = tokenEncode(goValue) +} + +// WriteTo renders and formats the File, writing the contents to w. +func (e *Expr) WriteTo(w io.Writer) (int64, error) { + n, err := printExprTokens(w, e.Tokens()) + return int64(n), err +} + +// Bytes renders the File to a formatted byte slice. +func (e *Expr) Bytes() []byte { + var buf bytes.Buffer + _, _ = e.WriteTo(&buf) + return buf.Bytes() +} + +// A File represents a River configuration file. +type File struct { + body *Body +} + +// NewFile creates a new File. +func NewFile() *File { return &File{body: newBody()} } + +// Tokens returns the File as a set of Tokens. +func (f *File) Tokens() []Token { return f.Body().Tokens() } + +// Body returns the Body contents of the file. +func (f *File) Body() *Body { return f.body } + +// WriteTo renders and formats the File, writing the contents to w. +func (f *File) WriteTo(w io.Writer) (int64, error) { + n, err := printFileTokens(w, f.Tokens()) + return int64(n), err +} + +// Bytes renders the File to a formatted byte slice. +func (f *File) Bytes() []byte { + var buf bytes.Buffer + _, _ = f.WriteTo(&buf) + return buf.Bytes() +} + +// Body is a list of block and attribute statements. A Body cannot be manually +// created, but is retrieved from a File or Block. +type Body struct { + nodes []tokenNode + valueOverrideHook ValueOverrideHook +} + +type ValueOverrideHook = func(val interface{}) interface{} + +// SetValueOverrideHook sets a hook to override the value that will be token +// encoded. The hook can mutate the value to be encoded or should return it +// unmodified. This hook can be skipped by leaving it nil or setting it to nil. +func (b *Body) SetValueOverrideHook(valueOverrideHook ValueOverrideHook) { + b.valueOverrideHook = valueOverrideHook +} + +func (b *Body) Nodes() []tokenNode { + return b.nodes +} + +// A tokenNode is a structural element which can be converted into a set of +// Tokens. +type tokenNode interface { + // Tokens builds the set of Tokens from the node. + Tokens() []Token +} + +func newBody() *Body { + return &Body{} +} + +// Tokens returns the File as a set of Tokens. +func (b *Body) Tokens() []Token { + var rawToks []Token + for i, node := range b.nodes { + rawToks = append(rawToks, node.Tokens()...) + + if i+1 < len(b.nodes) { + // Append a terminator between each statement in the Body. + rawToks = append(rawToks, Token{ + Tok: token.LITERAL, + Lit: "\n", + }) + } + } + return rawToks +} + +// AppendTokens appends raw tokens to the Body. +func (b *Body) AppendTokens(tokens []Token) { + b.nodes = append(b.nodes, tokensSlice(tokens)) +} + +// AppendBlock adds a new block inside of the Body. +func (b *Body) AppendBlock(block *Block) { + b.nodes = append(b.nodes, block) +} + +// AppendFrom sets attributes and appends blocks defined by goValue into the +// Body. If any value reachable from goValue implements Tokenizer, the printed +// tokens will instead be retrieved by calling the RiverTokenize method. 
+// +// Optional attributes and blocks set to default values are trimmed. +// If goValue implements Defaulter, default values are retrieved by +// calling SetToDefault against a copy. Otherwise, default values are +// the zero value of the respective Go types. +// +// goValue must be a struct or a pointer to a struct that contains River struct +// tags. +func (b *Body) AppendFrom(goValue interface{}) { + if goValue == nil { + return + } + + rv := reflect.ValueOf(goValue) + b.encodeFields(rv) +} + +// getBlockLabel returns the label for a given block. +func getBlockLabel(rv reflect.Value) string { + tags := rivertags.Get(rv.Type()) + for _, tag := range tags { + if tag.Flags&rivertags.FlagLabel != 0 { + return reflectutil.Get(rv, tag).String() + } + } + + return "" +} + +func (b *Body) encodeFields(rv reflect.Value) { + for rv.Kind() == reflect.Pointer { + if rv.IsNil() { + return + } + rv = rv.Elem() + } + if rv.Kind() != reflect.Struct { + panic(fmt.Sprintf("river/token/builder: can only encode struct values to bodies, got %s", rv.Type())) + } + + fields := rivertags.Get(rv.Type()) + defaults := reflect.New(rv.Type()).Elem() + if defaults.CanAddr() && defaults.Addr().Type().Implements(goRiverDefaulter) { + defaults.Addr().Interface().(value.Defaulter).SetToDefault() + } + + for _, field := range fields { + fieldVal := reflectutil.Get(rv, field) + fieldValDefault := reflectutil.Get(defaults, field) + + // Check if the values are exactly equal or if they're both equal to the + // zero value. Checking for both fields being zero handles the case where + // an empty and nil map are being compared (which are not equal, but are + // both zero values). + matchesDefault := reflect.DeepEqual(fieldVal.Interface(), fieldValDefault.Interface()) + isZero := fieldValDefault.IsZero() && fieldVal.IsZero() + + if field.IsOptional() && (matchesDefault || isZero) { + continue + } + + b.encodeField(nil, field, fieldVal) + } +} + +func (b *Body) encodeField(prefix []string, field rivertags.Field, fieldValue reflect.Value) { + fieldName := strings.Join(field.Name, ".") + + for fieldValue.Kind() == reflect.Pointer { + if fieldValue.IsNil() { + break + } + fieldValue = fieldValue.Elem() + } + + switch { + case field.IsAttr(): + b.SetAttributeValue(fieldName, fieldValue.Interface()) + + case field.IsBlock(): + fullName := mergeStringSlice(prefix, field.Name) + + switch { + case fieldValue.Kind() == reflect.Map: + // Iterate over the map and add each element as an attribute into it. + if fieldValue.Type().Key().Kind() != reflect.String { + panic("river/token/builder: unsupported map type for block; expected map[string]T, got " + fieldValue.Type().String()) + } + + inner := NewBlock(fullName, "") + inner.body.SetValueOverrideHook(b.valueOverrideHook) + b.AppendBlock(inner) + + iter := fieldValue.MapRange() + for iter.Next() { + mapKey, mapValue := iter.Key(), iter.Value() + inner.body.SetAttributeValue(mapKey.String(), mapValue.Interface()) + } + + case fieldValue.Kind() == reflect.Slice, fieldValue.Kind() == reflect.Array: + for i := 0; i < fieldValue.Len(); i++ { + elem := fieldValue.Index(i) + + // Recursively call encodeField for each element in the slice/array for + // non-zero blocks. The recursive call will hit the case below and add + // a new block for each field encountered. 
+ if field.Flags&rivertags.FlagOptional != 0 && elem.IsZero() { + continue + } + b.encodeField(prefix, field, elem) + } + + case fieldValue.Kind() == reflect.Struct: + inner := NewBlock(fullName, getBlockLabel(fieldValue)) + inner.body.SetValueOverrideHook(b.valueOverrideHook) + inner.Body().encodeFields(fieldValue) + b.AppendBlock(inner) + } + + case field.IsEnum(): + // Blocks within an enum have a prefix set. + newPrefix := mergeStringSlice(prefix, field.Name) + + switch { + case fieldValue.Kind() == reflect.Slice, fieldValue.Kind() == reflect.Array: + for i := 0; i < fieldValue.Len(); i++ { + b.encodeEnumElement(newPrefix, fieldValue.Index(i)) + } + + default: + panic(fmt.Sprintf("river/token/builder: unrecognized enum kind %s", fieldValue.Kind())) + } + } +} + +func mergeStringSlice(a, b []string) []string { + if len(a) == 0 { + return b + } else if len(b) == 0 { + return a + } + + res := make([]string, 0, len(a)+len(b)) + res = append(res, a...) + res = append(res, b...) + return res +} + +func (b *Body) encodeEnumElement(prefix []string, enumElement reflect.Value) { + for enumElement.Kind() == reflect.Pointer { + if enumElement.IsNil() { + return + } + enumElement = enumElement.Elem() + } + + fields := rivertags.Get(enumElement.Type()) + + // Find the first non-zero field and encode it. + for _, field := range fields { + fieldVal := reflectutil.Get(enumElement, field) + if !fieldVal.IsValid() || fieldVal.IsZero() { + continue + } + + b.encodeField(prefix, field, fieldVal) + break + } +} + +// SetAttributeTokens sets an attribute to the Body whose value is a set of raw +// tokens. If the attribute was previously set, its value tokens are updated. +// +// Attributes will be written out in the order they were initially created. +func (b *Body) SetAttributeTokens(name string, tokens []Token) { + attr := b.getOrCreateAttribute(name) + attr.RawTokens = tokens +} + +func (b *Body) getOrCreateAttribute(name string) *attribute { + for _, n := range b.nodes { + if attr, ok := n.(*attribute); ok && attr.Name == name { + return attr + } + } + + newAttr := &attribute{Name: name} + b.nodes = append(b.nodes, newAttr) + return newAttr +} + +// SetAttributeValue sets an attribute in the Body whose value is converted +// from a Go value to a River value. The Go value is encoded using the normal +// Go to River encoding rules. If any value reachable from goValue implements +// Tokenizer, the printed tokens will instead be retrieved by calling the +// RiverTokenize method. +// +// If the attribute was previously set, its value tokens are updated. +// +// Attributes will be written out in the order they were initially crated. +func (b *Body) SetAttributeValue(name string, goValue interface{}) { + attr := b.getOrCreateAttribute(name) + + if b.valueOverrideHook != nil { + attr.RawTokens = tokenEncode(b.valueOverrideHook(goValue)) + } else { + attr.RawTokens = tokenEncode(goValue) + } +} + +type attribute struct { + Name string + RawTokens []Token +} + +func (attr *attribute) Tokens() []Token { + var toks []Token + + toks = append(toks, Token{Tok: token.IDENT, Lit: attr.Name}) + toks = append(toks, Token{Tok: token.ASSIGN}) + toks = append(toks, attr.RawTokens...) + + return toks +} + +// A Block encapsulates a body within a named and labeled River block. Blocks +// must be created by calling NewBlock, but its public struct fields may be +// safely modified by callers. 
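+//
+// A minimal sketch of typical use (the names shown are illustrative):
+//
+//	b := NewBlock([]string{"test", "block"}, "label")
+//	b.Body().SetAttributeValue("inner_attr", "value")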
+type Block struct {
+	// Public fields, safe to be changed by callers:
+
+	Name  []string
+	Label string
+
+	// Private fields:
+
+	body *Body
+}
+
+// NewBlock returns a new Block with the given name and label. The name/label
+// can be updated later by modifying the Block's public fields.
+func NewBlock(name []string, label string) *Block {
+	return &Block{
+		Name:  name,
+		Label: label,
+
+		body: newBody(),
+	}
+}
+
+// Tokens returns the Block as a set of Tokens.
+func (b *Block) Tokens() []Token {
+	var toks []Token
+
+	for i, frag := range b.Name {
+		toks = append(toks, Token{Tok: token.IDENT, Lit: frag})
+		if i+1 < len(b.Name) {
+			toks = append(toks, Token{Tok: token.DOT})
+		}
+	}
+
+	toks = append(toks, Token{Tok: token.LITERAL, Lit: " "})
+
+	if b.Label != "" {
+		toks = append(toks, Token{Tok: token.STRING, Lit: fmt.Sprintf("%q", b.Label)})
+	}
+
+	toks = append(toks, Token{Tok: token.LCURLY}, Token{Tok: token.LITERAL, Lit: "\n"})
+	toks = append(toks, b.body.Tokens()...)
+	toks = append(toks, Token{Tok: token.LITERAL, Lit: "\n"}, Token{Tok: token.RCURLY})
+
+	return toks
+}
+
+// Body returns the Body contained within the Block.
+func (b *Block) Body() *Body { return b.body }
+
+type tokensSlice []Token
+
+func (tn tokensSlice) Tokens() []Token { return []Token(tn) }
diff --git a/syntax/token/builder/builder_test.go b/syntax/token/builder/builder_test.go
new file mode 100644
index 0000000000..d363556929
--- /dev/null
+++ b/syntax/token/builder/builder_test.go
@@ -0,0 +1,411 @@
+package builder_test
+
+import (
+	"bytes"
+	"fmt"
+	"testing"
+	"time"
+
+	"github.com/grafana/river/parser"
+	"github.com/grafana/river/printer"
+	"github.com/grafana/river/token"
+	"github.com/grafana/river/token/builder"
+	"github.com/stretchr/testify/require"
+)
+
+func TestBuilder_File(t *testing.T) {
+	f := builder.NewFile()
+
+	f.Body().SetAttributeTokens("attr_1", []builder.Token{{Tok: token.NUMBER, Lit: "15"}})
+	f.Body().SetAttributeTokens("attr_2", []builder.Token{{Tok: token.BOOL, Lit: "true"}})
+
+	b1 := builder.NewBlock([]string{"test", "block"}, "")
+	b1.Body().SetAttributeTokens("inner_attr", []builder.Token{{Tok: token.STRING, Lit: `"block 1"`}})
+	f.Body().AppendBlock(b1)
+
+	b2 := builder.NewBlock([]string{"test", "block"}, "labeled")
+	b2.Body().SetAttributeTokens("inner_attr", []builder.Token{{Tok: token.STRING, Lit: "`\"block 2`"}})
+	f.Body().AppendBlock(b2)
+
+	expect := format(t, `
+		attr_1 = 15
+		attr_2 = true
+
+		test.block {
+			inner_attr = "block 1"
+		}
+
+		test.block "labeled" {
+			inner_attr = `+"`\"block 2`"+`
+		}
+	`)
+
+	require.Equal(t, expect, string(f.Bytes()))
+}
+
+func TestBuilder_GoEncode(t *testing.T) {
+	f := builder.NewFile()
+
+	f.Body().AppendTokens([]builder.Token{{token.COMMENT, "// Hello, world!"}})
+	f.Body().SetAttributeValue("null_value", nil)
+	f.Body().AppendTokens([]builder.Token{{token.LITERAL, "\n"}})
+
+	f.Body().SetAttributeValue("num", 15)
+	f.Body().SetAttributeValue("string", "Hello, world!")
+	f.Body().SetAttributeValue("bool", true)
+	f.Body().SetAttributeValue("list", []int{0, 1, 2})
+	f.Body().SetAttributeValue("func", func(int, int) int { return 0 })
+	f.Body().SetAttributeValue("capsule", make(chan int))
+	f.Body().AppendTokens([]builder.Token{{token.LITERAL, "\n"}})
+
+	f.Body().SetAttributeValue("map", map[string]interface{}{"foo": "bar"})
+	f.Body().SetAttributeValue("map_2", map[string]interface{}{"non ident": "bar"})
+	f.Body().AppendTokens([]builder.Token{{token.LITERAL, "\n"}})
+
+	f.Body().SetAttributeValue("mixed_list", 
[]interface{}{ + 0, + true, + map[string]interface{}{"key": true}, + "Hello!", + }) + + expect := format(t, ` + // Hello, world! + null_value = null + + num = 15 + string = "Hello, world!" + bool = true + list = [0, 1, 2] + func = function + capsule = capsule("chan int") + + map = { + foo = "bar", + } + map_2 = { + "non ident" = "bar", + } + + mixed_list = [0, true, { + key = true, + }, "Hello!"] + `) + + require.Equal(t, expect, string(f.Bytes())) +} + +// TestBuilder_GoEncode_SortMapKeys ensures that object literals from unordered +// values (i.e., Go maps) are printed in a deterministic order by sorting the +// keys lexicographically. Other object literals should be printed in the order +// the keys are reported in (i.e., in the order presented by the Go structs). +func TestBuilder_GoEncode_SortMapKeys(t *testing.T) { + f := builder.NewFile() + + type Ordered struct { + SomeKey string `river:"some_key,attr"` + OtherKey string `river:"other_key,attr"` + } + + // Maps are unordered because you can't iterate over their keys in a + // consistent order. + var unordered = map[string]interface{}{ + "key_a": 1, + "key_c": 3, + "key_b": 2, + } + + f.Body().SetAttributeValue("ordered", Ordered{SomeKey: "foo", OtherKey: "bar"}) + f.Body().SetAttributeValue("unordered", unordered) + + expect := format(t, ` + ordered = { + some_key = "foo", + other_key = "bar", + } + unordered = { + key_a = 1, + key_b = 2, + key_c = 3, + } + `) + + require.Equal(t, expect, string(f.Bytes())) +} + +func TestBuilder_AppendFrom(t *testing.T) { + type InnerBlock struct { + Number int `river:"number,attr"` + } + + type Structure struct { + Field string `river:"field,attr"` + + Block InnerBlock `river:"block,block"` + OtherBlocks []InnerBlock `river:"other_block,block"` + } + + f := builder.NewFile() + f.Body().AppendFrom(Structure{ + Field: "some_value", + + Block: InnerBlock{Number: 1}, + OtherBlocks: []InnerBlock{ + {Number: 2}, + {Number: 3}, + }, + }) + + expect := format(t, ` + field = "some_value" + + block { + number = 1 + } + + other_block { + number = 2 + } + + other_block { + number = 3 + } + `) + + require.Equal(t, expect, string(f.Bytes())) +} + +func TestBuilder_AppendFrom_EnumSlice(t *testing.T) { + type InnerBlock struct { + Number int `river:"number,attr"` + } + + type EnumBlock struct { + BlockA InnerBlock `river:"a,block,optional"` + BlockB InnerBlock `river:"b,block,optional"` + BlockC InnerBlock `river:"c,block,optional"` + } + + type Structure struct { + Field string `river:"field,attr"` + + OtherBlocks []EnumBlock `river:"block,enum"` + } + + f := builder.NewFile() + f.Body().AppendFrom(Structure{ + Field: "some_value", + OtherBlocks: []EnumBlock{ + {BlockC: InnerBlock{Number: 1}}, + {BlockB: InnerBlock{Number: 2}}, + {BlockC: InnerBlock{Number: 3}}, + }, + }) + + expect := format(t, ` + field = "some_value" + + block.c { + number = 1 + } + + block.b { + number = 2 + } + + block.c { + number = 3 + } + `) + + require.Equal(t, expect, string(f.Bytes())) +} + +func TestBuilder_AppendFrom_EnumSlice_Pointer(t *testing.T) { + type InnerBlock struct { + Number int `river:"number,attr"` + } + + type EnumBlock struct { + BlockA *InnerBlock `river:"a,block,optional"` + BlockB *InnerBlock `river:"b,block,optional"` + BlockC *InnerBlock `river:"c,block,optional"` + } + + type Structure struct { + Field string `river:"field,attr"` + + OtherBlocks []EnumBlock `river:"block,enum"` + } + + f := builder.NewFile() + f.Body().AppendFrom(Structure{ + Field: "some_value", + OtherBlocks: []EnumBlock{ + {BlockC: 
&InnerBlock{Number: 1}}, + {BlockB: &InnerBlock{Number: 2}}, + {BlockC: &InnerBlock{Number: 3}}, + }, + }) + + expect := format(t, ` + field = "some_value" + + block.c { + number = 1 + } + + block.b { + number = 2 + } + + block.c { + number = 3 + } + `) + + require.Equal(t, expect, string(f.Bytes())) +} + +func TestBuilder_SkipOptional(t *testing.T) { + type Structure struct { + OptFieldA string `river:"opt_field_a,attr,optional"` + OptFieldB string `river:"opt_field_b,attr,optional"` + ReqFieldA string `river:"req_field_a,attr"` + ReqFieldB string `river:"req_field_b,attr"` + } + + f := builder.NewFile() + f.Body().AppendFrom(Structure{ + OptFieldA: "some_value", + OptFieldB: "", // Zero value + ReqFieldA: "some_value", + ReqFieldB: "", // Zero value + }) + + expect := format(t, ` + opt_field_a = "some_value" + req_field_a = "some_value" + req_field_b = "" + `) + + require.Equal(t, expect, string(f.Bytes())) +} + +func format(t *testing.T, in string) string { + t.Helper() + + f, err := parser.ParseFile(t.Name(), []byte(in)) + require.NoError(t, err) + + var buf bytes.Buffer + require.NoError(t, printer.Fprint(&buf, f)) + + return buf.String() +} + +type CustomTokenizer bool + +var _ builder.Tokenizer = (CustomTokenizer)(false) + +func (ct CustomTokenizer) RiverTokenize() []builder.Token { + return []builder.Token{{Tok: token.LITERAL, Lit: "CUSTOM_TOKENS"}} +} + +func TestBuilder_GoEncode_Tokenizer(t *testing.T) { + t.Run("Tokenizer", func(t *testing.T) { + f := builder.NewFile() + f.Body().SetAttributeValue("value", CustomTokenizer(true)) + + expect := format(t, `value = CUSTOM_TOKENS`) + require.Equal(t, expect, string(f.Bytes())) + }) + + t.Run("TextMarshaler", func(t *testing.T) { + now := time.Now() + expectBytes, err := now.MarshalText() + require.NoError(t, err) + + f := builder.NewFile() + f.Body().SetAttributeValue("value", now) + + expect := format(t, fmt.Sprintf(`value = %q`, string(expectBytes))) + require.Equal(t, expect, string(f.Bytes())) + }) + + t.Run("Duration", func(t *testing.T) { + dur := 15 * time.Second + + f := builder.NewFile() + f.Body().SetAttributeValue("value", dur) + + expect := format(t, fmt.Sprintf(`value = %q`, dur.String())) + require.Equal(t, expect, string(f.Bytes())) + }) +} + +func TestBuilder_ValueOverrideHook(t *testing.T) { + type InnerBlock struct { + AnotherField string `river:"another_field,attr"` + } + + type Structure struct { + Field string `river:"field,attr"` + + Block InnerBlock `river:"block,block"` + OtherBlocks []InnerBlock `river:"other_block,block"` + } + + f := builder.NewFile() + f.Body().SetValueOverrideHook(func(val interface{}) interface{} { + return "some other value" + }) + f.Body().AppendFrom(Structure{ + Field: "some_value", + + Block: InnerBlock{AnotherField: "some_value"}, + OtherBlocks: []InnerBlock{ + {AnotherField: "some_value"}, + {AnotherField: "some_value"}, + }, + }) + + expect := format(t, ` + field = "some other value" + + block { + another_field = "some other value" + } + + other_block { + another_field = "some other value" + } + + other_block { + another_field = "some other value" + } + `) + + require.Equal(t, expect, string(f.Bytes())) +} + +func TestBuilder_MapBlocks(t *testing.T) { + type block struct { + Value map[string]any `river:"block,block,optional"` + } + + f := builder.NewFile() + f.Body().AppendFrom(block{ + Value: map[string]any{ + "field": "value", + }, + }) + + expect := format(t, ` + block { + field = "value" + } + `) + + require.Equal(t, expect, string(f.Bytes())) +} diff --git 
a/syntax/token/builder/nested_defaults_test.go b/syntax/token/builder/nested_defaults_test.go new file mode 100644 index 0000000000..1fd8122b28 --- /dev/null +++ b/syntax/token/builder/nested_defaults_test.go @@ -0,0 +1,233 @@ +package builder_test + +import ( + "fmt" + "reflect" + "testing" + + "github.com/grafana/river/ast" + "github.com/grafana/river/parser" + "github.com/grafana/river/token/builder" + "github.com/grafana/river/vm" + "github.com/stretchr/testify/require" +) + +const ( + defaultNumber = 123 + otherDefaultNumber = 321 +) + +var testCases = []struct { + name string + input interface{} + expectedRiver string +}{ + { + name: "struct propagating default - input matching default", + input: StructPropagatingDefault{Inner: AttrWithDefault{Number: defaultNumber}}, + expectedRiver: "", + }, + { + name: "struct propagating default - input with zero-value struct", + input: StructPropagatingDefault{}, + expectedRiver: ` + inner { + number = 0 + } + `, + }, + { + name: "struct propagating default - input with non-default value", + input: StructPropagatingDefault{Inner: AttrWithDefault{Number: 42}}, + expectedRiver: ` + inner { + number = 42 + } + `, + }, + { + name: "pointer propagating default - input matching default", + input: PtrPropagatingDefault{Inner: &AttrWithDefault{Number: defaultNumber}}, + expectedRiver: "", + }, + { + name: "pointer propagating default - input with zero value", + input: PtrPropagatingDefault{Inner: &AttrWithDefault{}}, + expectedRiver: ` + inner { + number = 0 + } + `, + }, + { + name: "pointer propagating default - input with non-default value", + input: PtrPropagatingDefault{Inner: &AttrWithDefault{Number: 42}}, + expectedRiver: ` + inner { + number = 42 + } + `, + }, + { + name: "zero default - input with zero value", + input: ZeroDefault{Inner: &AttrWithDefault{}}, + expectedRiver: "", + }, + { + name: "zero default - input with non-default value", + input: ZeroDefault{Inner: &AttrWithDefault{Number: 42}}, + expectedRiver: ` + inner { + number = 42 + } + `, + }, + { + name: "no default - input with zero value", + input: NoDefaultDefined{Inner: &AttrWithDefault{}}, + expectedRiver: ` + inner { + number = 0 + } + `, + }, + { + name: "no default - input with non-default value", + input: NoDefaultDefined{Inner: &AttrWithDefault{Number: 42}}, + expectedRiver: ` + inner { + number = 42 + } + `, + }, + { + name: "mismatching default - input matching outer default", + input: MismatchingDefault{Inner: &AttrWithDefault{Number: otherDefaultNumber}}, + expectedRiver: "", + }, + { + name: "mismatching default - input matching inner default", + input: MismatchingDefault{Inner: &AttrWithDefault{Number: defaultNumber}}, + expectedRiver: "inner { }", + }, + { + name: "mismatching default - input with non-default value", + input: MismatchingDefault{Inner: &AttrWithDefault{Number: 42}}, + expectedRiver: ` + inner { + number = 42 + } + `, + }, +} + +func TestNestedDefaults(t *testing.T) { + for _, tc := range testCases { + t.Run(fmt.Sprintf("%T/%s", tc.input, tc.name), func(t *testing.T) { + f := builder.NewFile() + f.Body().AppendFrom(tc.input) + actualRiver := string(f.Bytes()) + expected := format(t, tc.expectedRiver) + require.Equal(t, expected, actualRiver, "generated river didn't match expected") + + // Now decode the River produced above and make sure it's the same as the input. 
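+			// The round trip below only holds because decoding applies SetToDefault
+			// before evaluating the block: in the "mismatching default" cases above,
+			// for instance, the bare `inner { }` must be emitted so the inner
+			// type's default (123) overrides the outer default (321) on decode.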
+ eval := vm.New(parseBlock(t, actualRiver)) + vPtr := reflect.New(reflect.TypeOf(tc.input)).Interface() + require.NoError(t, eval.Evaluate(nil, vPtr), "river evaluation error") + + actualOut := reflect.ValueOf(vPtr).Elem().Interface() + require.Equal(t, tc.input, actualOut, "Invariant violated: encoded and then decoded block didn't match the original value") + }) + } +} + +func TestPtrPropagatingDefaultWithNil(t *testing.T) { + // This is a special case - when defaults are correctly defined, the `Inner: nil` should mean to use defaults. + // Encoding will encode to empty string and decoding will produce the default value - `Inner: {Number: 123}`. + input := PtrPropagatingDefault{} + expectedEncodedRiver := "" + expectedDecoded := PtrPropagatingDefault{Inner: &AttrWithDefault{Number: 123}} + + f := builder.NewFile() + f.Body().AppendFrom(input) + actualRiver := string(f.Bytes()) + expected := format(t, expectedEncodedRiver) + require.Equal(t, expected, actualRiver, "generated river didn't match expected") + + // Now decode the River produced above and make sure it's the same as the input. + eval := vm.New(parseBlock(t, actualRiver)) + vPtr := reflect.New(reflect.TypeOf(input)).Interface() + require.NoError(t, eval.Evaluate(nil, vPtr), "river evaluation error") + + actualOut := reflect.ValueOf(vPtr).Elem().Interface() + require.Equal(t, expectedDecoded, actualOut) +} + +// StructPropagatingDefault has the outer defaults matching the inner block's defaults. The inner block is a struct. +type StructPropagatingDefault struct { + Inner AttrWithDefault `river:"inner,block,optional"` +} + +func (o *StructPropagatingDefault) SetToDefault() { + inner := &AttrWithDefault{} + inner.SetToDefault() + *o = StructPropagatingDefault{Inner: *inner} +} + +// PtrPropagatingDefault has the outer defaults matching the inner block's defaults. The inner block is a pointer. +type PtrPropagatingDefault struct { + Inner *AttrWithDefault `river:"inner,block,optional"` +} + +func (o *PtrPropagatingDefault) SetToDefault() { + inner := &AttrWithDefault{} + inner.SetToDefault() + *o = PtrPropagatingDefault{Inner: inner} +} + +// MismatchingDefault has the outer defaults NOT matching the inner block's defaults. The inner block is a pointer. +type MismatchingDefault struct { + Inner *AttrWithDefault `river:"inner,block,optional"` +} + +func (o *MismatchingDefault) SetToDefault() { + *o = MismatchingDefault{Inner: &AttrWithDefault{ + Number: otherDefaultNumber, + }} +} + +// ZeroDefault has the outer defaults setting to zero values. The inner block is a pointer. +type ZeroDefault struct { + Inner *AttrWithDefault `river:"inner,block,optional"` +} + +func (o *ZeroDefault) SetToDefault() { + *o = ZeroDefault{Inner: &AttrWithDefault{}} +} + +// NoDefaultDefined has no defaults defined. The inner block is a pointer. +type NoDefaultDefined struct { + Inner *AttrWithDefault `river:"inner,block,optional"` +} + +// AttrWithDefault has a default value of a non-zero number. 
+type AttrWithDefault struct { + Number int `river:"number,attr,optional"` +} + +func (i *AttrWithDefault) SetToDefault() { + *i = AttrWithDefault{Number: defaultNumber} +} + +func parseBlock(t *testing.T, input string) *ast.BlockStmt { + t.Helper() + + input = fmt.Sprintf("test { %s }", input) + res, err := parser.ParseFile("", []byte(input)) + require.NoError(t, err) + require.Len(t, res.Body, 1) + + stmt, ok := res.Body[0].(*ast.BlockStmt) + require.True(t, ok, "Expected stmt to be a ast.BlockStmt, got %T", res.Body[0]) + return stmt +} diff --git a/syntax/token/builder/token.go b/syntax/token/builder/token.go new file mode 100644 index 0000000000..390b968959 --- /dev/null +++ b/syntax/token/builder/token.go @@ -0,0 +1,81 @@ +package builder + +import ( + "bytes" + "io" + + "github.com/grafana/river/parser" + "github.com/grafana/river/printer" + "github.com/grafana/river/token" +) + +// A Token is a wrapper around token.Token which contains the token type +// alongside its literal. Use LiteralTok as the Tok field to write literal +// characters such as whitespace. +type Token struct { + Tok token.Token + Lit string +} + +// printFileTokens prints out the tokens as River text and formats them, writing +// the final result to w. +func printFileTokens(w io.Writer, toks []Token) (int, error) { + var raw bytes.Buffer + for _, tok := range toks { + switch { + case tok.Tok == token.LITERAL: + raw.WriteString(tok.Lit) + case tok.Tok == token.COMMENT: + raw.WriteString(tok.Lit) + case tok.Tok.IsLiteral() || tok.Tok.IsKeyword(): + raw.WriteString(tok.Lit) + default: + raw.WriteString(tok.Tok.String()) + } + } + + f, err := parser.ParseFile("", raw.Bytes()) + if err != nil { + return 0, err + } + + wc := &writerCount{w: w} + err = printer.Fprint(wc, f) + return wc.n, err +} + +// printExprTokens prints out the tokens as River text and formats them, +// writing the final result to w. +func printExprTokens(w io.Writer, toks []Token) (int, error) { + var raw bytes.Buffer + for _, tok := range toks { + switch { + case tok.Tok == token.LITERAL: + raw.WriteString(tok.Lit) + case tok.Tok.IsLiteral() || tok.Tok.IsKeyword(): + raw.WriteString(tok.Lit) + default: + raw.WriteString(tok.Tok.String()) + } + } + + expr, err := parser.ParseExpression(raw.String()) + if err != nil { + return 0, err + } + + wc := &writerCount{w: w} + err = printer.Fprint(wc, expr) + return wc.n, err +} + +type writerCount struct { + w io.Writer + n int +} + +func (wc *writerCount) Write(p []byte) (n int, err error) { + n, err = wc.w.Write(p) + wc.n += n + return +} diff --git a/syntax/token/builder/value_tokens.go b/syntax/token/builder/value_tokens.go new file mode 100644 index 0000000000..c73e34f7b6 --- /dev/null +++ b/syntax/token/builder/value_tokens.go @@ -0,0 +1,95 @@ +package builder + +import ( + "fmt" + "sort" + + "github.com/grafana/river/internal/value" + "github.com/grafana/river/scanner" + "github.com/grafana/river/token" +) + +// TODO(rfratto): check for optional values + +// Tokenizer is any value which can return a raw set of tokens. +type Tokenizer interface { + // RiverTokenize returns the raw set of River tokens which are used when + // printing out the value with river/token/builder. + RiverTokenize() []Token +} + +func tokenEncode(val interface{}) []Token { + return valueTokens(value.Encode(val)) +} + +func valueTokens(v value.Value) []Token { + var toks []Token + + // If v is a Tokenizer, allow it to override what tokens get generated. 
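+	// For example, a capsule type can implement RiverTokenize to print as a
+	// custom literal (see CustomTokenizer in builder_test.go, which prints as
+	// CUSTOM_TOKENS regardless of its encoded value).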
+ if tk, ok := v.Interface().(Tokenizer); ok { + return tk.RiverTokenize() + } + + switch v.Type() { + case value.TypeNull: + toks = append(toks, Token{token.NULL, "null"}) + + case value.TypeNumber: + toks = append(toks, Token{token.NUMBER, v.Number().ToString()}) + + case value.TypeString: + toks = append(toks, Token{token.STRING, fmt.Sprintf("%q", v.Text())}) + + case value.TypeBool: + toks = append(toks, Token{token.STRING, fmt.Sprintf("%v", v.Bool())}) + + case value.TypeArray: + toks = append(toks, Token{token.LBRACK, ""}) + elems := v.Len() + for i := 0; i < elems; i++ { + elem := v.Index(i) + + toks = append(toks, valueTokens(elem)...) + if i+1 < elems { + toks = append(toks, Token{token.COMMA, ""}) + } + } + toks = append(toks, Token{token.RBRACK, ""}) + + case value.TypeObject: + toks = append(toks, Token{token.LCURLY, ""}, Token{token.LITERAL, "\n"}) + + keys := v.Keys() + + // If v isn't an ordered object (i.e., a go map), sort the keys so they + // have a deterministic print order. + if !v.OrderedKeys() { + sort.Strings(keys) + } + + for i := 0; i < len(keys); i++ { + if scanner.IsValidIdentifier(keys[i]) { + toks = append(toks, Token{token.IDENT, keys[i]}) + } else { + toks = append(toks, Token{token.STRING, fmt.Sprintf("%q", keys[i])}) + } + + field, _ := v.Key(keys[i]) + toks = append(toks, Token{token.ASSIGN, ""}) + toks = append(toks, valueTokens(field)...) + toks = append(toks, Token{token.COMMA, ""}, Token{token.LITERAL, "\n"}) + } + toks = append(toks, Token{token.RCURLY, ""}) + + case value.TypeFunction: + toks = append(toks, Token{token.LITERAL, v.Describe()}) + + case value.TypeCapsule: + toks = append(toks, Token{token.LITERAL, v.Describe()}) + + default: + panic(fmt.Sprintf("river/token/builder: unrecognized value type %q", v.Type())) + } + + return toks +} diff --git a/syntax/token/file.go b/syntax/token/file.go new file mode 100644 index 0000000000..419dbaa57c --- /dev/null +++ b/syntax/token/file.go @@ -0,0 +1,142 @@ +package token + +import ( + "fmt" + "sort" + "strconv" +) + +// NoPos is the zero value for Pos. It has no file or line information +// associated with it, and NoPos.Valid is false. +var NoPos = Pos{} + +// Pos is a compact representation of a position within a file. It can be +// converted into a Position for a more convenient, but larger, representation. +type Pos struct { + file *File + off int +} + +// String returns the string form of the Pos (the offset). +func (p Pos) String() string { return strconv.Itoa(p.off) } + +// File returns the file used by the Pos. This will be nil for invalid +// positions. +func (p Pos) File() *File { return p.file } + +// Position converts the Pos into a Position. +func (p Pos) Position() Position { return p.file.PositionFor(p) } + +// Add creates a new Pos relative to p. +func (p Pos) Add(n int) Pos { + return Pos{ + file: p.file, + off: p.off + n, + } +} + +// Offset returns the byte offset associated with Pos. +func (p Pos) Offset() int { return p.off } + +// Valid reports whether the Pos is valid. +func (p Pos) Valid() bool { return p != NoPos } + +// Position holds full position information for a location within an individual +// file. +type Position struct { + Filename string // Filename (if any) + Offset int // Byte offset (starting at 0) + Line int // Line number (starting at 1) + Column int // Offset from start of line (starting at 1) +} + +// Valid reports whether the position is valid. Valid positions must have a +// Line value greater than 0. 
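+//
+// For example, the zero Position is invalid, while any Position produced by
+// File.PositionFor from a valid Pos has Line >= 1.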
+func (pos *Position) Valid() bool { return pos.Line > 0 } + +// String returns a string in one of the following forms: +// +// file:line:column Valid position with file name +// file:line Valid position with file name but no column +// line:column Valid position with no file name +// line Valid position with no file name or column +// file Invalid position with file name +// - Invalid position with no file name +func (pos Position) String() string { + s := pos.Filename + + if pos.Valid() { + if s != "" { + s += ":" + } + s += fmt.Sprintf("%d", pos.Line) + if pos.Column != 0 { + s += fmt.Sprintf(":%d", pos.Column) + } + } + + if s == "" { + s = "-" + } + return s +} + +// File holds position information for a specific file. +type File struct { + filename string + lines []int // Byte offset of each line number (first element is always 0). +} + +// NewFile creates a new File for storing position information. +func NewFile(filename string) *File { + return &File{ + filename: filename, + lines: []int{0}, + } +} + +// Pos returns a Pos given a byte offset. Pos panics if off is < 0. +func (f *File) Pos(off int) Pos { + if off < 0 { + panic("Pos: illegal offset") + } + return Pos{file: f, off: off} +} + +// Name returns the name of the file. +func (f *File) Name() string { return f.filename } + +// AddLine tracks a new line from a byte offset. The line offset must be larger +// than the offset for the previous line, otherwise the line offset is ignored. +func (f *File) AddLine(offset int) { + lines := len(f.lines) + if f.lines[lines-1] < offset { + f.lines = append(f.lines, offset) + } +} + +// PositionFor returns a Position from an offset. +func (f *File) PositionFor(p Pos) Position { + if p == NoPos { + return Position{} + } + + // Search through our line offsets to find the line/column info. The else + // case should never happen here, but if it does, Position.Valid will return + // false. + var line, column int + if i := searchInts(f.lines, p.off); i >= 0 { + line, column = i+1, p.off-f.lines[i]+1 + } + + return Position{ + Filename: f.filename, + Offset: p.off, + Line: line, + Column: column, + } +} + +func searchInts(a []int, x int) int { + return sort.Search(len(a), func(i int) bool { return a[i] > x }) - 1 +} diff --git a/syntax/token/token.go b/syntax/token/token.go new file mode 100644 index 0000000000..74dabc9de2 --- /dev/null +++ b/syntax/token/token.go @@ -0,0 +1,174 @@ +// Package token defines the lexical elements of a River config and utilities +// surrounding their position. +package token + +// Token is an individual River lexical token. +type Token int + +// List of all lexical tokens and examples that represent them. +// +// LITERAL is used by token/builder to represent literal strings for writing +// tokens, but never used for reading (so scanner never returns a +// token.LITERAL). +const ( + ILLEGAL Token = iota // Invalid token. + LITERAL // Literal text. + EOF // End-of-file. + COMMENT // // Hello, world! + + literalBeg + IDENT // foobar + NUMBER // 1234 + FLOAT // 1234.0 + STRING // "foobar" + literalEnd + + keywordBeg + BOOL // true + NULL // null + keywordEnd + + operatorBeg + OR // || + AND // && + NOT // ! + + ASSIGN // = + + EQ // == + NEQ // != + LT // < + LTE // <= + GT // > + GTE // >= + + ADD // + + SUB // - + MUL // * + DIV // / + MOD // % + POW // ^ + + LCURLY // { + RCURLY // } + LPAREN // ( + RPAREN // ) + LBRACK // [ + RBRACK // ] + COMMA // , + DOT // . 
+ operatorEnd + + TERMINATOR // \n +) + +var tokenNames = [...]string{ + ILLEGAL: "ILLEGAL", + LITERAL: "LITERAL", + EOF: "EOF", + COMMENT: "COMMENT", + + IDENT: "IDENT", + NUMBER: "NUMBER", + FLOAT: "FLOAT", + STRING: "STRING", + BOOL: "BOOL", + NULL: "NULL", + + OR: "||", + AND: "&&", + NOT: "!", + + ASSIGN: "=", + EQ: "==", + NEQ: "!=", + LT: "<", + LTE: "<=", + GT: ">", + GTE: ">=", + + ADD: "+", + SUB: "-", + MUL: "*", + DIV: "/", + MOD: "%", + POW: "^", + + LCURLY: "{", + RCURLY: "}", + LPAREN: "(", + RPAREN: ")", + LBRACK: "[", + RBRACK: "]", + COMMA: ",", + DOT: ".", + + TERMINATOR: "TERMINATOR", +} + +// Lookup maps a string to its keyword token or IDENT if it's not a keyword. +func Lookup(ident string) Token { + switch ident { + case "true", "false": + return BOOL + case "null": + return NULL + default: + return IDENT + } +} + +// String returns the string representation corresponding to the token. +func (t Token) String() string { + if int(t) >= len(tokenNames) { + return "ILLEGAL" + } + + name := tokenNames[t] + if name == "" { + return "ILLEGAL" + } + return name +} + +// GoString returns the %#v format of t. +func (t Token) GoString() string { return t.String() } + +// IsKeyword returns true if the token corresponds to a keyword. +func (t Token) IsKeyword() bool { return t > keywordBeg && t < keywordEnd } + +// IsLiteral returns true if the token corresponds to a literal token or +// identifier. +func (t Token) IsLiteral() bool { return t > literalBeg && t < literalEnd } + +// IsOperator returns true if the token corresponds to an operator or +// delimiter. +func (t Token) IsOperator() bool { return t > operatorBeg && t < operatorEnd } + +// BinaryPrecedence returns the operator precedence of the binary operator t. +// If t is not a binary operator, the result is LowestPrecedence. +func (t Token) BinaryPrecedence() int { + switch t { + case OR: + return 1 + case AND: + return 2 + case EQ, NEQ, LT, LTE, GT, GTE: + return 3 + case ADD, SUB: + return 4 + case MUL, DIV, MOD: + return 5 + case POW: + return 6 + } + + return LowestPrecedence +} + +// Levels of precedence for operator tokens. +const ( + LowestPrecedence = 0 // non-operators + UnaryPrecedence = 7 + HighestPrecedence = 8 +) diff --git a/syntax/types.go b/syntax/types.go new file mode 100644 index 0000000000..b0123010b8 --- /dev/null +++ b/syntax/types.go @@ -0,0 +1,97 @@ +package river + +import "github.com/grafana/river/internal/value" + +// Our types in this file are re-implementations of interfaces from +// value.Capsule. They are *not* defined as type aliases, since pkg.go.dev +// would show the type alias instead of the contents of that type (which IMO is +// a frustrating user experience). +// +// The types below must be kept in sync with the internal package, and the +// checks below ensure they're compatible. +var ( + _ value.Defaulter = (Defaulter)(nil) + _ value.Unmarshaler = (Unmarshaler)(nil) + _ value.Validator = (Validator)(nil) + _ value.Capsule = (Capsule)(nil) + _ value.ConvertibleFromCapsule = (ConvertibleFromCapsule)(nil) + _ value.ConvertibleIntoCapsule = (ConvertibleIntoCapsule)(nil) +) + +// The Unmarshaler interface allows a type to hook into River decoding and +// decode into another type or provide pre-decoding logic. +type Unmarshaler interface { + // UnmarshalRiver is invoked when decoding a River value into a Go value. f + // should be called with a pointer to a value to decode into. 
UnmarshalRiver
+	// will not be called on types which are squashed into the parent struct
+	// using `river:",squash"`.
+	UnmarshalRiver(f func(v interface{}) error) error
+}
+
+// The Defaulter interface allows a type to implement default functionality
+// in River evaluation.
+type Defaulter interface {
+	// SetToDefault is called when evaluating a block or body to set the value
+	// to its defaults. SetToDefault will not be called on types which are
+	// squashed into the parent struct using `river:",squash"`.
+	SetToDefault()
+}
+
+// The Validator interface allows a type to implement validation functionality
+// in River evaluation.
+type Validator interface {
+	// Validate is called when evaluating a block or body to enforce the
+	// value is valid. Validate will not be called on types which are
+	// squashed into the parent struct using `river:",squash"`.
+	Validate() error
+}
+
+// Capsule is an interface marker which tells River that a type should always
+// be treated as a "capsule type" instead of the default type River would
+// assign.
+//
+// Capsule types are useful for passing around arbitrary Go values in River
+// expressions and for declaring new synthetic types with custom conversion
+// rules.
+//
+// By default, only two capsule values of the same underlying Go type are
+// compatible. Types which implement ConvertibleFromCapsule or
+// ConvertibleIntoCapsule can provide custom logic for conversions from and to
+// other types.
+type Capsule interface {
+	// RiverCapsule marks the type as a Capsule. RiverCapsule is never invoked by
+	// River.
+	RiverCapsule()
+}
+
+// ErrNoConversion is returned by implementations of ConvertibleFromCapsule and
+// ConvertibleIntoCapsule when a conversion with a specific type is
+// unavailable.
+//
+// Returning this error causes River to fall back to default conversion rules.
+var ErrNoConversion = value.ErrNoConversion
+
+// ConvertibleFromCapsule is a Capsule which supports custom conversion from
+// any Go type which is not the same as the capsule type.
+type ConvertibleFromCapsule interface {
+	Capsule
+
+	// ConvertFrom updates the ConvertibleFromCapsule value based on the value of
+	// src. src may be any Go value, not just other capsules.
+	//
+	// ConvertFrom should return ErrNoConversion if no conversion is available
+	// from src. Other errors are treated as a River decoding error.
+	ConvertFrom(src interface{}) error
+}
+
+// ConvertibleIntoCapsule is a Capsule which supports custom conversion into
+// any Go type which is not the same as the capsule type.
+type ConvertibleIntoCapsule interface {
+	Capsule
+
+	// ConvertInto should convert its value and store it into dst. dst will be a
+	// pointer to a Go value of any type.
+	//
+	// ConvertInto should return ErrNoConversion if no conversion into dst is
+	// available. Other errors are treated as a River decoding error.
+	ConvertInto(dst interface{}) error
+}
diff --git a/syntax/vm/constant.go b/syntax/vm/constant.go
new file mode 100644
index 0000000000..d2e54c717d
--- /dev/null
+++ b/syntax/vm/constant.go
@@ -0,0 +1,64 @@
+package vm
+
+import (
+	"fmt"
+	"strconv"
+
+	"github.com/grafana/river/internal/value"
+	"github.com/grafana/river/token"
+)
+
+func valueFromLiteral(lit string, tok token.Token) (value.Value, error) {
+	// NOTE(rfratto): this function should never return an error, since the
+	// parser only produces valid tokens; it can only fail if a user hand-builds
+	// an AST with invalid literals.
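+	//
+	// For instance, a NUMBER literal is tried as int, then uint, then float:
+	// "123" parses as value.Int(123), "18446744073709551615" only fits
+	// value.Uint, and "1e3" falls through to value.Float(1000).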
+
+	switch tok {
+	case token.NULL:
+		return value.Null, nil
+
+	case token.NUMBER:
+		intVal, err1 := strconv.ParseInt(lit, 0, 64)
+		if err1 == nil {
+			return value.Int(intVal), nil
+		}
+
+		uintVal, err2 := strconv.ParseUint(lit, 0, 64)
+		if err2 == nil {
+			return value.Uint(uintVal), nil
+		}
+
+		floatVal, err3 := strconv.ParseFloat(lit, 64)
+		if err3 == nil {
+			return value.Float(floatVal), nil
+		}
+
+		return value.Null, err3
+
+	case token.FLOAT:
+		v, err := strconv.ParseFloat(lit, 64)
+		if err != nil {
+			return value.Null, err
+		}
+		return value.Float(v), nil
+
+	case token.STRING:
+		v, err := strconv.Unquote(lit)
+		if err != nil {
+			return value.Null, err
+		}
+		return value.String(v), nil
+
+	case token.BOOL:
+		switch lit {
+		case "true":
+			return value.Bool(true), nil
+		case "false":
+			return value.Bool(false), nil
+		default:
+			return value.Null, fmt.Errorf("invalid boolean literal %q", lit)
+		}
+	default:
+		panic(fmt.Sprintf("%v is not a valid token", tok))
+	}
+}
diff --git a/syntax/vm/error.go b/syntax/vm/error.go
new file mode 100644
index 0000000000..c82a5b418e
--- /dev/null
+++ b/syntax/vm/error.go
@@ -0,0 +1,106 @@
+package vm
+
+import (
+	"fmt"
+	"strings"
+
+	"github.com/grafana/river/ast"
+	"github.com/grafana/river/diag"
+	"github.com/grafana/river/internal/value"
+	"github.com/grafana/river/printer"
+	"github.com/grafana/river/token/builder"
+)
+
+// makeDiagnostic tries to convert err into a diag.Diagnostic. err must be an
+// error from the river/internal/value package, otherwise err will be returned
+// unmodified.
+func makeDiagnostic(err error, assoc map[value.Value]ast.Node) error {
+	var (
+		node    ast.Node
+		expr    strings.Builder
+		message string
+		cause   value.Value
+
+		// Until we find a node, we're not a literal error.
+		literal = false
+	)
+
+	isValueError := value.WalkError(err, func(err error) {
+		var val value.Value
+
+		switch ne := err.(type) {
+		case value.Error:
+			message = ne.Error()
+			val = ne.Value
+		case value.TypeError:
+			message = fmt.Sprintf("should be %s, got %s", ne.Expected, ne.Value.Type())
+			val = ne.Value
+		case value.MissingKeyError:
+			message = fmt.Sprintf("does not have field named %q", ne.Missing)
+			val = ne.Value
+		case value.ElementError:
+			fmt.Fprintf(&expr, "[%d]", ne.Index)
+			val = ne.Value
+		case value.FieldError:
+			fmt.Fprintf(&expr, ".%s", ne.Field)
+			val = ne.Value
+		}
+
+		cause = val
+
+		if foundNode, ok := assoc[val]; ok {
+			// If we just found a direct node, we can reset the expression buffer so
+			// we don't unnecessarily print element and field accesses for values we
+			// can see directly in the file.
+			if literal {
+				expr.Reset()
+			}
+
+			node = foundNode
+			literal = true
+		} else {
+			literal = false
+		}
+	})
+	if !isValueError {
+		return err
+	}
+
+	if node != nil {
+		var nodeText strings.Builder
+		if err := printer.Fprint(&nodeText, node); err != nil {
+			// This should never panic; printer.Fprint only fails when given an
+			// unexpected type, which we never do here.
+			panic(err)
+		}
+
+		// Merge the node text with the expression, which holds relative accesses
+		// off of the node.
+		message = fmt.Sprintf("%s%s %s", nodeText.String(), expr.String(), message)
+	} else {
+		message = fmt.Sprintf("%s %s", expr.String(), message)
+	}
+
+	// Render the underlying problematic value as a string.
+	var valueText string
+	if cause != value.Null {
+		be := builder.NewExpr()
+		be.SetValue(cause.Interface())
+		valueText = string(be.Bytes())
+	}
+	if literal {
+		// Hide the value if the node itself has the error we were worried about.
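+		// For example, when `foo = 15` fails to decode as a string, the message
+		// already quotes the literal 15, so repeating it as the diagnostic's
+		// value would be redundant.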
+ valueText = "" + } + + d := diag.Diagnostic{ + Severity: diag.SeverityLevelError, + Message: message, + Value: valueText, + } + if node != nil { + d.StartPos = ast.StartPos(node).Position() + d.EndPos = ast.EndPos(node).Position() + } + return d +} diff --git a/syntax/vm/op_binary.go b/syntax/vm/op_binary.go new file mode 100644 index 0000000000..e329f6fc49 --- /dev/null +++ b/syntax/vm/op_binary.go @@ -0,0 +1,360 @@ +package vm + +import ( + "fmt" + "math" + "reflect" + + "github.com/grafana/river/internal/value" + "github.com/grafana/river/rivertypes" + "github.com/grafana/river/token" +) + +func evalBinop(lhs value.Value, op token.Token, rhs value.Value) (value.Value, error) { + // Original parameters of lhs and rhs used for returning errors. + var ( + origLHS = lhs + origRHS = rhs + ) + + // Hack to allow OptionalSecrets to be used in binary operations. + // + // TODO(rfratto): be more flexible in the future with broader definitions of + // how capsules can be converted to other types for the purposes of doing a + // binop. + if lhs.Type() == value.TypeCapsule { + lhs = tryUnwrapOptionalSecret(lhs) + } + if rhs.Type() == value.TypeCapsule { + rhs = tryUnwrapOptionalSecret(rhs) + } + + // TODO(rfratto): evalBinop should check for underflows and overflows + + // We have special handling for EQ and NEQ since it's valid to attempt to + // compare values of any two types. + switch op { + case token.EQ: + return value.Bool(valuesEqual(lhs, rhs)), nil + case token.NEQ: + return value.Bool(!valuesEqual(lhs, rhs)), nil + } + + // The type of lhs and rhs must be acceptable for the binary operator. + if !acceptableBinopType(lhs, op) { + return value.Null, value.Error{ + Value: origLHS, + Inner: fmt.Errorf("should be one of %v for binop %s, got %s", binopAllowedTypes[op], op, lhs.Type()), + } + } else if !acceptableBinopType(rhs, op) { + return value.Null, value.Error{ + Value: origRHS, + Inner: fmt.Errorf("should be one of %v for binop %s, got %s", binopAllowedTypes[op], op, rhs.Type()), + } + } + + // At this point, regardless of the operator, lhs and rhs must have the same + // type. 
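+	// For example, `"10" < 5` fails here with a TypeError: string and number
+	// are each acceptable for <, but they can't be mixed.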
+	if lhs.Type() != rhs.Type() {
+		return value.Null, value.TypeError{Value: rhs, Expected: lhs.Type()}
+	}
+
+	switch op {
+	case token.OR: // bool || bool
+		return value.Bool(lhs.Bool() || rhs.Bool()), nil
+	case token.AND: // bool && bool
+		return value.Bool(lhs.Bool() && rhs.Bool()), nil
+
+	case token.ADD: // number + number, string + string
+		if lhs.Type() == value.TypeString {
+			return value.String(lhs.Text() + rhs.Text()), nil
+		}
+
+		lhsNum, rhsNum := lhs.Number(), rhs.Number()
+		switch fitNumberKinds(lhsNum.Kind(), rhsNum.Kind()) {
+		case value.NumberKindUint:
+			return value.Uint(lhsNum.Uint() + rhsNum.Uint()), nil
+		case value.NumberKindInt:
+			return value.Int(lhsNum.Int() + rhsNum.Int()), nil
+		case value.NumberKindFloat:
+			return value.Float(lhsNum.Float() + rhsNum.Float()), nil
+		}
+
+	case token.SUB: // number - number
+		lhsNum, rhsNum := lhs.Number(), rhs.Number()
+		switch fitNumberKinds(lhsNum.Kind(), rhsNum.Kind()) {
+		case value.NumberKindUint:
+			return value.Uint(lhsNum.Uint() - rhsNum.Uint()), nil
+		case value.NumberKindInt:
+			return value.Int(lhsNum.Int() - rhsNum.Int()), nil
+		case value.NumberKindFloat:
+			return value.Float(lhsNum.Float() - rhsNum.Float()), nil
+		}
+
+	case token.MUL: // number * number
+		lhsNum, rhsNum := lhs.Number(), rhs.Number()
+		switch fitNumberKinds(lhsNum.Kind(), rhsNum.Kind()) {
+		case value.NumberKindUint:
+			return value.Uint(lhsNum.Uint() * rhsNum.Uint()), nil
+		case value.NumberKindInt:
+			return value.Int(lhsNum.Int() * rhsNum.Int()), nil
+		case value.NumberKindFloat:
+			return value.Float(lhsNum.Float() * rhsNum.Float()), nil
+		}
+
+	case token.DIV: // number / number
+		lhsNum, rhsNum := lhs.Number(), rhs.Number()
+		switch fitNumberKinds(lhsNum.Kind(), rhsNum.Kind()) {
+		case value.NumberKindUint:
+			return value.Uint(lhsNum.Uint() / rhsNum.Uint()), nil
+		case value.NumberKindInt:
+			return value.Int(lhsNum.Int() / rhsNum.Int()), nil
+		case value.NumberKindFloat:
+			return value.Float(lhsNum.Float() / rhsNum.Float()), nil
+		}
+
+	case token.MOD: // number % number
+		lhsNum, rhsNum := lhs.Number(), rhs.Number()
+		switch fitNumberKinds(lhsNum.Kind(), rhsNum.Kind()) {
+		case value.NumberKindUint:
+			return value.Uint(lhsNum.Uint() % rhsNum.Uint()), nil
+		case value.NumberKindInt:
+			return value.Int(lhsNum.Int() % rhsNum.Int()), nil
+		case value.NumberKindFloat:
+			return value.Float(math.Mod(lhsNum.Float(), rhsNum.Float())), nil
+		}
+
+	case token.POW: // number ^ number
+		lhsNum, rhsNum := lhs.Number(), rhs.Number()
+		switch fitNumberKinds(lhsNum.Kind(), rhsNum.Kind()) {
+		case value.NumberKindUint:
+			return value.Uint(intPow(lhsNum.Uint(), rhsNum.Uint())), nil
+		case value.NumberKindInt:
+			return value.Int(intPow(lhsNum.Int(), rhsNum.Int())), nil
+		case value.NumberKindFloat:
+			return value.Float(math.Pow(lhsNum.Float(), rhsNum.Float())), nil
+		}
+
+	case token.LT: // number < number, string < string
+		// Check string first.
+		if lhs.Type() == value.TypeString {
+			return value.Bool(lhs.Text() < rhs.Text()), nil
+		}
+
+		// Not a string; must be a number.
+		lhsNum, rhsNum := lhs.Number(), rhs.Number()
+		switch fitNumberKinds(lhsNum.Kind(), rhsNum.Kind()) {
+		case value.NumberKindUint:
+			return value.Bool(lhsNum.Uint() < rhsNum.Uint()), nil
+		case value.NumberKindInt:
+			return value.Bool(lhsNum.Int() < rhsNum.Int()), nil
+		case value.NumberKindFloat:
+			return value.Bool(lhsNum.Float() < rhsNum.Float()), nil
+		}
+
+	case token.GT: // number > number, string > string
+		// Check string first.
+ if lhs.Type() == value.TypeString { + return value.Bool(lhs.Text() > rhs.Text()), nil + } + + // Not a string; must be a number. + lhsNum, rhsNum := lhs.Number(), rhs.Number() + switch fitNumberKinds(lhsNum.Kind(), rhsNum.Kind()) { + case value.NumberKindUint: + return value.Bool(lhsNum.Uint() > rhsNum.Uint()), nil + case value.NumberKindInt: + return value.Bool(lhsNum.Int() > rhsNum.Int()), nil + case value.NumberKindFloat: + return value.Bool(lhsNum.Float() > rhsNum.Float()), nil + } + + case token.LTE: // number <= number, string <= string + // Check string first. + if lhs.Type() == value.TypeString { + return value.Bool(lhs.Text() <= rhs.Text()), nil + } + + // Not a string; must be a number. + lhsNum, rhsNum := lhs.Number(), rhs.Number() + switch fitNumberKinds(lhsNum.Kind(), rhsNum.Kind()) { + case value.NumberKindUint: + return value.Bool(lhsNum.Uint() <= rhsNum.Uint()), nil + case value.NumberKindInt: + return value.Bool(lhsNum.Int() <= rhsNum.Int()), nil + case value.NumberKindFloat: + return value.Bool(lhsNum.Float() <= rhsNum.Float()), nil + } + + case token.GTE: // number >= number, string >= string + // Check string first. + if lhs.Type() == value.TypeString { + return value.Bool(lhs.Text() >= rhs.Text()), nil + } + + // Not a string; must be a number. + lhsNum, rhsNum := lhs.Number(), rhs.Number() + switch fitNumberKinds(lhsNum.Kind(), rhsNum.Kind()) { + case value.NumberKindUint: + return value.Bool(lhsNum.Uint() >= rhsNum.Uint()), nil + case value.NumberKindInt: + return value.Bool(lhsNum.Int() >= rhsNum.Int()), nil + case value.NumberKindFloat: + return value.Bool(lhsNum.Float() >= rhsNum.Float()), nil + } + } + + panic("river/vm: unreachable") +} + +// tryUnwrapOptionalSecret accepts a value and, if it is a +// rivertypes.OptionalSecret where IsSecret is false, returns a string value +// instead. +// +// If val is not a rivertypes.OptionalSecret or IsSecret is true, +// tryUnwrapOptionalSecret returns the input value unchanged. +func tryUnwrapOptionalSecret(val value.Value) value.Value { + optSecret, ok := val.Interface().(rivertypes.OptionalSecret) + if !ok || optSecret.IsSecret { + return val + } + + return value.String(optSecret.Value) +} + +// valuesEqual returns true if two River Values are equal. +func valuesEqual(lhs value.Value, rhs value.Value) bool { + if lhs.Type() != rhs.Type() { + // Two values with different types are never equal. + return false + } + + switch lhs.Type() { + case value.TypeNull: + // Nothing to compare here: both lhs and rhs have the null type, + // so they're equal. + return true + + case value.TypeNumber: + // Two numbers are equal if they have equal values. However, we have to + // determine what comparison we want to do and upcast the values to a + // different Go type as needed (so that 3 == 3.0 is true). + lhsNum, rhsNum := lhs.Number(), rhs.Number() + switch fitNumberKinds(lhsNum.Kind(), rhsNum.Kind()) { + case value.NumberKindUint: + return lhsNum.Uint() == rhsNum.Uint() + case value.NumberKindInt: + return lhsNum.Int() == rhsNum.Int() + case value.NumberKindFloat: + return lhsNum.Float() == rhsNum.Float() + } + + case value.TypeString: + return lhs.Text() == rhs.Text() + + case value.TypeBool: + return lhs.Bool() == rhs.Bool() + + case value.TypeArray: + // Two arrays are equal if they have equal elements. 
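+		// The comparison recurses element-wise, so [1, 2] == [1.0, 2.0] holds:
+		// each pair of numbers is compared by value after being fitted to a
+		// common kind.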
+ if lhs.Len() != rhs.Len() { + return false + } + for i := 0; i < lhs.Len(); i++ { + if !valuesEqual(lhs.Index(i), rhs.Index(i)) { + return false + } + } + return true + + case value.TypeObject: + // Two objects are equal if they have equal elements. + if lhs.Len() != rhs.Len() { + return false + } + for _, key := range lhs.Keys() { + lhsElement, _ := lhs.Key(key) + rhsElement, inRHS := rhs.Key(key) + if !inRHS { + return false + } + if !valuesEqual(lhsElement, rhsElement) { + return false + } + } + return true + + case value.TypeFunction: + // Two functions are never equal. We can't compare functions in Go, so + // there's no way to compare them in River right now. + return false + + case value.TypeCapsule: + // Two capsules are only equal if the underlying values are deeply equal. + return reflect.DeepEqual(lhs.Interface(), rhs.Interface()) + } + + panic("river/vm: unreachable") +} + +// binopAllowedTypes maps what type of values are permitted for a specific +// binary operation. +// +// token.EQ and token.NEQ are not included as they're handled separately from +// other binary ops. +var binopAllowedTypes = map[token.Token][]value.Type{ + token.OR: {value.TypeBool}, + token.AND: {value.TypeBool}, + + token.ADD: {value.TypeNumber, value.TypeString}, + token.SUB: {value.TypeNumber}, + token.MUL: {value.TypeNumber}, + token.DIV: {value.TypeNumber}, + token.MOD: {value.TypeNumber}, + token.POW: {value.TypeNumber}, + + token.LT: {value.TypeNumber, value.TypeString}, + token.GT: {value.TypeNumber, value.TypeString}, + token.LTE: {value.TypeNumber, value.TypeString}, + token.GTE: {value.TypeNumber, value.TypeString}, +} + +func acceptableBinopType(val value.Value, op token.Token) bool { + allowed, ok := binopAllowedTypes[op] + if !ok { + panic("river/vm: unexpected binop type") + } + + actualType := val.Type() + for _, allowType := range allowed { + if allowType == actualType { + return true + } + } + return false +} + +func fitNumberKinds(a, b value.NumberKind) value.NumberKind { + aPrec, bPrec := numberKindPrec[a], numberKindPrec[b] + if aPrec > bPrec { + return a + } + return b +} + +var numberKindPrec = map[value.NumberKind]int{ + value.NumberKindUint: 0, + value.NumberKindInt: 1, + value.NumberKindFloat: 2, +} + +func intPow[Number int64 | uint64](n, m Number) Number { + if m == 0 { + return 1 + } + result := n + for i := Number(2); i <= m; i++ { + result *= n + } + return result +} diff --git a/syntax/vm/op_binary_test.go b/syntax/vm/op_binary_test.go new file mode 100644 index 0000000000..45367777e4 --- /dev/null +++ b/syntax/vm/op_binary_test.go @@ -0,0 +1,94 @@ +package vm_test + +import ( + "reflect" + "testing" + + "github.com/grafana/river/parser" + "github.com/grafana/river/rivertypes" + "github.com/grafana/river/vm" + "github.com/stretchr/testify/require" +) + +func TestVM_OptionalSecret_Conversion(t *testing.T) { + scope := &vm.Scope{ + Variables: map[string]any{ + "string_val": "hello", + "non_secret_val": rivertypes.OptionalSecret{IsSecret: false, Value: "world"}, + "secret_val": rivertypes.OptionalSecret{IsSecret: true, Value: "secret"}, + }, + } + + tt := []struct { + name string + input string + expect interface{} + expectError string + }{ + { + name: "string + capsule", + input: `string_val + non_secret_val`, + expect: string("helloworld"), + }, + { + name: "capsule + string", + input: `non_secret_val + string_val`, + expect: string("worldhello"), + }, + { + name: "string == capsule", + input: `"world" == non_secret_val`, + expect: bool(true), + }, + { + name: "capsule == 
string", + input: `non_secret_val == "world"`, + expect: bool(true), + }, + { + name: "capsule (secret) == capsule (secret)", + input: `secret_val == secret_val`, + expect: bool(true), + }, + { + name: "capsule (non secret) == capsule (non secret)", + input: `non_secret_val == non_secret_val`, + expect: bool(true), + }, + { + name: "capsule (non secret) == capsule (secret)", + input: `non_secret_val == secret_val`, + expect: bool(false), + }, + { + name: "secret + string", + input: `secret_val + string_val`, + expectError: "secret_val should be one of [number string] for binop +", + }, + { + name: "string + secret", + input: `string_val + secret_val`, + expectError: "secret_val should be one of [number string] for binop +", + }, + } + + for _, tc := range tt { + t.Run(tc.name, func(t *testing.T) { + expr, err := parser.ParseExpression(tc.input) + require.NoError(t, err) + + expectTy := reflect.TypeOf(tc.expect) + if expectTy == nil { + expectTy = reflect.TypeOf((*any)(nil)).Elem() + } + rv := reflect.New(expectTy) + + if err := vm.New(expr).Evaluate(scope, rv.Interface()); tc.expectError == "" { + require.NoError(t, err) + require.Equal(t, tc.expect, rv.Elem().Interface()) + } else { + require.ErrorContains(t, err, tc.expectError) + } + }) + } +} diff --git a/syntax/vm/op_unary.go b/syntax/vm/op_unary.go new file mode 100644 index 0000000000..bc116d58bc --- /dev/null +++ b/syntax/vm/op_unary.go @@ -0,0 +1,33 @@ +package vm + +import ( + "github.com/grafana/river/internal/value" + "github.com/grafana/river/token" +) + +func evalUnaryOp(op token.Token, val value.Value) (value.Value, error) { + switch op { + case token.NOT: + if val.Type() != value.TypeBool { + return value.Null, value.TypeError{Value: val, Expected: value.TypeBool} + } + return value.Bool(!val.Bool()), nil + + case token.SUB: + if val.Type() != value.TypeNumber { + return value.Null, value.TypeError{Value: val, Expected: value.TypeNumber} + } + + valNum := val.Number() + switch valNum.Kind() { + case value.NumberKindInt, value.NumberKindUint: + // It doesn't make much sense to invert a uint, so we always cast to an + // int and return an int. + return value.Int(-valNum.Int()), nil + case value.NumberKindFloat: + return value.Float(-valNum.Float()), nil + } + } + + panic("river/vm: unreachable") +} diff --git a/syntax/vm/struct_decoder.go b/syntax/vm/struct_decoder.go new file mode 100644 index 0000000000..99a0a2358a --- /dev/null +++ b/syntax/vm/struct_decoder.go @@ -0,0 +1,323 @@ +package vm + +import ( + "fmt" + "reflect" + "strings" + + "github.com/grafana/river/ast" + "github.com/grafana/river/diag" + "github.com/grafana/river/internal/reflectutil" + "github.com/grafana/river/internal/rivertags" + "github.com/grafana/river/internal/value" +) + +// structDecoder decodes a series of AST statements into a Go value. +type structDecoder struct { + VM *Evaluator + Scope *Scope + Assoc map[value.Value]ast.Node + TagInfo *tagInfo +} + +// Decode decodes the list of statements into the struct value specified by rv. +func (st *structDecoder) Decode(stmts ast.Body, rv reflect.Value) error { + // TODO(rfratto): potentially loosen this restriction and allow decoding into + // an interface{} or map[string]interface{}. 
+ if rv.Kind() != reflect.Struct { + panic(fmt.Sprintf("river/vm: structDecoder expects struct, got %s", rv.Kind())) + } + + state := decodeOptions{ + Tags: st.TagInfo.TagLookup, + EnumBlocks: st.TagInfo.EnumLookup, + SeenAttrs: make(map[string]struct{}), + SeenBlocks: make(map[string]struct{}), + SeenEnums: make(map[string]struct{}), + + BlockCount: make(map[string]int), + BlockIndex: make(map[*ast.BlockStmt]int), + + EnumCount: make(map[string]int), + EnumIndex: make(map[*ast.BlockStmt]int), + } + + // Iterate over the set of blocks to populate block count and block index. + // Block index is its index in the set of blocks with the *same name*. + // + // If the block belongs to an enum, we populate enum count and enum index + // instead. The enum index is the index on the set of blocks for the *same + // enum*. + for _, stmt := range stmts { + switch stmt := stmt.(type) { + case *ast.BlockStmt: + fullName := strings.Join(stmt.Name, ".") + + if enumTf, isEnum := st.TagInfo.EnumLookup[fullName]; isEnum { + enumName := strings.Join(enumTf.EnumField.Name, ".") + state.EnumIndex[stmt] = state.EnumCount[enumName] + state.EnumCount[enumName]++ + } else { + state.BlockIndex[stmt] = state.BlockCount[fullName] + state.BlockCount[fullName]++ + } + } + } + + for _, stmt := range stmts { + switch stmt := stmt.(type) { + case *ast.AttributeStmt: + // TODO(rfratto): append to list of diagnostics instead of aborting early. + if err := st.decodeAttr(stmt, rv, &state); err != nil { + return err + } + + case *ast.BlockStmt: + // TODO(rfratto): append to list of diagnostics instead of aborting early. + if err := st.decodeBlock(stmt, rv, &state); err != nil { + return err + } + + default: + panic(fmt.Sprintf("river/vm: unrecognized node type %T", stmt)) + } + } + + for _, tf := range st.TagInfo.Tags { + // Ignore any optional tags. + if tf.IsOptional() { + continue + } + + fullName := strings.Join(tf.Name, ".") + + switch { + case tf.IsAttr(): + if _, consumed := state.SeenAttrs[fullName]; !consumed { + // TODO(rfratto): change to diagnostics. + return fmt.Errorf("missing required attribute %q", fullName) + } + + case tf.IsBlock(): + if _, consumed := state.SeenBlocks[fullName]; !consumed { + // TODO(rfratto): change to diagnostics. + return fmt.Errorf("missing required block %q", fullName) + } + } + } + + return nil +} + +type decodeOptions struct { + Tags map[string]rivertags.Field + EnumBlocks map[string]enumBlock + + SeenAttrs, SeenBlocks, SeenEnums map[string]struct{} + + // BlockCount and BlockIndex are used to determine: + // + // * How big a slice of blocks should be for a block of a given name (BlockCount) + // * Which element within that slice is a given block assigned to (BlockIndex) + // + // This is used for decoding a series of rule blocks for prometheus.relabel, + // where 5 rules would have a "rule" key in BlockCount with a value of 5, and + // where the first block would be index 0, the second block would be index 1, + // and so on. + // + // The index in BlockIndex is relative to a block name; the first block named + // "hello.world" and the first block named "fizz.buzz" both have index 0. + + BlockCount map[string]int // Number of times a block by full name is seen. + BlockIndex map[*ast.BlockStmt]int // Index of a block within a set of blocks with the same name. + + // EnumCount and EnumIndex are similar to BlockCount/BlockIndex, but instead + // reference the number of blocks assigned to the same enum (EnumCount) and + // the index of a block within that enum slice (EnumIndex). 
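+	//
+	// For example, the blocks `block.c`, `block.b`, `block.c` of an enum named
+	// "block" share one index space: they get EnumIndex 0, 1, and 2, and
+	// EnumCount["block"] ends up as 3.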
+ + EnumCount map[string]int // Number of times an enum group is seen by enum name. + EnumIndex map[*ast.BlockStmt]int // Index of a block within a set of enum blocks of the same enum. +} + +func (st *structDecoder) decodeAttr(attr *ast.AttributeStmt, rv reflect.Value, state *decodeOptions) error { + fullName := attr.Name.Name + if _, seen := state.SeenAttrs[fullName]; seen { + return diag.Diagnostics{{ + Severity: diag.SeverityLevelError, + StartPos: ast.StartPos(attr).Position(), + EndPos: ast.EndPos(attr).Position(), + Message: fmt.Sprintf("attribute %q may only be provided once", fullName), + }} + } + state.SeenAttrs[fullName] = struct{}{} + + tf, ok := state.Tags[fullName] + if !ok { + return diag.Diagnostics{{ + Severity: diag.SeverityLevelError, + StartPos: ast.StartPos(attr).Position(), + EndPos: ast.EndPos(attr).Position(), + Message: fmt.Sprintf("unrecognized attribute name %q", fullName), + }} + } else if tf.IsBlock() { + return diag.Diagnostics{{ + Severity: diag.SeverityLevelError, + StartPos: ast.StartPos(attr).Position(), + EndPos: ast.EndPos(attr).Position(), + Message: fmt.Sprintf("%q must be a block, but is used as an attribute", fullName), + }} + } + + // Decode the attribute. + val, err := st.VM.evaluateExpr(st.Scope, st.Assoc, attr.Value) + if err != nil { + // TODO(rfratto): get error as diagnostics. + return err + } + + // We're reconverting our reflect.Value back into an interface{}, so we + // need to also turn it back into a pointer for decoding. + field := reflectutil.GetOrAlloc(rv, tf) + if err := value.Decode(val, field.Addr().Interface()); err != nil { + // TODO(rfratto): get error as diagnostics. + return err + } + + return nil +} + +func (st *structDecoder) decodeBlock(block *ast.BlockStmt, rv reflect.Value, state *decodeOptions) error { + fullName := block.GetBlockName() + + if _, isEnum := state.EnumBlocks[fullName]; isEnum { + return st.decodeEnumBlock(fullName, block, rv, state) + } + return st.decodeNormalBlock(fullName, block, rv, state) +} + +// decodeNormalBlock decodes a standard (non-enum) block. +func (st *structDecoder) decodeNormalBlock(fullName string, block *ast.BlockStmt, rv reflect.Value, state *decodeOptions) error { + tf, isBlock := state.Tags[fullName] + if !isBlock { + return diag.Diagnostics{{ + Severity: diag.SeverityLevelError, + StartPos: ast.StartPos(block).Position(), + EndPos: ast.EndPos(block).Position(), + Message: fmt.Sprintf("unrecognized block name %q", fullName), + }} + } else if tf.IsAttr() { + return diag.Diagnostics{{ + Severity: diag.SeverityLevelError, + StartPos: ast.StartPos(block).Position(), + EndPos: ast.EndPos(block).Position(), + Message: fmt.Sprintf("%q must be an attribute, but is used as a block", fullName), + }} + } + + field := reflectutil.GetOrAlloc(rv, tf) + decodeField := prepareDecodeValue(field) + + switch decodeField.Kind() { + case reflect.Slice: + // If this is the first time we've seen the block, reset its length to + // zero. + if _, seen := state.SeenBlocks[fullName]; !seen { + count := state.BlockCount[fullName] + decodeField.Set(reflect.MakeSlice(decodeField.Type(), count, count)) + } + + blockIndex, ok := state.BlockIndex[block] + if !ok { + panic("river/vm: block not found in index lookup table") + } + decodeElement := prepareDecodeValue(decodeField.Index(blockIndex)) + err := st.VM.evaluateBlockOrBody(st.Scope, st.Assoc, block, decodeElement) + if err != nil { + // TODO(rfratto): get error as diagnostics. 
+ return err + } + + case reflect.Array: + if decodeField.Len() != state.BlockCount[fullName] { + return diag.Diagnostics{{ + Severity: diag.SeverityLevelError, + StartPos: ast.StartPos(block).Position(), + EndPos: ast.EndPos(block).Position(), + Message: fmt.Sprintf( + "block %q must be specified exactly %d times, but was specified %d times", + fullName, + decodeField.Len(), + state.BlockCount[fullName], + ), + }} + } + + blockIndex, ok := state.BlockIndex[block] + if !ok { + panic("river/vm: block not found in index lookup table") + } + decodeElement := prepareDecodeValue(decodeField.Index(blockIndex)) + err := st.VM.evaluateBlockOrBody(st.Scope, st.Assoc, block, decodeElement) + if err != nil { + // TODO(rfratto): get error as diagnostics. + return err + } + + default: + if state.BlockCount[fullName] > 1 { + return diag.Diagnostics{{ + Severity: diag.SeverityLevelError, + StartPos: ast.StartPos(block).Position(), + EndPos: ast.EndPos(block).Position(), + Message: fmt.Sprintf("block %q may only be specified once", fullName), + }} + } + + err := st.VM.evaluateBlockOrBody(st.Scope, st.Assoc, block, decodeField) + if err != nil { + // TODO(rfratto): get error as diagnostics. + return err + } + } + + state.SeenBlocks[fullName] = struct{}{} + return nil +} + +func (st *structDecoder) decodeEnumBlock(fullName string, block *ast.BlockStmt, rv reflect.Value, state *decodeOptions) error { + tf, ok := state.EnumBlocks[fullName] + if !ok { + // decodeEnumBlock should only ever be called from decodeBlock, so this + // should never happen. + panic("decodeEnumBlock called with a non-enum block") + } + + enumName := strings.Join(tf.EnumField.Name, ".") + enumField := reflectutil.GetOrAlloc(rv, tf.EnumField) + decodeField := prepareDecodeValue(enumField) + + if decodeField.Kind() != reflect.Slice { + panic("river/vm: enum field must be a slice kind, got " + decodeField.Kind().String()) + } + + // If this is the first time we've seen the enum, reset its length to zero. + if _, seen := state.SeenEnums[enumName]; !seen { + count := state.EnumCount[enumName] + decodeField.Set(reflect.MakeSlice(decodeField.Type(), count, count)) + } + state.SeenEnums[enumName] = struct{}{} + + // Prepare the enum element to decode into. + enumIndex, ok := state.EnumIndex[block] + if !ok { + panic("river/vm: enum block not found in index lookup table") + } + enumElement := prepareDecodeValue(decodeField.Index(enumIndex)) + + // Prepare the block field to decode into. + enumBlock := reflectutil.GetOrAlloc(enumElement, tf.BlockField) + decodeBlock := prepareDecodeValue(enumBlock) + + // Decode into the block field. + return st.VM.evaluateBlockOrBody(st.Scope, st.Assoc, block, decodeBlock) +} diff --git a/syntax/vm/tag_cache.go b/syntax/vm/tag_cache.go new file mode 100644 index 0000000000..f9c1b69c56 --- /dev/null +++ b/syntax/vm/tag_cache.go @@ -0,0 +1,80 @@ +package vm + +import ( + "reflect" + "strings" + "sync" + + "github.com/grafana/river/internal/rivertags" +) + +// tagsCache caches the river tags for a struct type. This is never cleared, +// but since most structs will be statically created throughout the lifetime +// of the process, this will consume a negligible amount of memory. 
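+//
+// Conceptually the cache behaves like a map[reflect.Type]*tagInfo. A
+// rough usage sketch (Example being any hypothetical river-tagged
+// struct):
+//
+//	ti := getCachedTagInfo(reflect.TypeOf(Example{}))
+//	tf, ok := ti.TagLookup["value"]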
+var tagsCache sync.Map + +func getCachedTagInfo(t reflect.Type) *tagInfo { + if t.Kind() != reflect.Struct { + panic("getCachedTagInfo called with non-struct type") + } + + if entry, ok := tagsCache.Load(t); ok { + return entry.(*tagInfo) + } + + tfs := rivertags.Get(t) + ti := &tagInfo{ + Tags: tfs, + TagLookup: make(map[string]rivertags.Field, len(tfs)), + EnumLookup: make(map[string]enumBlock), // The length is not known ahead of time + } + + for _, tf := range tfs { + switch { + case tf.IsAttr(), tf.IsBlock(): + fullName := strings.Join(tf.Name, ".") + ti.TagLookup[fullName] = tf + + case tf.IsEnum(): + fullName := strings.Join(tf.Name, ".") + + // Find all the blocks that match to the enum, and inject them into the + // EnumLookup table. + enumFieldType := t.FieldByIndex(tf.Index).Type + enumBlocksInfo := getCachedTagInfo(deferenceType(enumFieldType.Elem())) + for _, blockField := range enumBlocksInfo.TagLookup { + // The full name of the enum block is the name of the enum plus the + // name of the block, separated by '.' + enumBlockName := fullName + "." + strings.Join(blockField.Name, ".") + ti.EnumLookup[enumBlockName] = enumBlock{ + EnumField: tf, + BlockField: blockField, + } + } + } + } + + tagsCache.Store(t, ti) + return ti +} + +func deferenceType(ty reflect.Type) reflect.Type { + for ty.Kind() == reflect.Pointer { + ty = ty.Elem() + } + return ty +} + +type tagInfo struct { + Tags []rivertags.Field + TagLookup map[string]rivertags.Field + + // EnumLookup maps enum blocks to the enum field. For example, an enum block + // called "foo.foo" and "foo.bar" will both map to the "foo" enum field. + EnumLookup map[string]enumBlock +} + +type enumBlock struct { + EnumField rivertags.Field // Field in the parent struct of the enum slice + BlockField rivertags.Field // Field in the enum struct for the enum block +} diff --git a/syntax/vm/vm.go b/syntax/vm/vm.go new file mode 100644 index 0000000000..a9c6481593 --- /dev/null +++ b/syntax/vm/vm.go @@ -0,0 +1,486 @@ +// Package vm provides a River expression evaluator. +package vm + +import ( + "fmt" + "reflect" + "strings" + + "github.com/grafana/river/ast" + "github.com/grafana/river/diag" + "github.com/grafana/river/internal/reflectutil" + "github.com/grafana/river/internal/rivertags" + "github.com/grafana/river/internal/stdlib" + "github.com/grafana/river/internal/value" +) + +// Evaluator evaluates River AST nodes into Go values. Each Evaluator is bound +// to a single AST node. To evaluate the node, call Evaluate. +type Evaluator struct { + // node for the AST. + // + // Each Evaluator is bound to a single node to allow for future performance + // optimizations, allowing for precomputing and storing the result of + // anything that is constant. + node ast.Node +} + +// New creates a new Evaluator for the given AST node. The given node must be +// either an *ast.File, *ast.BlockStmt, ast.Body, or assignable to an ast.Expr. +func New(node ast.Node) *Evaluator { + return &Evaluator{node: node} +} + +// Evaluate evaluates the Evaluator's node into a River value and decodes that +// value into the Go value v. +// +// Each call to Evaluate may provide a different scope with new values for +// available variables. If a variable used by the Evaluator's node isn't +// defined in scope or any of the parent scopes, Evaluate will return an error. +func (vm *Evaluator) Evaluate(scope *Scope, v interface{}) (err error) { + // Track a map that allows us to associate values with ast.Nodes so we can + // return decorated error messages. 
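+ //
+ // Note that err is a named return value: the deferred function below
+ // rewrites it into a decorated diagnostic before Evaluate returns it
+ // to the caller.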
+ assoc := make(map[value.Value]ast.Node) + + defer func() { + if err != nil { + // Decorate the error on return. + err = makeDiagnostic(err, assoc) + } + }() + + switch node := vm.node.(type) { + case *ast.BlockStmt, ast.Body: + rv := reflect.ValueOf(v) + if rv.Kind() != reflect.Pointer { + panic(fmt.Sprintf("river/vm: expected pointer, got %s", rv.Kind())) + } + return vm.evaluateBlockOrBody(scope, assoc, node, rv) + case *ast.File: + rv := reflect.ValueOf(v) + if rv.Kind() != reflect.Pointer { + panic(fmt.Sprintf("river/vm: expected pointer, got %s", rv.Kind())) + } + return vm.evaluateBlockOrBody(scope, assoc, node.Body, rv) + default: + expr, ok := node.(ast.Expr) + if !ok { + panic(fmt.Sprintf("river/vm: unexpected value type %T", node)) + } + val, err := vm.evaluateExpr(scope, assoc, expr) + if err != nil { + return err + } + return value.Decode(val, v) + } +} + +func (vm *Evaluator) evaluateBlockOrBody(scope *Scope, assoc map[value.Value]ast.Node, node ast.Node, rv reflect.Value) error { + // Before decoding the block, we need to temporarily take the address of rv + // to handle the case of it implementing the unmarshaler interface. + if rv.CanAddr() { + rv = rv.Addr() + } + + if err, unmarshaled := vm.evaluateUnmarshalRiver(scope, assoc, node, rv); unmarshaled || err != nil { + return err + } + + if ru, ok := rv.Interface().(value.Defaulter); ok { + ru.SetToDefault() + } + + if err := vm.evaluateDecode(scope, assoc, node, rv); err != nil { + return err + } + + if ru, ok := rv.Interface().(value.Validator); ok { + if err := ru.Validate(); err != nil { + return err + } + } + + return nil +} + +func (vm *Evaluator) evaluateUnmarshalRiver(scope *Scope, assoc map[value.Value]ast.Node, node ast.Node, rv reflect.Value) (error, bool) { + if ru, ok := rv.Interface().(value.Unmarshaler); ok { + return ru.UnmarshalRiver(func(v interface{}) error { + rv := reflect.ValueOf(v) + if rv.Kind() != reflect.Pointer { + panic(fmt.Sprintf("river/vm: expected pointer, got %s", rv.Kind())) + } + return vm.evaluateBlockOrBody(scope, assoc, node, rv.Elem()) + }), true + } + + return nil, false +} + +func (vm *Evaluator) evaluateDecode(scope *Scope, assoc map[value.Value]ast.Node, node ast.Node, rv reflect.Value) error { + // TODO(rfratto): the errors returned by this function are missing context to + // be able to print line numbers. We need to return decorated error types. + + // Fully deference rv and allocate pointers as necessary. + for rv.Kind() == reflect.Pointer { + if rv.IsNil() { + rv.Set(reflect.New(rv.Type().Elem())) + } + rv = rv.Elem() + } + + if rv.Kind() == reflect.Interface { + var anyMap map[string]interface{} + into := reflect.MakeMap(reflect.TypeOf(anyMap)) + if err := vm.evaluateMap(scope, assoc, node, into); err != nil { + return err + } + + rv.Set(into) + return nil + } else if rv.Kind() == reflect.Map { + return vm.evaluateMap(scope, assoc, node, rv) + } else if rv.Kind() != reflect.Struct { + panic(fmt.Sprintf("river/vm: can only evaluate blocks into structs, got %s", rv.Kind())) + } + + ti := getCachedTagInfo(rv.Type()) + + var stmts ast.Body + switch node := node.(type) { + case *ast.BlockStmt: + // Decode the block label first. 
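+ // For a block written as, say, `example "foo" {}`, this stores the
+ // label "foo" into the struct field tagged `river:",label"` before the
+ // body is decoded. (The block name here is illustrative.)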
+ if err := vm.evaluateBlockLabel(node, ti.Tags, rv); err != nil { + return err + } + stmts = node.Body + case ast.Body: + stmts = node + default: + panic(fmt.Sprintf("river/vm: unrecognized node type %T", node)) + } + + sd := structDecoder{ + VM: vm, + Scope: scope, + Assoc: assoc, + TagInfo: ti, + } + return sd.Decode(stmts, rv) +} + +// evaluateMap evaluates a block or a body into a map. +func (vm *Evaluator) evaluateMap(scope *Scope, assoc map[value.Value]ast.Node, node ast.Node, rv reflect.Value) error { + var stmts ast.Body + + switch node := node.(type) { + case *ast.BlockStmt: + if node.Label != "" { + return diag.Diagnostic{ + Severity: diag.SeverityLevelError, + StartPos: node.NamePos.Position(), + EndPos: node.LCurlyPos.Position(), + Message: fmt.Sprintf("block %q requires non-empty label", strings.Join(node.Name, ".")), + } + } + stmts = node.Body + case ast.Body: + stmts = node + default: + panic(fmt.Sprintf("river/vm: unrecognized node type %T", node)) + } + + if rv.IsNil() { + rv.Set(reflect.MakeMap(rv.Type())) + } + + for _, stmt := range stmts { + switch stmt := stmt.(type) { + case *ast.AttributeStmt: + val, err := vm.evaluateExpr(scope, assoc, stmt.Value) + if err != nil { + // TODO(rfratto): get error as diagnostics. + return err + } + + target := reflect.New(rv.Type().Elem()).Elem() + if err := value.Decode(val, target.Addr().Interface()); err != nil { + // TODO(rfratto): get error as diagnostics. + return err + } + rv.SetMapIndex(reflect.ValueOf(stmt.Name.Name), target) + + case *ast.BlockStmt: + // TODO(rfratto): potentially relax this restriction where nested blocks + // are permitted when decoding to a map. + return diag.Diagnostic{ + Severity: diag.SeverityLevelError, + StartPos: ast.StartPos(stmt).Position(), + EndPos: ast.EndPos(stmt).Position(), + Message: "nested blocks not supported here", + } + + default: + panic(fmt.Sprintf("river/vm: unrecognized node type %T", stmt)) + } + } + + return nil +} + +func (vm *Evaluator) evaluateBlockLabel(node *ast.BlockStmt, tfs []rivertags.Field, rv reflect.Value) error { + var ( + labelField rivertags.Field + foundField bool + ) + for _, tf := range tfs { + if tf.Flags&rivertags.FlagLabel != 0 { + labelField = tf + foundField = true + break + } + } + + // Check for user errors first. + // + // We return parser.Error here to restrict the position of the error to just + // the name. We might be able to clean this up in the future by extending + // ValueError to have an explicit position. + switch { + case node.Label == "" && foundField: // No user label, but struct expects one + return diag.Diagnostic{ + Severity: diag.SeverityLevelError, + StartPos: node.NamePos.Position(), + EndPos: node.LCurlyPos.Position(), + Message: fmt.Sprintf("block %q requires non-empty label", strings.Join(node.Name, ".")), + } + case node.Label != "" && !foundField: // User label, but struct doesn't expect one + return diag.Diagnostic{ + Severity: diag.SeverityLevelError, + StartPos: node.NamePos.Position(), + EndPos: node.LCurlyPos.Position(), + Message: fmt.Sprintf("block %q does not support specifying labels", strings.Join(node.Name, ".")), + } + } + + if node.Label == "" { + // no-op: no labels to set. + return nil + } + + var ( + field = reflectutil.GetOrAlloc(rv, labelField) + fieldType = field.Type() + ) + if !reflect.TypeOf(node.Label).AssignableTo(fieldType) { + // The Label struct field needs to be a string. 
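+ //
+ // A valid label field is declared as, for example:
+ //
+ //	Label string `river:",label"`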
+ panic(fmt.Sprintf("river/vm: cannot assign block label to non-string type %s", fieldType)) + } + field.Set(reflect.ValueOf(node.Label)) + return nil +} + +// prepareDecodeValue prepares v for decoding. Pointers will be fully +// dereferenced until finding a non-pointer value. nil pointers will be +// allocated. +func prepareDecodeValue(v reflect.Value) reflect.Value { + for v.Kind() == reflect.Pointer { + if v.IsNil() { + v.Set(reflect.New(v.Type().Elem())) + } + v = v.Elem() + } + return v +} + +func (vm *Evaluator) evaluateExpr(scope *Scope, assoc map[value.Value]ast.Node, expr ast.Expr) (v value.Value, err error) { + defer func() { + if v != value.Null { + assoc[v] = expr + } + }() + + switch expr := expr.(type) { + case *ast.LiteralExpr: + return valueFromLiteral(expr.Value, expr.Kind) + + case *ast.BinaryExpr: + lhs, err := vm.evaluateExpr(scope, assoc, expr.Left) + if err != nil { + return value.Null, err + } + rhs, err := vm.evaluateExpr(scope, assoc, expr.Right) + if err != nil { + return value.Null, err + } + return evalBinop(lhs, expr.Kind, rhs) + + case *ast.ArrayExpr: + vals := make([]value.Value, len(expr.Elements)) + for i, element := range expr.Elements { + val, err := vm.evaluateExpr(scope, assoc, element) + if err != nil { + return value.Null, err + } + vals[i] = val + } + return value.Array(vals...), nil + + case *ast.ObjectExpr: + fields := make(map[string]value.Value, len(expr.Fields)) + for _, field := range expr.Fields { + val, err := vm.evaluateExpr(scope, assoc, field.Value) + if err != nil { + return value.Null, err + } + fields[field.Name.Name] = val + } + return value.Object(fields), nil + + case *ast.IdentifierExpr: + val, found := scope.Lookup(expr.Ident.Name) + if !found { + return value.Null, diag.Diagnostic{ + Severity: diag.SeverityLevelError, + StartPos: ast.StartPos(expr).Position(), + EndPos: ast.EndPos(expr).Position(), + Message: fmt.Sprintf("identifier %q does not exist", expr.Ident.Name), + } + } + return value.Encode(val), nil + + case *ast.AccessExpr: + val, err := vm.evaluateExpr(scope, assoc, expr.Value) + if err != nil { + return value.Null, err + } + + switch val.Type() { + case value.TypeObject: + res, ok := val.Key(expr.Name.Name) + if !ok { + return value.Null, diag.Diagnostic{ + Severity: diag.SeverityLevelError, + StartPos: ast.StartPos(expr.Name).Position(), + EndPos: ast.EndPos(expr.Name).Position(), + Message: fmt.Sprintf("field %q does not exist", expr.Name.Name), + } + } + return res, nil + default: + return value.Null, value.Error{ + Value: val, + Inner: fmt.Errorf("cannot access field %q on value of type %s", expr.Name.Name, val.Type()), + } + } + + case *ast.IndexExpr: + val, err := vm.evaluateExpr(scope, assoc, expr.Value) + if err != nil { + return value.Null, err + } + idx, err := vm.evaluateExpr(scope, assoc, expr.Index) + if err != nil { + return value.Null, err + } + + switch val.Type() { + case value.TypeArray: + // Arrays are indexed with a number. + if idx.Type() != value.TypeNumber { + return value.Null, value.TypeError{Value: idx, Expected: value.TypeNumber} + } + intIndex := int(idx.Int()) + + if intIndex < 0 || intIndex >= val.Len() { + return value.Null, value.Error{ + Value: idx, + Inner: fmt.Errorf("index %d is out of range of array with length %d", intIndex, val.Len()), + } + } + return val.Index(intIndex), nil + + case value.TypeObject: + // Objects are indexed with a string. 
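+ // For example, `{ a = 5 }["a"]` evaluates to 5, while `{}["missing"]`
+ // evaluates to null rather than failing (see below).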
+ if idx.Type() != value.TypeString {
+ return value.Null, value.TypeError{Value: idx, Expected: value.TypeString}
+ }
+
+ field, ok := val.Key(idx.Text())
+ if !ok {
+ // If a key doesn't exist in an object accessed with [], return null.
+ return value.Null, nil
+ }
+ return field, nil
+
+ default:
+ return value.Null, value.Error{
+ Value: val,
+ Inner: fmt.Errorf("expected object or array, got %s", val.Type()),
+ }
+ }
+
+ case *ast.ParenExpr:
+ return vm.evaluateExpr(scope, assoc, expr.Inner)
+
+ case *ast.UnaryExpr:
+ val, err := vm.evaluateExpr(scope, assoc, expr.Value)
+ if err != nil {
+ return value.Null, err
+ }
+ return evalUnaryOp(expr.Kind, val)
+
+ case *ast.CallExpr:
+ funcVal, err := vm.evaluateExpr(scope, assoc, expr.Value)
+ if err != nil {
+ return funcVal, err
+ }
+ if funcVal.Type() != value.TypeFunction {
+ return value.Null, value.TypeError{Value: funcVal, Expected: value.TypeFunction}
+ }
+
+ args := make([]value.Value, len(expr.Args))
+ for i := 0; i < len(expr.Args); i++ {
+ args[i], err = vm.evaluateExpr(scope, assoc, expr.Args[i])
+ if err != nil {
+ return value.Null, err
+ }
+ }
+ return funcVal.Call(args...)
+
+ default:
+ panic(fmt.Sprintf("river/vm: unexpected ast.Expr type %T", expr))
+ }
+}
+
+// A Scope exposes a set of variables available for use during evaluation.
+type Scope struct {
+ // Parent optionally points to a parent Scope containing more variables.
+ // Variables defined in child scopes take precedence over variables of the
+ // same name found in parent scopes.
+ Parent *Scope
+
+ // Variables holds the list of available variable names that can be used when
+ // evaluating a node.
+ //
+ // Values in the Variables map should be considered immutable after being
+ // passed to Evaluate; maps and slices will be copied by reference for
+ // performance optimizations.
+ Variables map[string]interface{}
+}
+
+// Lookup looks up a named identifier from the scope, all of the scope's
+// parents, and the stdlib.
+func (s *Scope) Lookup(name string) (interface{}, bool) {
+ // Traverse the scope first, then fall back to stdlib.
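+ // Because child scopes are searched before their parents, the
+ // innermost definition of a name wins, and stdlib identifiers are only
+ // consulted when no scope in the chain defines the name.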
+ for s != nil { + if val, ok := s.Variables[name]; ok { + return val, true + } + s = s.Parent + } + if ident, ok := stdlib.Identifiers[name]; ok { + return ident, true + } + return nil, false +} diff --git a/syntax/vm/vm_benchmarks_test.go b/syntax/vm/vm_benchmarks_test.go new file mode 100644 index 0000000000..e5530ccb37 --- /dev/null +++ b/syntax/vm/vm_benchmarks_test.go @@ -0,0 +1,106 @@ +package vm_test + +import ( + "fmt" + "math" + "reflect" + "testing" + + "github.com/grafana/river/parser" + "github.com/grafana/river/vm" + "github.com/stretchr/testify/require" +) + +func BenchmarkExprs(b *testing.B) { + // Shared scope across all tests below + scope := &vm.Scope{ + Variables: map[string]interface{}{ + "foobar": int(42), + }, + } + + tt := []struct { + name string + input string + expect interface{} + }{ + // Binops + {"or", `false || true`, bool(true)}, + {"and", `true && false`, bool(false)}, + {"math/eq", `3 == 5`, bool(false)}, + {"math/neq", `3 != 5`, bool(true)}, + {"math/lt", `3 < 5`, bool(true)}, + {"math/lte", `3 <= 5`, bool(true)}, + {"math/gt", `3 > 5`, bool(false)}, + {"math/gte", `3 >= 5`, bool(false)}, + {"math/add", `3 + 5`, int(8)}, + {"math/sub", `3 - 5`, int(-2)}, + {"math/mul", `3 * 5`, int(15)}, + {"math/div", `3 / 5`, int(0)}, + {"math/mod", `5 % 3`, int(2)}, + {"math/pow", `3 ^ 5`, int(243)}, + {"binop chain", `3 + 5 * 2`, int(13)}, // Chain multiple binops + + // Identifier + {"ident lookup", `foobar`, int(42)}, + + // Arrays + {"array", `[0, 1, 2]`, []int{0, 1, 2}}, + + // Objects + {"object to map", `{ a = 5, b = 10 }`, map[string]int{"a": 5, "b": 10}}, + { + name: "object to struct", + input: `{ + name = "John Doe", + age = 42, + }`, + expect: struct { + Name string `river:"name,attr"` + Age int `river:"age,attr"` + Country string `river:"country,attr,optional"` + }{ + Name: "John Doe", + Age: 42, + }, + }, + + // Access + {"access", `{ a = 15 }.a`, int(15)}, + {"nested access", `{ a = { b = 12 } }.a.b`, int(12)}, + + // Indexing + {"index", `[0, 1, 2][1]`, int(1)}, + {"nested index", `[[1,2,3]][0][2]`, int(3)}, + + // Paren + {"paren", `(15)`, int(15)}, + + // Unary + {"unary not", `!true`, bool(false)}, + {"unary neg", `-15`, int(-15)}, + {"unary float", `-15.0`, float64(-15.0)}, + {"unary int64", fmt.Sprintf("%v", math.MaxInt64), math.MaxInt64}, + {"unary uint64", fmt.Sprintf("%v", uint64(math.MaxInt64)+1), uint64(math.MaxInt64) + 1}, + // math.MaxUint64 + 1 = 18446744073709551616 + {"unary float64 from overflowing uint", "18446744073709551616", float64(18446744073709551616)}, + } + + for _, tc := range tt { + b.Run(tc.name, func(b *testing.B) { + b.StopTimer() + expr, err := parser.ParseExpression(tc.input) + require.NoError(b, err) + + eval := vm.New(expr) + b.StartTimer() + + expectType := reflect.TypeOf(tc.expect) + + for i := 0; i < b.N; i++ { + vPtr := reflect.New(expectType).Interface() + _ = eval.Evaluate(scope, vPtr) + } + }) + } +} diff --git a/syntax/vm/vm_block_test.go b/syntax/vm/vm_block_test.go new file mode 100644 index 0000000000..ebc2ff0e6b --- /dev/null +++ b/syntax/vm/vm_block_test.go @@ -0,0 +1,802 @@ +package vm_test + +import ( + "fmt" + "math" + "reflect" + "testing" + + "github.com/grafana/river/ast" + "github.com/grafana/river/parser" + "github.com/grafana/river/vm" + "github.com/stretchr/testify/require" +) + +// This file contains tests for decoding blocks. 
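+//
+// Most tests below share one shape: parse a configuration snippet,
+// evaluate it against a river-tagged Go struct, and assert on the
+// decoded fields. A minimal sketch (names are illustrative):
+//
+//	type block struct {
+//		Number int `river:"number,attr"`
+//	}
+//	eval := vm.New(parseBlock(t, `some_block { number = 15 }`))
+//	var actual block
+//	require.NoError(t, eval.Evaluate(nil, &actual))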
+ +func TestVM_File(t *testing.T) { + type block struct { + String string `river:"string,attr"` + Number int `river:"number,attr,optional"` + } + type file struct { + SettingA int `river:"setting_a,attr"` + SettingB int `river:"setting_b,attr,optional"` + + Block block `river:"some_block,block,optional"` + } + + input := ` + setting_a = 15 + + some_block { + string = "Hello, world!" + } + ` + + expect := file{ + SettingA: 15, + Block: block{ + String: "Hello, world!", + }, + } + + res, err := parser.ParseFile(t.Name(), []byte(input)) + require.NoError(t, err) + + eval := vm.New(res) + + var actual file + require.NoError(t, eval.Evaluate(nil, &actual)) + require.Equal(t, expect, actual) +} + +func TestVM_Block_Attributes(t *testing.T) { + t.Run("Decodes attributes", func(t *testing.T) { + type block struct { + Number int `river:"number,attr"` + String string `river:"string,attr"` + } + + input := `some_block { + number = 15 + string = "Hello, world!" + }` + eval := vm.New(parseBlock(t, input)) + + var actual block + require.NoError(t, eval.Evaluate(nil, &actual)) + require.Equal(t, 15, actual.Number) + require.Equal(t, "Hello, world!", actual.String) + }) + + t.Run("Fails if attribute used as block", func(t *testing.T) { + type block struct { + Number int `river:"number,attr"` + } + + input := `some_block { + number {} + }` + eval := vm.New(parseBlock(t, input)) + + err := eval.Evaluate(nil, &block{}) + require.EqualError(t, err, `2:4: "number" must be an attribute, but is used as a block`) + }) + + t.Run("Fails if required attributes are not present", func(t *testing.T) { + type block struct { + Number int `river:"number,attr"` + String string `river:"string,attr"` + } + + input := `some_block { + number = 15 + }` + eval := vm.New(parseBlock(t, input)) + + err := eval.Evaluate(nil, &block{}) + require.EqualError(t, err, `missing required attribute "string"`) + }) + + t.Run("Succeeds if optional attributes are not present", func(t *testing.T) { + type block struct { + Number int `river:"number,attr"` + String string `river:"string,attr,optional"` + } + + input := `some_block { + number = 15 + }` + eval := vm.New(parseBlock(t, input)) + + var actual block + require.NoError(t, eval.Evaluate(nil, &actual)) + require.Equal(t, 15, actual.Number) + require.Equal(t, "", actual.String) + }) + + t.Run("Fails if attribute is not defined in struct", func(t *testing.T) { + type block struct { + Number int `river:"number,attr"` + } + + input := `some_block { + number = 15 + invalid = "This attribute does not exist!" 
+ }` + eval := vm.New(parseBlock(t, input)) + + err := eval.Evaluate(nil, &block{}) + require.EqualError(t, err, `3:4: unrecognized attribute name "invalid"`) + }) + + t.Run("Tests decoding into an interface", func(t *testing.T) { + type block struct { + Anything interface{} `river:"anything,attr"` + } + + tests := []struct { + testName string + val string + expectedValType reflect.Kind + }{ + {testName: "test_int_1", val: "15", expectedValType: reflect.Int}, + {testName: "test_int_2", val: "-15", expectedValType: reflect.Int}, + {testName: "test_int_3", val: fmt.Sprintf("%v", math.MaxInt64), expectedValType: reflect.Int}, + {testName: "test_int_4", val: fmt.Sprintf("%v", math.MinInt64), expectedValType: reflect.Int}, + {testName: "test_uint_1", val: fmt.Sprintf("%v", uint64(math.MaxInt64)+1), expectedValType: reflect.Uint64}, + {testName: "test_uint_2", val: fmt.Sprintf("%v", uint64(math.MaxUint64)), expectedValType: reflect.Uint64}, + {testName: "test_float_1", val: fmt.Sprintf("%v9", math.MinInt64), expectedValType: reflect.Float64}, + {testName: "test_float_2", val: fmt.Sprintf("%v9", uint64(math.MaxUint64)), expectedValType: reflect.Float64}, + {testName: "test_float_3", val: "16.0", expectedValType: reflect.Float64}, + } + + for _, tt := range tests { + t.Run(tt.testName, func(t *testing.T) { + input := fmt.Sprintf(`some_block { + anything = %s + }`, tt.val) + eval := vm.New(parseBlock(t, input)) + + var actual block + err := eval.Evaluate(nil, &actual) + require.NoError(t, err) + require.Equal(t, tt.expectedValType.String(), reflect.TypeOf(actual.Anything).Kind().String()) + }) + } + }) + + t.Run("Supports arbitrarily nested struct pointer fields", func(t *testing.T) { + type block struct { + NumberA int `river:"number_a,attr"` + NumberB *int `river:"number_b,attr"` + NumberC **int `river:"number_c,attr"` + NumberD ***int `river:"number_d,attr"` + } + + input := `some_block { + number_a = 15 + number_b = 20 + number_c = 25 + number_d = 30 + }` + eval := vm.New(parseBlock(t, input)) + + var actual block + require.NoError(t, eval.Evaluate(nil, &actual)) + require.Equal(t, 15, actual.NumberA) + require.Equal(t, 20, *actual.NumberB) + require.Equal(t, 25, **actual.NumberC) + require.Equal(t, 30, ***actual.NumberD) + }) + + t.Run("Supports squashed attributes", func(t *testing.T) { + type InnerStruct struct { + InnerField1 string `river:"inner_field_1,attr,optional"` + InnerField2 string `river:"inner_field_2,attr,optional"` + } + + type OuterStruct struct { + OuterField1 string `river:"outer_field_1,attr,optional"` + Inner InnerStruct `river:",squash"` + OuterField2 string `river:"outer_field_2,attr,optional"` + } + + var ( + input = `some_block { + outer_field_1 = "value1" + outer_field_2 = "value2" + inner_field_1 = "value3" + inner_field_2 = "value4" + }` + + expect = OuterStruct{ + OuterField1: "value1", + Inner: InnerStruct{ + InnerField1: "value3", + InnerField2: "value4", + }, + OuterField2: "value2", + } + ) + eval := vm.New(parseBlock(t, input)) + + var actual OuterStruct + require.NoError(t, eval.Evaluate(nil, &actual)) + require.Equal(t, expect, actual) + }) + + t.Run("Supports squashed attributes in pointers", func(t *testing.T) { + type InnerStruct struct { + InnerField1 string `river:"inner_field_1,attr,optional"` + InnerField2 string `river:"inner_field_2,attr,optional"` + } + + type OuterStruct struct { + OuterField1 string `river:"outer_field_1,attr,optional"` + Inner *InnerStruct `river:",squash"` + OuterField2 string `river:"outer_field_2,attr,optional"` + } + + var ( 
+ input = `some_block { + outer_field_1 = "value1" + outer_field_2 = "value2" + inner_field_1 = "value3" + inner_field_2 = "value4" + }` + + expect = OuterStruct{ + OuterField1: "value1", + Inner: &InnerStruct{ + InnerField1: "value3", + InnerField2: "value4", + }, + OuterField2: "value2", + } + ) + eval := vm.New(parseBlock(t, input)) + + var actual OuterStruct + require.NoError(t, eval.Evaluate(nil, &actual)) + require.Equal(t, expect, actual) + }) +} + +func TestVM_Block_Children_Blocks(t *testing.T) { + type childBlock struct { + Attr bool `river:"attr,attr"` + } + + t.Run("Decodes children blocks", func(t *testing.T) { + type block struct { + Value int `river:"value,attr"` + Child childBlock `river:"child.block,block"` + } + + input := `some_block { + value = 15 + + child.block { attr = true } + }` + eval := vm.New(parseBlock(t, input)) + + var actual block + require.NoError(t, eval.Evaluate(nil, &actual)) + require.Equal(t, 15, actual.Value) + require.True(t, actual.Child.Attr) + }) + + t.Run("Decodes multiple instances of children blocks", func(t *testing.T) { + type block struct { + Value int `river:"value,attr"` + Children []childBlock `river:"child.block,block"` + } + + input := `some_block { + value = 10 + + child.block { attr = true } + child.block { attr = false } + child.block { attr = true } + }` + eval := vm.New(parseBlock(t, input)) + + var actual block + require.NoError(t, eval.Evaluate(nil, &actual)) + require.Equal(t, 10, actual.Value) + require.Len(t, actual.Children, 3) + require.Equal(t, true, actual.Children[0].Attr) + require.Equal(t, false, actual.Children[1].Attr) + require.Equal(t, true, actual.Children[2].Attr) + }) + + t.Run("Decodes multiple instances of children blocks into an array", func(t *testing.T) { + type block struct { + Value int `river:"value,attr"` + Children [3]childBlock `river:"child.block,block"` + } + + input := `some_block { + value = 15 + + child.block { attr = true } + child.block { attr = false } + child.block { attr = true } + }` + eval := vm.New(parseBlock(t, input)) + + var actual block + require.NoError(t, eval.Evaluate(nil, &actual)) + require.Equal(t, 15, actual.Value) + require.Equal(t, true, actual.Children[0].Attr) + require.Equal(t, false, actual.Children[1].Attr) + require.Equal(t, true, actual.Children[2].Attr) + }) + + t.Run("Fails if block used as an attribute", func(t *testing.T) { + type block struct { + Child childBlock `river:"child,block"` + } + + input := `some_block { + child = 15 + }` + eval := vm.New(parseBlock(t, input)) + + err := eval.Evaluate(nil, &block{}) + require.EqualError(t, err, `2:4: "child" must be a block, but is used as an attribute`) + }) + + t.Run("Fails if required children blocks are not present", func(t *testing.T) { + type block struct { + Value int `river:"value,attr"` + Child childBlock `river:"child.block,block"` + } + + input := `some_block { + value = 15 + }` + eval := vm.New(parseBlock(t, input)) + + err := eval.Evaluate(nil, &block{}) + require.EqualError(t, err, `missing required block "child.block"`) + }) + + t.Run("Succeeds if optional children blocks are not present", func(t *testing.T) { + type block struct { + Value int `river:"value,attr"` + Child childBlock `river:"child.block,block,optional"` + } + + input := `some_block { + value = 15 + }` + eval := vm.New(parseBlock(t, input)) + + var actual block + require.NoError(t, eval.Evaluate(nil, &actual)) + require.Equal(t, 15, actual.Value) + }) + + t.Run("Fails if child block is not defined in struct", func(t *testing.T) { + type 
block struct { + Value int `river:"value,attr"` + } + + input := `some_block { + value = 15 + + child.block { attr = true } + }` + eval := vm.New(parseBlock(t, input)) + + err := eval.Evaluate(nil, &block{}) + require.EqualError(t, err, `4:4: unrecognized block name "child.block"`) + }) + + t.Run("Supports arbitrarily nested struct pointer fields", func(t *testing.T) { + type block struct { + BlockA childBlock `river:"block_a,block"` + BlockB *childBlock `river:"block_b,block"` + BlockC **childBlock `river:"block_c,block"` + BlockD ***childBlock `river:"block_d,block"` + } + + input := `some_block { + block_a { attr = true } + block_b { attr = false } + block_c { attr = true } + block_d { attr = false } + }` + eval := vm.New(parseBlock(t, input)) + + var actual block + require.NoError(t, eval.Evaluate(nil, &actual)) + require.Equal(t, true, (actual.BlockA).Attr) + require.Equal(t, false, (*actual.BlockB).Attr) + require.Equal(t, true, (**actual.BlockC).Attr) + require.Equal(t, false, (***actual.BlockD).Attr) + }) + + t.Run("Supports squashed blocks", func(t *testing.T) { + type InnerStruct struct { + Inner1 childBlock `river:"inner_block_1,block"` + Inner2 childBlock `river:"inner_block_2,block"` + } + + type OuterStruct struct { + Outer1 childBlock `river:"outer_block_1,block"` + Inner InnerStruct `river:",squash"` + Outer2 childBlock `river:"outer_block_2,block"` + } + + var ( + input = `some_block { + outer_block_1 { attr = true } + outer_block_2 { attr = false } + inner_block_1 { attr = true } + inner_block_2 { attr = false } + }` + + expect = OuterStruct{ + Outer1: childBlock{Attr: true}, + Outer2: childBlock{Attr: false}, + Inner: InnerStruct{ + Inner1: childBlock{Attr: true}, + Inner2: childBlock{Attr: false}, + }, + } + ) + eval := vm.New(parseBlock(t, input)) + + var actual OuterStruct + require.NoError(t, eval.Evaluate(nil, &actual)) + require.Equal(t, expect, actual) + }) + + t.Run("Supports squashed blocks in pointers", func(t *testing.T) { + type InnerStruct struct { + Inner1 *childBlock `river:"inner_block_1,block"` + Inner2 *childBlock `river:"inner_block_2,block"` + } + + type OuterStruct struct { + Outer1 childBlock `river:"outer_block_1,block"` + Inner *InnerStruct `river:",squash"` + Outer2 childBlock `river:"outer_block_2,block"` + } + + var ( + input = `some_block { + outer_block_1 { attr = true } + outer_block_2 { attr = false } + inner_block_1 { attr = true } + inner_block_2 { attr = false } + }` + + expect = OuterStruct{ + Outer1: childBlock{Attr: true}, + Outer2: childBlock{Attr: false}, + Inner: &InnerStruct{ + Inner1: &childBlock{Attr: true}, + Inner2: &childBlock{Attr: false}, + }, + } + ) + eval := vm.New(parseBlock(t, input)) + + var actual OuterStruct + require.NoError(t, eval.Evaluate(nil, &actual)) + require.Equal(t, expect, actual) + }) + + // TODO(rfratto): decode all blocks into a []*ast.BlockStmt field. 
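+ //
+ // Until then, slices and fixed-size arrays (both exercised above) are
+ // the supported targets for repeated blocks.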
+} + +func TestVM_Block_Enum_Block(t *testing.T) { + type childBlock struct { + Attr int `river:"attr,attr"` + } + + type enumBlock struct { + BlockA *childBlock `river:"a,block,optional"` + BlockB *childBlock `river:"b,block,optional"` + BlockC *childBlock `river:"c,block,optional"` + BlockD *childBlock `river:"d,block,optional"` + } + + t.Run("Decodes enum blocks", func(t *testing.T) { + type block struct { + Value int `river:"value,attr"` + Blocks []*enumBlock `river:"child,enum,optional"` + } + + input := `some_block { + value = 15 + + child.a { attr = 1 } + }` + eval := vm.New(parseBlock(t, input)) + + expect := block{ + Value: 15, + Blocks: []*enumBlock{ + {BlockA: &childBlock{Attr: 1}}, + }, + } + + var actual block + require.NoError(t, eval.Evaluate(nil, &actual)) + require.Equal(t, expect, actual) + }) + + t.Run("Decodes multiple enum blocks", func(t *testing.T) { + type block struct { + Value int `river:"value,attr"` + Blocks []*enumBlock `river:"child,enum,optional"` + } + + input := `some_block { + value = 15 + + child.b { attr = 1 } + child.a { attr = 2 } + child.c { attr = 3 } + }` + eval := vm.New(parseBlock(t, input)) + + expect := block{ + Value: 15, + Blocks: []*enumBlock{ + {BlockB: &childBlock{Attr: 1}}, + {BlockA: &childBlock{Attr: 2}}, + {BlockC: &childBlock{Attr: 3}}, + }, + } + + var actual block + require.NoError(t, eval.Evaluate(nil, &actual)) + require.Equal(t, expect, actual) + }) + + t.Run("Decodes multiple enum blocks with repeating blocks", func(t *testing.T) { + type block struct { + Value int `river:"value,attr"` + Blocks []*enumBlock `river:"child,enum,optional"` + } + + input := `some_block { + value = 15 + + child.a { attr = 1 } + child.b { attr = 2 } + child.c { attr = 3 } + child.a { attr = 4 } + }` + eval := vm.New(parseBlock(t, input)) + + expect := block{ + Value: 15, + Blocks: []*enumBlock{ + {BlockA: &childBlock{Attr: 1}}, + {BlockB: &childBlock{Attr: 2}}, + {BlockC: &childBlock{Attr: 3}}, + {BlockA: &childBlock{Attr: 4}}, + }, + } + + var actual block + require.NoError(t, eval.Evaluate(nil, &actual)) + require.Equal(t, expect, actual) + }) +} + +func TestVM_Block_Label(t *testing.T) { + t.Run("Decodes label into string field", func(t *testing.T) { + type block struct { + Label string `river:",label"` + } + + input := `some_block "label_value_1" {}` + eval := vm.New(parseBlock(t, input)) + + var actual block + require.NoError(t, eval.Evaluate(nil, &actual)) + require.Equal(t, "label_value_1", actual.Label) + }) + + t.Run("Struct must have label field if block is labeled", func(t *testing.T) { + type block struct{} + + input := `some_block "label_value_2" {}` + eval := vm.New(parseBlock(t, input)) + + err := eval.Evaluate(nil, &block{}) + require.EqualError(t, err, `1:1: block "some_block" does not support specifying labels`) + }) + + t.Run("Block must have label if struct accepts label", func(t *testing.T) { + type block struct { + Label string `river:",label"` + } + + input := `some_block {}` + eval := vm.New(parseBlock(t, input)) + + err := eval.Evaluate(nil, &block{}) + require.EqualError(t, err, `1:1: block "some_block" requires non-empty label`) + }) + + t.Run("Block must have non-empty label if struct accepts label", func(t *testing.T) { + type block struct { + Label string `river:",label"` + } + + input := `some_block "" {}` + eval := vm.New(parseBlock(t, input)) + + err := eval.Evaluate(nil, &block{}) + require.EqualError(t, err, `1:1: block "some_block" requires non-empty label`) + }) +} + +func TestVM_Block_Unmarshaler(t *testing.T) { + 
type OuterBlock struct { + FieldA string `river:"field_a,attr"` + Settings Setting `river:"some.settings,block"` + } + + input := ` + field_a = "foobar" + some.settings { + field_a = "fizzbuzz" + field_b = "helloworld" + } + ` + + file, err := parser.ParseFile(t.Name(), []byte(input)) + require.NoError(t, err) + + eval := vm.New(file) + + var actual OuterBlock + require.NoError(t, eval.Evaluate(nil, &actual)) + require.True(t, actual.Settings.UnmarshalCalled, "UnmarshalRiver did not get invoked") + require.True(t, actual.Settings.DefaultCalled, "SetToDefault did not get invoked") + require.True(t, actual.Settings.ValidateCalled, "Validate did not get invoked") +} + +func TestVM_Block_UnmarshalToMap(t *testing.T) { + type OuterBlock struct { + Settings map[string]interface{} `river:"some.settings,block"` + } + + tt := []struct { + name string + input string + expect OuterBlock + expectError string + }{ + { + name: "decodes successfully", + input: ` + some.settings { + field_a = 12345 + field_b = "helloworld" + } + `, + expect: OuterBlock{ + Settings: map[string]interface{}{ + "field_a": 12345, + "field_b": "helloworld", + }, + }, + }, + { + name: "rejects labeled blocks", + input: ` + some.settings "foo" { + field_a = 12345 + } + `, + expectError: `block "some.settings" requires non-empty label`, + }, + + { + name: "rejects nested maps", + input: ` + some.settings { + inner_map { + field_a = 12345 + } + } + `, + expectError: "nested blocks not supported here", + }, + } + + for _, tc := range tt { + t.Run(tc.name, func(t *testing.T) { + file, err := parser.ParseFile(t.Name(), []byte(tc.input)) + require.NoError(t, err) + + eval := vm.New(file) + + var actual OuterBlock + err = eval.Evaluate(nil, &actual) + + if tc.expectError == "" { + require.NoError(t, err) + require.Equal(t, tc.expect, actual) + } else { + require.ErrorContains(t, err, tc.expectError) + } + }) + } +} + +func TestVM_Block_UnmarshalToAny(t *testing.T) { + type OuterBlock struct { + Settings any `river:"some.settings,block"` + } + + input := ` + some.settings { + field_a = 12345 + field_b = "helloworld" + } + ` + + file, err := parser.ParseFile(t.Name(), []byte(input)) + require.NoError(t, err) + + eval := vm.New(file) + + var actual OuterBlock + require.NoError(t, eval.Evaluate(nil, &actual)) + + expect := map[string]interface{}{ + "field_a": 12345, + "field_b": "helloworld", + } + require.Equal(t, expect, actual.Settings) +} + +type Setting struct { + FieldA string `river:"field_a,attr"` + FieldB string `river:"field_b,attr"` + + UnmarshalCalled bool + DefaultCalled bool + ValidateCalled bool +} + +func (s *Setting) UnmarshalRiver(f func(interface{}) error) error { + s.UnmarshalCalled = true + return f((*settingUnmarshalTarget)(s)) +} + +type settingUnmarshalTarget Setting + +func (s *settingUnmarshalTarget) SetToDefault() { + s.DefaultCalled = true +} + +func (s *settingUnmarshalTarget) Validate() error { + s.ValidateCalled = true + return nil +} + +func parseBlock(t *testing.T, input string) *ast.BlockStmt { + t.Helper() + + res, err := parser.ParseFile("", []byte(input)) + require.NoError(t, err) + require.Len(t, res.Body, 1) + + stmt, ok := res.Body[0].(*ast.BlockStmt) + require.True(t, ok, "Expected stmt to be a ast.BlockStmt, got %T", res.Body[0]) + return stmt +} diff --git a/syntax/vm/vm_errors_test.go b/syntax/vm/vm_errors_test.go new file mode 100644 index 0000000000..87acdd7b1b --- /dev/null +++ b/syntax/vm/vm_errors_test.go @@ -0,0 +1,80 @@ +package vm_test + +import ( + "testing" + + 
"github.com/grafana/river/parser" + "github.com/grafana/river/vm" + "github.com/stretchr/testify/require" +) + +func TestVM_ExprErrors(t *testing.T) { + type Target struct { + Key struct { + Object struct { + Field1 []int `river:"field1,attr"` + } `river:"object,attr"` + } `river:"key,attr"` + } + + tt := []struct { + name string + input string + into interface{} + scope *vm.Scope + expect string + }{ + { + name: "basic wrong type", + input: `key = true`, + into: &Target{}, + expect: "test:1:7: true should be object, got bool", + }, + { + name: "deeply nested literal", + input: ` + key = { + object = { + field1 = [15, 30, "Hello, world!"], + }, + } + `, + into: &Target{}, + expect: `test:4:25: "Hello, world!" should be number, got string`, + }, + { + name: "deeply nested indirect", + input: `key = key_value`, + into: &Target{}, + scope: &vm.Scope{ + Variables: map[string]interface{}{ + "key_value": map[string]interface{}{ + "object": map[string]interface{}{ + "field1": []interface{}{15, 30, "Hello, world!"}, + }, + }, + }, + }, + expect: `test:1:7: key_value.object.field1[2] should be number, got string`, + }, + { + name: "complex expr", + input: `key = [0, 1, 2]`, + into: &struct { + Key string `river:"key,attr"` + }{}, + expect: `test:1:7: [0, 1, 2] should be string, got array`, + }, + } + + for _, tc := range tt { + t.Run(tc.name, func(t *testing.T) { + res, err := parser.ParseFile("test", []byte(tc.input)) + require.NoError(t, err) + + eval := vm.New(res) + err = eval.Evaluate(tc.scope, tc.into) + require.EqualError(t, err, tc.expect) + }) + } +} diff --git a/syntax/vm/vm_stdlib_test.go b/syntax/vm/vm_stdlib_test.go new file mode 100644 index 0000000000..f395f199e5 --- /dev/null +++ b/syntax/vm/vm_stdlib_test.go @@ -0,0 +1,232 @@ +package vm_test + +import ( + "fmt" + "reflect" + "testing" + + "github.com/grafana/river/internal/value" + "github.com/grafana/river/parser" + "github.com/grafana/river/rivertypes" + "github.com/grafana/river/vm" + "github.com/stretchr/testify/require" +) + +func TestVM_Stdlib(t *testing.T) { + t.Setenv("TEST_VAR", "Hello!") + + tt := []struct { + name string + input string + expect interface{} + }{ + {"env", `env("TEST_VAR")`, string("Hello!")}, + {"concat", `concat([true, "foo"], [], [false, 1])`, []interface{}{true, "foo", false, 1}}, + {"json_decode object", `json_decode("{\"foo\": \"bar\"}")`, map[string]interface{}{"foo": "bar"}}, + {"json_decode array", `json_decode("[0, 1, 2]")`, []interface{}{float64(0), float64(1), float64(2)}}, + {"json_decode nil field", `json_decode("{\"foo\": null}")`, map[string]interface{}{"foo": nil}}, + {"json_decode nil array element", `json_decode("[0, null]")`, []interface{}{float64(0), nil}}, + } + + for _, tc := range tt { + t.Run(tc.name, func(t *testing.T) { + expr, err := parser.ParseExpression(tc.input) + require.NoError(t, err) + + eval := vm.New(expr) + + rv := reflect.New(reflect.TypeOf(tc.expect)) + require.NoError(t, eval.Evaluate(nil, rv.Interface())) + require.Equal(t, tc.expect, rv.Elem().Interface()) + }) + } +} + +func TestStdlibCoalesce(t *testing.T) { + t.Setenv("TEST_VAR2", "Hello!") + + tt := []struct { + name string + input string + expect interface{} + }{ + {"coalesce()", `coalesce()`, value.Null}, + {"coalesce(string)", `coalesce("Hello!")`, string("Hello!")}, + {"coalesce(string, string)", `coalesce(env("TEST_VAR2"), "World!")`, string("Hello!")}, + {"(string, string) with fallback", `coalesce(env("NON_DEFINED"), "World!")`, string("World!")}, + {"coalesce(list, list)", `coalesce([], 
["fallback"])`, []string{"fallback"}}, + {"coalesce(list, list) with fallback", `coalesce(concat(["item"]), ["fallback"])`, []string{"item"}}, + {"coalesce(int, int, int)", `coalesce(0, 1, 2)`, 1}, + {"coalesce(bool, int, int)", `coalesce(false, 1, 2)`, 1}, + {"coalesce(bool, bool)", `coalesce(false, true)`, true}, + {"coalesce(list, bool)", `coalesce(json_decode("[]"), true)`, true}, + {"coalesce(object, true) and return true", `coalesce(json_decode("{}"), true)`, true}, + {"coalesce(object, false) and return false", `coalesce(json_decode("{}"), false)`, false}, + {"coalesce(list, nil)", `coalesce([],null)`, value.Null}, + } + + for _, tc := range tt { + t.Run(tc.name, func(t *testing.T) { + expr, err := parser.ParseExpression(tc.input) + require.NoError(t, err) + + eval := vm.New(expr) + + rv := reflect.New(reflect.TypeOf(tc.expect)) + require.NoError(t, eval.Evaluate(nil, rv.Interface())) + require.Equal(t, tc.expect, rv.Elem().Interface()) + }) + } +} + +func TestStdlibJsonPath(t *testing.T) { + tt := []struct { + name string + input string + expect interface{} + }{ + {"json_path with simple json", `json_path("{\"a\": \"b\"}", ".a")`, []string{"b"}}, + {"json_path with simple json without results", `json_path("{\"a\": \"b\"}", ".nonexists")`, []string{}}, + {"json_path with json array", `json_path("[{\"name\": \"Department\",\"value\": \"IT\"},{\"name\":\"ReferenceNumber\",\"value\":\"123456\"},{\"name\":\"TestStatus\",\"value\":\"Pending\"}]", "[?(@.name == \"Department\")].value")`, []string{"IT"}}, + {"json_path with simple json and return first", `json_path("{\"a\": \"b\"}", ".a")[0]`, "b"}, + } + + for _, tc := range tt { + t.Run(tc.name, func(t *testing.T) { + expr, err := parser.ParseExpression(tc.input) + require.NoError(t, err) + + eval := vm.New(expr) + + rv := reflect.New(reflect.TypeOf(tc.expect)) + require.NoError(t, eval.Evaluate(nil, rv.Interface())) + require.Equal(t, tc.expect, rv.Elem().Interface()) + }) + } +} + +func TestStdlib_Nonsensitive(t *testing.T) { + scope := &vm.Scope{ + Variables: map[string]any{ + "secret": rivertypes.Secret("foo"), + "optionalSecret": rivertypes.OptionalSecret{Value: "bar"}, + }, + } + + tt := []struct { + name string + input string + expect interface{} + }{ + {"secret to string", `nonsensitive(secret)`, string("foo")}, + {"optional secret to string", `nonsensitive(optionalSecret)`, string("bar")}, + } + + for _, tc := range tt { + t.Run(tc.name, func(t *testing.T) { + expr, err := parser.ParseExpression(tc.input) + require.NoError(t, err) + + eval := vm.New(expr) + + rv := reflect.New(reflect.TypeOf(tc.expect)) + require.NoError(t, eval.Evaluate(scope, rv.Interface())) + require.Equal(t, tc.expect, rv.Elem().Interface()) + }) + } +} +func TestStdlib_StringFunc(t *testing.T) { + scope := &vm.Scope{ + Variables: map[string]any{}, + } + + tt := []struct { + name string + input string + expect interface{} + }{ + {"to_lower", `to_lower("String")`, "string"}, + {"to_upper", `to_upper("string")`, "STRING"}, + {"trimspace", `trim_space(" string \n\n")`, "string"}, + {"trimspace+to_upper+trim", `to_lower(to_upper(trim_space(" String ")))`, "string"}, + {"split", `split("/aaa/bbb/ccc/ddd", "/")`, []string{"", "aaa", "bbb", "ccc", "ddd"}}, + {"split+index", `split("/aaa/bbb/ccc/ddd", "/")[0]`, ""}, + {"join+split", `join(split("/aaa/bbb/ccc/ddd", "/"), "/")`, "/aaa/bbb/ccc/ddd"}, + {"join", `join(["foo", "bar", "baz"], ", ")`, "foo, bar, baz"}, + {"join w/ int", `join([0, 0, 1], ", ")`, "0, 0, 1"}, + {"format", `format("Hello %s", "World")`, 
"Hello World"}, + {"format+int", `format("%#v", 1)`, "1"}, + {"format+bool", `format("%#v", true)`, "true"}, + {"format+quote", `format("%q", "hello")`, `"hello"`}, + {"replace", `replace("Hello World", " World", "!")`, "Hello!"}, + {"trim", `trim("?!hello?!", "!?")`, "hello"}, + {"trim2", `trim(" hello! world.! ", "! ")`, "hello! world."}, + {"trim_prefix", `trim_prefix("helloworld", "hello")`, "world"}, + {"trim_suffix", `trim_suffix("helloworld", "world")`, "hello"}, + } + + for _, tc := range tt { + t.Run(tc.name, func(t *testing.T) { + expr, err := parser.ParseExpression(tc.input) + require.NoError(t, err) + + eval := vm.New(expr) + + rv := reflect.New(reflect.TypeOf(tc.expect)) + require.NoError(t, eval.Evaluate(scope, rv.Interface())) + require.Equal(t, tc.expect, rv.Elem().Interface()) + }) + } +} + +func BenchmarkConcat(b *testing.B) { + // There's a bit of setup work to do here: we want to create a scope holding + // a slice of the Person type, which has a fair amount of data in it. + // + // We then want to pass it through concat. + // + // If the code path is fully optimized, there will be no intermediate + // translations to interface{}. + type Person struct { + Name string `river:"name,attr"` + Attrs map[string]string `river:"attrs,attr"` + } + type Body struct { + Values []Person `river:"values,attr"` + } + + in := `values = concat(values_ref)` + f, err := parser.ParseFile("", []byte(in)) + require.NoError(b, err) + + eval := vm.New(f) + + valuesRef := make([]Person, 0, 20) + for i := 0; i < 20; i++ { + data := make(map[string]string, 20) + for j := 0; j < 20; j++ { + var ( + key = fmt.Sprintf("key_%d", i+1) + value = fmt.Sprintf("value_%d", i+1) + ) + data[key] = value + } + valuesRef = append(valuesRef, Person{ + Name: "Test Person", + Attrs: data, + }) + } + scope := &vm.Scope{ + Variables: map[string]interface{}{ + "values_ref": valuesRef, + }, + } + + // Reset timer before running the actual test + b.ResetTimer() + + for i := 0; i < b.N; i++ { + var b Body + _ = eval.Evaluate(scope, &b) + } +} diff --git a/syntax/vm/vm_test.go b/syntax/vm/vm_test.go new file mode 100644 index 0000000000..5591b08d88 --- /dev/null +++ b/syntax/vm/vm_test.go @@ -0,0 +1,277 @@ +package vm_test + +import ( + "reflect" + "strings" + "testing" + "unicode" + + "github.com/grafana/river/parser" + "github.com/grafana/river/scanner" + "github.com/grafana/river/token" + "github.com/grafana/river/vm" + "github.com/stretchr/testify/require" +) + +func TestVM_Evaluate_Literals(t *testing.T) { + tt := map[string]struct { + input string + expect interface{} + }{ + "number to int": {`12`, int(12)}, + "number to int8": {`13`, int8(13)}, + "number to int16": {`14`, int16(14)}, + "number to int32": {`15`, int32(15)}, + "number to int64": {`16`, int64(16)}, + "number to uint": {`17`, uint(17)}, + "number to uint8": {`18`, uint8(18)}, + "number to uint16": {`19`, uint16(19)}, + "number to uint32": {`20`, uint32(20)}, + "number to uint64": {`21`, uint64(21)}, + "number to float32": {`22`, float32(22)}, + "number to float64": {`23`, float64(23)}, + "number to string": {`24`, string("24")}, + + "float to float32": {`3.2`, float32(3.2)}, + "float to float64": {`3.5`, float64(3.5)}, + "float to string": {`3.9`, string("3.9")}, + + "float with dot to float32": {`.2`, float32(0.2)}, + "float with dot to float64": {`.5`, float64(0.5)}, + "float with dot to string": {`.9`, string("0.9")}, + + "string to string": {`"Hello, world!"`, string("Hello, world!")}, + "string to int": {`"12"`, int(12)}, + "string to float64": 
{`"12"`, float64(12)}, + } + + for name, tc := range tt { + t.Run(name, func(t *testing.T) { + expr, err := parser.ParseExpression(tc.input) + require.NoError(t, err) + + eval := vm.New(expr) + + vPtr := reflect.New(reflect.TypeOf(tc.expect)).Interface() + require.NoError(t, eval.Evaluate(nil, vPtr)) + + actual := reflect.ValueOf(vPtr).Elem().Interface() + require.Equal(t, tc.expect, actual) + }) + } +} + +func TestVM_Evaluate(t *testing.T) { + // Shared scope across all tests below + scope := &vm.Scope{ + Variables: map[string]interface{}{ + "foobar": int(42), + }, + } + + tt := []struct { + input string + expect interface{} + }{ + // Binops + {`true || false`, bool(true)}, + {`false || false`, bool(false)}, + {`true && false`, bool(false)}, + {`true && true`, bool(true)}, + {`3 == 5`, bool(false)}, + {`3 == 3`, bool(true)}, + {`3 != 5`, bool(true)}, + {`3 < 5`, bool(true)}, + {`3 <= 5`, bool(true)}, + {`3 > 5`, bool(false)}, + {`3 >= 5`, bool(false)}, + {`3 + 5`, int(8)}, + {`3 - 5`, int(-2)}, + {`3 * 5`, int(15)}, + {`3.0 / 5.0`, float64(0.6)}, + {`5 % 3`, int(2)}, + {`3 ^ 5`, int(243)}, + {`3 + 5 * 2`, int(13)}, // Chain multiple binops + {`42.0^-2`, float64(0.0005668934240362812)}, + + // Identifier + {`foobar`, int(42)}, + + // Arrays + {`[]`, []int{}}, + {`[0, 1, 2]`, []int{0, 1, 2}}, + {`[true, false]`, []bool{true, false}}, + + // Objects + {`{ a = 5, b = 10 }`, map[string]int{"a": 5, "b": 10}}, + { + input: `{ + name = "John Doe", + age = 42, + }`, + expect: struct { + Name string `river:"name,attr"` + Age int `river:"age,attr"` + Country string `river:"country,attr,optional"` + }{ + Name: "John Doe", + Age: 42, + }, + }, + + // Access + {`{ a = 15 }.a`, int(15)}, + {`{ a = { b = 12 } }.a.b`, int(12)}, + {`{}["foo"]`, nil}, + + // Indexing + {`[0, 1, 2][1]`, int(1)}, + {`[[1,2,3]][0][2]`, int(3)}, + {`[true, false][0]`, bool(true)}, + + // Paren + {`(15)`, int(15)}, + + // Unary + {`!true`, bool(false)}, + {`!false`, bool(true)}, + {`-15`, int(-15)}, + } + + for _, tc := range tt { + name := trimWhitespace(tc.input) + + t.Run(name, func(t *testing.T) { + expr, err := parser.ParseExpression(tc.input) + require.NoError(t, err) + + eval := vm.New(expr) + + var vPtr any + if tc.expect != nil { + vPtr = reflect.New(reflect.TypeOf(tc.expect)).Interface() + } else { + // Create a new any pointer. 
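+ // reflect.TypeOf((*any)(nil)).Elem() yields the reflect.Type for the
+ // interface type any itself; reflect.TypeOf(nil) on its own returns
+ // nil and would panic inside reflect.New.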
+ vPtr = reflect.New(reflect.TypeOf((*any)(nil)).Elem()).Interface() + } + + require.NoError(t, eval.Evaluate(scope, vPtr)) + + actual := reflect.ValueOf(vPtr).Elem().Interface() + require.Equal(t, tc.expect, actual) + }) + } +} + +func TestVM_Evaluate_Null(t *testing.T) { + expr, err := parser.ParseExpression("null") + require.NoError(t, err) + + eval := vm.New(expr) + + var v interface{} + require.NoError(t, eval.Evaluate(nil, &v)) + require.Nil(t, v) +} + +func TestVM_Evaluate_IdentifierExpr(t *testing.T) { + t.Run("Valid lookup", func(t *testing.T) { + scope := &vm.Scope{ + Variables: map[string]interface{}{ + "foobar": 15, + }, + } + + expr, err := parser.ParseExpression(`foobar`) + require.NoError(t, err) + + eval := vm.New(expr) + + var actual int + require.NoError(t, eval.Evaluate(scope, &actual)) + require.Equal(t, 15, actual) + }) + + t.Run("Invalid lookup", func(t *testing.T) { + expr, err := parser.ParseExpression(`foobar`) + require.NoError(t, err) + + eval := vm.New(expr) + + var v interface{} + err = eval.Evaluate(nil, &v) + require.EqualError(t, err, `1:1: identifier "foobar" does not exist`) + }) +} + +func TestVM_Evaluate_AccessExpr(t *testing.T) { + t.Run("Lookup optional field", func(t *testing.T) { + type Person struct { + Name string `river:"name,attr,optional"` + } + + scope := &vm.Scope{ + Variables: map[string]interface{}{ + "person": Person{}, + }, + } + + expr, err := parser.ParseExpression(`person.name`) + require.NoError(t, err) + + eval := vm.New(expr) + + var actual string + require.NoError(t, eval.Evaluate(scope, &actual)) + require.Equal(t, "", actual) + }) + + t.Run("Invalid lookup 1", func(t *testing.T) { + expr, err := parser.ParseExpression(`{ a = 15 }.b`) + require.NoError(t, err) + + eval := vm.New(expr) + + var v interface{} + err = eval.Evaluate(nil, &v) + require.EqualError(t, err, `1:12: field "b" does not exist`) + }) + + t.Run("Invalid lookup 2", func(t *testing.T) { + _, err := parser.ParseExpression(`{ a = 15 }.7`) + require.EqualError(t, err, `1:11: expected TERMINATOR, got FLOAT`) + }) + + t.Run("Invalid lookup 3", func(t *testing.T) { + _, err := parser.ParseExpression(`{ a = { b = 12 }.7 }.a.b`) + require.EqualError(t, err, `1:17: missing ',' in field list`) + }) + + t.Run("Invalid lookup 4", func(t *testing.T) { + _, err := parser.ParseExpression(`{ a = { b = 12 } }.a.b.7`) + require.EqualError(t, err, `1:23: expected TERMINATOR, got FLOAT`) + }) +} + +func trimWhitespace(in string) string { + f := token.NewFile("") + + s := scanner.New(f, []byte(in), nil, 0) + + var out strings.Builder + + for { + _, tok, lit := s.Scan() + if tok == token.EOF { + break + } + + if lit != "" { + _, _ = out.WriteString(lit) + } else { + _, _ = out.WriteString(tok.String()) + } + } + + return strings.TrimFunc(out.String(), unicode.IsSpace) +} From 50fcb6feca938550135775ee80beabf120c42a1e Mon Sep 17 00:00:00 2001 From: Robert Fratto Date: Fri, 1 Mar 2024 12:19:02 -0500 Subject: [PATCH 007/136] misc: replace as many user-facing references to River as possible (#18) Replace as many user-facing references to River as possible with "syntax", "configuration", "Alloy configuration syntax", or "Alloy configuration". This is a first pass, and more passes will be needed closer to the 1.0 launch.
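For illustration, a minimal, hypothetical sketch (not part of this patch) of how the reworded error surfaces to a caller. It uses only the parser and vm APIs already exercised in the tests above; the expression is deliberately mistyped so that Evaluate should fail:

```go
package main

import (
	"fmt"

	"github.com/grafana/river/parser"
	"github.com/grafana/river/vm"
)

func main() {
	// A bool literal cannot be decoded into an int, so Evaluate is
	// expected to return an error here.
	expr, err := parser.ParseExpression(`true`)
	if err != nil {
		panic(err)
	}

	var n int
	if err := vm.New(expr).Evaluate(nil, &n); err != nil {
		// Before this patch, callers wrapped the error as "decoding River: ...";
		// after it, the user-facing wording is "decoding configuration: ...".
		fmt.Println(fmt.Errorf("decoding configuration: %w", err))
	}
}
```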
--- .../controller/node_builtin_component.go | 4 +-- .../controller/node_config_argument.go | 2 +- .../internal/controller/node_config_export.go | 2 +- .../controller/node_config_logging.go | 2 +- .../controller/node_config_tracing.go | 2 +- .../controller/node_custom_component.go | 2 +- .../flow/internal/controller/node_service.go | 4 +-- .../flow/internal/importsource/import_file.go | 2 +- .../flow/internal/importsource/import_git.go | 2 +- .../flow/internal/importsource/import_http.go | 2 +- .../internal/importsource/import_string.go | 2 +- internal/flow/module.go | 4 +-- internal/flowmode/cmd_convert.go | 4 +-- internal/flowmode/cmd_fmt.go | 6 ++-- internal/flowmode/cmd_run.go | 5 ++-- internal/service/remotecfg/remotecfg_test.go | 2 +- syntax/ast/walk.go | 2 +- syntax/encoding/riverjson/riverjson.go | 10 +++---- syntax/internal/rivertags/rivertags.go | 28 +++++++++---------- syntax/token/builder/builder.go | 6 ++-- syntax/vm/op_binary.go | 6 ++-- syntax/vm/op_unary.go | 2 +- syntax/vm/struct_decoder.go | 12 ++++---- syntax/vm/vm.go | 20 ++++++------- 24 files changed, 67 insertions(+), 66 deletions(-) diff --git a/internal/flow/internal/controller/node_builtin_component.go b/internal/flow/internal/controller/node_builtin_component.go index 028ca23627..f646b608ba 100644 --- a/internal/flow/internal/controller/node_builtin_component.go +++ b/internal/flow/internal/controller/node_builtin_component.go @@ -224,7 +224,7 @@ func (cn *BuiltinComponentNode) NodeID() string { return cn.nodeID } // BuiltinComponentNode. func (cn *BuiltinComponentNode) UpdateBlock(b *ast.BlockStmt) { if !BlockComponentID(b).Equals(cn.id) { - panic("UpdateBlock called with an River block with a different component ID") + panic("UpdateBlock called with a block with a different component ID") } cn.mut.Lock() @@ -258,7 +258,7 @@ func (cn *BuiltinComponentNode) evaluate(scope *vm.Scope) error { argsPointer := cn.reg.CloneArguments() if err := cn.eval.Evaluate(scope, argsPointer); err != nil { - return fmt.Errorf("decoding River: %w", err) + return fmt.Errorf("decoding configuration: %w", err) } // args is always a pointer to the args type, so we want to deference it since diff --git a/internal/flow/internal/controller/node_config_argument.go b/internal/flow/internal/controller/node_config_argument.go index 5b979503f8..233b4580b4 100644 --- a/internal/flow/internal/controller/node_config_argument.go +++ b/internal/flow/internal/controller/node_config_argument.go @@ -53,7 +53,7 @@ func (cn *ArgumentConfigNode) Evaluate(scope *vm.Scope) error { var argument argumentBlock if err := cn.eval.Evaluate(scope, &argument); err != nil { - return fmt.Errorf("decoding River: %w", err) + return fmt.Errorf("decoding configuration: %w", err) } cn.defaultValue = argument.Default diff --git a/internal/flow/internal/controller/node_config_export.go b/internal/flow/internal/controller/node_config_export.go index 027335e629..891f044d4a 100644 --- a/internal/flow/internal/controller/node_config_export.go +++ b/internal/flow/internal/controller/node_config_export.go @@ -50,7 +50,7 @@ func (cn *ExportConfigNode) Evaluate(scope *vm.Scope) error { var export exportBlock if err := cn.eval.Evaluate(scope, &export); err != nil { - return fmt.Errorf("decoding River: %w", err) + return fmt.Errorf("decoding configuration: %w", err) } cn.value = export.Value return nil diff --git a/internal/flow/internal/controller/node_config_logging.go b/internal/flow/internal/controller/node_config_logging.go index f5757138bd..edee9f0dca 100644 ---
a/internal/flow/internal/controller/node_config_logging.go +++ b/internal/flow/internal/controller/node_config_logging.go @@ -60,7 +60,7 @@ func (cn *LoggingConfigNode) Evaluate(scope *vm.Scope) error { args := logging.DefaultOptions if cn.eval != nil { if err := cn.eval.Evaluate(scope, &args); err != nil { - return fmt.Errorf("decoding River: %w", err) + return fmt.Errorf("decoding configuration: %w", err) } } diff --git a/internal/flow/internal/controller/node_config_tracing.go b/internal/flow/internal/controller/node_config_tracing.go index c8d77b46b8..9c1c35db77 100644 --- a/internal/flow/internal/controller/node_config_tracing.go +++ b/internal/flow/internal/controller/node_config_tracing.go @@ -60,7 +60,7 @@ func (cn *TracingConfigNode) Evaluate(scope *vm.Scope) error { args := tracing.DefaultOptions if cn.eval != nil { if err := cn.eval.Evaluate(scope, &args); err != nil { - return fmt.Errorf("decoding River: %w", err) + return fmt.Errorf("decoding configuration: %w", err) } } diff --git a/internal/flow/internal/controller/node_custom_component.go b/internal/flow/internal/controller/node_custom_component.go index 8b3e05a74e..742f27324f 100644 --- a/internal/flow/internal/controller/node_custom_component.go +++ b/internal/flow/internal/controller/node_custom_component.go @@ -181,7 +181,7 @@ func (cn *CustomComponentNode) evaluate(evalScope *vm.Scope) error { var args map[string]any if err := cn.eval.Evaluate(evalScope, &args); err != nil { - return fmt.Errorf("decoding River: %w", err) + return fmt.Errorf("decoding configuration: %w", err) } cn.args = args diff --git a/internal/flow/internal/controller/node_service.go b/internal/flow/internal/controller/node_service.go index e3746ac88b..e7eadaa498 100644 --- a/internal/flow/internal/controller/node_service.go +++ b/internal/flow/internal/controller/node_service.go @@ -64,7 +64,7 @@ func (sn *ServiceNode) Block() *ast.BlockStmt { // ServiceNode. 
func (sn *ServiceNode) UpdateBlock(b *ast.BlockStmt) { if b != nil && !BlockComponentID(b).Equals([]string{sn.NodeID()}) { - panic("UpdateBlock called with a River block with a different block ID") + panic("UpdateBlock called with a block with a different block ID") } sn.mut.Lock() @@ -97,7 +97,7 @@ func (sn *ServiceNode) Evaluate(scope *vm.Scope) error { argsPointer := reflect.New(reflect.TypeOf(sn.def.ConfigType)).Interface() if err := sn.eval.Evaluate(scope, argsPointer); err != nil { - return fmt.Errorf("decoding River: %w", err) + return fmt.Errorf("decoding configuration: %w", err) } // args is always a pointer to the args type, so we want to deference it diff --git a/internal/flow/internal/importsource/import_file.go b/internal/flow/internal/importsource/import_file.go index bc6690967e..9d919269f6 100644 --- a/internal/flow/internal/importsource/import_file.go +++ b/internal/flow/internal/importsource/import_file.go @@ -59,7 +59,7 @@ func (a *importFileConfigBlock) SetToDefault() { func (im *ImportFile) Evaluate(scope *vm.Scope) error { var arguments importFileConfigBlock if err := im.eval.Evaluate(scope, &arguments); err != nil { - return fmt.Errorf("decoding River: %w", err) + return fmt.Errorf("decoding configuration: %w", err) } if im.fileComponent == nil { var err error diff --git a/internal/flow/internal/importsource/import_git.go b/internal/flow/internal/importsource/import_git.go index 6c3490eb9b..2624b60f7b 100644 --- a/internal/flow/internal/importsource/import_git.go +++ b/internal/flow/internal/importsource/import_git.go @@ -74,7 +74,7 @@ func NewImportGit(managedOpts component.Options, eval *vm.Evaluator, onContentCh func (im *ImportGit) Evaluate(scope *vm.Scope) error { var arguments GitArguments if err := im.eval.Evaluate(scope, &arguments); err != nil { - return fmt.Errorf("decoding River: %w", err) + return fmt.Errorf("decoding configuration: %w", err) } if reflect.DeepEqual(im.args, arguments) { diff --git a/internal/flow/internal/importsource/import_http.go b/internal/flow/internal/importsource/import_http.go index 959f24fe8c..e7cfd90634 100644 --- a/internal/flow/internal/importsource/import_http.go +++ b/internal/flow/internal/importsource/import_http.go @@ -63,7 +63,7 @@ func (args *HTTPArguments) SetToDefault() { func (im *ImportHTTP) Evaluate(scope *vm.Scope) error { var arguments HTTPArguments if err := im.eval.Evaluate(scope, &arguments); err != nil { - return fmt.Errorf("decoding River: %w", err) + return fmt.Errorf("decoding configuration: %w", err) } if im.managedRemoteHTTP == nil { var err error diff --git a/internal/flow/internal/importsource/import_string.go b/internal/flow/internal/importsource/import_string.go index fc07583627..b490137cf5 100644 --- a/internal/flow/internal/importsource/import_string.go +++ b/internal/flow/internal/importsource/import_string.go @@ -33,7 +33,7 @@ type importStringConfigBlock struct { func (im *ImportString) Evaluate(scope *vm.Scope) error { var arguments importStringConfigBlock if err := im.eval.Evaluate(scope, &arguments); err != nil { - return fmt.Errorf("decoding River: %w", err) + return fmt.Errorf("decoding configuration: %w", err) } if reflect.DeepEqual(im.arguments, arguments) { diff --git a/internal/flow/module.go b/internal/flow/module.go index edc3c23852..9c99c62fc7 100644 --- a/internal/flow/module.go +++ b/internal/flow/module.go @@ -40,7 +40,7 @@ func newModuleController(o *moduleControllerOptions) controller.ModuleController // NewModule creates a new, unstarted Module. 
func (m *moduleController) NewModule(id string, export component.ExportFunc) (component.Module, error) { if id != "" && !scanner.IsValidIdentifier(id) { - return nil, fmt.Errorf("module ID %q is not a valid River identifier", id) + return nil, fmt.Errorf("module ID %q is not a valid identifier", id) } m.mut.Lock() @@ -63,7 +63,7 @@ func (m *moduleController) NewModule(id string, export component.ExportFunc) (co // NewCustomComponent creates a new, unstarted CustomComponent. func (m *moduleController) NewCustomComponent(id string, export component.ExportFunc) (controller.CustomComponent, error) { if id != "" && !scanner.IsValidIdentifier(id) { - return nil, fmt.Errorf("customComponent ID %q is not a valid River identifier", id) + return nil, fmt.Errorf("customComponent ID %q is not a valid identifier", id) } m.mut.Lock() diff --git a/internal/flowmode/cmd_convert.go b/internal/flowmode/cmd_convert.go index bffc9ef1f3..e3c703f310 100644 --- a/internal/flowmode/cmd_convert.go +++ b/internal/flowmode/cmd_convert.go @@ -26,9 +26,9 @@ func convertCommand() *cobra.Command { cmd := &cobra.Command{ Use: "convert [flags] [file]", - Short: "Convert a supported config file to River", + Short: "Convert a supported config file to Alloy", Long: `The convert subcommand translates a supported config file to -a River configuration file. +an Alloy configuration file. If the file argument is not supplied or if the file argument is "-", then convert will read from stdin. diff --git a/internal/flowmode/cmd_fmt.go b/internal/flowmode/cmd_fmt.go index ba3885c2d5..caacc9cd4b 100644 --- a/internal/flowmode/cmd_fmt.go +++ b/internal/flowmode/cmd_fmt.go @@ -21,9 +21,9 @@ func fmtCommand() *cobra.Command { cmd := &cobra.Command{ Use: "fmt [flags] file", - Short: "Format a River file", - Long: `The fmt subcommand applies standard formatting rules to the specified -River configuration file. + Short: "Format a configuration file", + Long: `The fmt subcommand applies standard formatting rules to the +specified configuration file. If the file argument is not supplied or if the file argument is "-", then fmt will read from stdin. diff --git a/internal/flowmode/cmd_run.go b/internal/flowmode/cmd_run.go index 3bbd9889a3..0d7ae56aa9 100644 --- a/internal/flowmode/cmd_run.go +++ b/internal/flowmode/cmd_run.go @@ -68,8 +68,9 @@ func runCommand() *cobra.Command { Long: `The run subcommand runs Grafana Agent Flow in the foreground until an interrupt is received. -run must be provided an argument pointing at the River dir/file-path to use. If the -River dir/file-path wasn't specified, can't be loaded, or contains errors, run will exit +run must be provided an argument pointing at the Alloy configuration +directory or file path to use. If the configuration directory or file path +wasn't specified, can't be loaded, or contains errors, run will exit immediately. If path is a directory, all *.river files in that directory will be combined diff --git a/internal/service/remotecfg/remotecfg_test.go b/internal/service/remotecfg/remotecfg_test.go index a370419369..24ba779193 100644 --- a/internal/service/remotecfg/remotecfg_test.go +++ b/internal/service/remotecfg/remotecfg_test.go @@ -42,7 +42,7 @@ func TestOnDiskCache(t *testing.T) { env.svc.asClient = client // Mock client to return an unparseable response. - client.getConfigFunc = buildGetConfigHandler("unparseable river config") + client.getConfigFunc = buildGetConfigHandler("unparseable config") // Write the cache contents, and run the service.
err := os.WriteFile(env.svc.dataPath, []byte(cacheContents), 0644) diff --git a/syntax/ast/walk.go b/syntax/ast/walk.go index df3f82d9a3..8620a9f411 100644 --- a/syntax/ast/walk.go +++ b/syntax/ast/walk.go @@ -66,7 +66,7 @@ func Walk(v Visitor, node Node) { case *ParenExpr: Walk(v, n.Inner) default: - panic(fmt.Sprintf("river/ast: unexpected node type %T", n)) + panic(fmt.Sprintf("syntax/ast: unexpected node type %T", n)) } v.Visit(nil) diff --git a/syntax/encoding/riverjson/riverjson.go b/syntax/encoding/riverjson/riverjson.go index fc69882918..4c42d2803e 100644 --- a/syntax/encoding/riverjson/riverjson.go +++ b/syntax/encoding/riverjson/riverjson.go @@ -61,7 +61,7 @@ func encodeStructAsBody(rv reflect.Value) jsonBody { case reflect.Map: if rv.Type().Key().Kind() != reflect.String { - panic("river/encoding/riverjson: unsupported map type; expected map[string]T, got " + rv.Type().String()) + panic("syntax/encoding/riverjson: unsupported map type; expected map[string]T, got " + rv.Type().String()) } iter := rv.MapRange() @@ -76,7 +76,7 @@ func encodeStructAsBody(rv reflect.Value) jsonBody { } default: - panic(fmt.Sprintf("river/encoding/riverjson: can only encode struct or map[string]T values to bodies, got %s", rv.Kind())) + panic(fmt.Sprintf("syntax/encoding/riverjson: can only encode struct or map[string]T values to bodies, got %s", rv.Kind())) } return body @@ -111,7 +111,7 @@ func encodeFieldAsStatements(prefix []string, field rivertags.Field, fieldValue // Iterate over the map and add each element as an attribute into it. if fieldValue.Type().Key().Kind() != reflect.String { - panic("river/encoding/riverjson: unsupported map type for block; expected map[string]T, got " + fieldValue.Type().String()) + panic("syntax/encoding/riverjson: unsupported map type for block; expected map[string]T, got " + fieldValue.Type().String()) } statements := []jsonStatement{} @@ -182,7 +182,7 @@ func encodeFieldAsStatements(prefix []string, field rivertags.Field, fieldValue return statements default: - panic(fmt.Sprintf("river/encoding/riverjson: unrecognized enum kind %s", fieldValue.Kind())) + panic(fmt.Sprintf("syntax/encoding/riverjson: unrecognized enum kind %s", fieldValue.Kind())) } } @@ -308,6 +308,6 @@ func buildJSONValue(v value.Value) jsonValue { return jsonValue{Type: "capsule", Value: v.Describe()} default: - panic(fmt.Sprintf("river/encoding/riverjson: unrecognized value type %q", v.Type())) + panic(fmt.Sprintf("syntax/encoding/riverjson: unrecognized value type %q", v.Type())) } } diff --git a/syntax/internal/rivertags/rivertags.go b/syntax/internal/rivertags/rivertags.go index 8186a644e0..dcd2115d9f 100644 --- a/syntax/internal/rivertags/rivertags.go +++ b/syntax/internal/rivertags/rivertags.go @@ -148,7 +148,7 @@ func (f Field) IsLabel() bool { return f.Flags&FlagLabel != 0 } // `river:",label"` tags, which must be strings. 
func Get(ty reflect.Type) []Field { if k := ty.Kind(); k != reflect.Struct { - panic(fmt.Sprintf("rivertags: Get requires struct kind, got %s", k)) + panic(fmt.Sprintf("syntaxtags: Get requires struct kind, got %s", k)) } var ( @@ -161,7 +161,7 @@ func Get(ty reflect.Type) []Field { for _, field := range reflect.VisibleFields(ty) { // River does not support embedding of fields if field.Anonymous { - panic(fmt.Sprintf("river: anonymous fields not supported %s", printPathToField(ty, field.Index))) + panic(fmt.Sprintf("syntax: anonymous fields not supported %s", printPathToField(ty, field.Index))) } tag, tagged := field.Tag.Lookup("river") @@ -170,15 +170,15 @@ func Get(ty reflect.Type) []Field { } if !field.IsExported() { - panic(fmt.Sprintf("river: river tag found on unexported field at %s", printPathToField(ty, field.Index))) + panic(fmt.Sprintf("syntax: river tag found on unexported field at %s", printPathToField(ty, field.Index))) } options := strings.SplitN(tag, ",", 2) if len(options) == 0 { - panic(fmt.Sprintf("river: unsupported empty tag at %s", printPathToField(ty, field.Index))) + panic(fmt.Sprintf("syntax: unsupported empty tag at %s", printPathToField(ty, field.Index))) } if len(options) != 2 { - panic(fmt.Sprintf("river: field %s tag is missing options", printPathToField(ty, field.Index))) + panic(fmt.Sprintf("syntax: field %s tag is missing options", printPathToField(ty, field.Index))) } fullName := options[0] @@ -189,18 +189,18 @@ func Get(ty reflect.Type) []Field { } if first, used := usedNames[fullName]; used && fullName != "" { - panic(fmt.Sprintf("river: field name %s already used by %s", fullName, printPathToField(ty, first))) + panic(fmt.Sprintf("syntax: field name %s already used by %s", fullName, printPathToField(ty, first))) } usedNames[fullName] = tf.Index flags, ok := parseFlags(options[1]) if !ok { - panic(fmt.Sprintf("river: unrecognized river tag format %q at %s", tag, printPathToField(ty, tf.Index))) + panic(fmt.Sprintf("syntax: unrecognized river tag format %q at %s", tag, printPathToField(ty, tf.Index))) } tf.Flags = flags if len(tf.Name) > 1 && tf.Flags&(FlagBlock|FlagEnum) == 0 { - panic(fmt.Sprintf("river: field names with `.` may only be used by blocks or enums (found at %s)", printPathToField(ty, tf.Index))) + panic(fmt.Sprintf("syntax: field names with `.` may only be used by blocks or enums (found at %s)", printPathToField(ty, tf.Index))) } if tf.Flags&FlagEnum != 0 { @@ -211,21 +211,21 @@ func Get(ty reflect.Type) []Field { if tf.Flags&FlagLabel != 0 { if fullName != "" { - panic(fmt.Sprintf("river: label field at %s must not have a name", printPathToField(ty, tf.Index))) + panic(fmt.Sprintf("syntax: label field at %s must not have a name", printPathToField(ty, tf.Index))) } if field.Type.Kind() != reflect.String { - panic(fmt.Sprintf("river: label field at %s must be a string", printPathToField(ty, tf.Index))) + panic(fmt.Sprintf("syntax: label field at %s must be a string", printPathToField(ty, tf.Index))) } if usedLabelField != nil { - panic(fmt.Sprintf("river: label field already used by %s", printPathToField(ty, tf.Index))) + panic(fmt.Sprintf("syntax: label field already used by %s", printPathToField(ty, tf.Index))) } usedLabelField = tf.Index } if tf.Flags&FlagSquash != 0 { if fullName != "" { - panic(fmt.Sprintf("river: squash field at %s must not have a name", printPathToField(ty, tf.Index))) + panic(fmt.Sprintf("syntax: squash field at %s must not have a name", printPathToField(ty, tf.Index))) } innerType := deferenceType(field.Type) @@ 
-245,14 +245,14 @@ func Get(ty reflect.Type) []Field { } default: - panic(fmt.Sprintf("rivertags: squash field requires struct, got %s", innerType)) + panic(fmt.Sprintf("syntaxtags: squash field requires struct, got %s", innerType)) } continue } if fullName == "" && tf.Flags&(FlagLabel|FlagSquash) == 0 /* (e.g., *not* a label or squash) */ { - panic(fmt.Sprintf("river: non-empty field name required at %s", printPathToField(ty, tf.Index))) + panic(fmt.Sprintf("syntaxtags: non-empty field name required at %s", printPathToField(ty, tf.Index))) } fields = append(fields, tf) diff --git a/syntax/token/builder/builder.go b/syntax/token/builder/builder.go index 1dc9b5d62b..ed1fe375db 100644 --- a/syntax/token/builder/builder.go +++ b/syntax/token/builder/builder.go @@ -174,7 +174,7 @@ func (b *Body) encodeFields(rv reflect.Value) { rv = rv.Elem() } if rv.Kind() != reflect.Struct { - panic(fmt.Sprintf("river/token/builder: can only encode struct values to bodies, got %s", rv.Type())) + panic(fmt.Sprintf("syntax/token/builder: can only encode struct values to bodies, got %s", rv.Type())) } fields := rivertags.Get(rv.Type()) @@ -223,7 +223,7 @@ func (b *Body) encodeField(prefix []string, field rivertags.Field, fieldValue re case fieldValue.Kind() == reflect.Map: // Iterate over the map and add each element as an attribute into it. if fieldValue.Type().Key().Kind() != reflect.String { - panic("river/token/builder: unsupported map type for block; expected map[string]T, got " + fieldValue.Type().String()) + panic("syntax/token/builder: unsupported map type for block; expected map[string]T, got " + fieldValue.Type().String()) } inner := NewBlock(fullName, "") @@ -267,7 +267,7 @@ func (b *Body) encodeField(prefix []string, field rivertags.Field, fieldValue re } default: - panic(fmt.Sprintf("river/token/builder: unrecognized enum kind %s", fieldValue.Kind())) + panic(fmt.Sprintf("syntax/token/builder: unrecognized enum kind %s", fieldValue.Kind())) } } } diff --git a/syntax/vm/op_binary.go b/syntax/vm/op_binary.go index e329f6fc49..75f53b3a4e 100644 --- a/syntax/vm/op_binary.go +++ b/syntax/vm/op_binary.go @@ -204,7 +204,7 @@ func evalBinop(lhs value.Value, op token.Token, rhs value.Value) (value.Value, e } } - panic("river/vm: unreachable") + panic("syntax/vm: unreachable") } // tryUnwrapOptionalSecret accepts a value and, if it is a @@ -294,7 +294,7 @@ func valuesEqual(lhs value.Value, rhs value.Value) bool { return reflect.DeepEqual(lhs.Interface(), rhs.Interface()) } - panic("river/vm: unreachable") + panic("syntax/vm: unreachable") } // binopAllowedTypes maps what type of values are permitted for a specific @@ -322,7 +322,7 @@ var binopAllowedTypes = map[token.Token][]value.Type{ func acceptableBinopType(val value.Value, op token.Token) bool { allowed, ok := binopAllowedTypes[op] if !ok { - panic("river/vm: unexpected binop type") + panic("syntax/vm: unexpected binop type") } actualType := val.Type() diff --git a/syntax/vm/op_unary.go b/syntax/vm/op_unary.go index bc116d58bc..9e0ffbffc2 100644 --- a/syntax/vm/op_unary.go +++ b/syntax/vm/op_unary.go @@ -29,5 +29,5 @@ func evalUnaryOp(op token.Token, val value.Value) (value.Value, error) { } } - panic("river/vm: unreachable") + panic("syntax/vm: unreachable") } diff --git a/syntax/vm/struct_decoder.go b/syntax/vm/struct_decoder.go index 99a0a2358a..1c0cfdb9ee 100644 --- a/syntax/vm/struct_decoder.go +++ b/syntax/vm/struct_decoder.go @@ -25,7 +25,7 @@ func (st *structDecoder) Decode(stmts ast.Body, rv reflect.Value) error { // TODO(rfratto): potentially 
loosen this restriction and allow decoding into // an interface{} or map[string]interface{}. if rv.Kind() != reflect.Struct { - panic(fmt.Sprintf("river/vm: structDecoder expects struct, got %s", rv.Kind())) + panic(fmt.Sprintf("syntax/vm: structDecoder expects struct, got %s", rv.Kind())) } state := decodeOptions{ @@ -79,7 +79,7 @@ func (st *structDecoder) Decode(stmts ast.Body, rv reflect.Value) error { } default: - panic(fmt.Sprintf("river/vm: unrecognized node type %T", stmt)) + panic(fmt.Sprintf("syntax/vm: unrecognized node type %T", stmt)) } } @@ -228,7 +228,7 @@ func (st *structDecoder) decodeNormalBlock(fullName string, block *ast.BlockStmt blockIndex, ok := state.BlockIndex[block] if !ok { - panic("river/vm: block not found in index lookup table") + panic("syntax/vm: block not found in index lookup table") } decodeElement := prepareDecodeValue(decodeField.Index(blockIndex)) err := st.VM.evaluateBlockOrBody(st.Scope, st.Assoc, block, decodeElement) @@ -254,7 +254,7 @@ func (st *structDecoder) decodeNormalBlock(fullName string, block *ast.BlockStmt blockIndex, ok := state.BlockIndex[block] if !ok { - panic("river/vm: block not found in index lookup table") + panic("syntax/vm: block not found in index lookup table") } decodeElement := prepareDecodeValue(decodeField.Index(blockIndex)) err := st.VM.evaluateBlockOrBody(st.Scope, st.Assoc, block, decodeElement) @@ -297,7 +297,7 @@ func (st *structDecoder) decodeEnumBlock(fullName string, block *ast.BlockStmt, decodeField := prepareDecodeValue(enumField) if decodeField.Kind() != reflect.Slice { - panic("river/vm: enum field must be a slice kind, got " + decodeField.Kind().String()) + panic("syntax/vm: enum field must be a slice kind, got " + decodeField.Kind().String()) } // If this is the first time we've seen the enum, reset its length to zero. @@ -310,7 +310,7 @@ func (st *structDecoder) decodeEnumBlock(fullName string, block *ast.BlockStmt, // Prepare the enum element to decode into. 
enumIndex, ok := state.EnumIndex[block] if !ok { - panic("river/vm: enum block not found in index lookup table") + panic("syntax/vm: enum block not found in index lookup table") } enumElement := prepareDecodeValue(decodeField.Index(enumIndex)) diff --git a/syntax/vm/vm.go b/syntax/vm/vm.go index a9c6481593..205f561efe 100644 --- a/syntax/vm/vm.go +++ b/syntax/vm/vm.go @@ -53,19 +53,19 @@ func (vm *Evaluator) Evaluate(scope *Scope, v interface{}) (err error) { case *ast.BlockStmt, ast.Body: rv := reflect.ValueOf(v) if rv.Kind() != reflect.Pointer { - panic(fmt.Sprintf("river/vm: expected pointer, got %s", rv.Kind())) + panic(fmt.Sprintf("syntax/vm: expected pointer, got %s", rv.Kind())) } return vm.evaluateBlockOrBody(scope, assoc, node, rv) case *ast.File: rv := reflect.ValueOf(v) if rv.Kind() != reflect.Pointer { - panic(fmt.Sprintf("river/vm: expected pointer, got %s", rv.Kind())) + panic(fmt.Sprintf("syntax/vm: expected pointer, got %s", rv.Kind())) } return vm.evaluateBlockOrBody(scope, assoc, node.Body, rv) default: expr, ok := node.(ast.Expr) if !ok { - panic(fmt.Sprintf("river/vm: unexpected value type %T", node)) + panic(fmt.Sprintf("syntax/vm: unexpected value type %T", node)) } val, err := vm.evaluateExpr(scope, assoc, expr) if err != nil { @@ -108,7 +108,7 @@ func (vm *Evaluator) evaluateUnmarshalRiver(scope *Scope, assoc map[value.Value] return ru.UnmarshalRiver(func(v interface{}) error { rv := reflect.ValueOf(v) if rv.Kind() != reflect.Pointer { - panic(fmt.Sprintf("river/vm: expected pointer, got %s", rv.Kind())) + panic(fmt.Sprintf("syntax/vm: expected pointer, got %s", rv.Kind())) } return vm.evaluateBlockOrBody(scope, assoc, node, rv.Elem()) }), true @@ -141,7 +141,7 @@ func (vm *Evaluator) evaluateDecode(scope *Scope, assoc map[value.Value]ast.Node } else if rv.Kind() == reflect.Map { return vm.evaluateMap(scope, assoc, node, rv) } else if rv.Kind() != reflect.Struct { - panic(fmt.Sprintf("river/vm: can only evaluate blocks into structs, got %s", rv.Kind())) + panic(fmt.Sprintf("syntax/vm: can only evaluate blocks into structs, got %s", rv.Kind())) } ti := getCachedTagInfo(rv.Type()) @@ -157,7 +157,7 @@ func (vm *Evaluator) evaluateDecode(scope *Scope, assoc map[value.Value]ast.Node case ast.Body: stmts = node default: - panic(fmt.Sprintf("river/vm: unrecognized node type %T", node)) + panic(fmt.Sprintf("syntax/vm: unrecognized node type %T", node)) } sd := structDecoder{ @@ -187,7 +187,7 @@ func (vm *Evaluator) evaluateMap(scope *Scope, assoc map[value.Value]ast.Node, n case ast.Body: stmts = node default: - panic(fmt.Sprintf("river/vm: unrecognized node type %T", node)) + panic(fmt.Sprintf("syntax/vm: unrecognized node type %T", node)) } if rv.IsNil() { @@ -221,7 +221,7 @@ func (vm *Evaluator) evaluateMap(scope *Scope, assoc map[value.Value]ast.Node, n } default: - panic(fmt.Sprintf("river/vm: unrecognized node type %T", stmt)) + panic(fmt.Sprintf("syntax/vm: unrecognized node type %T", stmt)) } } @@ -274,7 +274,7 @@ func (vm *Evaluator) evaluateBlockLabel(node *ast.BlockStmt, tfs []rivertags.Fie ) if !reflect.TypeOf(node.Label).AssignableTo(fieldType) { // The Label struct field needs to be a string. - panic(fmt.Sprintf("river/vm: cannot assign block label to non-string type %s", fieldType)) + panic(fmt.Sprintf("syntax/vm: cannot assign block label to non-string type %s", fieldType)) } field.Set(reflect.ValueOf(node.Label)) return nil @@ -449,7 +449,7 @@ func (vm *Evaluator) evaluateExpr(scope *Scope, assoc map[value.Value]ast.Node, return funcVal.Call(args...) 
default: - panic(fmt.Sprintf("river/vm: unexpected ast.Expr type %T", expr)) + panic(fmt.Sprintf("syntax/vm: unexpected ast.Expr type %T", expr)) } } From f7e3998b0fb3738e34c228b101964a9759603152 Mon Sep 17 00:00:00 2001 From: Robert Fratto Date: Fri, 1 Mar 2024 12:31:33 -0500 Subject: [PATCH 008/136] misc: rename rivertags packages to syntaxtags (#19) This commit renames rivertags packages to syntaxtags, and replaces as many references of "River" to "syntax" or "Alloy syntax" as possible. This is a safe change for syntax/internal/rivertags as it is an internal package and cannot be imported from the top-level module. The tag names themselves have not yet changed, so those are left untouched. --- .../rivertags.go => syntaxtags/syntaxtags.go} | 46 ++++++------- internal/cmd/agentlint/main.go | 4 +- syntax/encoding/riverjson/riverjson.go | 12 ++-- syntax/internal/reflectutil/walk.go | 6 +- syntax/internal/reflectutil/walk_test.go | 8 +-- .../rivertags.go => syntaxtags/syntaxtags.go} | 8 +-- .../syntaxtags_test.go} | 66 +++++++++---------- syntax/internal/value/tag_cache.go | 20 +++--- syntax/token/builder/builder.go | 14 ++-- syntax/vm/struct_decoder.go | 4 +- syntax/vm/tag_cache.go | 14 ++-- syntax/vm/vm.go | 8 +-- 12 files changed, 105 insertions(+), 105 deletions(-) rename internal/cmd/agentlint/internal/{rivertags/rivertags.go => syntaxtags/syntaxtags.go} (88%) rename syntax/internal/{rivertags/rivertags.go => syntaxtags/syntaxtags.go} (98%) rename syntax/internal/{rivertags/rivertags_test.go => syntaxtags/syntaxtags_test.go} (62%) diff --git a/internal/cmd/agentlint/internal/rivertags/rivertags.go b/internal/cmd/agentlint/internal/syntaxtags/syntaxtags.go similarity index 88% rename from internal/cmd/agentlint/internal/rivertags/rivertags.go rename to internal/cmd/agentlint/internal/syntaxtags/syntaxtags.go index 23f7564744..1673861c6d 100644 --- a/internal/cmd/agentlint/internal/rivertags/rivertags.go +++ b/internal/cmd/agentlint/internal/syntaxtags/syntaxtags.go @@ -1,5 +1,5 @@ -// Package rivertags exposes an Analyzer which lints river tags. -package rivertags +// Package syntaxtags exposes an Analyzer which lints Alloy syntax tags. +package syntaxtags import ( "fmt" @@ -12,17 +12,17 @@ import ( ) var Analyzer = &analysis.Analyzer{ - Name: "rivertags", - Doc: "perform validation checks on River tags", + Name: "syntaxtags", + Doc: "perform validation checks on Alloy syntax tags", Run: run, } var noLintRegex = regexp.MustCompile(`//\s*nolint:(\S+)`) var ( - riverTagRegex = regexp.MustCompile(`river:"([^"]*)"`) - jsonTagRegex = regexp.MustCompile(`json:"([^"]*)"`) - yamlTagRegex = regexp.MustCompile(`yaml:"([^"]*)"`) + syntaxTagRegex = regexp.MustCompile(`river:"([^"]*)"`) + jsonTagRegex = regexp.MustCompile(`json:"([^"]*)"`) + yamlTagRegex = regexp.MustCompile(`yaml:"([^"]*)"`) ) // Rules for river tag linting: @@ -53,12 +53,12 @@ func run(p *analysis.Pass) (interface{}, error) { sNode := sInfo.Node s := sInfo.Type - var hasRiverTags bool + var hasSyntaxTags bool for i := 0; i < s.NumFields(); i++ { - matches := riverTagRegex.FindAllStringSubmatch(s.Tag(i), -1) + matches := syntaxTagRegex.FindAllStringSubmatch(s.Tag(i), -1) if len(matches) > 0 { - hasRiverTags = true + hasSyntaxTags = true break } } @@ -68,7 +68,7 @@ func run(p *analysis.Pass) (interface{}, error) { field := s.Field(i) nodeField := lookupField(sNode, i) - // Ignore fields with //nolint:rivertags in them. + // Ignore fields with //nolint:syntaxtags in them. 
if comments := nodeField.Comment; comments != nil { for _, comment := range comments.List { if lintingDisabled(comment.Text) { @@ -77,8 +77,8 @@ func run(p *analysis.Pass) (interface{}, error) { } } - matches := riverTagRegex.FindAllStringSubmatch(s.Tag(i), -1) - if len(matches) == 0 && hasRiverTags { + matches := syntaxTagRegex.FindAllStringSubmatch(s.Tag(i), -1) + if len(matches) == 0 && hasSyntaxTags { // If this struct has River tags, but this field only has json/yaml // tags, emit an error. jsonMatches := jsonTagRegex.FindAllStringSubmatch(s.Tag(i), -1) @@ -87,7 +87,7 @@ func run(p *analysis.Pass) (interface{}, error) { if len(jsonMatches) > 0 || len(yamlMatches) > 0 { p.Report(analysis.Diagnostic{ Pos: field.Pos(), - Category: "rivertags", + Category: "syntaxtags", Message: "field has yaml or json tags, but no river tags", }) } @@ -98,7 +98,7 @@ func run(p *analysis.Pass) (interface{}, error) { } else if len(matches) > 1 { p.Report(analysis.Diagnostic{ Pos: field.Pos(), - Category: "rivertags", + Category: "syntaxtags", Message: "field should not have more than one river tag", }) } @@ -107,14 +107,14 @@ func run(p *analysis.Pass) (interface{}, error) { if field.Anonymous() { p.Report(analysis.Diagnostic{ Pos: field.Pos(), - Category: "rivertags", + Category: "syntaxtags", Message: "river tags may not be given to anonymous fields", }) } if !field.Exported() { p.Report(analysis.Diagnostic{ Pos: field.Pos(), - Category: "rivertags", + Category: "syntaxtags", Message: "river tags may only be given to exported fields", }) } @@ -122,17 +122,17 @@ func run(p *analysis.Pass) (interface{}, error) { // Report "a, b, c int `river:"name,attr"`" as invalid usage. p.Report(analysis.Diagnostic{ Pos: field.Pos(), - Category: "rivertags", + Category: "syntaxtags", Message: "river tags should not be inserted on field names separated by commas", }) } for _, match := range matches { - diagnostics := lintRiverTag(field, match[1]) + diagnostics := lintSyntaxTag(field, match[1]) for _, diag := range diagnostics { p.Report(analysis.Diagnostic{ Pos: field.Pos(), - Category: "rivertags", + Category: "syntaxtags", Message: diag, }) } @@ -149,7 +149,7 @@ func lintingDisabled(comment string) bool { for _, match := range matches { // Iterate over A,B,C by comma and see if our linter is included. 
for _, disabledLinter := range strings.Split(match[1], ",") { - if disabledLinter == "rivertags" { + if disabledLinter == "syntaxtags" { return true } } @@ -216,7 +216,7 @@ type structInfo struct { Type *types.Struct } -func lintRiverTag(ty *types.Var, tag string) (diagnostics []string) { +func lintSyntaxTag(ty *types.Var, tag string) (diagnostics []string) { if tag == "" { diagnostics = append(diagnostics, "river tag should not be empty") return @@ -319,7 +319,7 @@ var fieldNameRegex = regexp.MustCompile("^[a-z][a-z0-9_]*$") func validateFieldName(name string) (diagnostics []string) { if !fieldNameRegex.MatchString(name) { - msg := fmt.Sprintf("%q must be a valid river snake_case identifier", name) + msg := fmt.Sprintf("%q must be a valid syntax snake_case identifier", name) diagnostics = append(diagnostics, msg) } diff --git a/internal/cmd/agentlint/main.go b/internal/cmd/agentlint/main.go index d580d4a324..551642f3ae 100644 --- a/internal/cmd/agentlint/main.go +++ b/internal/cmd/agentlint/main.go @@ -4,13 +4,13 @@ package main import ( "github.com/grafana/agent/internal/cmd/agentlint/internal/findcomponents" - "github.com/grafana/agent/internal/cmd/agentlint/internal/rivertags" + "github.com/grafana/agent/internal/cmd/agentlint/internal/syntaxtags" "golang.org/x/tools/go/analysis/multichecker" ) func main() { multichecker.Main( findcomponents.Analyzer, - rivertags.Analyzer, + syntaxtags.Analyzer, ) } diff --git a/syntax/encoding/riverjson/riverjson.go b/syntax/encoding/riverjson/riverjson.go index 4c42d2803e..7b4ef76dc3 100644 --- a/syntax/encoding/riverjson/riverjson.go +++ b/syntax/encoding/riverjson/riverjson.go @@ -9,7 +9,7 @@ import ( "strings" "github.com/grafana/river/internal/reflectutil" - "github.com/grafana/river/internal/rivertags" + "github.com/grafana/river/internal/syntaxtags" "github.com/grafana/river/internal/value" "github.com/grafana/river/token/builder" ) @@ -39,7 +39,7 @@ func encodeStructAsBody(rv reflect.Value) jsonBody { switch rv.Kind() { case reflect.Struct: - fields := rivertags.Get(rv.Type()) + fields := syntaxtags.Get(rv.Type()) defaults := reflect.New(rv.Type()).Elem() if defaults.CanAddr() && defaults.Addr().Type().Implements(goRiverDefaulter) { defaults.Addr().Interface().(value.Defaulter).SetToDefault() @@ -85,7 +85,7 @@ func encodeStructAsBody(rv reflect.Value) jsonBody { // encodeFieldAsStatements encodes an individual field from a struct as a set // of statements. One field may map to multiple statements in the case of a // slice of blocks. -func encodeFieldAsStatements(prefix []string, field rivertags.Field, fieldValue reflect.Value) []jsonStatement { +func encodeFieldAsStatements(prefix []string, field syntaxtags.Field, fieldValue reflect.Value) []jsonStatement { fieldName := strings.Join(field.Name, ".") for fieldValue.Kind() == reflect.Pointer { @@ -204,9 +204,9 @@ func mergeStringSlice(a, b []string) []string { // getBlockLabel returns the label for a given block. 
func getBlockLabel(rv reflect.Value) string { - tags := rivertags.Get(rv.Type()) + tags := syntaxtags.Get(rv.Type()) for _, tag := range tags { - if tag.Flags&rivertags.FlagLabel != 0 { + if tag.Flags&syntaxtags.FlagLabel != 0 { return reflectutil.Get(rv, tag).String() } } @@ -222,7 +222,7 @@ func encodeEnumElementToStatements(prefix []string, enumElement reflect.Value) [ enumElement = enumElement.Elem() } - fields := rivertags.Get(enumElement.Type()) + fields := syntaxtags.Get(enumElement.Type()) statements := []jsonStatement{} diff --git a/syntax/internal/reflectutil/walk.go b/syntax/internal/reflectutil/walk.go index 17cbb25d49..ff7f9b927b 100644 --- a/syntax/internal/reflectutil/walk.go +++ b/syntax/internal/reflectutil/walk.go @@ -3,12 +3,12 @@ package reflectutil import ( "reflect" - "github.com/grafana/river/internal/rivertags" + "github.com/grafana/river/internal/syntaxtags" ) // GetOrAlloc returns the nested field of value corresponding to index. // GetOrAlloc panics if not given a struct. -func GetOrAlloc(value reflect.Value, field rivertags.Field) reflect.Value { +func GetOrAlloc(value reflect.Value, field syntaxtags.Field) reflect.Value { return GetOrAllocIndex(value, field.Index) } @@ -51,7 +51,7 @@ func deferencePointer(value reflect.Value) reflect.Value { // It is similar to [reflect/Value.FieldByIndex] but can handle traversing // through nil pointers. If Get traverses through a nil pointer, a non-settable // zero value for the final field is returned. -func Get(value reflect.Value, field rivertags.Field) reflect.Value { +func Get(value reflect.Value, field syntaxtags.Field) reflect.Value { if len(field.Index) == 1 { return value.Field(field.Index[0]) } diff --git a/syntax/internal/reflectutil/walk_test.go b/syntax/internal/reflectutil/walk_test.go index f536770e5a..8381125d9e 100644 --- a/syntax/internal/reflectutil/walk_test.go +++ b/syntax/internal/reflectutil/walk_test.go @@ -5,7 +5,7 @@ import ( "testing" "github.com/grafana/river/internal/reflectutil" - "github.com/grafana/river/internal/rivertags" + "github.com/grafana/river/internal/syntaxtags" "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" ) @@ -25,7 +25,7 @@ func TestDeeplyNested_Access(t *testing.T) { s.Field1.Field2.Field3.Value = "Hello, world!" 
rv := reflect.ValueOf(&s).Elem() - innerValue := reflectutil.GetOrAlloc(rv, rivertags.Field{Index: []int{0, 0, 0, 0}}) + innerValue := reflectutil.GetOrAlloc(rv, syntaxtags.Field{Index: []int{0, 0, 0, 0}}) assert.True(t, innerValue.CanSet()) assert.Equal(t, reflect.String, innerValue.Kind()) } @@ -44,7 +44,7 @@ func TestDeeplyNested_Allocate(t *testing.T) { var s Struct rv := reflect.ValueOf(&s).Elem() - innerValue := reflectutil.GetOrAlloc(rv, rivertags.Field{Index: []int{0, 0, 0, 0}}) + innerValue := reflectutil.GetOrAlloc(rv, syntaxtags.Field{Index: []int{0, 0, 0, 0}}) require.True(t, innerValue.CanSet()) require.Equal(t, reflect.String, innerValue.Kind()) @@ -66,7 +66,7 @@ func TestDeeplyNested_NoAllocate(t *testing.T) { var s Struct rv := reflect.ValueOf(&s).Elem() - innerValue := reflectutil.Get(rv, rivertags.Field{Index: []int{0, 0, 0, 0}}) + innerValue := reflectutil.Get(rv, syntaxtags.Field{Index: []int{0, 0, 0, 0}}) assert.False(t, innerValue.CanSet()) assert.Equal(t, reflect.String, innerValue.Kind()) } diff --git a/syntax/internal/rivertags/rivertags.go b/syntax/internal/syntaxtags/syntaxtags.go similarity index 98% rename from syntax/internal/rivertags/rivertags.go rename to syntax/internal/syntaxtags/syntaxtags.go index dcd2115d9f..3c1f8f859b 100644 --- a/syntax/internal/rivertags/rivertags.go +++ b/syntax/internal/syntaxtags/syntaxtags.go @@ -1,6 +1,6 @@ -// Package rivertags decodes a struct type into river object -// and structural tags. -package rivertags +// Package syntaxtags decodes a struct type into syntax object and structural +// tags. +package syntaxtags import ( "fmt" @@ -159,7 +159,7 @@ func Get(ty reflect.Type) []Field { ) for _, field := range reflect.VisibleFields(ty) { - // River does not support embedding of fields + // Alloy's syntax does not support embedding of fields if field.Anonymous { panic(fmt.Sprintf("syntax: anonymous fields not supported %s", printPathToField(ty, field.Index))) } diff --git a/syntax/internal/rivertags/rivertags_test.go b/syntax/internal/syntaxtags/syntaxtags_test.go similarity index 62% rename from syntax/internal/rivertags/rivertags_test.go rename to syntax/internal/syntaxtags/syntaxtags_test.go index 43370b33b4..654512d791 100644 --- a/syntax/internal/rivertags/rivertags_test.go +++ b/syntax/internal/syntaxtags/syntaxtags_test.go @@ -1,10 +1,10 @@ -package rivertags_test +package syntaxtags_test import ( "reflect" "testing" - "github.com/grafana/river/internal/rivertags" + "github.com/grafana/river/internal/syntaxtags" "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" ) @@ -22,16 +22,16 @@ func Test_Get(t *testing.T) { Label string `river:",label"` } - fs := rivertags.Get(reflect.TypeOf(Struct{})) + fs := syntaxtags.Get(reflect.TypeOf(Struct{})) - expect := []rivertags.Field{ - {[]string{"req_attr"}, []int{1}, rivertags.FlagAttr}, - {[]string{"opt_attr"}, []int{2}, rivertags.FlagAttr | rivertags.FlagOptional}, - {[]string{"req_block"}, []int{3}, rivertags.FlagBlock}, - {[]string{"opt_block"}, []int{4}, rivertags.FlagBlock | rivertags.FlagOptional}, - {[]string{"req_enum"}, []int{5}, rivertags.FlagEnum}, - {[]string{"opt_enum"}, []int{6}, rivertags.FlagEnum | rivertags.FlagOptional}, - {[]string{""}, []int{7}, rivertags.FlagLabel}, + expect := []syntaxtags.Field{ + {[]string{"req_attr"}, []int{1}, syntaxtags.FlagAttr}, + {[]string{"opt_attr"}, []int{2}, syntaxtags.FlagAttr | syntaxtags.FlagOptional}, + {[]string{"req_block"}, []int{3}, syntaxtags.FlagBlock}, + {[]string{"opt_block"}, []int{4}, 
syntaxtags.FlagBlock | syntaxtags.FlagOptional}, + {[]string{"req_enum"}, []int{5}, syntaxtags.FlagEnum}, + {[]string{"opt_enum"}, []int{6}, syntaxtags.FlagEnum | syntaxtags.FlagOptional}, + {[]string{""}, []int{7}, syntaxtags.FlagLabel}, } require.Equal(t, expect, fs) @@ -48,7 +48,7 @@ func TestEmbedded(t *testing.T) { InnerStruct Field2 string `river:"parent_field_2,attr"` } - require.PanicsWithValue(t, "river: anonymous fields not supported rivertags_test.Struct.InnerStruct", func() { rivertags.Get(reflect.TypeOf(Struct{})) }) + require.PanicsWithValue(t, "syntax: anonymous fields not supported syntaxtags_test.Struct.InnerStruct", func() { syntaxtags.Get(reflect.TypeOf(Struct{})) }) } func TestSquash(t *testing.T) { @@ -69,33 +69,33 @@ func TestSquash(t *testing.T) { Field2 string `river:"parent_field_2,attr"` } - expect := []rivertags.Field{ + expect := []syntaxtags.Field{ { Name: []string{"parent_field_1"}, Index: []int{0}, - Flags: rivertags.FlagAttr, + Flags: syntaxtags.FlagAttr, }, { Name: []string{"inner_field_1"}, Index: []int{1, 0}, - Flags: rivertags.FlagAttr, + Flags: syntaxtags.FlagAttr, }, { Name: []string{"inner_field_2"}, Index: []int{1, 1}, - Flags: rivertags.FlagAttr, + Flags: syntaxtags.FlagAttr, }, { Name: []string{"parent_field_2"}, Index: []int{2}, - Flags: rivertags.FlagAttr, + Flags: syntaxtags.FlagAttr, }, } - structActual := rivertags.Get(reflect.TypeOf(Struct{})) + structActual := syntaxtags.Get(reflect.TypeOf(Struct{})) assert.Equal(t, expect, structActual) - structPointerActual := rivertags.Get(reflect.TypeOf(StructWithPointer{})) + structPointerActual := syntaxtags.Get(reflect.TypeOf(StructWithPointer{})) assert.Equal(t, expect, structPointerActual) } @@ -113,20 +113,20 @@ func TestDeepSquash(t *testing.T) { Inner InnerStruct `river:",squash"` } - expect := []rivertags.Field{ + expect := []syntaxtags.Field{ { Name: []string{"inner_field_1"}, Index: []int{0, 0, 0}, - Flags: rivertags.FlagAttr, + Flags: syntaxtags.FlagAttr, }, { Name: []string{"inner_field_2"}, Index: []int{0, 0, 1}, - Flags: rivertags.FlagAttr, + Flags: syntaxtags.FlagAttr, }, } - structActual := rivertags.Get(reflect.TypeOf(Struct{})) + structActual := syntaxtags.Get(reflect.TypeOf(Struct{})) assert.Equal(t, expect, structActual) } @@ -134,40 +134,40 @@ func Test_Get_Panics(t *testing.T) { expectPanic := func(t *testing.T, expect string, v interface{}) { t.Helper() require.PanicsWithValue(t, expect, func() { - _ = rivertags.Get(reflect.TypeOf(v)) + _ = syntaxtags.Get(reflect.TypeOf(v)) }) } t.Run("Tagged fields must be exported", func(t *testing.T) { type Struct struct { - attr string `river:"field,attr"` // nolint:unused //nolint:rivertags + attr string `river:"field,attr"` // nolint:unused //nolint:syntaxtags } - expect := `river: river tag found on unexported field at rivertags_test.Struct.attr` + expect := `syntax: river tag found on unexported field at syntaxtags_test.Struct.attr` expectPanic(t, expect, Struct{}) }) t.Run("Options are required", func(t *testing.T) { type Struct struct { - Attr string `river:"field"` //nolint:rivertags + Attr string `river:"field"` //nolint:syntaxtags } - expect := `river: field rivertags_test.Struct.Attr tag is missing options` + expect := `syntax: field syntaxtags_test.Struct.Attr tag is missing options` expectPanic(t, expect, Struct{}) }) t.Run("Field names must be unique", func(t *testing.T) { type Struct struct { Attr string `river:"field1,attr"` - Block string `river:"field1,block,optional"` //nolint:rivertags + Block string 
`river:"field1,block,optional"` //nolint:syntaxtags } - expect := `river: field name field1 already used by rivertags_test.Struct.Attr` + expect := `syntax: field name field1 already used by syntaxtags_test.Struct.Attr` expectPanic(t, expect, Struct{}) }) t.Run("Name is required for non-label field", func(t *testing.T) { type Struct struct { - Attr string `river:",attr"` //nolint:rivertags + Attr string `river:",attr"` //nolint:syntaxtags } - expect := `river: non-empty field name required at rivertags_test.Struct.Attr` + expect := `syntaxtags: non-empty field name required at syntaxtags_test.Struct.Attr` expectPanic(t, expect, Struct{}) }) @@ -176,7 +176,7 @@ func Test_Get_Panics(t *testing.T) { Label1 string `river:",label"` Label2 string `river:",label"` } - expect := `river: label field already used by rivertags_test.Struct.Label2` + expect := `syntax: label field already used by syntaxtags_test.Struct.Label2` expectPanic(t, expect, Struct{}) }) } diff --git a/syntax/internal/value/tag_cache.go b/syntax/internal/value/tag_cache.go index 2bce16209d..491a74b81d 100644 --- a/syntax/internal/value/tag_cache.go +++ b/syntax/internal/value/tag_cache.go @@ -3,7 +3,7 @@ package value import ( "reflect" - "github.com/grafana/river/internal/rivertags" + "github.com/grafana/river/internal/syntaxtags" ) // tagsCache caches the river tags for a struct type. This is never cleared, @@ -20,17 +20,17 @@ func getCachedTags(t reflect.Type) *objectFields { return entry } - ff := rivertags.Get(t) + ff := syntaxtags.Get(t) // Build a tree of keys. tree := &objectFields{ - fields: make(map[string]rivertags.Field), + fields: make(map[string]syntaxtags.Field), nestedFields: make(map[string]*objectFields), keys: []string{}, } for _, f := range ff { - if f.Flags&rivertags.FlagLabel != 0 { + if f.Flags&syntaxtags.FlagLabel != 0 { // Skip over label tags. tree.labelField = f continue @@ -52,7 +52,7 @@ func getCachedTags(t reflect.Type) *objectFields { inner, ok := node.nestedFields[name] if !ok { inner = &objectFields{ - fields: make(map[string]rivertags.Field), + fields: make(map[string]syntaxtags.Field), nestedFields: make(map[string]*objectFields), keys: []string{}, } @@ -66,14 +66,14 @@ func getCachedTags(t reflect.Type) *objectFields { return tree } -// objectFields is a parsed tree of fields in rivertags. It forms a tree where +// objectFields is a parsed tree of fields in syntaxtags. It forms a tree where // leaves are nested fields (e.g., for block names that have multiple name // fragments) and nodes are the fields themselves. type objectFields struct { - fields map[string]rivertags.Field + fields map[string]syntaxtags.Field nestedFields map[string]*objectFields keys []string // Combination of fields + nestedFields - labelField rivertags.Field + labelField syntaxtags.Field } type objectKeyType int @@ -103,7 +103,7 @@ func (of *objectFields) Len() int { return len(of.keys) } func (of *objectFields) Keys() []string { return of.keys } // Field gets a non-nested field. Returns false if name is a nested field. -func (of *objectFields) Field(name string) (rivertags.Field, bool) { +func (of *objectFields) Field(name string) (syntaxtags.Field, bool) { f, ok := of.fields[name] return f, ok } @@ -116,6 +116,6 @@ func (of *objectFields) NestedField(name string) (*objectFields, bool) { } // LabelField returns the field used for the label (if any). 
-func (of *objectFields) LabelField() (rivertags.Field, bool) { +func (of *objectFields) LabelField() (syntaxtags.Field, bool) { return of.labelField, of.labelField.Index != nil } diff --git a/syntax/token/builder/builder.go b/syntax/token/builder/builder.go index ed1fe375db..b92408dbd4 100644 --- a/syntax/token/builder/builder.go +++ b/syntax/token/builder/builder.go @@ -10,7 +10,7 @@ import ( "strings" "github.com/grafana/river/internal/reflectutil" - "github.com/grafana/river/internal/rivertags" + "github.com/grafana/river/internal/syntaxtags" "github.com/grafana/river/internal/value" "github.com/grafana/river/token" ) @@ -156,9 +156,9 @@ func (b *Body) AppendFrom(goValue interface{}) { // getBlockLabel returns the label for a given block. func getBlockLabel(rv reflect.Value) string { - tags := rivertags.Get(rv.Type()) + tags := syntaxtags.Get(rv.Type()) for _, tag := range tags { - if tag.Flags&rivertags.FlagLabel != 0 { + if tag.Flags&syntaxtags.FlagLabel != 0 { return reflectutil.Get(rv, tag).String() } } @@ -177,7 +177,7 @@ func (b *Body) encodeFields(rv reflect.Value) { panic(fmt.Sprintf("syntax/token/builder: can only encode struct values to bodies, got %s", rv.Type())) } - fields := rivertags.Get(rv.Type()) + fields := syntaxtags.Get(rv.Type()) defaults := reflect.New(rv.Type()).Elem() if defaults.CanAddr() && defaults.Addr().Type().Implements(goRiverDefaulter) { defaults.Addr().Interface().(value.Defaulter).SetToDefault() @@ -202,7 +202,7 @@ func (b *Body) encodeFields(rv reflect.Value) { } } -func (b *Body) encodeField(prefix []string, field rivertags.Field, fieldValue reflect.Value) { +func (b *Body) encodeField(prefix []string, field syntaxtags.Field, fieldValue reflect.Value) { fieldName := strings.Join(field.Name, ".") for fieldValue.Kind() == reflect.Pointer { @@ -243,7 +243,7 @@ func (b *Body) encodeField(prefix []string, field rivertags.Field, fieldValue re // Recursively call encodeField for each element in the slice/array for // non-zero blocks. The recursive call will hit the case below and add // a new block for each field encountered. - if field.Flags&rivertags.FlagOptional != 0 && elem.IsZero() { + if field.Flags&syntaxtags.FlagOptional != 0 && elem.IsZero() { continue } b.encodeField(prefix, field, elem) @@ -293,7 +293,7 @@ func (b *Body) encodeEnumElement(prefix []string, enumElement reflect.Value) { enumElement = enumElement.Elem() } - fields := rivertags.Get(enumElement.Type()) + fields := syntaxtags.Get(enumElement.Type()) // Find the first non-zero field and encode it. 
for _, field := range fields { diff --git a/syntax/vm/struct_decoder.go b/syntax/vm/struct_decoder.go index 1c0cfdb9ee..cff40e2e93 100644 --- a/syntax/vm/struct_decoder.go +++ b/syntax/vm/struct_decoder.go @@ -8,7 +8,7 @@ import ( "github.com/grafana/river/ast" "github.com/grafana/river/diag" "github.com/grafana/river/internal/reflectutil" - "github.com/grafana/river/internal/rivertags" + "github.com/grafana/river/internal/syntaxtags" "github.com/grafana/river/internal/value" ) @@ -110,7 +110,7 @@ func (st *structDecoder) Decode(stmts ast.Body, rv reflect.Value) error { } type decodeOptions struct { - Tags map[string]rivertags.Field + Tags map[string]syntaxtags.Field EnumBlocks map[string]enumBlock SeenAttrs, SeenBlocks, SeenEnums map[string]struct{} diff --git a/syntax/vm/tag_cache.go b/syntax/vm/tag_cache.go index f9c1b69c56..8f1b534556 100644 --- a/syntax/vm/tag_cache.go +++ b/syntax/vm/tag_cache.go @@ -5,7 +5,7 @@ import ( "strings" "sync" - "github.com/grafana/river/internal/rivertags" + "github.com/grafana/river/internal/syntaxtags" ) // tagsCache caches the river tags for a struct type. This is never cleared, @@ -22,10 +22,10 @@ func getCachedTagInfo(t reflect.Type) *tagInfo { return entry.(*tagInfo) } - tfs := rivertags.Get(t) + tfs := syntaxtags.Get(t) ti := &tagInfo{ Tags: tfs, - TagLookup: make(map[string]rivertags.Field, len(tfs)), + TagLookup: make(map[string]syntaxtags.Field, len(tfs)), EnumLookup: make(map[string]enumBlock), // The length is not known ahead of time } @@ -66,8 +66,8 @@ func deferenceType(ty reflect.Type) reflect.Type { } type tagInfo struct { - Tags []rivertags.Field - TagLookup map[string]rivertags.Field + Tags []syntaxtags.Field + TagLookup map[string]syntaxtags.Field // EnumLookup maps enum blocks to the enum field. For example, an enum block // called "foo.foo" and "foo.bar" will both map to the "foo" enum field. 
@@ -75,6 +75,6 @@ type tagInfo struct { } type enumBlock struct { - EnumField rivertags.Field // Field in the parent struct of the enum slice - BlockField rivertags.Field // Field in the enum struct for the enum block + EnumField syntaxtags.Field // Field in the parent struct of the enum slice + BlockField syntaxtags.Field // Field in the enum struct for the enum block } diff --git a/syntax/vm/vm.go b/syntax/vm/vm.go index 205f561efe..42d2a3b942 100644 --- a/syntax/vm/vm.go +++ b/syntax/vm/vm.go @@ -9,8 +9,8 @@ import ( "github.com/grafana/river/ast" "github.com/grafana/river/diag" "github.com/grafana/river/internal/reflectutil" - "github.com/grafana/river/internal/rivertags" "github.com/grafana/river/internal/stdlib" + "github.com/grafana/river/internal/syntaxtags" "github.com/grafana/river/internal/value" ) @@ -228,13 +228,13 @@ func (vm *Evaluator) evaluateMap(scope *Scope, assoc map[value.Value]ast.Node, n return nil } -func (vm *Evaluator) evaluateBlockLabel(node *ast.BlockStmt, tfs []rivertags.Field, rv reflect.Value) error { +func (vm *Evaluator) evaluateBlockLabel(node *ast.BlockStmt, tfs []syntaxtags.Field, rv reflect.Value) error { var ( - labelField rivertags.Field + labelField syntaxtags.Field foundField bool ) for _, tf := range tfs { - if tf.Flags&rivertags.FlagLabel != 0 { + if tf.Flags&syntaxtags.FlagLabel != 0 { labelField = tf foundField = true break From 7e6bd2f487fae01ac95f06382e071f0627e88c65 Mon Sep 17 00:00:00 2001 From: Robert Fratto Date: Fri, 1 Mar 2024 13:11:42 -0500 Subject: [PATCH 009/136] Remove classic modules (#20) This commit: * Removes the ability for users to configure classic module loaders (module.string, module.file, module.git, module.http) * Removes documentation for the removed components * Removes documentation referencing "classic modules" However, the code which enables classic modules has not been removed; this removal will come in a future clean-up effort. 
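For migration context: the `argument` and `export` blocks now only appear inside a `declare` block (see the argument.md and export.md changes below). A minimal sketch of the deleted classic log-filter module re-expressed as a custom component — the `log_filter` name is illustrative, not part of this patch:

```river
declare "log_filter" {
  // Replaces the classic top-level argument block.
  argument "write_to" {
    optional = false
  }

  loki.process "filter" {
    // Drop all debug- and info-level logs.
    stage.match {
      selector = "{job!=\"\"} |~ \"level=(debug|info)\""
      action   = "drop"
    }
    forward_to = argument.write_to.value
  }

  // Replaces the classic top-level export block.
  export "filter_input" {
    value = loki.process.filter.receiver
  }
}
```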
--- docs/sources/concepts/modules.md | 136 ----------- .../reference/components/module.file.md | 159 ------------- .../reference/components/module.git.md | 212 ------------------ .../reference/components/module.http.md | 167 -------------- .../reference/components/module.string.md | 158 ------------- .../reference/config-blocks/argument.md | 7 - .../sources/reference/config-blocks/export.md | 7 - internal/component/all/all.go | 4 - internal/flow/componenttest/testfailmodule.go | 2 +- internal/flow/import_test.go | 2 +- .../testcomponents}/module/file/file.go | 2 +- .../testcomponents}/module/git/git.go | 2 +- .../testcomponents}/module/http/http.go | 2 +- .../internal/testcomponents}/module/module.go | 0 .../testcomponents}/module/string/string.go | 2 +- internal/flow/module_eval_test.go | 2 +- 16 files changed, 7 insertions(+), 857 deletions(-) delete mode 100644 docs/sources/reference/components/module.file.md delete mode 100644 docs/sources/reference/components/module.git.md delete mode 100644 docs/sources/reference/components/module.http.md delete mode 100644 docs/sources/reference/components/module.string.md rename internal/{component => flow/internal/testcomponents}/module/file/file.go (98%) rename internal/{component => flow/internal/testcomponents}/module/git/git.go (98%) rename internal/{component => flow/internal/testcomponents}/module/http/http.go (98%) rename internal/{component => flow/internal/testcomponents}/module/module.go (100%) rename internal/{component => flow/internal/testcomponents}/module/string/string.go (96%) diff --git a/docs/sources/concepts/modules.md b/docs/sources/concepts/modules.md index 37853be78f..fb0bf2abe6 100644 --- a/docs/sources/concepts/modules.md +++ b/docs/sources/concepts/modules.md @@ -98,142 +98,6 @@ loki.write "default" { } ``` -{{< collapse title="Classic modules" >}} -# Classic modules (deprecated) - -{{< admonition type="caution" >}} -Modules were redesigned in v0.40 to simplify concepts. -This section outlines the design of the original modules prior to v0.40. -Classic modules are scheduled to be removed in the release after v0.40. -{{< /admonition >}} - -You use _Modules_ to create {{< param "PRODUCT_NAME" >}} configurations that you can load as a component. -Modules are a great way to parameterize a configuration to create reusable pipelines. - -Modules are {{< param "PRODUCT_NAME" >}} configurations which have: - -* _Arguments_: Settings that configure a module. -* _Exports_: Named values that a module exposes to the consumer of the module. -* _Components_: {{< param "PRODUCT_NAME" >}} components to run when the module is running. - -You use a [Module loader][] to load Modules into {{< param "PRODUCT_NAME" >}}. - -Refer to [argument block][] and [export block][] to learn how to define arguments and exports for a module. - -## Module loaders - -A _Module loader_ is a {{< param "PRODUCT_NAME" >}} component that retrieves a module and runs the defined components. - -Module loader components are responsible for the following functions: - -* Retrieving the module source. -* Creating a [Component controller][] for the module. -* Passing arguments to the loaded module. -* Exposing exports from the loaded module. - -Module loaders are typically called `module.LOADER_NAME`. - -{{< admonition type="note" >}} -Some module loaders may not support running modules with arguments or exports. -{{< /admonition >}} - -Refer to [Components][] for more information about the module loader components. 
- -## Module sources - -Modules are flexible, and you can retrieve their configuration anywhere, such as: - -* The local filesystem. -* An S3 bucket. -* An HTTP endpoint. - -Each module loader component supports different ways of retrieving `module.sources`. -The most generic module loader component, `module.string`, can load modules from the export of another {{< param "PRODUCT_NAME" >}} component. - -```river -local.file "my_module" { - filename = "PATH_TO_MODULE" -} - -module.string "my_module" { - content = local.file.my_module.content - - arguments { - MODULE_ARGUMENT_NAME_1 = MODULE_ARGUMENT_VALUE_1 - MODULE_ARGUMENT_NAME_2 = MODULE_ARGUMENT_VALUE_2 - // ... - } -} -``` - -## Example module - -This example module manages a pipeline that filters out debug-level and info-level log lines. - -```river -// argument.write_to is a required argument that specifies where filtered -// log lines are sent. -// -// The value of the argument is retrieved in this file with -// argument.write_to.value. -argument "write_to" { - optional = false -} - -// loki.process.filter is our component which executes the filtering, passing -// filtered logs to argument.write_to.value. -loki.process "filter" { - // Drop all debug- and info-level logs. - stage.match { - selector = "{job!=\"\"} |~ \"level=(debug|info)\"" - action = "drop" - } - - // Send processed logs to our argument. - forward_to = argument.write_to.value -} - -// export.filter_input exports a value to the module consumer. -export "filter_input" { - // Expose the receiver of loki.process so the module consumer can send - // logs to our loki.process component. - value = loki.process.filter.receiver -} -``` - -You can save the module to a file and then use it as a processing step before writing logs to Loki. - -```river -loki.source.file "self" { - targets = LOG_TARGETS - - // Forward collected logs to the input of our filter. - forward_to = [module.file.log_filter.exports.filter_input] -} - -module.file "log_filter" { - filename = "/path/to/modules/log_filter.river" - - arguments { - // Configure the filter to forward filtered logs to loki.write below. - write_to = [loki.write.default.receiver], - } -} - -loki.write "default" { - endpoint { - url = "LOKI_URL" - } -} -``` - -[Module loader]: #module-loaders -[argument block]: ../../reference/config-blocks/argument/ -[export block]: ../../reference/config-blocks/export/ -[Component controller]: ../component_controller/ -[Components]: ../../reference/components/ -{{< /collapse >}} - [custom components]: ../custom_components/ [run]: ../../reference/cli/run/ [import.file]: ../../reference/config-blocks/import.file/ diff --git a/docs/sources/reference/components/module.file.md b/docs/sources/reference/components/module.file.md deleted file mode 100644 index 78dc24e8ad..0000000000 --- a/docs/sources/reference/components/module.file.md +++ /dev/null @@ -1,159 +0,0 @@ ---- -canonical: https://grafana.com/docs/alloy/latest/reference/components/module.file/ -description: Learn about module.file -labels: - stage: beta -title: module.file ---- - -# module.file - -{{< docs/shared lookup="stability/beta.md" source="alloy" version="" >}} - -`module.file` is a *module loader* component. A module loader is a {{< param "PRODUCT_NAME" >}} -component which retrieves a [module][] and runs the components defined inside of it. - -`module.file` simplifies the configurations for modules loaded from a file by embedding -a [local.file][] component. 
This allows a single module loader to do the equivalence of -using the more generic [module.string][] paired with a [local.file][] component. - -[module]: ../../../concepts/modules/ -[local.file]: ../local.file/ -[module.string]: ../module.string/ - -## Usage - -```river -module.file "LABEL" { - filename = FILENAME - - arguments { - MODULE_ARGUMENT_1 = VALUE_1 - MODULE_ARGUMENT_2 = VALUE_2 - ... - } -} -``` - -## Arguments - -The following arguments are supported: - -Name | Type | Description | Default | Required ------------------|------------|----------------------------------------------------|--------------|--------- -`filename` | `string` | Path of the file on disk to watch | | yes -`detector` | `string` | Which file change detector to use (fsnotify, poll) | `"fsnotify"` | no -`poll_frequency` | `duration` | How often to poll for file changes | `"1m"` | no -`is_secret` | `bool` | Marks the file as containing a [secret][] | `false` | no - -[secret]: ../../../concepts/config-language/expressions/types_and_values/#secrets - -{{< docs/shared lookup="reference/components/local-file-arguments-text.md" source="alloy" version="" >}} - -## Blocks - -The following blocks are supported inside the definition of `module.file`: - -Hierarchy | Block | Description | Required -----------|---------------|----------------------------------|--------- -arguments | [arguments][] | Arguments to pass to the module. | no - -[arguments]: #arguments-block - -### arguments block - -The `arguments` block specifies the list of values to pass to the loaded -module. - -The attributes provided in the `arguments` block are validated based on the -[argument blocks][] defined in the module source: - -* If a module source marks one of its arguments as required, it must be - provided as an attribute in the `arguments` block of the module loader. - -* Attributes in the `argument` block of the module loader will be rejected if - they are not defined in the module source. - -[argument blocks]: ../../config-blocks/argument/ - -## Exported fields - -The following fields are exported and can be referenced by other components: - -Name | Type | Description -----------|------------|---------------------------------- -`exports` | `map(any)` | The exports of the Module loader. - -`exports` exposes the `export` config block inside a module. It can be accessed -from the parent config via `module.file.LABEL.exports.EXPORT_LABEL`. - -Values in `exports` correspond to [export blocks][] defined in the module source. - -[export blocks]: ../../config-blocks/export/ - -## Component health - -`module.file` is reported as healthy if the most recent load of the module was -successful. - -If the module is not loaded successfully, the current health displays as -unhealthy and the health includes the error from loading the module. - -## Debug information - -`module.file` does not expose any component-specific debug information. - -## Debug metrics - -`module.file` does not expose any component-specific debug metrics. - -## Example - -In this example, we pass credentials from a parent config to a module which loads -a `prometheus.remote_write` component. The exports of the -`prometheus.remote_write` component are exposed to parent config, allowing -the parent config to pass metrics to it. 
- -Parent: - -```river -module.file "metrics" { - filename = "/path/to/prometheus_remote_write_module.river" - - arguments { - username = env("PROMETHEUS_USERNAME") - password = env("PROMETHEUS_PASSWORD") - } -} - -prometheus.exporter.unix "default" { } - -prometheus.scrape "local_agent" { - targets = prometheus.exporter.unix.default.targets - forward_to = [module.file.metrics.exports.prometheus_remote_write.receiver] - scrape_interval = "10s" -} -``` - -Module: - -```river -argument "username" { } - -argument "password" { } - -export "prometheus_remote_write" { - value = prometheus.remote_write.grafana_cloud -} - -prometheus.remote_write "grafana_cloud" { - endpoint { - url = "https://prometheus-us-central1.grafana.net/api/prom/push" - - basic_auth { - username = argument.username.value - password = argument.password.value - } - } -} -``` diff --git a/docs/sources/reference/components/module.git.md b/docs/sources/reference/components/module.git.md deleted file mode 100644 index a0a821a008..0000000000 --- a/docs/sources/reference/components/module.git.md +++ /dev/null @@ -1,212 +0,0 @@ ---- -canonical: https://grafana.com/docs/alloy/latest/reference/components/module.git/ -description: Learn about module.git -labels: - stage: beta -title: module.git ---- - -# module.git - -{{< docs/shared lookup="stability/beta.md" source="alloy" version="" >}} - -`module.git` is a *module loader* component. A module loader is a {{< param "PRODUCT_NAME" >}} -component which retrieves a [module][] and runs the components defined inside of it. - -`module.git` retrieves a module source from a file in a Git repository. - -[module]: ../../../concepts/modules/ - -## Usage - -```river -module.git "LABEL" { - repository = "GIT_REPOSTORY" - path = "PATH_TO_MODULE" - - arguments { - MODULE_ARGUMENT_1 = VALUE_1 - MODULE_ARGUMENT_2 = VALUE_2 - ... - } -} -``` - -## Arguments - -The following arguments are supported: - -Name | Type | Description | Default | Required ------------------|------------|---------------------------------------------------------|----------|--------- -`repository` | `string` | The Git repository address to retrieve the module from. | | yes -`revision` | `string` | The Git revision to retrieve the module from. | `"HEAD"` | no -`path` | `string` | The path in the repository where the module is stored. | | yes -`pull_frequency` | `duration` | The frequency to pull the repository for updates. | `"60s"` | no - -The `repository` attribute must be set to a repository address that would be -recognized by Git with a `git clone REPOSITORY_ADDRESS` command, such as -`htts://github.com/grafana/agent.git`. - -The `revision` attribute, when provided, must be set to a valid branch, tag, or -commit SHA within the repository. - -The `path` attribute must be set to a path which is accessible from the root of -the repository, such as `FILE_NAME.river` or `FOLDER_NAME/FILE_NAME.river`. - -If `pull_frequency` is not `"0s"`, the Git repository will be pulled for -updates at the frequency specified, causing the loaded module to update with -the retrieved changes. - -## Blocks - -The following blocks are supported inside the definition of `module.git`: - -Hierarchy | Block | Description | Required ------------|----------------|------------------------------------------------------|--------- -basic_auth | [basic_auth][] | Configure basic_auth for authenticating to the repo. | no -ssh_key | [ssh_key][] | Configure a SSH Key for authenticating to the repo. 
| no -arguments | [arguments][] | Arguments to pass to the module. | no - -[basic_auth]: #basic_auth-block -[ssh_key]: #ssh_key-block -[arguments]: #arguments-block - -### basic_auth block - -{{< docs/shared lookup="reference/components/basic-auth-block.md" source="alloy" version="" >}} - -### ssh_key block - -Name | Type | Description | Default | Required --------------|----------|-----------------------------------|---------|--------- -`username` | `string` | SSH username. | | yes -`key` | `secret` | SSH private key | | no -`key_file` | `string` | SSH private key path. | | no -`passphrase` | `secret` | Passphrase for SSH key if needed. | | no - -### arguments block - -The `arguments` block specifies the list of values to pass to the loaded -module. - -The attributes provided in the `arguments` block are validated based on the -[argument blocks][] defined in the module source: - -* If a module source marks one of its arguments as required, it must be - provided as an attribute in the `arguments` block of the module loader. - -* Attributes in the `argument` block of the module loader will be rejected if - they are not defined in the module source. - -[argument blocks]: ../../config-blocks/argument/ - -## Exported fields - -The following fields are exported and can be referenced by other components: - -Name | Type | Description -----------|------------|---------------------------------- -`exports` | `map(any)` | The exports of the Module loader. - -`exports` exposes the `export` config block inside a module. It can be accessed -from the parent config via `module.git.COMPONENT_LABEL.exports.EXPORT_LABEL`. - -Values in `exports` correspond to [export blocks][] defined in the module -source. - -[export blocks]: ../../config-blocks/export/ - -## Component health - -`module.git` is reported as healthy if the repository was cloned successfully -and most recent load of the module was successful. - -## Debug information - -`module.git` includes debug information for: - -* The full SHA of the currently checked out revision. -* The most recent error when trying to fetch the repository, if any. - -## Debug metrics - -`module.git` does not expose any component-specific debug metrics. 
- -## Examples - -This example uses a module loaded from a Git repository which adds two numbers: - -```river -module.git "add" { - repository = "https://github.com/rfratto/agent-modules.git" - revision = "main" - path = "add/module.river" - - arguments { - a = 15 - b = 45 - } -} -``` - -The same example as above using basic auth: -```river -module.git "add" { - repository = "https://github.com/rfratto/agent-modules.git" - revision = "main" - path = "add/module.river" - - basic_auth { - username = "USERNAME" - password = "PASSWORD" - } - - arguments { - a = 15 - b = 45 - } -} -``` - -Using SSH Key from another component: -```river -local.file "ssh_key" { - filename = "PATH/TO/SSH.KEY" - is_secret = true -} - -module.git "add" { - repository = "github.com:rfratto/agent-modules.git" - revision = "main" - path = "add/module.river" - - ssh_key { - username = "git" - key = local.file.ssh_key.content - } - - arguments { - a = 15 - b = 45 - } -} -``` - -The same example as above using SSH Key auth: -```river -module.git "add" { - repository = "github.com:rfratto/agent-modules.git" - revision = "main" - path = "add/module.river" - - ssh_key { - username = "git" - key_file = "PATH/TO/SSH.KEY" - } - - arguments { - a = 15 - b = 45 - } -} -``` diff --git a/docs/sources/reference/components/module.http.md b/docs/sources/reference/components/module.http.md deleted file mode 100644 index b0ccdf67b6..0000000000 --- a/docs/sources/reference/components/module.http.md +++ /dev/null @@ -1,167 +0,0 @@ ---- -canonical: https://grafana.com/docs/alloy/latest/reference/components/module.http/ -description: Learn about module.http -labels: - stage: beta -title: module.http ---- - -# module.http - -{{< docs/shared lookup="stability/beta.md" source="alloy" version="" >}} - -`module.http` is a [module loader][] component. - -`module.http` embeds a [remote.http][] component to retrieve the module from a remote -HTTP server. This allows you to use a single module loader, rather than a `remote.http` -component paired with a [module.string][] component. - -[module]: ../../../concepts/modules/ -[remote.http]: ../remote.http/ -[module.string]: ../module.string/ -[module loader]: ../../../concepts/modules/#module-loaders - -## Usage - -```river -module.http "LABEL" { - url = URL - - arguments { - MODULE_ARGUMENT_1 = VALUE_1 - ... - } -} -``` - -## Arguments - -The following arguments are supported: - -Name | Type | Description | Default | Required ------------------|---------------|--------------------------------------------------------------|---------|--------- -`url` | `string` | URL to poll. | | yes -`method` | `string` | Define HTTP method for the request | `"GET"` | no -`headers` | `map(string)` | Custom headers for the request. | `{}` | no -`poll_frequency` | `duration` | Frequency to poll the URL. | `"1m"` | no -`poll_timeout` | `duration` | Timeout when polling the URL. | `"10s"` | no -`is_secret` | `bool` | Whether the response body should be treated as a [secret][]. | false | no - -[secret]: ../../../concepts/config-language/expressions/types_and_values/#secrets - -## Blocks - -The following blocks are supported inside the definition of `module.http`: - -Hierarchy | Block | Description | Required -----------|---------------|----------------------------------|--------- -arguments | [arguments][] | Arguments to pass to the module. | no - -[arguments]: #arguments-block - -### arguments block - -The `arguments` block specifies the list of values to pass to the loaded -module. 
- -The attributes provided in the `arguments` block are validated based on the -[argument blocks][] defined in the module source: - -* If a module source marks one of its arguments as required, it must be - provided as an attribute in the `arguments` block of the module loader. - -* Attributes in the `argument` block of the module loader are rejected if - they are not defined in the module source. - -[argument blocks]: ../../config-blocks/argument/ - -## Exported fields - -The following fields are exported and can be referenced by other components: - -Name | Type | Description -----------|------------|---------------------------------- -`exports` | `map(any)` | The exports of the Module loader. - -`exports` exposes the `export` config block inside a module. It can be accessed -from the parent config via `module.http.LABEL.exports.EXPORT_LABEL`. - -Values in `exports` correspond to [export blocks][] defined in the module -source. - -[export blocks]: ../../config-blocks/export/ - -## Component health - -`module.http` is reported as healthy if the most recent load of the module was -successful. - -Before the first load of the module, the health is reported as `Unknown`. - -If the module is not loaded successfully, the current health displays as -unhealthy, and the health includes the error from loading the module. - -## Debug information - -`module.http` does not expose any component-specific debug information. - -## Debug metrics - -`module.http` does not expose any component-specific debug metrics. - -## Example - -In this example, the `module.http` component loads a module from a locally running -HTTP server, polling for changes once every minute. - -The module sets up a Redis exporter and exports the list of targets to the parent config to scrape -and remote write. - - -Parent: - -```river -module.http "remote_module" { - url = "http://localhost:8080/redis_module.yaml" - poll_frequency = "1m" -} - -prometheus.exporter.unix "default" { } - -prometheus.scrape "local_agent" { - targets = concat(prometheus.exporter.unix.default.targets, module.http.remote_module.exports.targets) - forward_to = [module.http.metrics.exports.prometheus_remote_write.receiver] - scrape_interval = "10s" -} - -prometheus.remote_write "default" { - endpoint { - url = PROMETHEUS_REMOTE_WRITE_URL - - basic_auth { - username = USERNAME - password = PASSWORD - } - } -} -``` -Replace the following: - - `PROMETHEUS_REMOTE_WRITE_URL`: The URL of the Prometheus remote_write-compatible server to send metrics to. - - `USERNAME`: The username to use for authentication to the remote_write API. - - `PASSWORD`: The password to use for authentication to the remote_write API. - -Module: - -```river -prometheus.exporter.redis "local_redis" { - redis_addr = REDIS_ADDR - redis_password_file = REDIS_PASSWORD_FILE -} - -export "redis_targets" { - value = prometheus.exporter.redis.local_redis.targets -} -``` -Replace the following: - - `REDIS_ADDR`: The address of your Redis instance. - - `REDIS_PASSWORD_FILE`: The path to a file containing the password for your Redis instance. 
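Note that the deleted `module.http` example never matched itself: the loader is labeled `remote_module` and the module exports `redis_targets`, but the parent config referenced `module.http.metrics` and `exports.targets`. A self-consistent sketch of the same parent pipeline (names are illustrative, not restored content):

```river
module.http "remote_module" {
  url            = "http://localhost:8080/redis_module.yaml"
  poll_frequency = "1m"
}

prometheus.exporter.unix "default" { }

prometheus.scrape "local_agent" {
  // Scrape the host exporter plus the Redis targets exported by the module.
  targets         = concat(prometheus.exporter.unix.default.targets, module.http.remote_module.exports.redis_targets)
  forward_to      = [prometheus.remote_write.default.receiver]
  scrape_interval = "10s"
}
```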
diff --git a/docs/sources/reference/components/module.string.md b/docs/sources/reference/components/module.string.md deleted file mode 100644 index ee4fbd2a8d..0000000000 --- a/docs/sources/reference/components/module.string.md +++ /dev/null @@ -1,158 +0,0 @@ ---- -canonical: https://grafana.com/docs/alloy/latest/reference/components/module.string/ -description: Learn about module.string -labels: - stage: beta -title: module.string ---- - -# module.string - -{{< docs/shared lookup="stability/beta.md" source="alloy" version="" >}} - -`module.string` is a *module loader* component. A module loader is a {{< param "PRODUCT_NAME" >}} -component which retrieves a [module][] and runs the components defined inside of it. - -[module]: ../../../concepts/modules/ - -## Usage - -```river -module.string "LABEL" { - content = CONTENT - - arguments { - MODULE_ARGUMENT_1 = VALUE_1 - MODULE_ARGUMENT_2 = VALUE_2 - ... - } -} -``` - -## Arguments - -The following arguments are supported: - -Name | Type | Description | Default | Required -----------|----------------------|-----------------------------------------------------------|---------|--------- -`content` | `secret` or `string` | The contents of the module to load as a secret or string. | | yes - -`content` is a string that contains the configuration of the module to load. -`content` is typically loaded by using the exports of another component. For example, - -- `local.file.LABEL.content` -- `remote.http.LABEL.content` -- `remote.s3.LABEL.content` - -## Blocks - -The following blocks are supported inside the definition of `module.string`: - -Hierarchy | Block | Description | Required -----------|---------------|----------------------------------|--------- -arguments | [arguments][] | Arguments to pass to the module. | no - -[arguments]: #arguments-block - -### arguments block - -The `arguments` block specifies the list of values to pass to the loaded -module. - -The attributes provided in the `arguments` block are validated based on the -[argument blocks][] defined in the module source: - -* If a module source marks one of its arguments as required, it must be - provided as an attribute in the `arguments` block of the module loader. - -* Attributes in the `argument` block of the module loader will be rejected if - they are not defined in the module source. - -[argument blocks]: ../../config-blocks/argument/ - -## Exported fields - -The following fields are exported and can be referenced by other components: - -Name | Type | Description -----------|------------|---------------------------------- -`exports` | `map(any)` | The exports of the Module loader. - -`exports` exposes the `export` config block inside a module. It can be accessed -from the parent config via `module.string.LABEL.exports.EXPORT_LABEL`. - -Values in `exports` correspond to [export blocks][] defined in the module -source. - -[export blocks]: ../../config-blocks/export/ - -## Component health - -`module.string` is reported as healthy if the most recent load of the module was -successful. - -If the module is not loaded successfully, the current health displays as -unhealthy and the health includes the error from loading the module. - -## Debug information - -`module.string` does not expose any component-specific debug information. - -## Debug metrics - -`module.string` does not expose any component-specific debug metrics. - -## Example - -In this example, we pass credentials from a parent config to a module which loads -a `prometheus.remote_write` component. 
The exports of the -`prometheus.remote_write` component are exposed to parent config, allowing -the parent config to pass metrics to it. - -Parent: - -```river -local.file "metrics" { - filename = "/path/to/prometheus_remote_write_module.river" -} - -module.string "metrics" { - content = local.file.metrics.content - - arguments { - username = env("PROMETHEUS_USERNAME") - password = env("PROMETHEUS_PASSWORD") - } -} - -prometheus.exporter.unix "default" { } - -prometheus.scrape "local_agent" { - targets = prometheus.exporter.unix.default.targets - forward_to = [module.string.metrics.exports.prometheus_remote_write.receiver] - scrape_interval = "10s" -} -``` - -Module: - -```river -argument "username" { } - -argument "password" { } - -export "prometheus_remote_write" { - value = prometheus.remote_write.grafana_cloud -} - -prometheus.remote_write "grafana_cloud" { - endpoint { - url = "https://prometheus-us-central1.grafana.net/api/prom/push" - - basic_auth { - username = argument.username.value - password = argument.password.value - } - } -} -``` diff --git a/docs/sources/reference/config-blocks/argument.md b/docs/sources/reference/config-blocks/argument.md index ff265c9e31..56676ee6f7 100644 --- a/docs/sources/reference/config-blocks/argument.md +++ b/docs/sources/reference/config-blocks/argument.md @@ -14,13 +14,6 @@ title: argument block The `argument` block may only be specified inside the definition of [a `declare` block][declare]. -{{< admonition type="note" >}} -In [classic modules][], the `argument` block is valid as a top-level block in a classic module. -Classic modules are deprecated and scheduled to be removed in the release after v0.40. - -[classic modules]: ../../../concepts/modules/#classic-modules-deprecated -{{< /admonition >}} - ## Example ```river diff --git a/docs/sources/reference/config-blocks/export.md b/docs/sources/reference/config-blocks/export.md index 4b28a6497d..72d365be27 100644 --- a/docs/sources/reference/config-blocks/export.md +++ b/docs/sources/reference/config-blocks/export.md @@ -14,13 +14,6 @@ title: export block The `export` block may only be specified inside the definition of [a `declare` block][declare]. -{{< admonition type="note" >}} -In [classic modules][], the `export` block is valid as a top-level block in a classic module. -Classic modules are deprecated and scheduled to be removed in the release after v0.40. 
- -[classic modules]: ../../../concepts/modules/#classic-modules-deprecated -{{< /admonition >}} - ## Example ```river diff --git a/internal/component/all/all.go b/internal/component/all/all.go index f6911eb95b..7cc147ea80 100644 --- a/internal/component/all/all.go +++ b/internal/component/all/all.go @@ -57,10 +57,6 @@ import ( _ "github.com/grafana/agent/internal/component/loki/source/windowsevent" // Import loki.source.windowsevent _ "github.com/grafana/agent/internal/component/loki/write" // Import loki.write _ "github.com/grafana/agent/internal/component/mimir/rules/kubernetes" // Import mimir.rules.kubernetes - _ "github.com/grafana/agent/internal/component/module/file" // Import module.file - _ "github.com/grafana/agent/internal/component/module/git" // Import module.git - _ "github.com/grafana/agent/internal/component/module/http" // Import module.http - _ "github.com/grafana/agent/internal/component/module/string" // Import module.string _ "github.com/grafana/agent/internal/component/otelcol/auth/basic" // Import otelcol.auth.basic _ "github.com/grafana/agent/internal/component/otelcol/auth/bearer" // Import otelcol.auth.bearer _ "github.com/grafana/agent/internal/component/otelcol/auth/headers" // Import otelcol.auth.headers diff --git a/internal/flow/componenttest/testfailmodule.go b/internal/flow/componenttest/testfailmodule.go index 297579880b..b5f4de1757 100644 --- a/internal/flow/componenttest/testfailmodule.go +++ b/internal/flow/componenttest/testfailmodule.go @@ -5,8 +5,8 @@ import ( "fmt" "github.com/grafana/agent/internal/component" - mod "github.com/grafana/agent/internal/component/module" "github.com/grafana/agent/internal/featuregate" + mod "github.com/grafana/agent/internal/flow/internal/testcomponents/module" ) func init() { diff --git a/internal/flow/import_test.go b/internal/flow/import_test.go index 65642605f4..e70e498af2 100644 --- a/internal/flow/import_test.go +++ b/internal/flow/import_test.go @@ -18,7 +18,7 @@ import ( "github.com/stretchr/testify/require" "golang.org/x/tools/txtar" - _ "github.com/grafana/agent/internal/component/module/string" + _ "github.com/grafana/agent/internal/flow/internal/testcomponents/module/string" ) // The tests are using the .txtar files stored in the testdata folder. 
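The relocated test components mirror what end users now do with the import mechanism linked from the retained modules.md. A minimal sketch of consuming the `log_filter` custom component above through `import.file` — the namespace label and path are hypothetical:

```river
import.file "helpers" {
  // File containing the declare "log_filter" block.
  filename = "/path/to/modules.river"
}

helpers.log_filter "default" {
  // Arguments of the declare block become attributes here.
  write_to = [loki.write.default.receiver]
}

loki.write "default" {
  endpoint {
    url = "LOKI_URL"
  }
}
```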
diff --git a/internal/component/module/file/file.go b/internal/flow/internal/testcomponents/module/file/file.go similarity index 98% rename from internal/component/module/file/file.go rename to internal/flow/internal/testcomponents/module/file/file.go index 7c0ec3dd11..89e6f23f7d 100644 --- a/internal/component/module/file/file.go +++ b/internal/flow/internal/testcomponents/module/file/file.go @@ -8,8 +8,8 @@ import ( "github.com/grafana/agent/internal/component" "github.com/grafana/agent/internal/component/local/file" - "github.com/grafana/agent/internal/component/module" "github.com/grafana/agent/internal/featuregate" + "github.com/grafana/agent/internal/flow/internal/testcomponents/module" "github.com/grafana/river/rivertypes" ) diff --git a/internal/component/module/git/git.go b/internal/flow/internal/testcomponents/module/git/git.go similarity index 98% rename from internal/component/module/git/git.go rename to internal/flow/internal/testcomponents/module/git/git.go index b1a363b4da..2a57a21278 100644 --- a/internal/component/module/git/git.go +++ b/internal/flow/internal/testcomponents/module/git/git.go @@ -11,8 +11,8 @@ import ( "github.com/go-kit/log" "github.com/grafana/agent/internal/component" - "github.com/grafana/agent/internal/component/module" "github.com/grafana/agent/internal/featuregate" + "github.com/grafana/agent/internal/flow/internal/testcomponents/module" "github.com/grafana/agent/internal/flow/logging/level" "github.com/grafana/agent/internal/vcs" ) diff --git a/internal/component/module/http/http.go b/internal/flow/internal/testcomponents/module/http/http.go similarity index 98% rename from internal/component/module/http/http.go rename to internal/flow/internal/testcomponents/module/http/http.go index af33860f60..04ca9043f8 100644 --- a/internal/component/module/http/http.go +++ b/internal/flow/internal/testcomponents/module/http/http.go @@ -7,9 +7,9 @@ import ( "go.uber.org/atomic" "github.com/grafana/agent/internal/component" - "github.com/grafana/agent/internal/component/module" remote_http "github.com/grafana/agent/internal/component/remote/http" "github.com/grafana/agent/internal/featuregate" + "github.com/grafana/agent/internal/flow/internal/testcomponents/module" "github.com/grafana/river/rivertypes" ) diff --git a/internal/component/module/module.go b/internal/flow/internal/testcomponents/module/module.go similarity index 100% rename from internal/component/module/module.go rename to internal/flow/internal/testcomponents/module/module.go diff --git a/internal/component/module/string/string.go b/internal/flow/internal/testcomponents/module/string/string.go similarity index 96% rename from internal/component/module/string/string.go rename to internal/flow/internal/testcomponents/module/string/string.go index e631c2646b..6a51c5cedb 100644 --- a/internal/component/module/string/string.go +++ b/internal/flow/internal/testcomponents/module/string/string.go @@ -4,8 +4,8 @@ import ( "context" "github.com/grafana/agent/internal/component" - "github.com/grafana/agent/internal/component/module" "github.com/grafana/agent/internal/featuregate" + "github.com/grafana/agent/internal/flow/internal/testcomponents/module" "github.com/grafana/river/rivertypes" ) diff --git a/internal/flow/module_eval_test.go b/internal/flow/module_eval_test.go index 94b811b423..4447fa530f 100644 --- a/internal/flow/module_eval_test.go +++ b/internal/flow/module_eval_test.go @@ -24,7 +24,7 @@ import ( "github.com/stretchr/testify/require" "go.uber.org/goleak" - _ 
"github.com/grafana/agent/internal/component/module/string" + _ "github.com/grafana/agent/internal/flow/internal/testcomponents/module/string" ) func TestUpdates_EmptyModule(t *testing.T) { From a412cae632f8b4df33a8b7c45c062794c6fb53b5 Mon Sep 17 00:00:00 2001 From: Clayton Cornell <131809008+clayton-cornell@users.noreply.github.com> Date: Fri, 1 Mar 2024 11:43:32 -0800 Subject: [PATCH 010/136] Update Alloy docs and fix Makefile (#22) * Some minor tweaks and updates to remove flow and agent refs * Fix Makefile so local doc webserver will build --- docs/Makefile | 3 +-- docs/sources/about.md | 2 +- docs/sources/reference/cli/convert.md | 2 +- .../components/discovery.lightsail.md | 5 ---- .../reference/components/loki.process.md | 5 ---- .../otelcol.processor.tail_sampling.md | 5 ---- .../components/prometheus.remote_write.md | 23 +++++++------------ docs/sources/shared/deploy-alloy.md | 22 +++++++++--------- docs/sources/tasks/migrate/from-operator.md | 2 +- 9 files changed, 23 insertions(+), 46 deletions(-) diff --git a/docs/Makefile b/docs/Makefile index ea9ddf0df3..e233b21be6 100644 --- a/docs/Makefile +++ b/docs/Makefile @@ -11,8 +11,7 @@ include docs.mk docs: check-cloudwatch-integration check-cloudwatch-integration: - $(PODMAN) run -v $(shell git rev-parse --show-toplevel):/repo -v $(shell pwd):/docs -w /repo golang:1.21-bullseye go run internal/static/integrations/cloudwatch_exporter/docs/doc.go check /docs/sources/static/configuration/integrations/cloudwatch-exporter-config.md - $(PODMAN) run -v $(shell git rev-parse --show-toplevel):/repo -v $(shell pwd):/docs -w /repo golang:1.21-bullseye go run internal/static/integrations/cloudwatch_exporter/docs/doc.go check /docs/sources/flow/reference/components/prometheus.exporter.cloudwatch.md + $(PODMAN) run -v $(shell git rev-parse --show-toplevel):/repo -v $(shell pwd):/docs -w /repo golang:1.21-bullseye go run internal/static/integrations/cloudwatch_exporter/docs/doc.go check /docs/sources/reference/components/prometheus.exporter.cloudwatch.md generate-cloudwatch-integration: $(PODMAN) run -v $(shell git rev-parse --show-toplevel):/repo -v $(shell pwd):/docs -w /repo golang:1.21-bullseye go run internal/static/integrations/cloudwatch_exporter/docs/doc.go generate diff --git a/docs/sources/about.md b/docs/sources/about.md index a2df0de45c..a2dcb00b71 100644 --- a/docs/sources/about.md +++ b/docs/sources/about.md @@ -72,7 +72,7 @@ prometheus.remote_write "default" { ## {{% param "PRODUCT_NAME" %}} configuration generator -The {{< param "PRODUCT_NAME" >}} [configuration generator][] helps you get a head start on creating flow code. +The {{< param "PRODUCT_NAME" >}} [configuration generator][] helps you get a head start on creating {{< param "PRODUCT_NAME" >}} configurations. {{< admonition type="note" >}} This feature is experimental, and it doesn't support all River components. diff --git a/docs/sources/reference/cli/convert.md b/docs/sources/reference/cli/convert.md index 1a8ccfc7b2..d736fb7186 100644 --- a/docs/sources/reference/cli/convert.md +++ b/docs/sources/reference/cli/convert.md @@ -70,7 +70,7 @@ Using the `--source-format=promtail` will convert the source configuration from Nearly all [Promtail features][] are supported and can be converted to {{< param "PRODUCT_NAME" >}} configuration. -If you have unsupported features in a source configuration, you will receive [errors][] when you convert to a flow configuration. 
+If you have unsupported features in a source configuration, you will receive [errors][] when you convert to a {{< param "PRODUCT_NAME" >}} configuration. The converter will also raise warnings for configuration options that may require your attention. Refer to [Migrate from Promtail to {{< param "PRODUCT_NAME" >}}][migrate promtail] for a detailed migration guide. diff --git a/docs/sources/reference/components/discovery.lightsail.md b/docs/sources/reference/components/discovery.lightsail.md index c6f959ff54..fabb8a3825 100644 --- a/docs/sources/reference/components/discovery.lightsail.md +++ b/docs/sources/reference/components/discovery.lightsail.md @@ -1,9 +1,4 @@ --- -aliases: -- /docs/grafana-cloud/agent/flow/reference/components/discovery.lightsail/ -- /docs/grafana-cloud/monitor-infrastructure/agent/flow/reference/components/discovery.lightsail/ -- /docs/grafana-cloud/monitor-infrastructure/integrations/agent/flow/reference/components/discovery.lightsail/ -- /docs/grafana-cloud/send-data/agent/flow/reference/components/discovery.lightsail/ canonical: https://grafana.com/docs/alloy/latest/reference/components/discovery.lightsail/ description: Learn about discovery.lightsail title: discovery.lightsail diff --git a/docs/sources/reference/components/loki.process.md b/docs/sources/reference/components/loki.process.md index 93f3455d0b..d432afd185 100644 --- a/docs/sources/reference/components/loki.process.md +++ b/docs/sources/reference/components/loki.process.md @@ -1,9 +1,4 @@ --- -aliases: -- /docs/grafana-cloud/agent/flow/reference/components/loki.process/ -- /docs/grafana-cloud/monitor-infrastructure/agent/flow/reference/components/loki.process/ -- /docs/grafana-cloud/monitor-infrastructure/integrations/agent/flow/reference/components/loki.process/ -- /docs/grafana-cloud/send-data/agent/flow/reference/components/loki.process/ canonical: https://grafana.com/docs/alloy/latest/reference/components/loki.process/ description: Learn about loki.process title: loki.process diff --git a/docs/sources/reference/components/otelcol.processor.tail_sampling.md b/docs/sources/reference/components/otelcol.processor.tail_sampling.md index c27fae1098..e301d11816 100644 --- a/docs/sources/reference/components/otelcol.processor.tail_sampling.md +++ b/docs/sources/reference/components/otelcol.processor.tail_sampling.md @@ -1,9 +1,4 @@ --- -aliases: -- /docs/grafana-cloud/agent/flow/reference/components/otelcol.processor.tail_sampling/ -- /docs/grafana-cloud/monitor-infrastructure/agent/flow/reference/components/otelcol.processor.tail_sampling/ -- /docs/grafana-cloud/monitor-infrastructure/integrations/agent/flow/reference/components/otelcol.processor.tail_sampling/ -- /docs/grafana-cloud/send-data/agent/flow/reference/components/otelcol.processor.tail_sampling/ canonical: https://grafana.com/docs/alloy/latest/reference/components/otelcol.processor.tail_sampling/ description: Learn about otelcol.processor.tail_sampling labels: diff --git a/docs/sources/reference/components/prometheus.remote_write.md b/docs/sources/reference/components/prometheus.remote_write.md index 480e6b7120..3084aae1d1 100644 --- a/docs/sources/reference/components/prometheus.remote_write.md +++ b/docs/sources/reference/components/prometheus.remote_write.md @@ -433,8 +433,7 @@ retention directly to the data age itself, as the truncation logic works on _segments_, not the samples themselves. This makes data retention less predictable when the component receives a non-consistent rate of data. 
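The retention behavior described here is tuned on the `wal` block of `prometheus.remote_write`. A short sketch showing the knobs the surrounding paragraphs describe — the values are illustrative, not recommendations:

```river
prometheus.remote_write "default" {
  endpoint {
    url = "https://example.com/api/prom/push"
  }

  wal {
    // How often the WAL is checked for segments to truncate.
    truncate_frequency = "2h"
    // Bounds on how long samples survive across truncations.
    min_keepalive_time = "5m"
    max_keepalive_time = "8h"
  }
}
```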
-The [WAL block][] in Flow mode, or the [metrics config][] in Static mode -contain some configurable parameters that can be used to control the tradeoff +The [WAL block][] contains some configurable parameters that can be used to control the tradeoff between memory usage, disk usage, and data retention. The `truncate_frequency` or `wal_truncate_frequency` parameter configures the @@ -496,18 +495,14 @@ To delete the corrupted WAL: 1. Find and delete the contents of the `wal` directory. By default the `wal` directory is a subdirectory - of the `data-agent` directory located in the Grafana Agent working directory. The WAL data directory - may be different than the default depending on the [wal_directory][] setting in your Static configuration - file or the path specified by the Flow [command line flag][run] `--storage-path`. + of the `data-agent` directory located in the {{< param "PRODUCT_NAME" >}} working directory. The WAL data directory + may be different than the default depending on the path specified by the [command line flag][run] `--storage-path`. {{< admonition type="note" >}} - There is one `wal` directory per: - - * Metrics instance running in Static mode - * `prometheus.remote_write` component running in Flow mode + There is one `wal` directory per `prometheus.remote_write` component. {{< /admonition >}} -1. [Start][Stop] Grafana Agent and verify that the WAL is working correctly. +1. [Start][Stop] {{< param "PRODUCT_NAME" >}} and verify that the WAL is working correctly. @@ -525,8 +520,6 @@ Refer to the linked documentation for more details. [snappy]: https://en.wikipedia.org/wiki/Snappy_(compression) -[WAL block]: /docs/agent//flow/reference/components/prometheus.remote_write#wal-block -[metrics config]: /docs/agent//static/configuration/metrics-config -[Stop]: /docs/agent//flow/get-started/start-agent -[wal_directory]: /docs/agent//static/configuration/metrics-config -[run]: /docs/agent//flow/reference/cli/run +[WAL block]: #wal-block +[Stop]: ../../../get-started/start-agent/ +[run]: ../../../reference/cli/run/ diff --git a/docs/sources/shared/deploy-alloy.md b/docs/sources/shared/deploy-alloy.md index 6c86f737ba..0eafc10152 100644 --- a/docs/sources/shared/deploy-alloy.md +++ b/docs/sources/shared/deploy-alloy.md @@ -15,12 +15,12 @@ This page lists common topologies used for deployments of {{% param "PRODUCT_NAM ## As a centralized collection service Deploying {{< param "PRODUCT_NAME" >}} as a centralized service is recommended for collecting application telemetry. -This topology allows you to use a smaller number of agents to coordinate service discovery, collection, and remote writing. +This topology allows you to use a smaller number of collectors to coordinate service discovery, collection, and remote writing. ![centralized-collection](/media/docs/agent/agent-topologies/centralized-collection.png) -Using this topology requires deploying the Agent on separate infrastructure, and making sure that agents can discover and reach these applications over the network. -The main predictor for the size of the agent is the number of active metrics series it is scraping; a rule of thumb is approximately 10 KB of memory for each series. +Using this topology requires deploying {{< param "PRODUCT_NAME" >}} on separate infrastructure, and making sure that they can discover and reach these applications over the network. +The main predictor for the size of {{< param "PRODUCT_NAME" >}} is the number of active metrics series it's scraping. 
A rule of thumb is approximately 10 KB of memory for each series. We recommend you start looking towards horizontal scaling around the 1 million active series mark. ### Using Kubernetes StatefulSets @@ -57,7 +57,7 @@ Deploying one {{< param "PRODUCT_NAME" >}} per machine is required for collectin Each {{< param "PRODUCT_NAME" >}} requires you to open an outgoing connection for each remote endpoint it’s shipping data to. This can lead to NAT port exhaustion on the egress infrastructure. Each egress IP can support up to (65535 - 1024 = 64511) outgoing connections on different ports. -So, if all {{< param "PRODUCT_NAME" >}}s are shipping metrics and log data, an egress IP can support up to 32,255 agents. +So, if all {{< param "PRODUCT_NAME" >}}s are shipping metrics and log data, an egress IP can support up to 32,255 collectors. ### Using Kubernetes DaemonSets @@ -66,13 +66,13 @@ The simplest use case of the host daemon topology is a Kubernetes DaemonSet, and ### Pros * Doesn’t require running on separate infrastructure -* Typically leads to smaller-sized agents +* Typically leads to smaller-sized collectors * Lower network latency to instrumented applications ### Cons -* Requires planning a process for provisioning Grafana Agent on new machines, as well as keeping configuration up to date to avoid configuration drift -* Not possible to scale agents independently when using Kubernetes DaemonSets +* Requires planning a process for provisioning {{< param "PRODUCT_NAME" >}} on new machines, as well as keeping configuration up to date to avoid configuration drift +* Not possible to scale independently when using Kubernetes DaemonSets * Scaling the topology can strain external APIs (like service discovery) and network infrastructure (like firewalls, proxy servers, and egress points) ### Use for @@ -81,19 +81,19 @@ The simplest use case of the host daemon topology is a Kubernetes DaemonSet, and ### Don’t use for -* Scenarios where Grafana Agent grows so large it can become a noisy neighbor +* Scenarios where {{< param "PRODUCT_NAME" >}} grows so large it can become a noisy neighbor * Collecting an unpredictable amount of telemetry ## As a container sidecar -Deploying {{< param "PRODUCT_NAME" >}} as a container sidecar is only recommended for short-lived applications or specialized agent deployments. +Deploying {{< param "PRODUCT_NAME" >}} as a container sidecar is only recommended for short-lived applications or specialized {{< param "PRODUCT_NAME" >}} deployments. ![daemonset](/media/docs/agent/agent-topologies/sidecar.png) ### Using Kubernetes Pod sidecars In a Kubernetes environment, the sidecar model consists of deploying {{< param "PRODUCT_NAME" >}} as an extra container on the Pod. -The Pod’s controller, network configuration, enabled capabilities, and available resources are shared between the actual application and the sidecar agent. +The Pod’s controller, network configuration, enabled capabilities, and available resources are shared between the actual application and the sidecar {{< param "PRODUCT_NAME" >}}. 
### Pros @@ -115,7 +115,7 @@ The Pod’s controller, network configuration, enabled capabilities, and availab ### Don’t use for * Long-lived applications -* Scenarios where the agent size grows so large it can become a noisy neighbor +* Scenarios where the {{< param "PRODUCT_NAME" >}} size grows so large it can become a noisy neighbor [hashmod sharding]: https://grafana.com/docs/agent/latest/static/operation-guide/ diff --git a/docs/sources/tasks/migrate/from-operator.md b/docs/sources/tasks/migrate/from-operator.md index 58c62f792e..f266e1ce8d 100644 --- a/docs/sources/tasks/migrate/from-operator.md +++ b/docs/sources/tasks/migrate/from-operator.md @@ -11,7 +11,7 @@ weight: 320 With the release of {{< param "PRODUCT_NAME" >}}, Grafana Agent Operator is no longer the recommended way to deploy {{< param "PRODUCT_ROOT_NAME" >}} in Kubernetes. Some of the Operator functionality has moved into {{< param "PRODUCT_NAME" >}} itself, and the Helm Chart has replaced the remaining functionality. -- The Monitor types (`PodMonitor`, `ServiceMonitor`, `Probe`, and `LogsInstance`) are all supported natively by {{< param "PRODUCT_NAME" >}}. +- The Monitor types (`PodMonitor`, `ServiceMonitor`, `Probe`, and `PodLogs`) are all supported natively by {{< param "PRODUCT_NAME" >}}. You are no longer required to use the Operator to consume those CRDs for dynamic monitoring in your cluster. - The parts of the Operator that deploy the {{< param "PRODUCT_ROOT_NAME" >}} itself (`GrafanaAgent`, `MetricsInstance`, and `LogsInstance` CRDs) are deprecated. Operator users should use the {{< param "PRODUCT_ROOT_NAME" >}} [Helm Chart][] to deploy {{< param "PRODUCT_ROOT_NAME" >}} directly to your clusters. From d751454329362fc4571a446fa620329a407cbc23 Mon Sep 17 00:00:00 2001 From: Clayton Cornell <131809008+clayton-cornell@users.noreply.github.com> Date: Mon, 4 Mar 2024 08:04:04 -0800 Subject: [PATCH 011/136] Update readme to use Alloy in place of Agent (#23) --- README.md | 46 +++++++++++++++++++++------------------------- 1 file changed, 21 insertions(+), 25 deletions(-) diff --git a/README.md b/README.md index 74b29aa2fa..7ad9d74de7 100644 --- a/README.md +++ b/README.md @@ -1,25 +1,22 @@ -
-<p align="center"><img … alt="Grafana Agent logo" …></p>
+<p align="center"><img … alt="Grafana Alloy logo" …></p>
-Grafana Agent is a vendor-neutral, batteries-included telemetry collector with +[Grafana Alloy][] is a vendor-neutral, batteries-included telemetry collector with configuration inspired by [Terraform][]. It is designed to be flexible, performant, and compatible with multiple ecosystems such as Prometheus and OpenTelemetry. -Grafana Agent is based around **components**. Components are wired together to +Grafana Alloy is based around **components**. Components are wired together to form programmable observability **pipelines** for telemetry collection, processing, and delivery. -> **NOTE**: This page focuses mainly on "[Flow mode][Grafana Agent Flow]," the -> Terraform-inspired revision of Grafana Agent. - -Grafana Agent can collect, transform, and send data to: +Grafana Alloy can collect, transform, and send data to: * The [Prometheus][] ecosystem * The [OpenTelemetry][] ecosystem * The Grafana open source ecosystem ([Loki][], [Grafana][], [Tempo][], [Mimir][], [Pyroscope][]) [Terraform]: https://terraform.io -[Grafana Agent Flow]: https://grafana.com/docs/agent/latest/flow/ +[Grafana Alloy]: https://grafana.com/docs/alloy/latest/ [Prometheus]: https://prometheus.io [OpenTelemetry]: https://opentelemetry.io [Loki]: https://github.com/grafana/loki @@ -28,7 +25,7 @@ Grafana Agent can collect, transform, and send data to: [Mimir]: https://github.com/grafana/mimir [Pyroscope]: https://github.com/grafana/pyroscope -## Why use Grafana Agent? +## Why use Grafana Alloy? * **Vendor-neutral**: Fully compatible with the Prometheus, OpenTelemetry, and Grafana open source ecosystems. @@ -36,29 +33,28 @@ Grafana Agent can collect, transform, and send data to: continuous profiles. * **Scalable**: Deploy on any number of machines to collect millions of active series and terabytes of logs. -* **Battle-tested**: Grafana Agent extends the existing battle-tested code from +* **Battle-tested**: Grafana Alloy extends the existing battle-tested code from the Prometheus and OpenTelemetry Collector projects. * **Powerful**: Write programmable pipelines with ease, and debug them using a [built-in UI][UI]. * **Batteries included**: Integrate with systems like MySQL, Kubernetes, and Apache to get telemetry that's immediately useful. 
-[UI]: https://grafana.com/docs/agent/latest/flow/monitoring/debugging/#grafana-agent-flow-ui +[UI]: https://grafana.com/docs/alloy/latest/tasks/debug/#grafana-alloy-ui ## Getting started Check out our [documentation][] to see: -* [Installation instructions][] for Grafana Agent Flow -* Details about [Grafana Agent Flow][] -* Steps for [Getting started][] with Grafana Agent Flow -* The list of Grafana Agent Flow [Components][] +* [Installation instructions][] for Grafana Alloy +* Details about [Grafana Alloy][documentation] +* Steps for [Getting started][] with Grafana Alloy +* The list of Grafana Alloy [Components][] -[documentation]: https://grafana.com/docs/agent/latest/ -[Installation instructions]: https://grafana.com/docs/agent/latest/flow/setup/install/ -[Grafana Agent Flow]: https://grafana.com/docs/agent/latest/flow/ -[Getting started]: https://grafana.com/docs/agent/latest/flow/getting_started/ -[Components]: https://grafana.com/docs/agent/latest/flow/reference/components/ +[documentation]: https://grafana.com/docs/alloy/ +[Installation instructions]: https://grafana.com/docs/alloy/latest/setup/install/ +[Getting started]: https://grafana.com/docs/alloy/latest/getting_started/ +[Components]: https://grafana.com/docs/alloy/latest/reference/components/ ## Example @@ -94,7 +90,7 @@ prometheus.remote_write "default" { ``` We maintain an example [Docker Compose environment][] that can be used to -launch dependencies to play with Grafana Agent locally. +launch dependencies to play with Grafana Alloy locally. [Docker Compose environment]: ./example/docker-compose/ @@ -110,17 +106,17 @@ Patch and security releases may be created at any time. ## Community -To engage with the Grafana Agent community: +To engage with the Grafana Alloy community: * Chat with us on our community Slack channel. To invite yourself to the - Grafana Slack, visit and join the `#agent` + Grafana Slack, visit and join the `#alloy` channel. * Ask questions on the [Discussions page][]. * [File an issue][] for bugs, issues, and feature suggestions. * Attend the monthly [community call][]. 
-[Discussions page]: https://github.com/grafana/agent/discussions -[File an issue]: https://github.com/grafana/agent/issues/new +[Discussions page]: https://github.com/grafana/alloy/discussions +[File an issue]: https://github.com/grafana/alloy/issues/new [community call]: https://docs.google.com/document/d/1TqaZD1JPfNadZ4V81OCBPCG_TksDYGlNlGdMnTWUSpo ## Contribute From 259e1bb074c1246f2c2eedb6a13e872eaad27fe1 Mon Sep 17 00:00:00 2001 From: Robert Fratto Date: Tue, 12 Mar 2024 08:43:12 -0400 Subject: [PATCH 012/136] helm: update grafana-agent Helm chart to deploy Grafana Alloy --- .../apis/monitoring/v1alpha2/types.go | 4 +- operations/helm/README.md | 2 +- .../helm/charts/grafana-agent/CHANGELOG.md | 456 +----------------- .../helm/charts/grafana-agent/Chart.yaml | 2 +- .../helm/charts/grafana-agent/README.md | 182 ++++--- .../charts/grafana-agent/README.md.gotmpl | 107 ++-- .../crds/monitoring.grafana.com_podlogs.yaml | 3 +- .../grafana-agent/ci/clustering-values.yaml | 1 - .../grafana-agent/ci/static-mode-values.yaml | 2 - .../charts/grafana-agent/config/example.yaml | 3 - .../charts/grafana-agent/templates/NOTES.txt | 2 +- .../grafana-agent/templates/_config.tpl | 20 +- .../grafana-agent/templates/_helpers.tpl | 46 +- .../templates/cluster_service.yaml | 15 +- .../grafana-agent/templates/configmap.yaml | 19 +- .../templates/containers/_agent.yaml | 57 +-- .../templates/containers/_watch.yaml | 9 +- .../templates/controllers/_pod.yaml | 19 +- .../templates/controllers/daemonset.yaml | 8 +- .../templates/controllers/deployment.yaml | 8 +- .../templates/controllers/statefulset.yaml | 10 +- .../charts/grafana-agent/templates/hpa.yaml | 19 +- .../grafana-agent/templates/ingress.yaml | 14 +- .../charts/grafana-agent/templates/rbac.yaml | 12 +- .../grafana-agent/templates/service.yaml | 13 +- .../templates/serviceaccount.yaml | 4 +- .../templates/servicemonitor.yaml | 9 +- .../helm/charts/grafana-agent/values.yaml | 64 ++- .../templates/controllers/daemonset.yaml | 18 +- .../templates/controllers/statefulset.yaml | 18 +- .../templates/controllers/daemonset.yaml | 18 +- .../templates/controllers/daemonset.yaml | 18 +- .../templates/controllers/daemonset.yaml | 18 +- .../templates/controllers/deployment.yaml | 18 +- .../templates/controllers/deployment.yaml | 18 +- .../templates/controllers/statefulset.yaml | 18 +- .../templates/controllers/statefulset.yaml | 18 +- .../templates/controllers/daemonset.yaml | 18 +- .../templates/controllers/daemonset.yaml | 18 +- .../templates/controllers/daemonset.yaml | 18 +- .../templates/controllers/daemonset.yaml | 18 +- .../templates/controllers/daemonset.yaml | 18 +- .../templates/controllers/daemonset.yaml | 18 +- .../templates/controllers/daemonset.yaml | 18 +- .../templates/controllers/daemonset.yaml | 18 +- .../templates/controllers/daemonset.yaml | 18 +- .../templates/controllers/daemonset.yaml | 18 +- .../templates/controllers/daemonset.yaml | 18 +- .../templates/controllers/daemonset.yaml | 18 +- .../templates/controllers/daemonset.yaml | 18 +- .../templates/controllers/daemonset.yaml | 18 +- .../templates/controllers/daemonset.yaml | 18 +- .../templates/controllers/daemonset.yaml | 18 +- .../templates/controllers/daemonset.yaml | 18 +- .../grafana-agent/templates/configmap.yaml | 17 - .../templates/controllers/daemonset.yaml | 73 --- .../grafana-agent/templates/rbac.yaml | 117 ----- .../grafana-agent/templates/service.yaml | 23 - .../templates/serviceaccount.yaml | 13 - .../templates/controllers/deployment.yaml | 18 +- 
.../templates/controllers/daemonset.yaml | 16 +- 61 files changed, 528 insertions(+), 1327 deletions(-) delete mode 100644 operations/helm/charts/grafana-agent/ci/static-mode-values.yaml delete mode 100644 operations/helm/charts/grafana-agent/config/example.yaml delete mode 100644 operations/helm/tests/static-mode/grafana-agent/templates/configmap.yaml delete mode 100644 operations/helm/tests/static-mode/grafana-agent/templates/controllers/daemonset.yaml delete mode 100644 operations/helm/tests/static-mode/grafana-agent/templates/rbac.yaml delete mode 100644 operations/helm/tests/static-mode/grafana-agent/templates/service.yaml delete mode 100644 operations/helm/tests/static-mode/grafana-agent/templates/serviceaccount.yaml diff --git a/internal/component/loki/source/podlogs/internal/apis/monitoring/v1alpha2/types.go b/internal/component/loki/source/podlogs/internal/apis/monitoring/v1alpha2/types.go index f5c1f74a0c..16163ee04c 100644 --- a/internal/component/loki/source/podlogs/internal/apis/monitoring/v1alpha2/types.go +++ b/internal/component/loki/source/podlogs/internal/apis/monitoring/v1alpha2/types.go @@ -8,8 +8,8 @@ import ( // +genclient // +kubebuilder:object:root=true // +kubebuilder:resource:path="podlogs" -// +kubebuilder:resource:path="podlogs" -// +kubebuilder:resource:categories="grafana-agent" +// +kubebuilder:resource:categories="grafana-alloy" +// +kubebuilder:resource:categories="alloy" // PodLogs defines how to collect logs for a Pod. type PodLogs struct { diff --git a/operations/helm/README.md b/operations/helm/README.md index eeead5738d..f6fa836eb6 100644 --- a/operations/helm/README.md +++ b/operations/helm/README.md @@ -1,6 +1,6 @@ # Helm charts -This directory contains Helm charts for Grafana Agent. +This directory contains Helm charts for Grafana Alloy. ## Testing diff --git a/operations/helm/charts/grafana-agent/CHANGELOG.md b/operations/helm/charts/grafana-agent/CHANGELOG.md index a6a00a5f88..41dff46d15 100644 --- a/operations/helm/charts/grafana-agent/CHANGELOG.md +++ b/operations/helm/charts/grafana-agent/CHANGELOG.md @@ -7,459 +7,11 @@ This document contains a historical list of changes between releases. Only changes that impact end-user behavior are listed; changes to documentation or internal API changes are not present. -Unreleased ----------- - -### Features - -- Allow setting nodePort for service. (@ryayon) - -0.36.0 (2024-02-27) -------------------- - -### Enhancements - -- Update Grafana Agent version to v0.40.2. (@rfratto) - -0.35.0 (2024-02-27) -------------------- - -### Enhancements - -- Update Grafana Agent version to v0.40.1. (@rfratto) - -0.34.0 (2024-02-27) -------------------- - -### Enhancements - -- Update Grafana Agent version to v0.40.0. (@jcreixell) - -0.33.0 (2024-02-20) -------------------- - -### Features - -- Add HPA behavior support for scaling up and down. (@wildum) - -### Enhancements - -- Allow setting tlsConfig for serviceMonitor (@captncraig) -- Allow setting internalTrafficPolicy for service (@captncraig) - -0.32.0 (2024-02-15) -------------------- - -### Enhancements - -- Allow setting scheme for readiness checks when using tls. (@captncraig) - -- Update Grafana Agent version to v0.39.2. (@captncraig) - -0.31.1 (2024-01-19) -------------------- - -### Enhancements - -- Add `kubectl.kubernetes.io/default-container: grafana-agent` annotation to allow various tools to choose `grafana-agent` container as default target (@aerfio) - -- Add support for topology spread constraints in helm chart. 
(@etiennep) - -- Update Grafana Agent version to v0.39.1. (@marctc) - -### Bugfixes - -- Fix a bug preventing the `.Values.configReloader.image.digest` Helm value to be correctly retrieved. (@claudioscalzo) - -- Fix a bug preventing digests to be used as labels because of their length. Labels values [must be 63 characters or less](https://kubernetes.io/docs/concepts/overview/working-with-objects/labels/#syntax-and-character-set). (@claudioscalzo) - -0.31.0 (2024-01-10) -------------------- - -### Enhancements - -- Update Grafana Agent version to v0.39.0. (@marctc) - -### Bugfixes - -- Configure namespace for service account when RBAC resources is created. (@hainenber) - -### Other changes - -- Change config reloader image to `ghcr.io/jimmidyson/configmap-reload:v0.12.0` to reflect change in repository and version. (@berendiwema) - -0.30.0 (2024-01-05) -------------------- - -### Enhancements - -- Update `rbac` to include necessary rules for the `otelcol.processor.k8sattributes` component. (@rlankfo) - -- Add `serviceAccount.additionalLabels` to values.yaml to enable setting additional labels on the created service account. (@zopanix) - -### Bugfixes - -- Statefulset should use value `.controller.enableStatefulSetAutoDeletePVC` instead of just `.enableStatefulSetAutoDeletePVC`. (@captncraig) - -0.29.0 (2023-11-30) -------------------- - -### Enhancements - -- Update Grafana Agent version to v0.38.1. (@rfratto) - -### Other changes - -- Update `container.securityContext` Helm value reference to renamed `agent.securityContext`. (@hainenber) - -0.28.0 (2023-11-21) -------------------- - -### Enhancements - -- Ensure that `app.kubernetes.io/version` label accounts for any - image tag overrides supplied to the chart Values. (@tristanburgess) - -- Update Grafana Agent version to v0.38.0. (@rfratto) - -0.27.2 (2023-11-07) ----------- - -### Enhancements - -- Expose the `ui-path-prefix` flag on the Helm chart. (@mlcdf) - -- Expose controller `extraAnnotations` on the Helm chart. (@mcanevet) - -- Update Grafana Agent version to v0.37.4. (@tpaschalis) - -0.27.1 (2023-10-26) ----------- - -### Enhancements - -- Update Grafana Agent version to v0.37.3. (@tpaschalis) - -### Bugfixes - -- Fix issue where CRDs were created with annotations after the `crds.create` - setting was introduced (@rfratto). - -0.27.0 (2023-10-12) ----------- - -### Enhancements - -- Add `secrets` and `configmaps` to cluster role for `remote.kubernetes.*` components. (@captncraig) - -- Update Grafana Agent version to v0.37.2. (@tpaschalis) - -0.26.0 (2023-10-10) -------------------- - -### Breaking changes - -- The `initContainers` setting has been moved to `controller.initContainers` - for consistency with other Pod-level settings. (@rfratto) - -### Enhancements - -- Make CRDs optional through the `crds.create` setting. (@bentonam, @rfratto) - -- Update Grafana Agent version to v0.37.1. (@tpaschalis) - -0.25.0 (2023-09-22) -------------------- - -### Enhancements - -- An image's digest can now be used in place of a tag. (@hainenber) - -- Add ServiceMonitor support. (@QuentinBisson) - -- Update Grafana Agent version to v0.36.2. (@ptodev) - -0.24.0 (2023-09-08) -------------------- - -### Enhancements - -- StatefulSets will now use `podManagementPolicy: Parallel` by default. To - disable this behavior, set `controller.parallelRollout` to `false`. - (@rfratto) - -0.23.0 (2023-09-06) -------------------- - -### Enhancements - -- Update Grafana Agent version to v0.36.1. 
(@erikbaranowski) - -- Enable clustering for deployments and daemonsets. (@tpaschalis) - -0.22.0 (2023-08-30) -------------------- - -- Update Grafana Agent version to v0.36.0. (@thampiotr) - -0.21.1 (2023-08-30) -------------------- - -- Condition parameter minReadySeconds on StatefulSet, Deployment, and DaemonSet to Kubernetes v1.22 clusters. - -0.21.0 (2023-08-15) -------------------- - -- Update Grafana Agent version to v0.35.4. (@mattdurham) - -0.20.0 (2023-08-09) -------------------- - -- Update Grafana Agent version to v0.35.3. (@tpaschalis) - -### Enhancements - -- Add support for initcontainers in helm chart. (@dwalker-sabiogroup) - -0.19.0 (2023-07-27) -------------------- - -### Enhancements - -- Set hostPID from values. (@korniltsev) - -- Set nodeSelector at podlevel. (@Flasheh) - -- Update Grafana Agent version to v0.35.2. (@rfratto) - -0.18.0 (2023-07-26) -------------------- - -### Enhancements - -- Update Grafana Agent version to v0.35.1. (@ptodev) - -0.17.0 (2023-07-19) -------------------- - -### Features - -- Add native support for Flow mode clustering with the - `agent.clustering.enabled` flag. Clustering may only be enabled in Flow mode - when deploying a StatefulSet. (@rfratto) - -### Enhancements - -- Set securityContext for configReloader container. (@yanehi) - -- Set securityContext at podlevel. (@yanehi) - -- Update Grafana Agent version to v0.35.0. (@mattdurham) - -0.16.0 (2023-06-20) -------------------- - -### Enhancements - -- Allow requests to be set on the config reloader container. (@tpaschalis) - -- Allow users of the helm chart to configure the image registry either at the image level or globally. (@QuentinBisson) - -- Don't specify replica count for StatefulSets when autoscaling is enabled (@captncraig) - -- Update Grafana Agent version to v0.34.2. (@captncraig) - -### Other changes - -- Make the agent and config-reloader container resources required when using - autoscaling. (@tpaschalis) - -0.15.0 (2023-06-08) -------------------- - -### Enhancements - -- Update Grafana Agent version to v0.34.0. (@captncraig) - -- Add HPA support for Deployments and StatefulSets. (@tpaschalis) - -- Make the Faro port optional. (@tpaschalis) - -- Rename the deprecated `serviceAccount` alias to `serviceAccountName` in - pod template. This is a no-op change. (@tpaschalis) - -### Bugfixes - -- Only set the deployment replicas when autoscaling is disabled. (@tiithansen) - -- Reorder HPA `spec.metrics` to avoid endless sync loop in ArgoCD. (@tiithansen) - -0.14.0 (2023-05-11) -------------------- - -### Enhancements - -- Add a toggle for enabling/disabling the Service. (@tpaschalis) - -- Update Grafana Agent version to v0.33.2. (@rfratto) - -0.13.0 (2023-05-01) -------------------- - -### Enhancements - -- Update Grafana Agent version to v0.33.1. (@spartan0x117) - -- Update RBAC rules to permit `node/metrics`. (@yurii-kryvosheia) - -0.12.0 (2023-04-25) -------------------- - -### Enhancements - -- Update Grafana Agent version to v0.33.0. (@rfratto) - -0.11.0 (2023-04-24) -------------------- - -### Enhancements - -- Add support for adding Annotations to Service (@ofirshtrull) -- Add `agent.envFrom` value. (@carlosjgp) -- Add `controller.hostNetwork` value. (@carlosjgp) -- Add `controller.dnsPolicy` value. (@carlosjgp) - -### Bugfixes - -- Fix issue where `controller.tolerations` setting was ignored. (@carlosjgp) -- Fix YAML indentation of some resources. 
(@carlosjgp)
-
-0.10.0 (2023-03-09)
--------------------
-
-### Enhancements
-
-- Add Horizontal Pod Autoscaling for controller type deployment. (@therealmanny)
-- Add affinity values. (@therealmanny)
-
-0.9.0 (2023-03-14)
-------------------
-
-### Enhancements
-
-- Add PodMonitors, ServiceMonitors, and Probes to the agent ClusterRole. (@captncraig)
-- Add podLabels values. (@therealmanny)
-
-
-0.8.1 (2023-03-06)
-------------------
-
-### Enhancements
-
-- Add hostPort specification to extraPorts and extraPort documentation. (@pnathan)
-- Selectively template ClusterIP. (@aglees)
-- Add priorityClassName value. (@aglees)
-- Update Grafana Agent version to v0.32.1. (@erikbaranowski)
-
-0.8.0 (2023-02-28)
-------------------
-
-### Enhancements
-
-- Update Grafana Agent version to v0.32.0. (@rfratto)
-
-0.7.1 (2023-02-27)
-------------------
-
-### Bugfixes
-
-- Fix issue where `.image.pullPolicy` was not being respected. (@rfratto)
-
-0.7.0 (2023-02-24)
-------------------
-
-### Enhancements
-
-- Helm chart: Add support for templates inside of configMap.content (@ts-mini)
-- Add the necessary rbac to support eventhandler integration (@nvanheuverzwijn)
-
-
-0.6.0 (2023-02-13)
-------------------
-
-### Enhancements
-
-- Update Grafana Agent version to v0.31.3. (@rfratto)
-
-0.5.0 (2023-02-08)
-------------------
-
-### Enhancements
-
-- Helm Chart: Add ingress and support for agent-receiver. (@ts-mini)
-
-### Documentation
-
-- Update Helm Chart documentation to reference new `loki.source.kubernetes` component.
-
-0.4.0 (2023-01-31)
-------------------
-
-### Enhancements
-
-- Update Grafana Agent version to v0.31.0. (@rfratto)
-- Install PodLogs CRD for the `loki.source.podlogs` Flow component. (@rfratto)
-- Update RBAC rules to permit `loki.source.podlogs` and `mimir.rules.kubernetes` to work by default. (@rfratto)
-
-0.3.1 (2023-01-31)
-------------------
-
-### Bugfixes
-
-- Fix `podAnnotations` values reference in pod template (should be `controller.podAnnotations`).
-- Ensure the service gets a clusterIP assigned by default.
-
-0.3.0 (2023-01-23)
-------------------
-
-### Security
-
-- Change config reloader image to `jimmidyson/configmap-reload:v0.8.0` to resolve security scanner report. (@rfratto)
-
-0.2.3 (2023-01-17)
-------------------
-
-### Bugfixes
-
-- Sets correct arguments for starting the agent when static mode is selected.
-
-0.2.2 (2023-01-17)
-------------------
-
-### Bugfixes
-
-- Updated configmap template to use correct variable for populating configmap content
-
-0.2.1 (2023-01-12)
-------------------
-
-### Other changes
-
-- Updated documentation to remove warning about the chart not being ready for
-  use.
-
-0.2.0 (2023-01-12)
+0.1.0 (2024-04-09)
 ------------------
 
 ### Features
 
-- Introduce supporting extra ports on the Grafana Agent created by Helm Chart.
-
-0.1.0 (2023-01-11)
-------------------
-
-### Features
-
-- Introduce a Grafana Agent Helm chart which supports Grafana Agent Flow. (@rfratto)
-
-[contributors guide]: ../../../../docs/developer/contributing.md
+- Introduce a Grafana Alloy Helm chart. The Grafana Alloy Helm chart is
+  backwards compatible with the values.yaml from the `grafana-agent` Helm
+  chart. 
(@rfratto) diff --git a/operations/helm/charts/grafana-agent/Chart.yaml b/operations/helm/charts/grafana-agent/Chart.yaml index 9bb6a1de2c..4fd9bf70e5 100644 --- a/operations/helm/charts/grafana-agent/Chart.yaml +++ b/operations/helm/charts/grafana-agent/Chart.yaml @@ -1,6 +1,6 @@ apiVersion: v2 name: grafana-agent -description: 'Grafana Agent' +description: 'Grafana Alloy' type: application version: 0.36.0 appVersion: 'v0.40.2' diff --git a/operations/helm/charts/grafana-agent/README.md b/operations/helm/charts/grafana-agent/README.md index 51ea7498b6..210249a348 100644 --- a/operations/helm/charts/grafana-agent/README.md +++ b/operations/helm/charts/grafana-agent/README.md @@ -1,10 +1,10 @@ -# Grafana Agent Helm chart +# Grafana Alloy Helm chart ![Type: application](https://img.shields.io/badge/Type-application-informational?style=flat-square) ![Version: 0.36.0](https://img.shields.io/badge/Version-0.36.0-informational?style=flat-square) ![AppVersion: v0.40.2](https://img.shields.io/badge/AppVersion-v0.40.2-informational?style=flat-square) -Helm chart for deploying [Grafana Agent][] to Kubernetes. +Helm chart for deploying [Grafana Alloy][] to Kubernetes. -[Grafana Agent]: https://grafana.com/docs/agent/latest/ +[Grafana Alloy]: https://grafana.com/docs/alloy/latest/ ## Usage @@ -19,9 +19,9 @@ helm repo update To install the chart with the release name my-release: -`helm install my-release grafana/grafana-agent` +`helm install my-release grafana/alloy` -This chart installs one instance of Grafana Agent into your Kubernetes cluster +This chart installs one instance of Grafana Alloy into your Kubernetes cluster using a specific Kubernetes controller. By default, DaemonSet is used. The `controller.type` value can be used to change the controller to either a StatefulSet or Deployment. @@ -29,41 +29,32 @@ StatefulSet or Deployment. Creating multiple installations of the Helm chart with different controllers is useful if just using the default DaemonSet isn't sufficient. -## Flow mode is the default - -By default, [Grafana Agent Flow][Flow] is deployed. To opt out of Flow mode and -use the older mode (called "static mode"), set the `agent.mode` value to -`static`. - -[Flow]: https://grafana.com/docs/agent/latest/flow/ - ## Values | Key | Type | Default | Description | |-----|------|---------|-------------| -| agent.clustering.enabled | bool | `false` | Deploy agents in a cluster to allow for load distribution. Only applies when agent.mode=flow. | -| agent.configMap.content | string | `""` | Content to assign to the new ConfigMap. This is passed into `tpl` allowing for templating from values. | -| agent.configMap.create | bool | `true` | Create a new ConfigMap for the config file. | -| agent.configMap.key | string | `nil` | Key in ConfigMap to get config from. | -| agent.configMap.name | string | `nil` | Name of existing ConfigMap to use. Used when create is false. | -| agent.enableReporting | bool | `true` | Enables sending Grafana Labs anonymous usage stats to help improve Grafana Agent. | -| agent.envFrom | list | `[]` | Maps all the keys on a ConfigMap or Secret as environment variables. https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.24/#envfromsource-v1-core | -| agent.extraArgs | list | `[]` | Extra args to pass to `agent run`: https://grafana.com/docs/agent/latest/flow/reference/cli/run/ | -| agent.extraEnv | list | `[]` | Extra environment variables to pass to the agent container. 
| -| agent.extraPorts | list | `[]` | Extra ports to expose on the Agent | -| agent.listenAddr | string | `"0.0.0.0"` | Address to listen for traffic on. 0.0.0.0 exposes the UI to other containers. | -| agent.listenPort | int | `80` | Port to listen for traffic on. | -| agent.listenScheme | string | `"HTTP"` | Scheme is needed for readiness probes. If enabling tls in your configs, set to "HTTPS" | -| agent.mode | string | `"flow"` | Mode to run Grafana Agent in. Can be "flow" or "static". | -| agent.mounts.dockercontainers | bool | `false` | Mount /var/lib/docker/containers from the host into the container for log collection. | -| agent.mounts.extra | list | `[]` | Extra volume mounts to add into the Grafana Agent container. Does not affect the watch container. | -| agent.mounts.varlog | bool | `false` | Mount /var/log from the host into the container for log collection. | -| agent.resources | object | `{}` | Resource requests and limits to apply to the Grafana Agent container. | -| agent.securityContext | object | `{}` | Security context to apply to the Grafana Agent container. | -| agent.storagePath | string | `"/tmp/agent"` | Path to where Grafana Agent stores data (for example, the Write-Ahead Log). By default, data is lost between reboots. | -| agent.uiPathPrefix | string | `"/"` | Base path where the UI is exposed. | +| alloy.clustering.enabled | bool | `false` | Deploy Alloy in a cluster to allow for load distribution. | +| alloy.configMap.content | string | `""` | Content to assign to the new ConfigMap. This is passed into `tpl` allowing for templating from values. | +| alloy.configMap.create | bool | `true` | Create a new ConfigMap for the config file. | +| alloy.configMap.key | string | `nil` | Key in ConfigMap to get config from. | +| alloy.configMap.name | string | `nil` | Name of existing ConfigMap to use. Used when create is false. | +| alloy.enableReporting | bool | `true` | Enables sending Grafana Labs anonymous usage stats to help improve Grafana Alloy. | +| alloy.envFrom | list | `[]` | Maps all the keys on a ConfigMap or Secret as environment variables. https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.24/#envfromsource-v1-core | +| alloy.extraArgs | list | `[]` | Extra args to pass to `alloy run`: https://grafana.com/docs/alloy/latest/reference/cli/run/ | +| alloy.extraEnv | list | `[]` | Extra environment variables to pass to the Alloy container. | +| alloy.extraPorts | list | `[]` | Extra ports to expose on the Alloy container. | +| alloy.listenAddr | string | `"0.0.0.0"` | Address to listen for traffic on. 0.0.0.0 exposes the UI to other containers. | +| alloy.listenPort | int | `80` | Port to listen for traffic on. | +| alloy.listenScheme | string | `"HTTP"` | Scheme is needed for readiness probes. If enabling tls in your configs, set to "HTTPS" | +| alloy.mounts.dockercontainers | bool | `false` | Mount /var/lib/docker/containers from the host into the container for log collection. | +| alloy.mounts.extra | list | `[]` | Extra volume mounts to add into the Grafana Alloy container. Does not affect the watch container. | +| alloy.mounts.varlog | bool | `false` | Mount /var/log from the host into the container for log collection. | +| alloy.resources | object | `{}` | Resource requests and limits to apply to the Grafana Alloy container. | +| alloy.securityContext | object | `{}` | Security context to apply to the Grafana Alloy container. 
| +| alloy.storagePath | string | `"/tmp/alloy"` | Path to where Grafana Alloy stores data (for example, the Write-Ahead Log). By default, data is lost between reboots. | +| alloy.uiPathPrefix | string | `"/"` | Base path where the UI is exposed. | | configReloader.customArgs | list | `[]` | Override the args passed to the container. | -| configReloader.enabled | bool | `true` | Enables automatically reloading when the agent config changes. | +| configReloader.enabled | bool | `true` | Enables automatically reloading when the Alloy config changes. | | configReloader.image.digest | string | `""` | SHA256 digest of image to use for config reloading (either in format "sha256:XYZ" or "XYZ"). When set, will override `configReloader.image.tag` | | configReloader.image.registry | string | `"ghcr.io"` | Config reloader image registry (defaults to docker.io) | | configReloader.image.repository | string | `"jimmidyson/configmap-reload"` | Repository to get config reloader image from. | @@ -85,35 +76,35 @@ use the older mode (called "static mode"), set the `agent.mode` value to | controller.dnsPolicy | string | `"ClusterFirst"` | Configures the DNS policy for the pod. https://kubernetes.io/docs/concepts/services-networking/dns-pod-service/#pod-s-dns-policy | | controller.enableStatefulSetAutoDeletePVC | bool | `false` | Whether to enable automatic deletion of stale PVCs due to a scale down operation, when controller.type is 'statefulset'. | | controller.extraAnnotations | object | `{}` | Annotations to add to controller. | -| controller.extraContainers | list | `[]` | Additional containers to run alongside the agent container and initContainers. | +| controller.extraContainers | list | `[]` | Additional containers to run alongside the Alloy container and initContainers. | | controller.hostNetwork | bool | `false` | Configures Pods to use the host network. When set to true, the ports that will be used must be specified. | | controller.hostPID | bool | `false` | Configures Pods to use the host PID namespace. | | controller.initContainers | list | `[]` | | -| controller.nodeSelector | object | `{}` | nodeSelector to apply to Grafana Agent pods. | +| controller.nodeSelector | object | `{}` | nodeSelector to apply to Grafana Alloy pods. | | controller.parallelRollout | bool | `true` | Whether to deploy pods in parallel. Only used when controller.type is 'statefulset'. | | controller.podAnnotations | object | `{}` | Extra pod annotations to add. | | controller.podLabels | object | `{}` | Extra pod labels to add. | -| controller.priorityClassName | string | `""` | priorityClassName to apply to Grafana Agent pods. | +| controller.priorityClassName | string | `""` | priorityClassName to apply to Grafana Alloy pods. | | controller.replicas | int | `1` | Number of pods to deploy. Ignored when controller.type is 'daemonset'. | -| controller.tolerations | list | `[]` | Tolerations to apply to Grafana Agent pods. | -| controller.topologySpreadConstraints | list | `[]` | Topology Spread Constraints to apply to Grafana Agent pods. | -| controller.type | string | `"daemonset"` | Type of controller to use for deploying Grafana Agent in the cluster. Must be one of 'daemonset', 'deployment', or 'statefulset'. | +| controller.tolerations | list | `[]` | Tolerations to apply to Grafana Alloy pods. | +| controller.topologySpreadConstraints | list | `[]` | Topology Spread Constraints to apply to Grafana Alloy pods. 
| +| controller.type | string | `"daemonset"` | Type of controller to use for deploying Grafana Alloy in the cluster. Must be one of 'daemonset', 'deployment', or 'statefulset'. | | controller.updateStrategy | object | `{}` | Update strategy for updating deployed Pods. | | controller.volumeClaimTemplates | list | `[]` | volumeClaimTemplates to add when controller.type is 'statefulset'. | -| controller.volumes.extra | list | `[]` | Extra volumes to add to the Grafana Agent pod. | +| controller.volumes.extra | list | `[]` | Extra volumes to add to the Grafana Alloy pod. | | crds.create | bool | `true` | Whether to install CRDs for monitoring. | | fullnameOverride | string | `nil` | Overrides the chart's computed fullname. Used to change the full prefix of resource names. | | global.image.pullSecrets | list | `[]` | Optional set of global image pull secrets. | | global.image.registry | string | `""` | Global image registry to use if it needs to be overriden for some specific use cases (e.g local registries, custom images, ...) | -| global.podSecurityContext | object | `{}` | Security context to apply to the Grafana Agent pod. | -| image.digest | string | `nil` | Grafana Agent image's SHA256 digest (either in format "sha256:XYZ" or "XYZ"). When set, will override `image.tag`. | -| image.pullPolicy | string | `"IfNotPresent"` | Grafana Agent image pull policy. | +| global.podSecurityContext | object | `{}` | Security context to apply to the Grafana Alloy pod. | +| image.digest | string | `nil` | Grafana Alloy image's SHA256 digest (either in format "sha256:XYZ" or "XYZ"). When set, will override `image.tag`. | +| image.pullPolicy | string | `"IfNotPresent"` | Grafana Alloy image pull policy. | | image.pullSecrets | list | `[]` | Optional set of image pull secrets. | -| image.registry | string | `"docker.io"` | Grafana Agent image registry (defaults to docker.io) | -| image.repository | string | `"grafana/agent"` | Grafana Agent image repository. | -| image.tag | string | `nil` | Grafana Agent image tag. When empty, the Chart's appVersion is used. | +| image.registry | string | `"docker.io"` | Grafana Alloy image registry (defaults to docker.io) | +| image.repository | string | `"grafana/alloy"` | Grafana Alloy image repository. | +| image.tag | string | `nil` | Grafana Alloy image tag. When empty, the Chart's appVersion is used. | | ingress.annotations | object | `{}` | | -| ingress.enabled | bool | `false` | Enables ingress for the agent (faro port) | +| ingress.enabled | bool | `false` | Enables ingress for Alloy (Faro port) | | ingress.extraPaths | list | `[]` | | | ingress.faroPort | int | `12347` | | | ingress.hosts[0] | string | `"chart-example.local"` | | @@ -122,7 +113,7 @@ use the older mode (called "static mode"), set the `agent.mode` value to | ingress.pathType | string | `"Prefix"` | | | ingress.tls | list | `[]` | | | nameOverride | string | `nil` | Overrides the chart's name. Used to change the infix in the resource names. | -| rbac.create | bool | `true` | Whether to create RBAC resources for the agent. | +| rbac.create | bool | `true` | Whether to create RBAC resources for Alloy. | | service.annotations | object | `{}` | | | service.clusterIP | string | `""` | Cluster IP, can be set to None, empty "" or an IP address | | service.enabled | bool | `true` | Creates a Service for the controller's pods. 
| @@ -131,7 +122,7 @@ use the older mode (called "static mode"), set the `agent.mode` value to | service.type | string | `"ClusterIP"` | Service type | | serviceAccount.additionalLabels | object | `{}` | Additional labels to add to the created service account. | | serviceAccount.annotations | object | `{}` | Annotations to add to the created service account. | -| serviceAccount.create | bool | `true` | Whether to create a service account for the Grafana Agent deployment. | +| serviceAccount.create | bool | `true` | Whether to create a service account for the Grafana Alloy deployment. | | serviceAccount.name | string | `nil` | The name of the existing service account to use when serviceAccount.create is false. | | serviceMonitor.additionalLabels | object | `{}` | Additional labels for the service monitor. | | serviceMonitor.enabled | bool | `false` | | @@ -140,19 +131,19 @@ use the older mode (called "static mode"), set the `agent.mode` value to | serviceMonitor.relabelings | list | `[]` | RelabelConfigs to apply to samples before scraping ref: https://github.com/prometheus-operator/prometheus-operator/blob/main/Documentation/api.md#relabelconfig | | serviceMonitor.tlsConfig | object | `{}` | Customize tls parameters for the service monitor | -### agent.extraArgs +### alloy.extraArgs -`agent.extraArgs` allows for passing extra arguments to the Grafana Agent -container. The list of available arguments is documented on [agent run][]. +`alloy.extraArgs` allows for passing extra arguments to the Grafana Alloy +container. The list of available arguments is documented on [alloy run][]. -> **WARNING**: Using `agent.extraArgs` does not have a stable API. Things may +> **WARNING**: Using `alloy.extraArgs` does not have a stable API. Things may > break between Chart upgrade if an argument gets added to the template. -[agent run]: https://grafana.com/docs/agent/latest/flow/reference/cli/run/ +[alloy run]: https://grafana.com/docs/alloy/latest/reference/cli/run/ -### agent.extraPorts +### alloy.extraPorts -`agent.extraPorts` allows for configuring specific open ports. +`alloy.extraPorts` allows for configuring specific open ports. The detained specification of ports can be found at the [Kubernetes Pod documents](https://kubernetes.io/docs/reference/kubernetes-api/workload-resources/pod-v1/#ports). @@ -165,28 +156,28 @@ Port numbers specified must be 0 < x < 65535. | name | name | If specified, this must be an `IANA_SVC_NAME` and unique within the pod. Each named port in a pod must have a unique name. Name for the port that can be referred to by services. | protocol | protocol | Must be UDP, TCP, or SCTP. Defaults to "TCP". | -### agent.listenAddr +### alloy.listenAddr -`agent.listenAddr` allows for restricting which address the agent listens on +`alloy.listenAddr` allows for restricting which address Alloy listens on for network traffic on its HTTP server. By default, this is `0.0.0.0` to allow its UI to be exposed when port-forwarding and to expose its metrics to other -agents in the cluster. +Alloy instances in the cluster. -### agent.configMap.config +### alloy.configMap.config -`agent.configMap.content` holds the Grafana Agent configuration to use. +`alloy.configMap.content` holds the Grafana Alloy configuration to use. -If `agent.configMap.content` is not provided, a [default configuration file][default-config] is -used. When provided, `agent.configMap.content` must hold a valid River configuration file. 
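+For example (an illustrative sketch, not a chart default), a minimal River
+configuration that only adjusts logging can be provided inline through
+`values.yaml`:
+
+```yaml
+alloy:
+  configMap:
+    content: |
+      // Illustrative only; replace with your own component pipeline.
+      logging {
+        level  = "info"
+        format = "logfmt"
+      }
+```
+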
+If `alloy.configMap.content` is not provided, a [default configuration file][default-config] is
+used. When provided, `alloy.configMap.content` must hold a valid River configuration file.

[default-config]: ./config/example.river

-### agent.securityContext
+### alloy.securityContext

-`agent.securityContext` sets the securityContext passed to the Grafana
-Agent container.
+`alloy.securityContext` sets the securityContext passed to the Grafana
+Alloy container.

-By default, Grafana Agent containers are not able to collect telemetry from the
+By default, Grafana Alloy containers are not able to collect telemetry from the
host node or other specific types of privileged telemetry data. See
[Collecting logs from other containers][#collecting-logs-from-other-containers]
and [Collecting host node telemetry][#collecting-host-node-telemetry] below for
@@ -195,24 +186,21 @@ more information on how to enable these capabilities.
### rbac.create

`rbac.create` enables the creation of ClusterRole and ClusterRoleBindings for
-the Grafana Agent containers to use. The default permission set allows Flow
+the Grafana Alloy containers to use. The default permission set allows
components like [discovery.kubernetes][] to work properly.

-[discovery.kubernetes]: https://grafana.com/docs/agent/latest/flow/reference/components/discovery.kubernetes/
+[discovery.kubernetes]: https://grafana.com/docs/alloy/latest/reference/components/discovery.kubernetes/

### controller.autoscaling

`controller.autoscaling.enabled` enables the creation of a HorizontalPodAutoscaler. It is
only used when `controller.type` is set to `deployment` or `statefulset`.

-`controller.autoscaling` is intended to be used with an
-[app_agent_receiver-configured][app_agent_receiver] Grafana Agent or for
-[clustered][] mode.
+`controller.autoscaling` is intended to be used with [clustered][] mode.

-> **WARNING**: Using `controller.autoscaling` for any other Grafana Agent
+> **WARNING**: Using `controller.autoscaling` for any other Grafana Alloy
> configuration could lead to redundant or double telemetry collection.

-[app_agent_receiver]: https://grafana.com/docs/agent/latest/configuration/integrations/integrations-next/app-agent-receiver-config/
-[clustered]: https://grafana.com/docs/agent/latest/flow/reference/cli/run/#clustered-mode-experimental
+[clustered]: https://grafana.com/docs/alloy/latest/reference/cli/run/#clustered-mode

When using autoscaling with a StatefulSet controller and have enabled
volumeClaimTemplates to be created alongside the StatefulSet, it is possible to
@@ -222,39 +210,33 @@ Kubernetes version `>=1.23-0` and your cluster has the
`enableStatefulSetAutoDeletePVC` to true to automatically delete stale PVCs.

Using `controller.autoscaling` requires the target metric (cpu/memory) to have
-its resource requests set up for both the agent and config-reloader containers
+its resource requests set up for both the Alloy and config-reloader containers
so that the HPA can use them to calculate the replica count from the actual
resource utilization.

## Collecting logs from other containers

There are two ways to collect logs from other containers within the cluster
-the agent is deployed in.
+Alloy is deployed in.

-### Versions >= 0.31.x
+### loki.source.kubernetes

-The [loki.source.kubernetes][] component introduced in 0.31.0 may be used to
-collect logs as an alternative to tailing files from the host. This component
-does not require mounting the hosts filesystem into the Agent, nor requires
-additional security contexts to work correctly.
-
-However, `loki.source.kubernetes` is experimental and may have issues not
-present in the file-based approach.
+The [loki.source.kubernetes][] component may be used to collect logs from
+containers using the Kubernetes API. This component does not require mounting
+the host's filesystem into Alloy, nor does it require additional security
+contexts to work correctly.

[loki.source.kubernetes]: https://grafana.com/docs/agent/latest/flow/reference/components/loki.source.kubernetes/

-### Versions < 0.31.x
-
-For those running the Agent on versions prior to 0.31.0, the only way to collect logs
-from other containers is to mount `/var/lib/docker/containers` from the host and read
-the log files directly.
+### File-based collection

-This capability is disabled by default.
+Logs may also be collected by mounting the host's filesystem into the Alloy
+container, bypassing the need to communicate with the Kubernetes API.

-To expose logs from other containers to Grafana Agent:
+To mount log files from other containers into Grafana Alloy directly:

-* Set `agent.mounts.dockercontainers` to `true`.
-* Set `agent.securityContext` to:
+* Set `alloy.mounts.dockercontainers` to `true`.
+* Set `alloy.securityContext` to:
   ```yaml
   privileged: true
   runAsUser: 0
@@ -263,13 +245,13 @@ To expose logs from other containers to Grafana Agent:

## Collecting host node telemetry

Telemetry from the host, such as host-specific log files (from `/var/logs`) or
-metrics from `/proc` and `/sys` are not accessible to Grafana Agent containers.
+metrics from `/proc` and `/sys` are not accessible to Grafana Alloy containers.

-To expose this information to Grafana Agent for telemetry collection:
+To expose this information to Grafana Alloy for telemetry collection:

-* Set `agent.mounts.dockercontainers` to `true`.
+* Set `alloy.mounts.dockercontainers` to `true`.
* Mount `/proc` and `/sys` from the host into the container.
-* Set `agent.securityContext` to:
+* Set `alloy.securityContext` to:
   ```yaml
   privileged: true
   runAsUser: 0
diff --git a/operations/helm/charts/grafana-agent/README.md.gotmpl b/operations/helm/charts/grafana-agent/README.md.gotmpl
index e9bbe8ece5..981dd11296 100644
--- a/operations/helm/charts/grafana-agent/README.md.gotmpl
+++ b/operations/helm/charts/grafana-agent/README.md.gotmpl
@@ -1,10 +1,10 @@
-# Grafana Agent Helm chart
+# Grafana Alloy Helm chart

{{ template "chart.typeBadge" . }}{{ template "chart.versionBadge" . }}{{ template "chart.appVersionBadge" . }}

-Helm chart for deploying [Grafana Agent][] to Kubernetes.
+Helm chart for deploying [Grafana Alloy][] to Kubernetes.

-[Grafana Agent]: https://grafana.com/docs/agent/latest/
+[Grafana Alloy]: https://grafana.com/docs/alloy/latest/

## Usage

@@ -19,9 +19,9 @@ helm repo update

To install the chart with the release name my-release:

-`helm install my-release grafana/grafana-agent`
+`helm install my-release grafana/alloy`

-This chart installs one instance of Grafana Agent into your Kubernetes cluster
+This chart installs one instance of Grafana Alloy into your Kubernetes cluster
using a specific Kubernetes controller. By default, DaemonSet is used. The
`controller.type` value can be used to change the controller to either a
StatefulSet or Deployment.
@@ -29,29 +29,21 @@ StatefulSet or Deployment. 
Creating multiple installations of the Helm chart with different controllers is useful if just using the default DaemonSet isn't sufficient. -## Flow mode is the default - -By default, [Grafana Agent Flow][Flow] is deployed. To opt out of Flow mode and -use the older mode (called "static mode"), set the `agent.mode` value to -`static`. - -[Flow]: https://grafana.com/docs/agent/latest/flow/ - {{ template "chart.valuesSection" . }} -### agent.extraArgs +### alloy.extraArgs -`agent.extraArgs` allows for passing extra arguments to the Grafana Agent -container. The list of available arguments is documented on [agent run][]. +`alloy.extraArgs` allows for passing extra arguments to the Grafana Alloy +container. The list of available arguments is documented on [alloy run][]. -> **WARNING**: Using `agent.extraArgs` does not have a stable API. Things may +> **WARNING**: Using `alloy.extraArgs` does not have a stable API. Things may > break between Chart upgrade if an argument gets added to the template. -[agent run]: https://grafana.com/docs/agent/latest/flow/reference/cli/run/ +[alloy run]: https://grafana.com/docs/alloy/latest/reference/cli/run/ -### agent.extraPorts +### alloy.extraPorts -`agent.extraPorts` allows for configuring specific open ports. +`alloy.extraPorts` allows for configuring specific open ports. The detained specification of ports can be found at the [Kubernetes Pod documents](https://kubernetes.io/docs/reference/kubernetes-api/workload-resources/pod-v1/#ports). @@ -64,28 +56,28 @@ Port numbers specified must be 0 < x < 65535. | name | name | If specified, this must be an `IANA_SVC_NAME` and unique within the pod. Each named port in a pod must have a unique name. Name for the port that can be referred to by services. | protocol | protocol | Must be UDP, TCP, or SCTP. Defaults to "TCP". | -### agent.listenAddr +### alloy.listenAddr -`agent.listenAddr` allows for restricting which address the agent listens on +`alloy.listenAddr` allows for restricting which address Alloy listens on for network traffic on its HTTP server. By default, this is `0.0.0.0` to allow its UI to be exposed when port-forwarding and to expose its metrics to other -agents in the cluster. +Alloy instances in the cluster. -### agent.configMap.config +### alloy.configMap.config -`agent.configMap.content` holds the Grafana Agent configuration to use. +`alloy.configMap.content` holds the Grafana Alloy configuration to use. -If `agent.configMap.content` is not provided, a [default configuration file][default-config] is -used. When provided, `agent.configMap.content` must hold a valid River configuration file. +If `alloy.configMap.content` is not provided, a [default configuration file][default-config] is +used. When provided, `alloy.configMap.content` must hold a valid River configuration file. [default-config]: ./config/example.river -### agent.securityContext +### alloy.securityContext -`agent.securityContext` sets the securityContext passed to the Grafana -Agent container. +`alloy.securityContext` sets the securityContext passed to the Grafana +Alloy container. -By default, Grafana Agent containers are not able to collect telemetry from the +By default, Grafana Alloy containers are not able to collect telemetry from the host node or other specific types of privileged telemetry data. 
See
[Collecting logs from other containers][#collecting-logs-from-other-containers]
and [Collecting host node telemetry][#collecting-host-node-telemetry] below for
@@ -94,24 +86,21 @@ more information on how to enable these capabilities.
### rbac.create

`rbac.create` enables the creation of ClusterRole and ClusterRoleBindings for
-the Grafana Agent containers to use. The default permission set allows Flow
+the Grafana Alloy containers to use. The default permission set allows
components like [discovery.kubernetes][] to work properly.

-[discovery.kubernetes]: https://grafana.com/docs/agent/latest/flow/reference/components/discovery.kubernetes/
+[discovery.kubernetes]: https://grafana.com/docs/alloy/latest/reference/components/discovery.kubernetes/

### controller.autoscaling

`controller.autoscaling.enabled` enables the creation of a HorizontalPodAutoscaler. It is
only used when `controller.type` is set to `deployment` or `statefulset`.

-`controller.autoscaling` is intended to be used with an
-[app_agent_receiver-configured][app_agent_receiver] Grafana Agent or for
-[clustered][] mode.
+`controller.autoscaling` is intended to be used with [clustered][] mode.

-> **WARNING**: Using `controller.autoscaling` for any other Grafana Agent
+> **WARNING**: Using `controller.autoscaling` for any other Grafana Alloy
> configuration could lead to redundant or double telemetry collection.

-[app_agent_receiver]: https://grafana.com/docs/agent/latest/configuration/integrations/integrations-next/app-agent-receiver-config/
-[clustered]: https://grafana.com/docs/agent/latest/flow/reference/cli/run/#clustered-mode-experimental
+[clustered]: https://grafana.com/docs/alloy/latest/reference/cli/run/#clustered-mode

When using autoscaling with a StatefulSet controller and have enabled
volumeClaimTemplates to be created alongside the StatefulSet, it is possible to
@@ -121,39 +110,33 @@ Kubernetes version `>=1.23-0` and your cluster has the
`enableStatefulSetAutoDeletePVC` to true to automatically delete stale PVCs.

Using `controller.autoscaling` requires the target metric (cpu/memory) to have
-its resource requests set up for both the agent and config-reloader containers
+its resource requests set up for both the Alloy and config-reloader containers
so that the HPA can use them to calculate the replica count from the actual
resource utilization.

## Collecting logs from other containers

There are two ways to collect logs from other containers within the cluster
-the agent is deployed in.
+Alloy is deployed in.

-### Versions >= 0.31.x
+### loki.source.kubernetes

-The [loki.source.kubernetes][] component introduced in 0.31.0 may be used to
-collect logs as an alternative to tailing files from the host. This component
-does not require mounting the hosts filesystem into the Agent, nor requires
-additional security contexts to work correctly.
-
-However, `loki.source.kubernetes` is experimental and may have issues not
-present in the file-based approach.
+The [loki.source.kubernetes][] component may be used to collect logs from
+containers using the Kubernetes API. This component does not require mounting
+the host's filesystem into Alloy, nor does it require additional security
+contexts to work correctly. 
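+
+As an illustrative sketch (the component labels and the `loki.write` endpoint
+URL below are placeholders, not chart defaults), a log-collection pipeline
+built around this component could look like:
+
+```yaml
+alloy:
+  configMap:
+    content: |
+      discovery.kubernetes "pods" {
+        role = "pod"
+      }
+
+      loki.source.kubernetes "pods" {
+        targets    = discovery.kubernetes.pods.targets
+        forward_to = [loki.write.default.receiver]
+      }
+
+      loki.write "default" {
+        endpoint {
+          // Placeholder; point this at your own Loki instance.
+          url = "http://loki:3100/loki/api/v1/push"
+        }
+      }
+```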
[loki.source.kubernetes]: https://grafana.com/docs/agent/latest/flow/reference/components/loki.source.kubernetes/

-### Versions < 0.31.x
-
-For those running the Agent on versions prior to 0.31.0, the only way to collect logs
-from other containers is to mount `/var/lib/docker/containers` from the host and read
-the log files directly.
+### File-based collection

-This capability is disabled by default.
+Logs may also be collected by mounting the host's filesystem into the Alloy
+container, bypassing the need to communicate with the Kubernetes API.

-To expose logs from other containers to Grafana Agent:
+To mount log files from other containers into Grafana Alloy directly:

-* Set `agent.mounts.dockercontainers` to `true`.
-* Set `agent.securityContext` to:
+* Set `alloy.mounts.dockercontainers` to `true`.
+* Set `alloy.securityContext` to:
   ```yaml
   privileged: true
   runAsUser: 0
@@ -162,13 +145,13 @@ To expose logs from other containers to Grafana Agent:

## Collecting host node telemetry

Telemetry from the host, such as host-specific log files (from `/var/logs`) or
-metrics from `/proc` and `/sys` are not accessible to Grafana Agent containers.
+metrics from `/proc` and `/sys` are not accessible to Grafana Alloy containers.

-To expose this information to Grafana Agent for telemetry collection:
+To expose this information to Grafana Alloy for telemetry collection:

-* Set `agent.mounts.dockercontainers` to `true`.
+* Set `alloy.mounts.dockercontainers` to `true`.
* Mount `/proc` and `/sys` from the host into the container.
-* Set `agent.securityContext` to:
+* Set `alloy.securityContext` to:
   ```yaml
   privileged: true
   runAsUser: 0
diff --git a/operations/helm/charts/grafana-agent/charts/crds/crds/monitoring.grafana.com_podlogs.yaml b/operations/helm/charts/grafana-agent/charts/crds/crds/monitoring.grafana.com_podlogs.yaml
index 514732e68e..78678324c2 100644
--- a/operations/helm/charts/grafana-agent/charts/crds/crds/monitoring.grafana.com_podlogs.yaml
+++ b/operations/helm/charts/grafana-agent/charts/crds/crds/monitoring.grafana.com_podlogs.yaml
@@ -10,7 +10,8 @@ spec:
  group: monitoring.grafana.com
  names:
    categories:
-    - grafana-agent
+    - grafana-alloy
+    - alloy
    kind: PodLogs
    listKind: PodLogsList
    plural: podlogs
diff --git a/operations/helm/charts/grafana-agent/ci/clustering-values.yaml b/operations/helm/charts/grafana-agent/ci/clustering-values.yaml
index 26ab267c5d..d6a74017b4 100644
--- a/operations/helm/charts/grafana-agent/ci/clustering-values.yaml
+++ b/operations/helm/charts/grafana-agent/ci/clustering-values.yaml
@@ -1,5 +1,4 @@
agent:
-  mode: 'flow'
  clustering:
    enabled: true

diff --git a/operations/helm/charts/grafana-agent/ci/static-mode-values.yaml b/operations/helm/charts/grafana-agent/ci/static-mode-values.yaml
deleted file mode 100644
index 9d4dc793ec..0000000000
--- a/operations/helm/charts/grafana-agent/ci/static-mode-values.yaml
+++ /dev/null
@@ -1,2 +0,0 @@
-agent:
-  mode: static
diff --git a/operations/helm/charts/grafana-agent/config/example.yaml b/operations/helm/charts/grafana-agent/config/example.yaml
deleted file mode 100644
index 209ab93d73..0000000000
--- a/operations/helm/charts/grafana-agent/config/example.yaml
+++ /dev/null
@@ -1,3 +0,0 @@
-server:
-  log_level: info
-  log_format: logfmt
diff --git a/operations/helm/charts/grafana-agent/templates/NOTES.txt b/operations/helm/charts/grafana-agent/templates/NOTES.txt
index 41bde70281..2c3176b687 100644
--- a/operations/helm/charts/grafana-agent/templates/NOTES.txt
+++ b/operations/helm/charts/grafana-agent/templates/NOTES.txt
@@ 
-1 +1 @@ -Welcome to Grafana Agent! +Welcome to Grafana Alloy! diff --git a/operations/helm/charts/grafana-agent/templates/_config.tpl b/operations/helm/charts/grafana-agent/templates/_config.tpl index db2edaaf9a..722b955caa 100644 --- a/operations/helm/charts/grafana-agent/templates/_config.tpl +++ b/operations/helm/charts/grafana-agent/templates/_config.tpl @@ -2,11 +2,12 @@ Retrieve configMap name from the name of the chart or the ConfigMap the user specified. */}} -{{- define "grafana-agent.config-map.name" -}} -{{- if .Values.agent.configMap.name -}} -{{- .Values.agent.configMap.name }} +{{- define "alloy.config-map.name" -}} +{{- $values := (mustMergeOverwrite .Values.alloy (or .Values.agent dict)) -}} +{{- if $values.configMap.name -}} +{{- $values.configMap.name }} {{- else -}} -{{- include "grafana-agent.fullname" . }} +{{- include "alloy.fullname" . }} {{- end }} {{- end }} @@ -14,12 +15,11 @@ specified. The name of the config file is the default or the key the user specified in the ConfigMap. */}} -{{- define "grafana-agent.config-map.key" -}} -{{- if .Values.agent.configMap.key -}} -{{- .Values.agent.configMap.key }} -{{- else if eq .Values.agent.mode "flow" -}} +{{- define "alloy.config-map.key" -}} +{{- $values := (mustMergeOverwrite .Values.alloy (or .Values.agent dict)) -}} +{{- if $values.configMap.key -}} +{{- $values.configMap.key }} +{{- else -}} config.river -{{- else if eq .Values.agent.mode "static" -}} -config.yaml {{- end }} {{- end }} diff --git a/operations/helm/charts/grafana-agent/templates/_helpers.tpl b/operations/helm/charts/grafana-agent/templates/_helpers.tpl index 7b2e825e88..864c77856b 100644 --- a/operations/helm/charts/grafana-agent/templates/_helpers.tpl +++ b/operations/helm/charts/grafana-agent/templates/_helpers.tpl @@ -1,7 +1,7 @@ {{/* Expand the name of the chart. */}} -{{- define "grafana-agent.name" -}} +{{- define "alloy.name" -}} {{- default .Chart.Name .Values.nameOverride | trunc 63 | trimSuffix "-" }} {{- end }} @@ -10,7 +10,7 @@ Create a default fully qualified app name. We truncate at 63 chars because some Kubernetes name fields are limited to this (by the DNS naming spec). If release name contains chart name it will be used as a full name. */}} -{{- define "grafana-agent.fullname" -}} +{{- define "alloy.fullname" -}} {{- if .Values.fullnameOverride }} {{- .Values.fullnameOverride | trunc 63 | trimSuffix "-" }} {{- else }} @@ -26,7 +26,7 @@ If release name contains chart name it will be used as a full name. {{/* Create chart name and version as used by the chart label. */}} -{{- define "grafana-agent.chart" -}} +{{- define "alloy.chart" -}} {{- if index .Values "$chart_tests" }} {{- printf "%s" .Chart.Name | replace "+" "_" | trunc 63 | trimSuffix "-" }} {{- else }} @@ -37,7 +37,7 @@ Create chart name and version as used by the chart label. {{/* Allow the release namespace to be overridden for multi-namespace deployments in combined charts */}} -{{- define "grafana-agent.namespace" -}} +{{- define "alloy.namespace" -}} {{- if .Values.namespaceOverride }} {{- .Values.namespaceOverride }} {{- else }} @@ -48,17 +48,17 @@ Allow the release namespace to be overridden for multi-namespace deployments in {{/* Common labels */}} -{{- define "grafana-agent.labels" -}} -helm.sh/chart: {{ include "grafana-agent.chart" . }} -{{ include "grafana-agent.selectorLabels" . }} +{{- define "alloy.labels" -}} +helm.sh/chart: {{ include "alloy.chart" . }} +{{ include "alloy.selectorLabels" . 
}}
{{- if index .Values "$chart_tests" }}
app.kubernetes.io/version: "vX.Y.Z"
app.kubernetes.io/managed-by: {{ .Release.Service }}
{{- else }}
-{{/* substr trims delimeter prefix char from grafana-agent.imageId output
+{{/* substr trims delimiter prefix char from alloy.imageId output
e.g. ':' for tags and '@' for digests. For digests, we crop the string to a
7-char (short) sha. */}}
-app.kubernetes.io/version: {{ (include "grafana-agent.imageId" .) | trunc 15 | trimPrefix "@sha256" | trimPrefix ":" | quote }}
+app.kubernetes.io/version: {{ (include "alloy.imageId" .) | trunc 15 | trimPrefix "@sha256" | trimPrefix ":" | quote }}
app.kubernetes.io/managed-by: {{ .Release.Service }}
{{- end }}
{{- end }}
@@ -66,26 +66,26 @@ app.kubernetes.io/managed-by: {{ .Release.Service }}

{{/*
Selector labels
*/}}
-{{- define "grafana-agent.selectorLabels" -}}
-app.kubernetes.io/name: {{ include "grafana-agent.name" . }}
+{{- define "alloy.selectorLabels" -}}
+app.kubernetes.io/name: {{ include "alloy.name" . }}
app.kubernetes.io/instance: {{ .Release.Name }}
{{- end }}

{{/*
Create the name of the service account to use
*/}}
-{{- define "grafana-agent.serviceAccountName" -}}
+{{- define "alloy.serviceAccountName" -}}
{{- if .Values.serviceAccount.create }}
-{{- default (include "grafana-agent.fullname" .) .Values.serviceAccount.name }}
+{{- default (include "alloy.fullname" .) .Values.serviceAccount.name }}
{{- else }}
{{- default "default" .Values.serviceAccount.name }}
{{- end }}
{{- end }}

{{/*
-Calculate name of image ID to use for "grafana-agent".
+Calculate name of image ID to use for "alloy".
*/}}
-{{- define "grafana-agent.imageId" -}}
+{{- define "alloy.imageId" -}}
{{- if .Values.image.digest }}
{{- $digest := .Values.image.digest }}
{{- if not (hasPrefix "sha256:" $digest) }}
@@ -119,7 +119,7 @@ Calculate name of image ID to use for "config-reloader".
{{/*
Return the appropriate apiVersion for ingress.
*/}}
-{{- define "grafana-agent.ingress.apiVersion" -}}
+{{- define "alloy.ingress.apiVersion" -}}
{{- if and ($.Capabilities.APIVersions.Has "networking.k8s.io/v1") (semverCompare ">= 1.19-0" .Capabilities.KubeVersion.Version) }}
{{- print "networking.k8s.io/v1" }}
{{- else if $.Capabilities.APIVersions.Has "networking.k8s.io/v1beta1" }}
@@ -132,21 +132,19 @@ Return the appropriate apiVersion for ingress.
{{/*
Return if ingress is stable.
*/}}
-{{- define "grafana-agent.ingress.isStable" -}}
-{{- eq (include "grafana-agent.ingress.apiVersion" .) "networking.k8s.io/v1" }}
+{{- define "alloy.ingress.isStable" -}}
+{{- eq (include "alloy.ingress.apiVersion" .) "networking.k8s.io/v1" }}
{{- end }}

{{/*
Return if ingress supports ingressClassName.
*/}}
-{{- define "grafana-agent.ingress.supportsIngressClassName" -}}
-{{- or (eq (include "grafana-agent.ingress.isStable" .) "true") (and (eq (include "grafana-agent.ingress.apiVersion" .) "networking.k8s.io/v1beta1") (semverCompare ">= 1.18-0" .Capabilities.KubeVersion.Version)) }}
+{{- define "alloy.ingress.supportsIngressClassName" -}}
+{{- or (eq (include "alloy.ingress.isStable" .) "true") (and (eq (include "alloy.ingress.apiVersion" .) "networking.k8s.io/v1beta1") (semverCompare ">= 1.18-0" .Capabilities.KubeVersion.Version)) }}
{{- end }}

{{/*
Return if ingress supports pathType.
*/}}
-{{- define "grafana-agent.ingress.supportsPathType" -}}
-{{- or (eq (include "grafana-agent.ingress.isStable" .) "true") (and (eq (include "grafana-agent.ingress.apiVersion" .) 
"networking.k8s.io/v1beta1") (semverCompare ">= 1.18-0" .Capabilities.KubeVersion.Version)) }} +{{- define "alloy.ingress.supportsPathType" -}} +{{- or (eq (include "alloy.ingress.isStable" .) "true") (and (eq (include "alloy.ingress.apiVersion" .) "networking.k8s.io/v1beta1") (semverCompare ">= 1.18-0" .Capabilities.KubeVersion.Version)) }} {{- end }} - - diff --git a/operations/helm/charts/grafana-agent/templates/cluster_service.yaml b/operations/helm/charts/grafana-agent/templates/cluster_service.yaml index 7d271bce1e..1bc940b780 100644 --- a/operations/helm/charts/grafana-agent/templates/cluster_service.yaml +++ b/operations/helm/charts/grafana-agent/templates/cluster_service.yaml @@ -1,15 +1,16 @@ -{{- if and (eq .Values.agent.mode "flow") (.Values.agent.clustering.enabled) -}} +{{- $values := (mustMergeOverwrite .Values.alloy (or .Values.agent dict)) -}} +{{- if $values.clustering.enabled -}} apiVersion: v1 kind: Service metadata: - name: {{ include "grafana-agent.fullname" . }}-cluster + name: {{ include "alloy.fullname" . }}-cluster labels: - {{- include "grafana-agent.labels" . | nindent 4 }} + {{- include "alloy.labels" . | nindent 4 }} spec: type: ClusterIP clusterIP: 'None' selector: - {{- include "grafana-agent.selectorLabels" . | nindent 4 }} + {{- include "alloy.selectorLabels" . | nindent 4 }} ports: # Do not include the -metrics suffix in the port name, otherwise metrics # can be double-collected with the non-headless Service if it's also @@ -18,10 +19,10 @@ spec: # This service should only be used for clustering, and not metric # collection. - name: http - port: {{ .Values.agent.listenPort }} - targetPort: {{ .Values.agent.listenPort }} + port: {{ $values.listenPort }} + targetPort: {{ $values.listenPort }} protocol: "TCP" - {{- range $portMap := .Values.agent.extraPorts }} + {{- range $portMap := $values.extraPorts }} - name: {{ $portMap.name }} port: {{ $portMap.port }} targetPort: {{ $portMap.targetPort }} diff --git a/operations/helm/charts/grafana-agent/templates/configmap.yaml b/operations/helm/charts/grafana-agent/templates/configmap.yaml index 299bd9a7d2..26f798d49d 100644 --- a/operations/helm/charts/grafana-agent/templates/configmap.yaml +++ b/operations/helm/charts/grafana-agent/templates/configmap.yaml @@ -1,22 +1,15 @@ -{{- if .Values.agent.configMap.create }} +{{- $values := (mustMergeOverwrite .Values.alloy (or .Values.agent dict)) -}} +{{- if $values.configMap.create }} apiVersion: v1 kind: ConfigMap metadata: - name: {{ include "grafana-agent.fullname" . }} + name: {{ include "alloy.fullname" . }} labels: - {{- include "grafana-agent.labels" . | nindent 4 }} + {{- include "alloy.labels" . | nindent 4 }} data: - {{- if eq .Values.agent.mode "flow" }} - {{- if .Values.agent.configMap.content }} - config.river: |- {{- (tpl .Values.agent.configMap.content .) | nindent 4 }} + {{- if $values.configMap.content }} + config.river: |- {{- (tpl $values.configMap.content .) | nindent 4 }} {{- else }} config.river: |- {{- .Files.Get "config/example.river" | trim | nindent 4 }} {{- end }} - {{- else if eq .Values.agent.mode "static" }} - {{- if .Values.agent.configMap.content }} - config.yaml: |- {{- (tpl .Values.agent.configMap.content .) 
diff --git a/operations/helm/charts/grafana-agent/templates/configmap.yaml b/operations/helm/charts/grafana-agent/templates/configmap.yaml
index 299bd9a7d2..26f798d49d 100644
--- a/operations/helm/charts/grafana-agent/templates/configmap.yaml
+++ b/operations/helm/charts/grafana-agent/templates/configmap.yaml
@@ -1,22 +1,15 @@
-{{- if .Values.agent.configMap.create }}
+{{- $values := (mustMergeOverwrite .Values.alloy (or .Values.agent dict)) -}}
+{{- if $values.configMap.create }}
 apiVersion: v1
 kind: ConfigMap
 metadata:
-  name: {{ include "grafana-agent.fullname" . }}
+  name: {{ include "alloy.fullname" . }}
   labels:
-    {{- include "grafana-agent.labels" . | nindent 4 }}
+    {{- include "alloy.labels" . | nindent 4 }}
 data:
-  {{- if eq .Values.agent.mode "flow" }}
-  {{- if .Values.agent.configMap.content }}
-  config.river: |- {{- (tpl .Values.agent.configMap.content .) | nindent 4 }}
+  {{- if $values.configMap.content }}
+  config.river: |- {{- (tpl $values.configMap.content .) | nindent 4 }}
   {{- else }}
   config.river: |- {{- .Files.Get "config/example.river" | trim | nindent 4 }}
   {{- end }}
-  {{- else if eq .Values.agent.mode "static" }}
-  {{- if .Values.agent.configMap.content }}
-  config.yaml: |- {{- (tpl .Values.agent.configMap.content .) | nindent 4 }}
-  {{- else }}
-  config.yaml: |- {{- .Files.Get "config/example.yaml" | trim | nindent 4 }}
-  {{- end }}
-  {{- end }}
 {{- end }}
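Because the template pipes `configMap.content` through `tpl`, an inline River config can interpolate chart values at render time. A sketch, assuming a user-defined `externalServices.prometheusUrl` value (not a key this chart ships):

    alloy:
      configMap:
        create: true
        content: |
          prometheus.remote_write "default" {
            endpoint {
              url = {{ .Values.externalServices.prometheusUrl | quote }}
            }
          }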
diff --git a/operations/helm/charts/grafana-agent/templates/containers/_agent.yaml b/operations/helm/charts/grafana-agent/templates/containers/_agent.yaml
index 0066f198ca..496a167ee0 100644
--- a/operations/helm/charts/grafana-agent/templates/containers/_agent.yaml
+++ b/operations/helm/charts/grafana-agent/templates/containers/_agent.yaml
@@ -1,49 +1,42 @@
-{{- define "grafana-agent.container" -}}
-- name: grafana-agent
-  image: {{ .Values.global.image.registry | default .Values.image.registry }}/{{ .Values.image.repository }}{{ include "grafana-agent.imageId" . }}
+{{- define "alloy.container" -}}
+{{- $values := (mustMergeOverwrite .Values.alloy (or .Values.agent dict)) -}}
+- name: alloy
+  image: {{ .Values.global.image.registry | default .Values.image.registry }}/{{ .Values.image.repository }}{{ include "alloy.imageId" . }}
   imagePullPolicy: {{ .Values.image.pullPolicy }}
   args:
-    {{- if eq .Values.agent.mode "flow"}}
     - run
-    - /etc/agent/{{ include "grafana-agent.config-map.key" . }}
-    - --storage.path={{ .Values.agent.storagePath }}
-    - --server.http.listen-addr={{ .Values.agent.listenAddr }}:{{ .Values.agent.listenPort }}
-    - --server.http.ui-path-prefix={{ .Values.agent.uiPathPrefix }}
-    {{- if not .Values.agent.enableReporting }}
+    - /etc/alloy/{{ include "alloy.config-map.key" . }}
+    - --storage.path={{ $values.storagePath }}
+    - --server.http.listen-addr={{ $values.listenAddr }}:{{ $values.listenPort }}
+    - --server.http.ui-path-prefix={{ $values.uiPathPrefix }}
+    {{- if not $values.enableReporting }}
     - --disable-reporting
     {{- end}}
-    {{- if .Values.agent.clustering.enabled }}
+    {{- if $values.clustering.enabled }}
     - --cluster.enabled=true
-    - --cluster.join-addresses={{ include "grafana-agent.fullname" . }}-cluster
+    - --cluster.join-addresses={{ include "alloy.fullname" . }}-cluster
     {{- end}}
-    {{- end}}
-    {{- if eq .Values.agent.mode "static"}}
-    - -config.file=/etc/agent/{{ include "grafana-agent.config-map.key" . }}
-    - -server.http.address={{ .Values.agent.listenAddr }}:{{ .Values.agent.listenPort }}
-    {{- end}}
-    {{- range .Values.agent.extraArgs }}
+    {{- range $values.extraArgs }}
     - {{ . }}
     {{- end}}
   env:
-    - name: AGENT_MODE
-      value: {{ .Values.agent.mode }}
     - name: AGENT_DEPLOY_MODE
       value: "helm"
    - name: HOSTNAME
      valueFrom:
        fieldRef:
          fieldPath: spec.nodeName
-    {{- range .Values.agent.extraEnv }}
+    {{- range $values.extraEnv }}
    - {{- toYaml . | nindent 6 }}
    {{- end }}
-  {{- if .Values.agent.envFrom }}
+  {{- if $values.envFrom }}
   envFrom:
-    {{- toYaml .Values.agent.envFrom | nindent 4 }}
+    {{- toYaml $values.envFrom | nindent 4 }}
   {{- end }}
   ports:
-    - containerPort: {{ .Values.agent.listenPort }}
+    - containerPort: {{ $values.listenPort }}
      name: http-metrics
-    {{- range $portMap := .Values.agent.extraPorts }}
+    {{- range $portMap := $values.extraPorts }}
    - containerPort: {{ $portMap.targetPort }}
      {{- if $portMap.hostPort }}
      hostPort: {{ $portMap.hostPort }}
@@ -54,32 +47,32 @@
   readinessProbe:
     httpGet:
       path: /-/ready
-      port: {{ .Values.agent.listenPort }}
-      scheme: {{ .Values.agent.listenScheme }}
+      port: {{ $values.listenPort }}
+      scheme: {{ $values.listenScheme }}
     initialDelaySeconds: 10
     timeoutSeconds: 1
-  {{- with .Values.agent.resources }}
+  {{- with $values.resources }}
   resources:
     {{- toYaml . | nindent 4 }}
   {{- end }}
-  {{- with .Values.agent.securityContext }}
+  {{- with $values.securityContext }}
   securityContext:
     {{- toYaml . | nindent 4 }}
   {{- end }}
   volumeMounts:
     - name: config
-      mountPath: /etc/agent
-    {{- if .Values.agent.mounts.varlog }}
+      mountPath: /etc/alloy
+    {{- if $values.mounts.varlog }}
    - name: varlog
      mountPath: /var/log
      readOnly: true
    {{- end }}
-    {{- if .Values.agent.mounts.dockercontainers }}
+    {{- if $values.mounts.dockercontainers }}
    - name: dockercontainers
      mountPath: /var/lib/docker/containers
      readOnly: true
    {{- end }}
-    {{- range .Values.agent.mounts.extra }}
+    {{- range $values.mounts.extra }}
    - {{- toYaml . | nindent 6 }}
    {{- end }}
 {{- end }}
diff --git a/operations/helm/charts/grafana-agent/templates/containers/_watch.yaml b/operations/helm/charts/grafana-agent/templates/containers/_watch.yaml
index 250e4b4f0d..4a0a9008f5 100644
--- a/operations/helm/charts/grafana-agent/templates/containers/_watch.yaml
+++ b/operations/helm/charts/grafana-agent/templates/containers/_watch.yaml
@@ -1,4 +1,5 @@
-{{- define "grafana-agent.watch-container" -}}
+{{- define "alloy.watch-container" -}}
+{{- $values := (mustMergeOverwrite .Values.alloy (or .Values.agent dict)) -}}
 {{- if .Values.configReloader.enabled -}}
 - name: config-reloader
   image: {{ .Values.global.image.registry | default .Values.configReloader.image.registry }}/{{ .Values.configReloader.image.repository }}{{ include "config-reloader.imageId" . }}
@@ -7,12 +8,12 @@
     {{- toYaml .Values.configReloader.customArgs | nindent 4 }}
   {{- else }}
   args:
-    - --volume-dir=/etc/agent
-    - --webhook-url=http://localhost:{{ .Values.agent.listenPort }}/-/reload
+    - --volume-dir=/etc/alloy
+    - --webhook-url=http://localhost:{{ $values.listenPort }}/-/reload
   {{- end }}
   volumeMounts:
     - name: config
-      mountPath: /etc/agent
+      mountPath: /etc/alloy
   {{- with .Values.configReloader.resources }}
   resources:
     {{- toYaml . | nindent 4 }}
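`$values.extraPorts` threads through the container spec above as well as both Service templates in this patch. A minimal sketch, mirroring the Faro example that values.yaml ships commented out:

    alloy:
      extraPorts:
        - name: faro
          port: 12347
          targetPort: 12347
          protocol: TCP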
diff --git a/operations/helm/charts/grafana-agent/templates/controllers/_pod.yaml b/operations/helm/charts/grafana-agent/templates/controllers/_pod.yaml
index 235bc279ec..94625fca2b 100644
--- a/operations/helm/charts/grafana-agent/templates/controllers/_pod.yaml
+++ b/operations/helm/charts/grafana-agent/templates/controllers/_pod.yaml
@@ -1,12 +1,13 @@
-{{- define "grafana-agent.pod-template" -}}
+{{- define "alloy.pod-template" -}}
+{{- $values := (mustMergeOverwrite .Values.alloy (or .Values.agent dict)) -}}
 metadata:
   annotations:
-    kubectl.kubernetes.io/default-container: grafana-agent
+    kubectl.kubernetes.io/default-container: alloy
     {{- with .Values.controller.podAnnotations }}
     {{- toYaml . | nindent 4 }}
     {{- end }}
   labels:
-    {{- include "grafana-agent.selectorLabels" . | nindent 4 }}
+    {{- include "alloy.selectorLabels" . | nindent 4 }}
     {{- with .Values.controller.podLabels }}
     {{- toYaml . | nindent 4 }}
     {{- end }}
@@ -15,7 +16,7 @@ spec:
   securityContext:
     {{- toYaml . | nindent 4 }}
   {{- end }}
-  serviceAccountName: {{ include "grafana-agent.serviceAccountName" . }}
+  serviceAccountName: {{ include "alloy.serviceAccountName" . }}
   {{- if or .Values.global.image.pullSecrets .Values.image.pullSecrets }}
   imagePullSecrets:
     {{- if .Values.global.image.pullSecrets }}
@@ -31,8 +32,8 @@ spec:
     {{- end }}
   {{- end }}
   containers:
-    {{- include "grafana-agent.container" . | nindent 4 }}
-    {{- include "grafana-agent.watch-container" . | nindent 4 }}
+    {{- include "alloy.container" . | nindent 4 }}
+    {{- include "alloy.watch-container" . | nindent 4 }}
     {{- with .Values.controller.extraContainers }}
     {{- toYaml . | nindent 4 }}
     {{- end}}
@@ -65,13 +66,13 @@ spec:
   volumes:
     - name: config
       configMap:
-        name: {{ include "grafana-agent.config-map.name" . }}
-  {{- if .Values.agent.mounts.varlog }}
+        name: {{ include "alloy.config-map.name" . }}
+  {{- if $values.mounts.varlog }}
    - name: varlog
      hostPath:
        path: /var/log
    {{- end }}
-  {{- if .Values.agent.mounts.dockercontainers }}
+  {{- if $values.mounts.dockercontainers }}
    - name: dockercontainers
      hostPath:
        path: /var/lib/docker/containers
diff --git a/operations/helm/charts/grafana-agent/templates/controllers/daemonset.yaml b/operations/helm/charts/grafana-agent/templates/controllers/daemonset.yaml
index 4eb29780a2..2f80948c21 100644
--- a/operations/helm/charts/grafana-agent/templates/controllers/daemonset.yaml
+++ b/operations/helm/charts/grafana-agent/templates/controllers/daemonset.yaml
@@ -2,9 +2,9 @@
 apiVersion: apps/v1
 kind: DaemonSet
 metadata:
-  name: {{ include "grafana-agent.fullname" . }}
+  name: {{ include "alloy.fullname" . }}
   labels:
-    {{- include "grafana-agent.labels" . | nindent 4 }}
+    {{- include "alloy.labels" . | nindent 4 }}
   {{- with .Values.controller.extraAnnotations }}
   annotations:
     {{- toYaml . | nindent 4 }}
@@ -15,9 +15,9 @@ spec:
   {{- end }}
   selector:
     matchLabels:
-      {{- include "grafana-agent.selectorLabels" . | nindent 6 }}
+      {{- include "alloy.selectorLabels" . | nindent 6 }}
   template:
-    {{- include "grafana-agent.pod-template" . | nindent 4 }}
+    {{- include "alloy.pod-template" . | nindent 4 }}
   {{- with .Values.controller.updateStrategy }}
   updateStrategy:
     {{- toYaml . | nindent 4 }}
diff --git a/operations/helm/charts/grafana-agent/templates/controllers/deployment.yaml b/operations/helm/charts/grafana-agent/templates/controllers/deployment.yaml
index 7cc900bc1c..8e748cac59 100644
--- a/operations/helm/charts/grafana-agent/templates/controllers/deployment.yaml
+++ b/operations/helm/charts/grafana-agent/templates/controllers/deployment.yaml
@@ -2,9 +2,9 @@
 apiVersion: apps/v1
 kind: Deployment
 metadata:
-  name: {{ include "grafana-agent.fullname" . }}
+  name: {{ include "alloy.fullname" . }}
   labels:
-    {{- include "grafana-agent.labels" . | nindent 4 }}
+    {{- include "alloy.labels" . | nindent 4 }}
   {{- with .Values.controller.extraAnnotations }}
   annotations:
     {{- toYaml . | nindent 4 }}
@@ -18,9 +18,9 @@ spec:
   {{- end }}
   selector:
     matchLabels:
-      {{- include "grafana-agent.selectorLabels" . | nindent 6 }}
+      {{- include "alloy.selectorLabels" . | nindent 6 }}
   template:
-    {{- include "grafana-agent.pod-template" . | nindent 4 }}
+    {{- include "alloy.pod-template" . | nindent 4 }}
   {{- with .Values.controller.updateStrategy }}
   strategy:
     {{- toYaml . | nindent 4 }}
diff --git a/operations/helm/charts/grafana-agent/templates/controllers/statefulset.yaml b/operations/helm/charts/grafana-agent/templates/controllers/statefulset.yaml
index a4965d1420..2c4b1b6f01 100644
--- a/operations/helm/charts/grafana-agent/templates/controllers/statefulset.yaml
+++ b/operations/helm/charts/grafana-agent/templates/controllers/statefulset.yaml
@@ -5,9 +5,9 @@
 apiVersion: apps/v1
 kind: StatefulSet
 metadata:
-  name: {{ include "grafana-agent.fullname" . }}
+  name: {{ include "alloy.fullname" . }}
   labels:
-    {{- include "grafana-agent.labels" . | nindent 4 }}
+    {{- include "alloy.labels" . | nindent 4 }}
   {{- with .Values.controller.extraAnnotations }}
   annotations:
     {{- toYaml . | nindent 4 }}
@@ -22,12 +22,12 @@ spec:
   {{- if ge (int .Capabilities.KubeVersion.Minor) 22 }}
   minReadySeconds: 10
   {{- end }}
-  serviceName: {{ include "grafana-agent.fullname" . }}
+  serviceName: {{ include "alloy.fullname" . }}
   selector:
     matchLabels:
-      {{- include "grafana-agent.selectorLabels" . | nindent 6 }}
+      {{- include "alloy.selectorLabels" . | nindent 6 }}
   template:
-    {{- include "grafana-agent.pod-template" . | nindent 4 }}
+    {{- include "alloy.pod-template" . | nindent 4 }}
   {{- with .Values.controller.updateStrategy }}
   updateStrategy:
     {{- toYaml . | nindent 4 }}
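A statefulset controller pairs naturally with the clustering flags added in containers/_agent.yaml, which is also the combination exercised by the chart's clustering golden test further below. A minimal sketch (the `controller.replicas` field is assumed from the upstream chart and ignored for daemonsets; verify against your chart version):

    controller:
      type: statefulset
      replicas: 3        # assumed field name; not used when type is 'daemonset'
    alloy:
      clustering:
        enabled: true    # renders --cluster.enabled=true and --cluster.join-addresses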
diff --git a/operations/helm/charts/grafana-agent/templates/hpa.yaml b/operations/helm/charts/grafana-agent/templates/hpa.yaml
index 829fbcc9b2..c3a532f327 100644
--- a/operations/helm/charts/grafana-agent/templates/hpa.yaml
+++ b/operations/helm/charts/grafana-agent/templates/hpa.yaml
@@ -1,27 +1,28 @@
+{{- $values := (mustMergeOverwrite .Values.alloy (or .Values.agent dict)) -}}
 {{- if and (or (eq .Values.controller.type "deployment") (eq .Values.controller.type "statefulset" )) .Values.controller.autoscaling.enabled }}
-{{- if not (empty .Values.controller.autoscaling.targetMemoryUtilizationPercentage)}}
-  {{- $_ := .Values.agent.resources.requests | required ".Values.agent.resources.requests is required when using autoscaling." -}}
-  {{- $_ := .Values.agent.resources.requests.memory | required ".Values.agent.resources.requests.memory is required when using autoscaling based on memory utilization." -}}
+{{- if not (empty .Values.controller.autoscaling.targetMemoryUtilizationPercentage)}}
+  {{- $_ := $values.resources.requests | required ".Values.alloy.resources.requests is required when using autoscaling." -}}
+  {{- $_ := $values.resources.requests.memory | required ".Values.alloy.resources.requests.memory is required when using autoscaling based on memory utilization." -}}
   {{- $_ := .Values.configReloader.resources.requests | required ".Values.configReloader.resources.requests is required when using autoscaling." -}}
   {{- $_ := .Values.configReloader.resources.requests.memory | required ".Values.configReloader.resources.requests.memory is required when using autoscaling based on memory utilization." -}}
 {{- end}}
-{{- if not (empty .Values.controller.autoscaling.targetCPUUtilizationPercentage)}}
-  {{- $_ := .Values.agent.resources.requests | required ".Values.agent.resources.requests is required when using autoscaling." -}}
-  {{- $_ := .Values.agent.resources.requests.cpu | required ".Values.agent.resources.requests.cpu is required when using autoscaling based on cpu utilization." -}}
+{{- if not (empty .Values.controller.autoscaling.targetCPUUtilizationPercentage)}}
+  {{- $_ := $values.resources.requests | required ".Values.alloy.resources.requests is required when using autoscaling." -}}
+  {{- $_ := $values.resources.requests.cpu | required ".Values.alloy.resources.requests.cpu is required when using autoscaling based on cpu utilization." -}}
   {{- $_ := .Values.configReloader.resources.requests | required ".Values.configReloader.resources.requests is required when using autoscaling." -}}
   {{- $_ := .Values.configReloader.resources.requests.cpu | required ".Values.configReloader.resources.requests.cpu is required when using autoscaling based on cpu utilization." -}}
 {{- end}}
 apiVersion: autoscaling/v2
 kind: HorizontalPodAutoscaler
 metadata:
-  name: {{ include "grafana-agent.fullname" . }}
+  name: {{ include "alloy.fullname" . }}
   labels:
-    {{- include "grafana-agent.labels" . | nindent 4 }}
+    {{- include "alloy.labels" . | nindent 4 }}
 spec:
   scaleTargetRef:
     apiVersion: apps/v1
     kind: {{ .Values.controller.type }}
-    name: {{ include "grafana-agent.fullname" . }}
+    name: {{ include "alloy.fullname" . }}
   {{- with .Values.controller.autoscaling }}
   minReplicas: {{ .minReplicas }}
   maxReplicas: {{ .maxReplicas }}
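Given the `required` guards above, enabling autoscaling without explicit resource requests fails at render time. A minimal sketch that satisfies the CPU checks (request sizes illustrative):

    controller:
      type: deployment
      autoscaling:
        enabled: true
        minReplicas: 1
        maxReplicas: 5
        targetCPUUtilizationPercentage: 80
    alloy:
      resources:
        requests:
          cpu: 100m
    configReloader:
      resources:
        requests:
          cpu: 1m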
diff --git a/operations/helm/charts/grafana-agent/templates/ingress.yaml b/operations/helm/charts/grafana-agent/templates/ingress.yaml
index 1d2dcae4e5..f8ee0da81b 100644
--- a/operations/helm/charts/grafana-agent/templates/ingress.yaml
+++ b/operations/helm/charts/grafana-agent/templates/ingress.yaml
@@ -1,19 +1,19 @@
 {{- if .Values.ingress.enabled -}}
-{{- $ingressApiIsStable := eq (include "grafana-agent.ingress.isStable" .) "true" -}}
-{{- $ingressSupportsIngressClassName := eq (include "grafana-agent.ingress.supportsIngressClassName" .) "true" -}}
-{{- $ingressSupportsPathType := eq (include "grafana-agent.ingress.supportsPathType" .) "true" -}}
-{{- $fullName := include "grafana-agent.fullname" . -}}
+{{- $ingressApiIsStable := eq (include "alloy.ingress.isStable" .) "true" -}}
+{{- $ingressSupportsIngressClassName := eq (include "alloy.ingress.supportsIngressClassName" .) "true" -}}
+{{- $ingressSupportsPathType := eq (include "alloy.ingress.supportsPathType" .) "true" -}}
+{{- $fullName := include "alloy.fullname" . -}}
 {{- $servicePort := .Values.ingress.faroPort -}}
 {{- $ingressPath := .Values.ingress.path -}}
 {{- $ingressPathType := .Values.ingress.pathType -}}
 {{- $extraPaths := .Values.ingress.extraPaths -}}
-apiVersion: {{ include "grafana-agent.ingress.apiVersion" . }}
+apiVersion: {{ include "alloy.ingress.apiVersion" . }}
 kind: Ingress
 metadata:
   name: {{ $fullName }}
-  namespace: {{ include "grafana-agent.namespace" . }}
+  namespace: {{ include "alloy.namespace" . }}
   labels:
-    {{- include "grafana-agent.labels" . | nindent 4 }}
+    {{- include "alloy.labels" . | nindent 4 }}
   {{- with .Values.ingress.labels }}
   {{- toYaml . | nindent 4 }}
   {{- end }}
diff --git a/operations/helm/charts/grafana-agent/templates/rbac.yaml b/operations/helm/charts/grafana-agent/templates/rbac.yaml
index af0d30665a..d75a568012 100644
--- a/operations/helm/charts/grafana-agent/templates/rbac.yaml
+++ b/operations/helm/charts/grafana-agent/templates/rbac.yaml
@@ -2,9 +2,9 @@
 apiVersion: rbac.authorization.k8s.io/v1
 kind: ClusterRole
 metadata:
-  name: {{ include "grafana-agent.fullname" . }}
+  name: {{ include "alloy.fullname" . }}
   labels:
-    {{- include "grafana-agent.labels" . | nindent 4 }}
+    {{- include "alloy.labels" . | nindent 4 }}
 rules:
   # Rules which allow discovery.kubernetes to function.
   - apiGroups:
@@ -96,15 +96,15 @@ rules:
 apiVersion: rbac.authorization.k8s.io/v1
 kind: ClusterRoleBinding
 metadata:
-  name: {{ include "grafana-agent.fullname" . }}
+  name: {{ include "alloy.fullname" . }}
   labels:
-    {{- include "grafana-agent.labels" . | nindent 4 }}
+    {{- include "alloy.labels" . | nindent 4 }}
 roleRef:
   apiGroup: rbac.authorization.k8s.io
   kind: ClusterRole
-  name: {{ include "grafana-agent.fullname" . }}
+  name: {{ include "alloy.fullname" . }}
 subjects:
   - kind: ServiceAccount
-    name: {{ include "grafana-agent.serviceAccountName" . }}
+    name: {{ include "alloy.serviceAccountName" . }}
     namespace: {{ .Release.Namespace }}
 {{- end }}
diff --git a/operations/helm/charts/grafana-agent/templates/service.yaml b/operations/helm/charts/grafana-agent/templates/service.yaml
index 34427f822a..12f1b98889 100644
--- a/operations/helm/charts/grafana-agent/templates/service.yaml
+++ b/operations/helm/charts/grafana-agent/templates/service.yaml
@@ -1,10 +1,11 @@
+{{- $values := (mustMergeOverwrite .Values.alloy (or .Values.agent dict)) -}}
 {{- if .Values.service.enabled -}}
 apiVersion: v1
 kind: Service
 metadata:
-  name: {{ include "grafana-agent.fullname" . }}
+  name: {{ include "alloy.fullname" . }}
   labels:
-    {{- include "grafana-agent.labels" . | nindent 4 }}
+    {{- include "alloy.labels" . | nindent 4 }}
   {{- with .Values.service.annotations }}
   annotations:
     {{- toYaml . | nindent 4 }}
@@ -15,17 +16,17 @@ spec:
   clusterIP: {{ .Values.service.clusterIP }}
   {{- end }}
   selector:
-    {{- include "grafana-agent.selectorLabels" . | nindent 4 }}
+    {{- include "alloy.selectorLabels" . | nindent 4 }}
   internalTrafficPolicy: {{.Values.service.internalTrafficPolicy}}
   ports:
     - name: http-metrics
       {{- if eq .Values.service.type "NodePort" }}
       nodePort: {{ .Values.service.nodePort }}
       {{- end }}
-      port: {{ .Values.agent.listenPort }}
-      targetPort: {{ .Values.agent.listenPort }}
+      port: {{ $values.listenPort }}
+      targetPort: {{ $values.listenPort }}
       protocol: "TCP"
-{{- range $portMap := .Values.agent.extraPorts }}
+{{- range $portMap := $values.extraPorts }}
    - name: {{ $portMap.name }}
      port: {{ $portMap.port }}
      targetPort: {{ $portMap.targetPort }}
diff --git a/operations/helm/charts/grafana-agent/templates/serviceaccount.yaml b/operations/helm/charts/grafana-agent/templates/serviceaccount.yaml
index f2d2c90c6e..fc02f365bb 100644
--- a/operations/helm/charts/grafana-agent/templates/serviceaccount.yaml
+++ b/operations/helm/charts/grafana-agent/templates/serviceaccount.yaml
@@ -2,10 +2,10 @@
 apiVersion: v1
 kind: ServiceAccount
 metadata:
-  name: {{ include "grafana-agent.serviceAccountName" . }}
+  name: {{ include "alloy.serviceAccountName" . }}
   namespace: {{ .Release.Namespace }}
   labels:
-    {{- include "grafana-agent.labels" . | nindent 4 }}
+    {{- include "alloy.labels" . | nindent 4 }}
   {{- with .Values.serviceAccount.additionalLabels }}
   {{- toYaml . | nindent 4 }}
   {{- end }}
diff --git a/operations/helm/charts/grafana-agent/templates/servicemonitor.yaml b/operations/helm/charts/grafana-agent/templates/servicemonitor.yaml
index fff347286c..15cbf7ff2e 100644
--- a/operations/helm/charts/grafana-agent/templates/servicemonitor.yaml
+++ b/operations/helm/charts/grafana-agent/templates/servicemonitor.yaml
@@ -1,17 +1,18 @@
+{{- $values := (mustMergeOverwrite .Values.alloy (or .Values.agent dict)) -}}
 {{- if and .Values.service.enabled .Values.serviceMonitor.enabled -}}
 apiVersion: monitoring.coreos.com/v1
 kind: ServiceMonitor
 metadata:
-  name: {{ include "grafana-agent.fullname" . }}
+  name: {{ include "alloy.fullname" . }}
   labels:
-    {{- include "grafana-agent.labels" . | nindent 4 }}
+    {{- include "alloy.labels" . | nindent 4 }}
   {{- with .Values.serviceMonitor.additionalLabels }}
   {{- toYaml . | nindent 4 }}
   {{- end }}
 spec:
   endpoints:
   - port: http-metrics
-    scheme: {{ .Values.agent.listenScheme | lower }}
+    scheme: {{ $values.listenScheme | lower }}
     honorLabels: true
     {{- if .Values.serviceMonitor.interval }}
     interval: {{ .Values.serviceMonitor.interval }}
@@ -30,5 +31,5 @@ spec:
   {{- end }}
   selector:
     matchLabels:
-      {{- include "grafana-agent.selectorLabels" . | nindent 6 }}
+      {{- include "alloy.selectorLabels" . | nindent 6 }}
 {{- end }}
diff --git a/operations/helm/charts/grafana-agent/values.yaml b/operations/helm/charts/grafana-agent/values.yaml
index 588274fa05..bb3c3529dd 100644
--- a/operations/helm/charts/grafana-agent/values.yaml
+++ b/operations/helm/charts/grafana-agent/values.yaml
@@ -15,17 +15,16 @@ global:
     # -- Optional set of global image pull secrets.
     pullSecrets: []

-  # -- Security context to apply to the Grafana Agent pod.
+  # -- Security context to apply to the Grafana Alloy pod.
   podSecurityContext: {}

 crds:
   # -- Whether to install CRDs for monitoring.
   create: true

-# Various agent settings.
-agent:
-  # -- Mode to run Grafana Agent in. Can be "flow" or "static".
-  mode: 'flow'
+## Various Alloy settings. For backwards compatibility with the grafana-agent
+## chart, this field may also be called "agent".
+alloy:
   configMap:
     # -- Create a new ConfigMap for the config file.
     create: true
@@ -38,13 +37,12 @@ agent:
     key: null

   clustering:
-    # -- Deploy agents in a cluster to allow for load distribution. Only
-    # applies when agent.mode=flow.
+    # -- Deploy Alloy in a cluster to allow for load distribution.
     enabled: false

-  # -- Path to where Grafana Agent stores data (for example, the Write-Ahead Log).
+  # -- Path to where Grafana Alloy stores data (for example, the Write-Ahead Log).
   # By default, data is lost between reboots.
-  storagePath: /tmp/agent
+  storagePath: /tmp/alloy

   # -- Address to listen for traffic on. 0.0.0.0 exposes the UI to other
   # containers.
@@ -60,19 +58,19 @@ agent:
   uiPathPrefix: /

   # -- Enables sending Grafana Labs anonymous usage stats to help improve Grafana
-  # Agent.
+  # Alloy.
   enableReporting: true

-  # -- Extra environment variables to pass to the agent container.
+  # -- Extra environment variables to pass to the Alloy container.
   extraEnv: []

   # -- Maps all the keys on a ConfigMap or Secret as environment variables. https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.24/#envfromsource-v1-core
   envFrom: []

-  # -- Extra args to pass to `agent run`: https://grafana.com/docs/agent/latest/flow/reference/cli/run/
+  # -- Extra args to pass to `alloy run`: https://grafana.com/docs/alloy/latest/reference/cli/run/
   extraArgs: []

-  # -- Extra ports to expose on the Agent
+  # -- Extra ports to expose on the Alloy container.
   extraPorts: []
   # - name: "faro"
   #   port: 12347
@@ -86,37 +84,37 @@ agent:
     # collection.
     dockercontainers: false

-    # -- Extra volume mounts to add into the Grafana Agent container. Does not
+    # -- Extra volume mounts to add into the Grafana Alloy container. Does not
     # affect the watch container.
     extra: []

-  # -- Security context to apply to the Grafana Agent container.
+  # -- Security context to apply to the Grafana Alloy container.
   securityContext: {}

-  # -- Resource requests and limits to apply to the Grafana Agent container.
+  # -- Resource requests and limits to apply to the Grafana Alloy container.
   resources: {}

 image:
-  # -- Grafana Agent image registry (defaults to docker.io)
+  # -- Grafana Alloy image registry (defaults to docker.io)
   registry: "docker.io"
-  # -- Grafana Agent image repository.
-  repository: grafana/agent
-  # -- (string) Grafana Agent image tag. When empty, the Chart's appVersion is
+  # -- Grafana Alloy image repository.
+  repository: grafana/alloy
+  # -- (string) Grafana Alloy image tag. When empty, the Chart's appVersion is
   # used.
   tag: null
-  # -- Grafana Agent image's SHA256 digest (either in format "sha256:XYZ" or "XYZ"). When set, will override `image.tag`.
+  # -- Grafana Alloy image's SHA256 digest (either in format "sha256:XYZ" or "XYZ"). When set, will override `image.tag`.
   digest: null
-  # -- Grafana Agent image pull policy.
+  # -- Grafana Alloy image pull policy.
   pullPolicy: IfNotPresent
   # -- Optional set of image pull secrets.
   pullSecrets: []

 rbac:
-  # -- Whether to create RBAC resources for the agent.
+  # -- Whether to create RBAC resources for Alloy.
   create: true

 serviceAccount:
-  # -- Whether to create a service account for the Grafana Agent deployment.
+  # -- Whether to create a service account for the Grafana Alloy deployment.
   create: true
   # -- Additional labels to add to the created service account.
   additionalLabels: {}
@@ -128,7 +126,7 @@ serviceAccount:

 # Options for the extra controller used for config reloading.
 configReloader:
-  # -- Enables automatically reloading when the agent config changes.
+  # -- Enables automatically reloading when the Alloy config changes.
   enabled: true
   image:
     # -- Config reloader image registry (defaults to docker.io)
@@ -150,7 +148,7 @@ configReloader:
   securityContext: {}

 controller:
-  # -- Type of controller to use for deploying Grafana Agent in the cluster.
+  # -- Type of controller to use for deploying Grafana Alloy in the cluster.
   # Must be one of 'daemonset', 'deployment', or 'statefulset'.
   type: 'daemonset'

@@ -176,16 +174,16 @@ controller:
   # -- Update strategy for updating deployed Pods.
   updateStrategy: {}

-  # -- nodeSelector to apply to Grafana Agent pods.
+  # -- nodeSelector to apply to Grafana Alloy pods.
   nodeSelector: {}

-  # -- Tolerations to apply to Grafana Agent pods.
+  # -- Tolerations to apply to Grafana Alloy pods.
   tolerations: []

-  # -- Topology Spread Constraints to apply to Grafana Agent pods.
+  # -- Topology Spread Constraints to apply to Grafana Alloy pods.
   topologySpreadConstraints: []

-  # -- priorityClassName to apply to Grafana Agent pods.
+  # -- priorityClassName to apply to Grafana Alloy pods.
   priorityClassName: ''

   # -- Extra pod annotations to add.
@@ -235,7 +233,7 @@ controller:
   affinity: {}

   volumes:
-    # -- Extra volumes to add to the Grafana Agent pod.
+    # -- Extra volumes to add to the Grafana Alloy pod.
     extra: []

   # -- volumeClaimTemplates to add when controller.type is 'statefulset'.
@@ -246,7 +244,7 @@ controller:
   ##
   initContainers: []

-  # -- Additional containers to run alongside the agent container and initContainers.
+  # -- Additional containers to run alongside the Alloy container and initContainers.
extraContainers: [] service: @@ -289,7 +287,7 @@ serviceMonitor: # replacement: $1 # action: replace ingress: - # -- Enables ingress for the agent (faro port) + # -- Enables ingress for Alloy (Faro port) enabled: false # For Kubernetes >= 1.18 you should specify the ingress-controller via the field ingressClassName # See https://kubernetes.io/blog/2020/04/02/improvements-to-the-ingress-api-in-kubernetes-1.18/#specifying-the-class-of-an-ingress diff --git a/operations/helm/tests/additional-serviceaccount-label/grafana-agent/templates/controllers/daemonset.yaml b/operations/helm/tests/additional-serviceaccount-label/grafana-agent/templates/controllers/daemonset.yaml index 356c736349..97d9cbfb9b 100644 --- a/operations/helm/tests/additional-serviceaccount-label/grafana-agent/templates/controllers/daemonset.yaml +++ b/operations/helm/tests/additional-serviceaccount-label/grafana-agent/templates/controllers/daemonset.yaml @@ -19,25 +19,23 @@ spec: template: metadata: annotations: - kubectl.kubernetes.io/default-container: grafana-agent + kubectl.kubernetes.io/default-container: alloy labels: app.kubernetes.io/name: grafana-agent app.kubernetes.io/instance: grafana-agent spec: serviceAccountName: grafana-agent containers: - - name: grafana-agent - image: docker.io/grafana/agent:v0.40.2 + - name: alloy + image: docker.io/grafana/alloy:v0.40.2 imagePullPolicy: IfNotPresent args: - run - - /etc/agent/config.river - - --storage.path=/tmp/agent + - /etc/alloy/config.river + - --storage.path=/tmp/alloy - --server.http.listen-addr=0.0.0.0:80 - --server.http.ui-path-prefix=/ env: - - name: AGENT_MODE - value: flow - name: AGENT_DEPLOY_MODE value: "helm" - name: HOSTNAME @@ -56,15 +54,15 @@ spec: timeoutSeconds: 1 volumeMounts: - name: config - mountPath: /etc/agent + mountPath: /etc/alloy - name: config-reloader image: ghcr.io/jimmidyson/configmap-reload:v0.12.0 args: - - --volume-dir=/etc/agent + - --volume-dir=/etc/alloy - --webhook-url=http://localhost:80/-/reload volumeMounts: - name: config - mountPath: /etc/agent + mountPath: /etc/alloy resources: requests: cpu: 1m diff --git a/operations/helm/tests/clustering/grafana-agent/templates/controllers/statefulset.yaml b/operations/helm/tests/clustering/grafana-agent/templates/controllers/statefulset.yaml index a8601aec54..ba6d8e8ba7 100644 --- a/operations/helm/tests/clustering/grafana-agent/templates/controllers/statefulset.yaml +++ b/operations/helm/tests/clustering/grafana-agent/templates/controllers/statefulset.yaml @@ -22,27 +22,25 @@ spec: template: metadata: annotations: - kubectl.kubernetes.io/default-container: grafana-agent + kubectl.kubernetes.io/default-container: alloy labels: app.kubernetes.io/name: grafana-agent app.kubernetes.io/instance: grafana-agent spec: serviceAccountName: grafana-agent containers: - - name: grafana-agent - image: docker.io/grafana/agent:v0.40.2 + - name: alloy + image: docker.io/grafana/alloy:v0.40.2 imagePullPolicy: IfNotPresent args: - run - - /etc/agent/config.river - - --storage.path=/tmp/agent + - /etc/alloy/config.river + - --storage.path=/tmp/alloy - --server.http.listen-addr=0.0.0.0:80 - --server.http.ui-path-prefix=/ - --cluster.enabled=true - --cluster.join-addresses=grafana-agent-cluster env: - - name: AGENT_MODE - value: flow - name: AGENT_DEPLOY_MODE value: "helm" - name: HOSTNAME @@ -61,15 +59,15 @@ spec: timeoutSeconds: 1 volumeMounts: - name: config - mountPath: /etc/agent + mountPath: /etc/alloy - name: config-reloader image: ghcr.io/jimmidyson/configmap-reload:v0.12.0 args: - - 
--volume-dir=/etc/agent + - --volume-dir=/etc/alloy - --webhook-url=http://localhost:80/-/reload volumeMounts: - name: config - mountPath: /etc/agent + mountPath: /etc/alloy resources: requests: cpu: 1m diff --git a/operations/helm/tests/controller-volumes-extra/grafana-agent/templates/controllers/daemonset.yaml b/operations/helm/tests/controller-volumes-extra/grafana-agent/templates/controllers/daemonset.yaml index de44f0e848..727b30100f 100644 --- a/operations/helm/tests/controller-volumes-extra/grafana-agent/templates/controllers/daemonset.yaml +++ b/operations/helm/tests/controller-volumes-extra/grafana-agent/templates/controllers/daemonset.yaml @@ -19,25 +19,23 @@ spec: template: metadata: annotations: - kubectl.kubernetes.io/default-container: grafana-agent + kubectl.kubernetes.io/default-container: alloy labels: app.kubernetes.io/name: grafana-agent app.kubernetes.io/instance: grafana-agent spec: serviceAccountName: grafana-agent containers: - - name: grafana-agent - image: docker.io/grafana/agent:v0.40.2 + - name: alloy + image: docker.io/grafana/alloy:v0.40.2 imagePullPolicy: IfNotPresent args: - run - - /etc/agent/config.river - - --storage.path=/tmp/agent + - /etc/alloy/config.river + - --storage.path=/tmp/alloy - --server.http.listen-addr=0.0.0.0:80 - --server.http.ui-path-prefix=/ env: - - name: AGENT_MODE - value: flow - name: AGENT_DEPLOY_MODE value: "helm" - name: HOSTNAME @@ -56,18 +54,18 @@ spec: timeoutSeconds: 1 volumeMounts: - name: config - mountPath: /etc/agent + mountPath: /etc/alloy - mountPath: /cache name: cache-volume - name: config-reloader image: ghcr.io/jimmidyson/configmap-reload:v0.12.0 args: - - --volume-dir=/etc/agent + - --volume-dir=/etc/alloy - --webhook-url=http://localhost:80/-/reload volumeMounts: - name: config - mountPath: /etc/agent + mountPath: /etc/alloy resources: requests: cpu: 1m diff --git a/operations/helm/tests/create-daemonset-hostnetwork/grafana-agent/templates/controllers/daemonset.yaml b/operations/helm/tests/create-daemonset-hostnetwork/grafana-agent/templates/controllers/daemonset.yaml index ef4397494e..1786dc9e1e 100644 --- a/operations/helm/tests/create-daemonset-hostnetwork/grafana-agent/templates/controllers/daemonset.yaml +++ b/operations/helm/tests/create-daemonset-hostnetwork/grafana-agent/templates/controllers/daemonset.yaml @@ -19,25 +19,23 @@ spec: template: metadata: annotations: - kubectl.kubernetes.io/default-container: grafana-agent + kubectl.kubernetes.io/default-container: alloy labels: app.kubernetes.io/name: grafana-agent app.kubernetes.io/instance: grafana-agent spec: serviceAccountName: grafana-agent containers: - - name: grafana-agent - image: docker.io/grafana/agent:v0.40.2 + - name: alloy + image: docker.io/grafana/alloy:v0.40.2 imagePullPolicy: IfNotPresent args: - run - - /etc/agent/config.river - - --storage.path=/tmp/agent + - /etc/alloy/config.river + - --storage.path=/tmp/alloy - --server.http.listen-addr=0.0.0.0:80 - --server.http.ui-path-prefix=/ env: - - name: AGENT_MODE - value: flow - name: AGENT_DEPLOY_MODE value: "helm" - name: HOSTNAME @@ -56,15 +54,15 @@ spec: timeoutSeconds: 1 volumeMounts: - name: config - mountPath: /etc/agent + mountPath: /etc/alloy - name: config-reloader image: ghcr.io/jimmidyson/configmap-reload:v0.12.0 args: - - --volume-dir=/etc/agent + - --volume-dir=/etc/alloy - --webhook-url=http://localhost:80/-/reload volumeMounts: - name: config - mountPath: /etc/agent + mountPath: /etc/alloy resources: requests: cpu: 1m diff --git 
a/operations/helm/tests/create-daemonset/grafana-agent/templates/controllers/daemonset.yaml b/operations/helm/tests/create-daemonset/grafana-agent/templates/controllers/daemonset.yaml index 356c736349..97d9cbfb9b 100644 --- a/operations/helm/tests/create-daemonset/grafana-agent/templates/controllers/daemonset.yaml +++ b/operations/helm/tests/create-daemonset/grafana-agent/templates/controllers/daemonset.yaml @@ -19,25 +19,23 @@ spec: template: metadata: annotations: - kubectl.kubernetes.io/default-container: grafana-agent + kubectl.kubernetes.io/default-container: alloy labels: app.kubernetes.io/name: grafana-agent app.kubernetes.io/instance: grafana-agent spec: serviceAccountName: grafana-agent containers: - - name: grafana-agent - image: docker.io/grafana/agent:v0.40.2 + - name: alloy + image: docker.io/grafana/alloy:v0.40.2 imagePullPolicy: IfNotPresent args: - run - - /etc/agent/config.river - - --storage.path=/tmp/agent + - /etc/alloy/config.river + - --storage.path=/tmp/alloy - --server.http.listen-addr=0.0.0.0:80 - --server.http.ui-path-prefix=/ env: - - name: AGENT_MODE - value: flow - name: AGENT_DEPLOY_MODE value: "helm" - name: HOSTNAME @@ -56,15 +54,15 @@ spec: timeoutSeconds: 1 volumeMounts: - name: config - mountPath: /etc/agent + mountPath: /etc/alloy - name: config-reloader image: ghcr.io/jimmidyson/configmap-reload:v0.12.0 args: - - --volume-dir=/etc/agent + - --volume-dir=/etc/alloy - --webhook-url=http://localhost:80/-/reload volumeMounts: - name: config - mountPath: /etc/agent + mountPath: /etc/alloy resources: requests: cpu: 1m diff --git a/operations/helm/tests/create-deployment-autoscaling/grafana-agent/templates/controllers/deployment.yaml b/operations/helm/tests/create-deployment-autoscaling/grafana-agent/templates/controllers/deployment.yaml index 86f6b61d85..efd291c67f 100644 --- a/operations/helm/tests/create-deployment-autoscaling/grafana-agent/templates/controllers/deployment.yaml +++ b/operations/helm/tests/create-deployment-autoscaling/grafana-agent/templates/controllers/deployment.yaml @@ -19,25 +19,23 @@ spec: template: metadata: annotations: - kubectl.kubernetes.io/default-container: grafana-agent + kubectl.kubernetes.io/default-container: alloy labels: app.kubernetes.io/name: grafana-agent app.kubernetes.io/instance: grafana-agent spec: serviceAccountName: grafana-agent containers: - - name: grafana-agent - image: docker.io/grafana/agent:v0.40.2 + - name: alloy + image: docker.io/grafana/alloy:v0.40.2 imagePullPolicy: IfNotPresent args: - run - - /etc/agent/config.river - - --storage.path=/tmp/agent + - /etc/alloy/config.river + - --storage.path=/tmp/alloy - --server.http.listen-addr=0.0.0.0:80 - --server.http.ui-path-prefix=/ env: - - name: AGENT_MODE - value: flow - name: AGENT_DEPLOY_MODE value: "helm" - name: HOSTNAME @@ -59,15 +57,15 @@ spec: memory: 100Mi volumeMounts: - name: config - mountPath: /etc/agent + mountPath: /etc/alloy - name: config-reloader image: ghcr.io/jimmidyson/configmap-reload:v0.12.0 args: - - --volume-dir=/etc/agent + - --volume-dir=/etc/alloy - --webhook-url=http://localhost:80/-/reload volumeMounts: - name: config - mountPath: /etc/agent + mountPath: /etc/alloy resources: requests: cpu: 1m diff --git a/operations/helm/tests/create-deployment/grafana-agent/templates/controllers/deployment.yaml b/operations/helm/tests/create-deployment/grafana-agent/templates/controllers/deployment.yaml index df8f327002..a0bde9d5bc 100644 --- a/operations/helm/tests/create-deployment/grafana-agent/templates/controllers/deployment.yaml +++ 
b/operations/helm/tests/create-deployment/grafana-agent/templates/controllers/deployment.yaml @@ -20,25 +20,23 @@ spec: template: metadata: annotations: - kubectl.kubernetes.io/default-container: grafana-agent + kubectl.kubernetes.io/default-container: alloy labels: app.kubernetes.io/name: grafana-agent app.kubernetes.io/instance: grafana-agent spec: serviceAccountName: grafana-agent containers: - - name: grafana-agent - image: docker.io/grafana/agent:v0.40.2 + - name: alloy + image: docker.io/grafana/alloy:v0.40.2 imagePullPolicy: IfNotPresent args: - run - - /etc/agent/config.river - - --storage.path=/tmp/agent + - /etc/alloy/config.river + - --storage.path=/tmp/alloy - --server.http.listen-addr=0.0.0.0:80 - --server.http.ui-path-prefix=/ env: - - name: AGENT_MODE - value: flow - name: AGENT_DEPLOY_MODE value: "helm" - name: HOSTNAME @@ -57,15 +55,15 @@ spec: timeoutSeconds: 1 volumeMounts: - name: config - mountPath: /etc/agent + mountPath: /etc/alloy - name: config-reloader image: ghcr.io/jimmidyson/configmap-reload:v0.12.0 args: - - --volume-dir=/etc/agent + - --volume-dir=/etc/alloy - --webhook-url=http://localhost:80/-/reload volumeMounts: - name: config - mountPath: /etc/agent + mountPath: /etc/alloy resources: requests: cpu: 1m diff --git a/operations/helm/tests/create-statefulset-autoscaling/grafana-agent/templates/controllers/statefulset.yaml b/operations/helm/tests/create-statefulset-autoscaling/grafana-agent/templates/controllers/statefulset.yaml index 1a00f1123e..084e210b31 100644 --- a/operations/helm/tests/create-statefulset-autoscaling/grafana-agent/templates/controllers/statefulset.yaml +++ b/operations/helm/tests/create-statefulset-autoscaling/grafana-agent/templates/controllers/statefulset.yaml @@ -21,25 +21,23 @@ spec: template: metadata: annotations: - kubectl.kubernetes.io/default-container: grafana-agent + kubectl.kubernetes.io/default-container: alloy labels: app.kubernetes.io/name: grafana-agent app.kubernetes.io/instance: grafana-agent spec: serviceAccountName: grafana-agent containers: - - name: grafana-agent - image: docker.io/grafana/agent:v0.40.2 + - name: alloy + image: docker.io/grafana/alloy:v0.40.2 imagePullPolicy: IfNotPresent args: - run - - /etc/agent/config.river - - --storage.path=/tmp/agent + - /etc/alloy/config.river + - --storage.path=/tmp/alloy - --server.http.listen-addr=0.0.0.0:80 - --server.http.ui-path-prefix=/ env: - - name: AGENT_MODE - value: flow - name: AGENT_DEPLOY_MODE value: "helm" - name: HOSTNAME @@ -61,15 +59,15 @@ spec: memory: 100Mi volumeMounts: - name: config - mountPath: /etc/agent + mountPath: /etc/alloy - name: config-reloader image: ghcr.io/jimmidyson/configmap-reload:v0.12.0 args: - - --volume-dir=/etc/agent + - --volume-dir=/etc/alloy - --webhook-url=http://localhost:80/-/reload volumeMounts: - name: config - mountPath: /etc/agent + mountPath: /etc/alloy resources: requests: cpu: 1m diff --git a/operations/helm/tests/create-statefulset/grafana-agent/templates/controllers/statefulset.yaml b/operations/helm/tests/create-statefulset/grafana-agent/templates/controllers/statefulset.yaml index 8fd3dbde8d..823bd087b6 100644 --- a/operations/helm/tests/create-statefulset/grafana-agent/templates/controllers/statefulset.yaml +++ b/operations/helm/tests/create-statefulset/grafana-agent/templates/controllers/statefulset.yaml @@ -22,25 +22,23 @@ spec: template: metadata: annotations: - kubectl.kubernetes.io/default-container: grafana-agent + kubectl.kubernetes.io/default-container: alloy labels: app.kubernetes.io/name: grafana-agent 
app.kubernetes.io/instance: grafana-agent spec: serviceAccountName: grafana-agent containers: - - name: grafana-agent - image: docker.io/grafana/agent:v0.40.2 + - name: alloy + image: docker.io/grafana/alloy:v0.40.2 imagePullPolicy: IfNotPresent args: - run - - /etc/agent/config.river - - --storage.path=/tmp/agent + - /etc/alloy/config.river + - --storage.path=/tmp/alloy - --server.http.listen-addr=0.0.0.0:80 - --server.http.ui-path-prefix=/ env: - - name: AGENT_MODE - value: flow - name: AGENT_DEPLOY_MODE value: "helm" - name: HOSTNAME @@ -59,15 +57,15 @@ spec: timeoutSeconds: 1 volumeMounts: - name: config - mountPath: /etc/agent + mountPath: /etc/alloy - name: config-reloader image: ghcr.io/jimmidyson/configmap-reload:v0.12.0 args: - - --volume-dir=/etc/agent + - --volume-dir=/etc/alloy - --webhook-url=http://localhost:80/-/reload volumeMounts: - name: config - mountPath: /etc/agent + mountPath: /etc/alloy resources: requests: cpu: 1m diff --git a/operations/helm/tests/custom-config/grafana-agent/templates/controllers/daemonset.yaml b/operations/helm/tests/custom-config/grafana-agent/templates/controllers/daemonset.yaml index 356c736349..97d9cbfb9b 100644 --- a/operations/helm/tests/custom-config/grafana-agent/templates/controllers/daemonset.yaml +++ b/operations/helm/tests/custom-config/grafana-agent/templates/controllers/daemonset.yaml @@ -19,25 +19,23 @@ spec: template: metadata: annotations: - kubectl.kubernetes.io/default-container: grafana-agent + kubectl.kubernetes.io/default-container: alloy labels: app.kubernetes.io/name: grafana-agent app.kubernetes.io/instance: grafana-agent spec: serviceAccountName: grafana-agent containers: - - name: grafana-agent - image: docker.io/grafana/agent:v0.40.2 + - name: alloy + image: docker.io/grafana/alloy:v0.40.2 imagePullPolicy: IfNotPresent args: - run - - /etc/agent/config.river - - --storage.path=/tmp/agent + - /etc/alloy/config.river + - --storage.path=/tmp/alloy - --server.http.listen-addr=0.0.0.0:80 - --server.http.ui-path-prefix=/ env: - - name: AGENT_MODE - value: flow - name: AGENT_DEPLOY_MODE value: "helm" - name: HOSTNAME @@ -56,15 +54,15 @@ spec: timeoutSeconds: 1 volumeMounts: - name: config - mountPath: /etc/agent + mountPath: /etc/alloy - name: config-reloader image: ghcr.io/jimmidyson/configmap-reload:v0.12.0 args: - - --volume-dir=/etc/agent + - --volume-dir=/etc/alloy - --webhook-url=http://localhost:80/-/reload volumeMounts: - name: config - mountPath: /etc/agent + mountPath: /etc/alloy resources: requests: cpu: 1m diff --git a/operations/helm/tests/default-values/grafana-agent/templates/controllers/daemonset.yaml b/operations/helm/tests/default-values/grafana-agent/templates/controllers/daemonset.yaml index 356c736349..97d9cbfb9b 100644 --- a/operations/helm/tests/default-values/grafana-agent/templates/controllers/daemonset.yaml +++ b/operations/helm/tests/default-values/grafana-agent/templates/controllers/daemonset.yaml @@ -19,25 +19,23 @@ spec: template: metadata: annotations: - kubectl.kubernetes.io/default-container: grafana-agent + kubectl.kubernetes.io/default-container: alloy labels: app.kubernetes.io/name: grafana-agent app.kubernetes.io/instance: grafana-agent spec: serviceAccountName: grafana-agent containers: - - name: grafana-agent - image: docker.io/grafana/agent:v0.40.2 + - name: alloy + image: docker.io/grafana/alloy:v0.40.2 imagePullPolicy: IfNotPresent args: - run - - /etc/agent/config.river - - --storage.path=/tmp/agent + - /etc/alloy/config.river + - --storage.path=/tmp/alloy - 
--server.http.listen-addr=0.0.0.0:80 - --server.http.ui-path-prefix=/ env: - - name: AGENT_MODE - value: flow - name: AGENT_DEPLOY_MODE value: "helm" - name: HOSTNAME @@ -56,15 +54,15 @@ spec: timeoutSeconds: 1 volumeMounts: - name: config - mountPath: /etc/agent + mountPath: /etc/alloy - name: config-reloader image: ghcr.io/jimmidyson/configmap-reload:v0.12.0 args: - - --volume-dir=/etc/agent + - --volume-dir=/etc/alloy - --webhook-url=http://localhost:80/-/reload volumeMounts: - name: config - mountPath: /etc/agent + mountPath: /etc/alloy resources: requests: cpu: 1m diff --git a/operations/helm/tests/enable-servicemonitor-tls/grafana-agent/templates/controllers/daemonset.yaml b/operations/helm/tests/enable-servicemonitor-tls/grafana-agent/templates/controllers/daemonset.yaml index 4eb807a30b..10c6c433d7 100644 --- a/operations/helm/tests/enable-servicemonitor-tls/grafana-agent/templates/controllers/daemonset.yaml +++ b/operations/helm/tests/enable-servicemonitor-tls/grafana-agent/templates/controllers/daemonset.yaml @@ -19,25 +19,23 @@ spec: template: metadata: annotations: - kubectl.kubernetes.io/default-container: grafana-agent + kubectl.kubernetes.io/default-container: alloy labels: app.kubernetes.io/name: grafana-agent app.kubernetes.io/instance: grafana-agent spec: serviceAccountName: grafana-agent containers: - - name: grafana-agent - image: docker.io/grafana/agent:v0.40.2 + - name: alloy + image: docker.io/grafana/alloy:v0.40.2 imagePullPolicy: IfNotPresent args: - run - - /etc/agent/config.river - - --storage.path=/tmp/agent + - /etc/alloy/config.river + - --storage.path=/tmp/alloy - --server.http.listen-addr=0.0.0.0:80 - --server.http.ui-path-prefix=/ env: - - name: AGENT_MODE - value: flow - name: AGENT_DEPLOY_MODE value: "helm" - name: HOSTNAME @@ -56,15 +54,15 @@ spec: timeoutSeconds: 1 volumeMounts: - name: config - mountPath: /etc/agent + mountPath: /etc/alloy - name: config-reloader image: ghcr.io/jimmidyson/configmap-reload:v0.12.0 args: - - --volume-dir=/etc/agent + - --volume-dir=/etc/alloy - --webhook-url=http://localhost:80/-/reload volumeMounts: - name: config - mountPath: /etc/agent + mountPath: /etc/alloy resources: requests: cpu: 1m diff --git a/operations/helm/tests/enable-servicemonitor/grafana-agent/templates/controllers/daemonset.yaml b/operations/helm/tests/enable-servicemonitor/grafana-agent/templates/controllers/daemonset.yaml index 356c736349..97d9cbfb9b 100644 --- a/operations/helm/tests/enable-servicemonitor/grafana-agent/templates/controllers/daemonset.yaml +++ b/operations/helm/tests/enable-servicemonitor/grafana-agent/templates/controllers/daemonset.yaml @@ -19,25 +19,23 @@ spec: template: metadata: annotations: - kubectl.kubernetes.io/default-container: grafana-agent + kubectl.kubernetes.io/default-container: alloy labels: app.kubernetes.io/name: grafana-agent app.kubernetes.io/instance: grafana-agent spec: serviceAccountName: grafana-agent containers: - - name: grafana-agent - image: docker.io/grafana/agent:v0.40.2 + - name: alloy + image: docker.io/grafana/alloy:v0.40.2 imagePullPolicy: IfNotPresent args: - run - - /etc/agent/config.river - - --storage.path=/tmp/agent + - /etc/alloy/config.river + - --storage.path=/tmp/alloy - --server.http.listen-addr=0.0.0.0:80 - --server.http.ui-path-prefix=/ env: - - name: AGENT_MODE - value: flow - name: AGENT_DEPLOY_MODE value: "helm" - name: HOSTNAME @@ -56,15 +54,15 @@ spec: timeoutSeconds: 1 volumeMounts: - name: config - mountPath: /etc/agent + mountPath: /etc/alloy - name: config-reloader image: 
ghcr.io/jimmidyson/configmap-reload:v0.12.0 args: - - --volume-dir=/etc/agent + - --volume-dir=/etc/alloy - --webhook-url=http://localhost:80/-/reload volumeMounts: - name: config - mountPath: /etc/agent + mountPath: /etc/alloy resources: requests: cpu: 1m diff --git a/operations/helm/tests/envFrom/grafana-agent/templates/controllers/daemonset.yaml b/operations/helm/tests/envFrom/grafana-agent/templates/controllers/daemonset.yaml index 425efb8183..5fc992ce56 100644 --- a/operations/helm/tests/envFrom/grafana-agent/templates/controllers/daemonset.yaml +++ b/operations/helm/tests/envFrom/grafana-agent/templates/controllers/daemonset.yaml @@ -19,25 +19,23 @@ spec: template: metadata: annotations: - kubectl.kubernetes.io/default-container: grafana-agent + kubectl.kubernetes.io/default-container: alloy labels: app.kubernetes.io/name: grafana-agent app.kubernetes.io/instance: grafana-agent spec: serviceAccountName: grafana-agent containers: - - name: grafana-agent - image: docker.io/grafana/agent:v0.40.2 + - name: alloy + image: docker.io/grafana/alloy:v0.40.2 imagePullPolicy: IfNotPresent args: - run - - /etc/agent/config.river - - --storage.path=/tmp/agent + - /etc/alloy/config.river + - --storage.path=/tmp/alloy - --server.http.listen-addr=0.0.0.0:80 - --server.http.ui-path-prefix=/ env: - - name: AGENT_MODE - value: flow - name: AGENT_DEPLOY_MODE value: "helm" - name: HOSTNAME @@ -59,15 +57,15 @@ spec: timeoutSeconds: 1 volumeMounts: - name: config - mountPath: /etc/agent + mountPath: /etc/alloy - name: config-reloader image: ghcr.io/jimmidyson/configmap-reload:v0.12.0 args: - - --volume-dir=/etc/agent + - --volume-dir=/etc/alloy - --webhook-url=http://localhost:80/-/reload volumeMounts: - name: config - mountPath: /etc/agent + mountPath: /etc/alloy resources: requests: cpu: 1m diff --git a/operations/helm/tests/existing-config/grafana-agent/templates/controllers/daemonset.yaml b/operations/helm/tests/existing-config/grafana-agent/templates/controllers/daemonset.yaml index 7c266c655f..a32b52ae04 100644 --- a/operations/helm/tests/existing-config/grafana-agent/templates/controllers/daemonset.yaml +++ b/operations/helm/tests/existing-config/grafana-agent/templates/controllers/daemonset.yaml @@ -19,25 +19,23 @@ spec: template: metadata: annotations: - kubectl.kubernetes.io/default-container: grafana-agent + kubectl.kubernetes.io/default-container: alloy labels: app.kubernetes.io/name: grafana-agent app.kubernetes.io/instance: grafana-agent spec: serviceAccountName: grafana-agent containers: - - name: grafana-agent - image: docker.io/grafana/agent:v0.40.2 + - name: alloy + image: docker.io/grafana/alloy:v0.40.2 imagePullPolicy: IfNotPresent args: - run - - /etc/agent/my-config.river - - --storage.path=/tmp/agent + - /etc/alloy/my-config.river + - --storage.path=/tmp/alloy - --server.http.listen-addr=0.0.0.0:80 - --server.http.ui-path-prefix=/ env: - - name: AGENT_MODE - value: flow - name: AGENT_DEPLOY_MODE value: "helm" - name: HOSTNAME @@ -56,15 +54,15 @@ spec: timeoutSeconds: 1 volumeMounts: - name: config - mountPath: /etc/agent + mountPath: /etc/alloy - name: config-reloader image: ghcr.io/jimmidyson/configmap-reload:v0.12.0 args: - - --volume-dir=/etc/agent + - --volume-dir=/etc/alloy - --webhook-url=http://localhost:80/-/reload volumeMounts: - name: config - mountPath: /etc/agent + mountPath: /etc/alloy resources: requests: cpu: 1m diff --git a/operations/helm/tests/extra-env/grafana-agent/templates/controllers/daemonset.yaml 
b/operations/helm/tests/extra-env/grafana-agent/templates/controllers/daemonset.yaml index 0844db8bf1..b0eacff778 100644 --- a/operations/helm/tests/extra-env/grafana-agent/templates/controllers/daemonset.yaml +++ b/operations/helm/tests/extra-env/grafana-agent/templates/controllers/daemonset.yaml @@ -19,25 +19,23 @@ spec: template: metadata: annotations: - kubectl.kubernetes.io/default-container: grafana-agent + kubectl.kubernetes.io/default-container: alloy labels: app.kubernetes.io/name: grafana-agent app.kubernetes.io/instance: grafana-agent spec: serviceAccountName: grafana-agent containers: - - name: grafana-agent - image: docker.io/grafana/agent:v0.40.2 + - name: alloy + image: docker.io/grafana/alloy:v0.40.2 imagePullPolicy: IfNotPresent args: - run - - /etc/agent/config.river - - --storage.path=/tmp/agent + - /etc/alloy/config.river + - --storage.path=/tmp/alloy - --server.http.listen-addr=0.0.0.0:80 - --server.http.ui-path-prefix=/ env: - - name: AGENT_MODE - value: flow - name: AGENT_DEPLOY_MODE value: "helm" - name: HOSTNAME @@ -65,15 +63,15 @@ spec: timeoutSeconds: 1 volumeMounts: - name: config - mountPath: /etc/agent + mountPath: /etc/alloy - name: config-reloader image: ghcr.io/jimmidyson/configmap-reload:v0.12.0 args: - - --volume-dir=/etc/agent + - --volume-dir=/etc/alloy - --webhook-url=http://localhost:80/-/reload volumeMounts: - name: config - mountPath: /etc/agent + mountPath: /etc/alloy resources: requests: cpu: 1m diff --git a/operations/helm/tests/extra-ports/grafana-agent/templates/controllers/daemonset.yaml b/operations/helm/tests/extra-ports/grafana-agent/templates/controllers/daemonset.yaml index 47042b9de5..d01572ae7d 100644 --- a/operations/helm/tests/extra-ports/grafana-agent/templates/controllers/daemonset.yaml +++ b/operations/helm/tests/extra-ports/grafana-agent/templates/controllers/daemonset.yaml @@ -19,25 +19,23 @@ spec: template: metadata: annotations: - kubectl.kubernetes.io/default-container: grafana-agent + kubectl.kubernetes.io/default-container: alloy labels: app.kubernetes.io/name: grafana-agent app.kubernetes.io/instance: grafana-agent spec: serviceAccountName: grafana-agent containers: - - name: grafana-agent - image: docker.io/grafana/agent:v0.40.2 + - name: alloy + image: docker.io/grafana/alloy:v0.40.2 imagePullPolicy: IfNotPresent args: - run - - /etc/agent/config.river - - --storage.path=/tmp/agent + - /etc/alloy/config.river + - --storage.path=/tmp/alloy - --server.http.listen-addr=0.0.0.0:80 - --server.http.ui-path-prefix=/ env: - - name: AGENT_MODE - value: flow - name: AGENT_DEPLOY_MODE value: "helm" - name: HOSTNAME @@ -59,15 +57,15 @@ spec: timeoutSeconds: 1 volumeMounts: - name: config - mountPath: /etc/agent + mountPath: /etc/alloy - name: config-reloader image: ghcr.io/jimmidyson/configmap-reload:v0.12.0 args: - - --volume-dir=/etc/agent + - --volume-dir=/etc/alloy - --webhook-url=http://localhost:80/-/reload volumeMounts: - name: config - mountPath: /etc/agent + mountPath: /etc/alloy resources: requests: cpu: 1m diff --git a/operations/helm/tests/faro-ingress/grafana-agent/templates/controllers/daemonset.yaml b/operations/helm/tests/faro-ingress/grafana-agent/templates/controllers/daemonset.yaml index b712b997ee..c755dd92e6 100644 --- a/operations/helm/tests/faro-ingress/grafana-agent/templates/controllers/daemonset.yaml +++ b/operations/helm/tests/faro-ingress/grafana-agent/templates/controllers/daemonset.yaml @@ -19,25 +19,23 @@ spec: template: metadata: annotations: - kubectl.kubernetes.io/default-container: grafana-agent + 
kubectl.kubernetes.io/default-container: alloy labels: app.kubernetes.io/name: grafana-agent app.kubernetes.io/instance: grafana-agent spec: serviceAccountName: grafana-agent containers: - - name: grafana-agent - image: docker.io/grafana/agent:v0.40.2 + - name: alloy + image: docker.io/grafana/alloy:v0.40.2 imagePullPolicy: IfNotPresent args: - run - - /etc/agent/config.river - - --storage.path=/tmp/agent + - /etc/alloy/config.river + - --storage.path=/tmp/alloy - --server.http.listen-addr=0.0.0.0:80 - --server.http.ui-path-prefix=/ env: - - name: AGENT_MODE - value: flow - name: AGENT_DEPLOY_MODE value: "helm" - name: HOSTNAME @@ -59,15 +57,15 @@ spec: timeoutSeconds: 1 volumeMounts: - name: config - mountPath: /etc/agent + mountPath: /etc/alloy - name: config-reloader image: ghcr.io/jimmidyson/configmap-reload:v0.12.0 args: - - --volume-dir=/etc/agent + - --volume-dir=/etc/alloy - --webhook-url=http://localhost:80/-/reload volumeMounts: - name: config - mountPath: /etc/agent + mountPath: /etc/alloy resources: requests: cpu: 1m diff --git a/operations/helm/tests/global-image-pullsecrets/grafana-agent/templates/controllers/daemonset.yaml b/operations/helm/tests/global-image-pullsecrets/grafana-agent/templates/controllers/daemonset.yaml index 4b1470e1a8..bf6d4190b7 100644 --- a/operations/helm/tests/global-image-pullsecrets/grafana-agent/templates/controllers/daemonset.yaml +++ b/operations/helm/tests/global-image-pullsecrets/grafana-agent/templates/controllers/daemonset.yaml @@ -19,7 +19,7 @@ spec: template: metadata: annotations: - kubectl.kubernetes.io/default-container: grafana-agent + kubectl.kubernetes.io/default-container: alloy labels: app.kubernetes.io/name: grafana-agent app.kubernetes.io/instance: grafana-agent @@ -31,18 +31,16 @@ spec: imagePullSecrets: - name: global-cred containers: - - name: grafana-agent - image: docker.io/grafana/agent:v0.40.2 + - name: alloy + image: docker.io/grafana/alloy:v0.40.2 imagePullPolicy: IfNotPresent args: - run - - /etc/agent/config.river - - --storage.path=/tmp/agent + - /etc/alloy/config.river + - --storage.path=/tmp/alloy - --server.http.listen-addr=0.0.0.0:80 - --server.http.ui-path-prefix=/ env: - - name: AGENT_MODE - value: flow - name: AGENT_DEPLOY_MODE value: "helm" - name: HOSTNAME @@ -61,15 +59,15 @@ spec: timeoutSeconds: 1 volumeMounts: - name: config - mountPath: /etc/agent + mountPath: /etc/alloy - name: config-reloader image: ghcr.io/jimmidyson/configmap-reload:v0.12.0 args: - - --volume-dir=/etc/agent + - --volume-dir=/etc/alloy - --webhook-url=http://localhost:80/-/reload volumeMounts: - name: config - mountPath: /etc/agent + mountPath: /etc/alloy resources: requests: cpu: 1m diff --git a/operations/helm/tests/global-image-registry/grafana-agent/templates/controllers/daemonset.yaml b/operations/helm/tests/global-image-registry/grafana-agent/templates/controllers/daemonset.yaml index 2210cfb0ce..2f3ed25026 100644 --- a/operations/helm/tests/global-image-registry/grafana-agent/templates/controllers/daemonset.yaml +++ b/operations/helm/tests/global-image-registry/grafana-agent/templates/controllers/daemonset.yaml @@ -19,25 +19,23 @@ spec: template: metadata: annotations: - kubectl.kubernetes.io/default-container: grafana-agent + kubectl.kubernetes.io/default-container: alloy labels: app.kubernetes.io/name: grafana-agent app.kubernetes.io/instance: grafana-agent spec: serviceAccountName: grafana-agent containers: - - name: grafana-agent - image: quay.io/grafana/agent:v0.40.2 + - name: alloy + image: quay.io/grafana/alloy:v0.40.2 
imagePullPolicy: IfNotPresent args: - run - - /etc/agent/config.river - - --storage.path=/tmp/agent + - /etc/alloy/config.river + - --storage.path=/tmp/alloy - --server.http.listen-addr=0.0.0.0:80 - --server.http.ui-path-prefix=/ env: - - name: AGENT_MODE - value: flow - name: AGENT_DEPLOY_MODE value: "helm" - name: HOSTNAME @@ -56,15 +54,15 @@ spec: timeoutSeconds: 1 volumeMounts: - name: config - mountPath: /etc/agent + mountPath: /etc/alloy - name: config-reloader image: quay.io/jimmidyson/configmap-reload:v0.12.0 args: - - --volume-dir=/etc/agent + - --volume-dir=/etc/alloy - --webhook-url=http://localhost:80/-/reload volumeMounts: - name: config - mountPath: /etc/agent + mountPath: /etc/alloy resources: requests: cpu: 1m diff --git a/operations/helm/tests/initcontainers/grafana-agent/templates/controllers/daemonset.yaml b/operations/helm/tests/initcontainers/grafana-agent/templates/controllers/daemonset.yaml index 9523a2b09b..a6e441ea11 100644 --- a/operations/helm/tests/initcontainers/grafana-agent/templates/controllers/daemonset.yaml +++ b/operations/helm/tests/initcontainers/grafana-agent/templates/controllers/daemonset.yaml @@ -19,7 +19,7 @@ spec: template: metadata: annotations: - kubectl.kubernetes.io/default-container: grafana-agent + kubectl.kubernetes.io/default-container: alloy labels: app.kubernetes.io/name: grafana-agent app.kubernetes.io/instance: grafana-agent @@ -44,18 +44,16 @@ spec: - emptyDir: {} name: geoip containers: - - name: grafana-agent - image: docker.io/grafana/agent:v0.40.2 + - name: alloy + image: docker.io/grafana/alloy:v0.40.2 imagePullPolicy: IfNotPresent args: - run - - /etc/agent/config.river - - --storage.path=/tmp/agent + - /etc/alloy/config.river + - --storage.path=/tmp/alloy - --server.http.listen-addr=0.0.0.0:80 - --server.http.ui-path-prefix=/ env: - - name: AGENT_MODE - value: flow - name: AGENT_DEPLOY_MODE value: "helm" - name: HOSTNAME @@ -74,18 +72,18 @@ spec: timeoutSeconds: 1 volumeMounts: - name: config - mountPath: /etc/agent + mountPath: /etc/alloy - mountPath: /etc/geoip name: geoip - name: config-reloader image: ghcr.io/jimmidyson/configmap-reload:v0.12.0 args: - - --volume-dir=/etc/agent + - --volume-dir=/etc/alloy - --webhook-url=http://localhost:80/-/reload volumeMounts: - name: config - mountPath: /etc/agent + mountPath: /etc/alloy resources: requests: cpu: 1m diff --git a/operations/helm/tests/local-image-pullsecrets/grafana-agent/templates/controllers/daemonset.yaml b/operations/helm/tests/local-image-pullsecrets/grafana-agent/templates/controllers/daemonset.yaml index e0a94d77b2..fe2e40bf2a 100644 --- a/operations/helm/tests/local-image-pullsecrets/grafana-agent/templates/controllers/daemonset.yaml +++ b/operations/helm/tests/local-image-pullsecrets/grafana-agent/templates/controllers/daemonset.yaml @@ -19,7 +19,7 @@ spec: template: metadata: annotations: - kubectl.kubernetes.io/default-container: grafana-agent + kubectl.kubernetes.io/default-container: alloy labels: app.kubernetes.io/name: grafana-agent app.kubernetes.io/instance: grafana-agent @@ -28,18 +28,16 @@ spec: imagePullSecrets: - name: local-cred containers: - - name: grafana-agent - image: docker.io/grafana/agent:v0.40.2 + - name: alloy + image: docker.io/grafana/alloy:v0.40.2 imagePullPolicy: IfNotPresent args: - run - - /etc/agent/config.river - - --storage.path=/tmp/agent + - /etc/alloy/config.river + - --storage.path=/tmp/alloy - --server.http.listen-addr=0.0.0.0:80 - --server.http.ui-path-prefix=/ env: - - name: AGENT_MODE - value: flow - name: 
AGENT_DEPLOY_MODE value: "helm" - name: HOSTNAME @@ -58,15 +56,15 @@ spec: timeoutSeconds: 1 volumeMounts: - name: config - mountPath: /etc/agent + mountPath: /etc/alloy - name: config-reloader image: ghcr.io/jimmidyson/configmap-reload:v0.12.0 args: - - --volume-dir=/etc/agent + - --volume-dir=/etc/alloy - --webhook-url=http://localhost:80/-/reload volumeMounts: - name: config - mountPath: /etc/agent + mountPath: /etc/alloy resources: requests: cpu: 1m diff --git a/operations/helm/tests/local-image-registry/grafana-agent/templates/controllers/daemonset.yaml b/operations/helm/tests/local-image-registry/grafana-agent/templates/controllers/daemonset.yaml index 2210cfb0ce..2f3ed25026 100644 --- a/operations/helm/tests/local-image-registry/grafana-agent/templates/controllers/daemonset.yaml +++ b/operations/helm/tests/local-image-registry/grafana-agent/templates/controllers/daemonset.yaml @@ -19,25 +19,23 @@ spec: template: metadata: annotations: - kubectl.kubernetes.io/default-container: grafana-agent + kubectl.kubernetes.io/default-container: alloy labels: app.kubernetes.io/name: grafana-agent app.kubernetes.io/instance: grafana-agent spec: serviceAccountName: grafana-agent containers: - - name: grafana-agent - image: quay.io/grafana/agent:v0.40.2 + - name: alloy + image: quay.io/grafana/alloy:v0.40.2 imagePullPolicy: IfNotPresent args: - run - - /etc/agent/config.river - - --storage.path=/tmp/agent + - /etc/alloy/config.river + - --storage.path=/tmp/alloy - --server.http.listen-addr=0.0.0.0:80 - --server.http.ui-path-prefix=/ env: - - name: AGENT_MODE - value: flow - name: AGENT_DEPLOY_MODE value: "helm" - name: HOSTNAME @@ -56,15 +54,15 @@ spec: timeoutSeconds: 1 volumeMounts: - name: config - mountPath: /etc/agent + mountPath: /etc/alloy - name: config-reloader image: quay.io/jimmidyson/configmap-reload:v0.12.0 args: - - --volume-dir=/etc/agent + - --volume-dir=/etc/alloy - --webhook-url=http://localhost:80/-/reload volumeMounts: - name: config - mountPath: /etc/agent + mountPath: /etc/alloy resources: requests: cpu: 1m diff --git a/operations/helm/tests/nodeselectors-and-tolerations/grafana-agent/templates/controllers/daemonset.yaml b/operations/helm/tests/nodeselectors-and-tolerations/grafana-agent/templates/controllers/daemonset.yaml index 544681c5aa..8fdfbd71c9 100644 --- a/operations/helm/tests/nodeselectors-and-tolerations/grafana-agent/templates/controllers/daemonset.yaml +++ b/operations/helm/tests/nodeselectors-and-tolerations/grafana-agent/templates/controllers/daemonset.yaml @@ -19,25 +19,23 @@ spec: template: metadata: annotations: - kubectl.kubernetes.io/default-container: grafana-agent + kubectl.kubernetes.io/default-container: alloy labels: app.kubernetes.io/name: grafana-agent app.kubernetes.io/instance: grafana-agent spec: serviceAccountName: grafana-agent containers: - - name: grafana-agent - image: docker.io/grafana/agent:v0.40.2 + - name: alloy + image: docker.io/grafana/alloy:v0.40.2 imagePullPolicy: IfNotPresent args: - run - - /etc/agent/config.river - - --storage.path=/tmp/agent + - /etc/alloy/config.river + - --storage.path=/tmp/alloy - --server.http.listen-addr=0.0.0.0:80 - --server.http.ui-path-prefix=/ env: - - name: AGENT_MODE - value: flow - name: AGENT_DEPLOY_MODE value: "helm" - name: HOSTNAME @@ -56,15 +54,15 @@ spec: timeoutSeconds: 1 volumeMounts: - name: config - mountPath: /etc/agent + mountPath: /etc/alloy - name: config-reloader image: ghcr.io/jimmidyson/configmap-reload:v0.12.0 args: - - --volume-dir=/etc/agent + - --volume-dir=/etc/alloy - 
--webhook-url=http://localhost:80/-/reload volumeMounts: - name: config - mountPath: /etc/agent + mountPath: /etc/alloy resources: requests: cpu: 1m diff --git a/operations/helm/tests/pod_annotations/grafana-agent/templates/controllers/daemonset.yaml b/operations/helm/tests/pod_annotations/grafana-agent/templates/controllers/daemonset.yaml index 34e0be4da1..0cdcbbe73a 100644 --- a/operations/helm/tests/pod_annotations/grafana-agent/templates/controllers/daemonset.yaml +++ b/operations/helm/tests/pod_annotations/grafana-agent/templates/controllers/daemonset.yaml @@ -19,7 +19,7 @@ spec: template: metadata: annotations: - kubectl.kubernetes.io/default-container: grafana-agent + kubectl.kubernetes.io/default-container: alloy testAnnotationKey: testAnnotationValue labels: app.kubernetes.io/name: grafana-agent @@ -27,18 +27,16 @@ spec: spec: serviceAccountName: grafana-agent containers: - - name: grafana-agent - image: docker.io/grafana/agent:v0.40.2 + - name: alloy + image: docker.io/grafana/alloy:v0.40.2 imagePullPolicy: IfNotPresent args: - run - - /etc/agent/config.river - - --storage.path=/tmp/agent + - /etc/alloy/config.river + - --storage.path=/tmp/alloy - --server.http.listen-addr=0.0.0.0:80 - --server.http.ui-path-prefix=/ env: - - name: AGENT_MODE - value: flow - name: AGENT_DEPLOY_MODE value: "helm" - name: HOSTNAME @@ -57,15 +55,15 @@ spec: timeoutSeconds: 1 volumeMounts: - name: config - mountPath: /etc/agent + mountPath: /etc/alloy - name: config-reloader image: ghcr.io/jimmidyson/configmap-reload:v0.12.0 args: - - --volume-dir=/etc/agent + - --volume-dir=/etc/alloy - --webhook-url=http://localhost:80/-/reload volumeMounts: - name: config - mountPath: /etc/agent + mountPath: /etc/alloy resources: requests: cpu: 1m diff --git a/operations/helm/tests/sidecars/grafana-agent/templates/controllers/daemonset.yaml b/operations/helm/tests/sidecars/grafana-agent/templates/controllers/daemonset.yaml index 426f1f9072..410fa04519 100644 --- a/operations/helm/tests/sidecars/grafana-agent/templates/controllers/daemonset.yaml +++ b/operations/helm/tests/sidecars/grafana-agent/templates/controllers/daemonset.yaml @@ -19,25 +19,23 @@ spec: template: metadata: annotations: - kubectl.kubernetes.io/default-container: grafana-agent + kubectl.kubernetes.io/default-container: alloy labels: app.kubernetes.io/name: grafana-agent app.kubernetes.io/instance: grafana-agent spec: serviceAccountName: grafana-agent containers: - - name: grafana-agent - image: docker.io/grafana/agent:v0.40.2 + - name: alloy + image: docker.io/grafana/alloy:v0.40.2 imagePullPolicy: IfNotPresent args: - run - - /etc/agent/config.river - - --storage.path=/tmp/agent + - /etc/alloy/config.river + - --storage.path=/tmp/alloy - --server.http.listen-addr=0.0.0.0:80 - --server.http.ui-path-prefix=/ env: - - name: AGENT_MODE - value: flow - name: AGENT_DEPLOY_MODE value: "helm" - name: HOSTNAME @@ -56,18 +54,18 @@ spec: timeoutSeconds: 1 volumeMounts: - name: config - mountPath: /etc/agent + mountPath: /etc/alloy - mountPath: /etc/geoip name: geoip - name: config-reloader image: ghcr.io/jimmidyson/configmap-reload:v0.12.0 args: - - --volume-dir=/etc/agent + - --volume-dir=/etc/alloy - --webhook-url=http://localhost:80/-/reload volumeMounts: - name: config - mountPath: /etc/agent + mountPath: /etc/alloy resources: requests: cpu: 1m diff --git a/operations/helm/tests/static-mode/grafana-agent/templates/configmap.yaml b/operations/helm/tests/static-mode/grafana-agent/templates/configmap.yaml deleted file mode 100644 index 
3290febf92..0000000000 --- a/operations/helm/tests/static-mode/grafana-agent/templates/configmap.yaml +++ /dev/null @@ -1,17 +0,0 @@ ---- -# Source: grafana-agent/templates/configmap.yaml -apiVersion: v1 -kind: ConfigMap -metadata: - name: grafana-agent - labels: - helm.sh/chart: grafana-agent - app.kubernetes.io/name: grafana-agent - app.kubernetes.io/instance: grafana-agent - app.kubernetes.io/version: "vX.Y.Z" - app.kubernetes.io/managed-by: Helm -data: - config.yaml: |- - server: - log_level: info - log_format: logfmt diff --git a/operations/helm/tests/static-mode/grafana-agent/templates/controllers/daemonset.yaml b/operations/helm/tests/static-mode/grafana-agent/templates/controllers/daemonset.yaml deleted file mode 100644 index 6cc6602fa2..0000000000 --- a/operations/helm/tests/static-mode/grafana-agent/templates/controllers/daemonset.yaml +++ /dev/null @@ -1,73 +0,0 @@ ---- -# Source: grafana-agent/templates/controllers/daemonset.yaml -apiVersion: apps/v1 -kind: DaemonSet -metadata: - name: grafana-agent - labels: - helm.sh/chart: grafana-agent - app.kubernetes.io/name: grafana-agent - app.kubernetes.io/instance: grafana-agent - app.kubernetes.io/version: "vX.Y.Z" - app.kubernetes.io/managed-by: Helm -spec: - minReadySeconds: 10 - selector: - matchLabels: - app.kubernetes.io/name: grafana-agent - app.kubernetes.io/instance: grafana-agent - template: - metadata: - annotations: - kubectl.kubernetes.io/default-container: grafana-agent - labels: - app.kubernetes.io/name: grafana-agent - app.kubernetes.io/instance: grafana-agent - spec: - serviceAccountName: grafana-agent - containers: - - name: grafana-agent - image: docker.io/grafana/agent:v0.40.2 - imagePullPolicy: IfNotPresent - args: - - -config.file=/etc/agent/config.yaml - - -server.http.address=0.0.0.0:80 - env: - - name: AGENT_MODE - value: static - - name: AGENT_DEPLOY_MODE - value: "helm" - - name: HOSTNAME - valueFrom: - fieldRef: - fieldPath: spec.nodeName - ports: - - containerPort: 80 - name: http-metrics - readinessProbe: - httpGet: - path: /-/ready - port: 80 - scheme: HTTP - initialDelaySeconds: 10 - timeoutSeconds: 1 - volumeMounts: - - name: config - mountPath: /etc/agent - - name: config-reloader - image: ghcr.io/jimmidyson/configmap-reload:v0.12.0 - args: - - --volume-dir=/etc/agent - - --webhook-url=http://localhost:80/-/reload - volumeMounts: - - name: config - mountPath: /etc/agent - resources: - requests: - cpu: 1m - memory: 5Mi - dnsPolicy: ClusterFirst - volumes: - - name: config - configMap: - name: grafana-agent diff --git a/operations/helm/tests/static-mode/grafana-agent/templates/rbac.yaml b/operations/helm/tests/static-mode/grafana-agent/templates/rbac.yaml deleted file mode 100644 index 3765583fb6..0000000000 --- a/operations/helm/tests/static-mode/grafana-agent/templates/rbac.yaml +++ /dev/null @@ -1,117 +0,0 @@ ---- -# Source: grafana-agent/templates/rbac.yaml -apiVersion: rbac.authorization.k8s.io/v1 -kind: ClusterRole -metadata: - name: grafana-agent - labels: - helm.sh/chart: grafana-agent - app.kubernetes.io/name: grafana-agent - app.kubernetes.io/instance: grafana-agent - app.kubernetes.io/version: "vX.Y.Z" - app.kubernetes.io/managed-by: Helm -rules: - # Rules which allow discovery.kubernetes to function. 
- - apiGroups: - - "" - - "discovery.k8s.io" - - "networking.k8s.io" - resources: - - endpoints - - endpointslices - - ingresses - - nodes - - nodes/proxy - - nodes/metrics - - pods - - services - verbs: - - get - - list - - watch - # Rules which allow loki.source.kubernetes and loki.source.podlogs to work. - - apiGroups: - - "" - resources: - - pods - - pods/log - - namespaces - verbs: - - get - - list - - watch - - apiGroups: - - "monitoring.grafana.com" - resources: - - podlogs - verbs: - - get - - list - - watch - # Rules which allow mimir.rules.kubernetes to work. - - apiGroups: ["monitoring.coreos.com"] - resources: - - prometheusrules - verbs: - - get - - list - - watch - - nonResourceURLs: - - /metrics - verbs: - - get - # Rules for prometheus.kubernetes.* - - apiGroups: ["monitoring.coreos.com"] - resources: - - podmonitors - - servicemonitors - - probes - verbs: - - get - - list - - watch - # Rules which allow eventhandler to work. - - apiGroups: - - "" - resources: - - events - verbs: - - get - - list - - watch - # needed for remote.kubernetes.* - - apiGroups: [""] - resources: - - "configmaps" - - "secrets" - verbs: - - get - - list - - watch - # needed for otelcol.processor.k8sattributes - - apiGroups: ["apps"] - resources: ["replicasets"] - verbs: ["get", "list", "watch"] - - apiGroups: ["extensions"] - resources: ["replicasets"] - verbs: ["get", "list", "watch"] ---- -# Source: grafana-agent/templates/rbac.yaml -apiVersion: rbac.authorization.k8s.io/v1 -kind: ClusterRoleBinding -metadata: - name: grafana-agent - labels: - helm.sh/chart: grafana-agent - app.kubernetes.io/name: grafana-agent - app.kubernetes.io/instance: grafana-agent - app.kubernetes.io/version: "vX.Y.Z" - app.kubernetes.io/managed-by: Helm -roleRef: - apiGroup: rbac.authorization.k8s.io - kind: ClusterRole - name: grafana-agent -subjects: - - kind: ServiceAccount - name: grafana-agent - namespace: default diff --git a/operations/helm/tests/static-mode/grafana-agent/templates/service.yaml b/operations/helm/tests/static-mode/grafana-agent/templates/service.yaml deleted file mode 100644 index c98f79428b..0000000000 --- a/operations/helm/tests/static-mode/grafana-agent/templates/service.yaml +++ /dev/null @@ -1,23 +0,0 @@ ---- -# Source: grafana-agent/templates/service.yaml -apiVersion: v1 -kind: Service -metadata: - name: grafana-agent - labels: - helm.sh/chart: grafana-agent - app.kubernetes.io/name: grafana-agent - app.kubernetes.io/instance: grafana-agent - app.kubernetes.io/version: "vX.Y.Z" - app.kubernetes.io/managed-by: Helm -spec: - type: ClusterIP - selector: - app.kubernetes.io/name: grafana-agent - app.kubernetes.io/instance: grafana-agent - internalTrafficPolicy: Cluster - ports: - - name: http-metrics - port: 80 - targetPort: 80 - protocol: "TCP" diff --git a/operations/helm/tests/static-mode/grafana-agent/templates/serviceaccount.yaml b/operations/helm/tests/static-mode/grafana-agent/templates/serviceaccount.yaml deleted file mode 100644 index 65d7e0df38..0000000000 --- a/operations/helm/tests/static-mode/grafana-agent/templates/serviceaccount.yaml +++ /dev/null @@ -1,13 +0,0 @@ ---- -# Source: grafana-agent/templates/serviceaccount.yaml -apiVersion: v1 -kind: ServiceAccount -metadata: - name: grafana-agent - namespace: default - labels: - helm.sh/chart: grafana-agent - app.kubernetes.io/name: grafana-agent - app.kubernetes.io/instance: grafana-agent - app.kubernetes.io/version: "vX.Y.Z" - app.kubernetes.io/managed-by: Helm diff --git 
a/operations/helm/tests/topologyspreadconstraints/grafana-agent/templates/controllers/deployment.yaml b/operations/helm/tests/topologyspreadconstraints/grafana-agent/templates/controllers/deployment.yaml
index cb482e7f9f..7faf74aa1c 100644
--- a/operations/helm/tests/topologyspreadconstraints/grafana-agent/templates/controllers/deployment.yaml
+++ b/operations/helm/tests/topologyspreadconstraints/grafana-agent/templates/controllers/deployment.yaml
@@ -20,25 +20,23 @@ spec:
   template:
     metadata:
       annotations:
-        kubectl.kubernetes.io/default-container: grafana-agent
+        kubectl.kubernetes.io/default-container: alloy
       labels:
         app.kubernetes.io/name: grafana-agent
         app.kubernetes.io/instance: grafana-agent
     spec:
       serviceAccountName: grafana-agent
       containers:
-        - name: grafana-agent
-          image: docker.io/grafana/agent:v0.40.2
+        - name: alloy
+          image: docker.io/grafana/alloy:v0.40.2
           imagePullPolicy: IfNotPresent
           args:
             - run
-            - /etc/agent/config.river
-            - --storage.path=/tmp/agent
+            - /etc/alloy/config.river
+            - --storage.path=/tmp/alloy
             - --server.http.listen-addr=0.0.0.0:80
             - --server.http.ui-path-prefix=/
           env:
-            - name: AGENT_MODE
-              value: flow
             - name: AGENT_DEPLOY_MODE
               value: "helm"
             - name: HOSTNAME
@@ -57,15 +55,15 @@ spec:
           timeoutSeconds: 1
           volumeMounts:
             - name: config
-              mountPath: /etc/agent
+              mountPath: /etc/alloy
         - name: config-reloader
           image: ghcr.io/jimmidyson/configmap-reload:v0.12.0
           args:
-            - --volume-dir=/etc/agent
+            - --volume-dir=/etc/alloy
             - --webhook-url=http://localhost:80/-/reload
           volumeMounts:
             - name: config
-              mountPath: /etc/agent
+              mountPath: /etc/alloy
           resources:
             requests:
               cpu: 1m
diff --git a/operations/helm/tests/with-digests/grafana-agent/templates/controllers/daemonset.yaml b/operations/helm/tests/with-digests/grafana-agent/templates/controllers/daemonset.yaml
index d52c502d3c..57eb2cb87a 100644
--- a/operations/helm/tests/with-digests/grafana-agent/templates/controllers/daemonset.yaml
+++ b/operations/helm/tests/with-digests/grafana-agent/templates/controllers/daemonset.yaml
@@ -19,25 +19,23 @@ spec:
   template:
     metadata:
       annotations:
-        kubectl.kubernetes.io/default-container: grafana-agent
+        kubectl.kubernetes.io/default-container: alloy
       labels:
         app.kubernetes.io/name: grafana-agent
         app.kubernetes.io/instance: grafana-agent
     spec:
       serviceAccountName: grafana-agent
       containers:
-        - name: grafana-agent
+        - name: alloy
           image: docker.io/grafana/agent@sha256:82575a7be3e4770e53f620298e58bcc4cdb0fd0338e01c4b206cae9e3ca46ebf
           imagePullPolicy: IfNotPresent
           args:
             - run
-            - /etc/agent/config.river
-            - --storage.path=/tmp/agent
+            - /etc/alloy/config.river
+            - --storage.path=/tmp/alloy
             - --server.http.listen-addr=0.0.0.0:80
             - --server.http.ui-path-prefix=/
           env:
-            - name: AGENT_MODE
-              value: flow
             - name: AGENT_DEPLOY_MODE
               value: "helm"
             - name: HOSTNAME
@@ -56,15 +54,15 @@ spec:
           timeoutSeconds: 1
           volumeMounts:
             - name: config
-              mountPath: /etc/agent
+              mountPath: /etc/alloy
         - name: config-reloader
           image: docker.io/jimmidyson/configmap-reload@sha256:5af9d3041d12a3e63f115125f89b66d2ba981fe82e64302ac370c5496055059c
           args:
-            - --volume-dir=/etc/agent
+            - --volume-dir=/etc/alloy
             - --webhook-url=http://localhost:80/-/reload
           volumeMounts:
             - name: config
-              mountPath: /etc/agent
+              mountPath: /etc/alloy
           resources:
             requests:
               cpu: 1m

From 8af971ce6a5b5e1e4de9e465fc6f2de7367fdbc8 Mon Sep 17 00:00:00 2001
From: Robert Fratto
Date: Tue, 12 Mar 2024 08:46:58 -0400
Subject: [PATCH 013/136] helm: rename charts/grafana-agent to charts/alloy

---
.github/workflows/helm-release.yml.disabled | 2 +-
.github/workflows/helm-test.yml | 2 +- Makefile | 2 +- .../tasks/configure/configure-kubernetes.md | 8 +- docs/sources/tasks/migrate/from-operator.md | 2 +- .../internal/apis/monitoring/v1alpha2/doc.go | 2 +- operations/helm/Makefile | 2 +- operations/helm/README.md | 2 +- .../{grafana-agent => alloy}/.helmignore | 0 .../{grafana-agent => alloy}/CHANGELOG.md | 0 .../{grafana-agent => alloy}/Chart.yaml | 2 +- .../charts/{grafana-agent => alloy}/README.md | 0 .../{grafana-agent => alloy}/README.md.gotmpl | 0 .../charts/crds/Chart.yaml | 0 .../crds/monitoring.grafana.com_podlogs.yaml | 0 ...dditional-serviceaccount-label-values.yaml | 0 .../ci/clustering-values.yaml | 0 .../ci/controller-volumes-extra-values.yaml | 0 .../create-daemonset-hostnetwork-values.yaml | 0 .../ci/create-daemonset-values.yaml | 0 .../create-deployment-autoscaling-values.yaml | 0 .../ci/create-deployment-values.yaml | 0 ...create-statefulset-autoscaling-values.yaml | 0 .../ci/create-statefulset-values.yaml | 0 .../ci/custom-config-values.yaml | 0 .../ci/default-values-values.yaml | 0 .../ci/enable-servicemonitor-tls-values.yaml | 0 .../ci/enable-servicemonitor-values.yaml | 0 .../ci/envFrom-values.yaml | 0 .../ci/existing-config-values.yaml | 0 .../ci/extra-env-values.yaml | 0 .../ci/extra-ports-values.yaml | 0 .../ci/faro-ingress-values.yaml | 0 .../ci/global-image-pullsecrets-values.yaml | 0 .../ci/global-image-registry-values.yaml | 0 .../ci/initcontainers-values.yaml | 0 .../ci/local-image-pullsecrets-values.yaml | 0 .../ci/local-image-registry-values.yaml | 0 .../nodeselectors-and-tolerations-values.yaml | 0 .../ci/pod_annotations-values.yaml | 0 .../ci/sidecars-values.yaml | 0 .../ci/topologyspreadconstraints-values.yaml | 0 .../ci/with-digests-values.yaml | 0 .../config/example.river | 0 .../templates/NOTES.txt | 0 .../templates/_config.tpl | 0 .../templates/_helpers.tpl | 0 .../templates/cluster_service.yaml | 0 .../templates/configmap.yaml | 0 .../templates/containers/_agent.yaml | 0 .../templates/containers/_watch.yaml | 0 .../templates/controllers/_pod.yaml | 0 .../templates/controllers/daemonset.yaml | 0 .../templates/controllers/deployment.yaml | 0 .../templates/controllers/statefulset.yaml | 0 .../templates/hpa.yaml | 0 .../templates/ingress.yaml | 0 .../templates/rbac.yaml | 0 .../templates/service.yaml | 0 .../templates/serviceaccount.yaml | 0 .../templates/servicemonitor.yaml | 0 .../{grafana-agent => alloy}/values.yaml | 0 .../templates/configmap.yaml | 10 +- .../templates/controllers/daemonset.yaml | 22 ++-- .../alloy}/templates/rbac.yaml | 24 ++-- .../alloy}/templates/service.yaml | 14 +-- .../alloy/templates/serviceaccount.yaml | 14 +++ .../templates/serviceaccount.yaml | 14 --- .../templates/cluster_service.yaml | 14 +-- .../alloy}/templates/configmap.yaml | 10 +- .../templates/controllers/statefulset.yaml | 26 ++-- .../alloy}/templates/rbac.yaml | 24 ++-- .../alloy}/templates/service.yaml | 14 +-- .../alloy/templates/serviceaccount.yaml | 13 ++ .../templates/serviceaccount.yaml | 13 -- .../alloy}/templates/configmap.yaml | 10 +- .../templates/controllers/daemonset.yaml | 22 ++-- .../alloy}/templates/rbac.yaml | 24 ++-- .../alloy}/templates/service.yaml | 14 +-- .../alloy/templates/serviceaccount.yaml | 13 ++ .../templates/serviceaccount.yaml | 13 -- .../templates/configmap.yaml | 10 +- .../templates/controllers/daemonset.yaml | 22 ++-- .../alloy}/templates/rbac.yaml | 24 ++-- .../alloy}/templates/service.yaml | 14 +-- .../alloy/templates/serviceaccount.yaml | 13 ++ 
.../templates/serviceaccount.yaml | 13 -- .../alloy/templates/configmap.yaml | 42 +++++++ .../templates/controllers/daemonset.yaml | 22 ++-- .../alloy/templates/rbac.yaml | 117 ++++++++++++++++++ .../alloy/templates/service.yaml | 23 ++++ .../alloy/templates/serviceaccount.yaml | 13 ++ .../grafana-agent/templates/configmap.yaml | 42 ------- .../grafana-agent/templates/rbac.yaml | 117 ------------------ .../grafana-agent/templates/service.yaml | 23 ---- .../templates/serviceaccount.yaml | 13 -- .../alloy/templates/configmap.yaml | 42 +++++++ .../templates/controllers/deployment.yaml | 22 ++-- .../templates/hpa.yaml | 12 +- .../alloy/templates/rbac.yaml | 117 ++++++++++++++++++ .../alloy/templates/service.yaml | 23 ++++ .../alloy/templates/serviceaccount.yaml | 13 ++ .../grafana-agent/templates/configmap.yaml | 42 ------- .../grafana-agent/templates/rbac.yaml | 117 ------------------ .../grafana-agent/templates/service.yaml | 23 ---- .../templates/serviceaccount.yaml | 13 -- .../alloy/templates/configmap.yaml | 42 +++++++ .../templates/controllers/deployment.yaml | 22 ++-- .../alloy/templates/rbac.yaml | 117 ++++++++++++++++++ .../alloy/templates/service.yaml | 23 ++++ .../alloy/templates/serviceaccount.yaml | 13 ++ .../grafana-agent/templates/configmap.yaml | 42 ------- .../grafana-agent/templates/rbac.yaml | 117 ------------------ .../grafana-agent/templates/service.yaml | 23 ---- .../templates/serviceaccount.yaml | 13 -- .../alloy/templates/configmap.yaml | 42 +++++++ .../templates/controllers/statefulset.yaml | 24 ++-- .../templates/hpa.yaml | 12 +- .../alloy/templates/rbac.yaml | 117 ++++++++++++++++++ .../alloy/templates/service.yaml | 23 ++++ .../alloy/templates/serviceaccount.yaml | 13 ++ .../grafana-agent/templates/configmap.yaml | 42 ------- .../grafana-agent/templates/rbac.yaml | 117 ------------------ .../grafana-agent/templates/service.yaml | 23 ---- .../templates/serviceaccount.yaml | 13 -- .../alloy/templates/configmap.yaml | 42 +++++++ .../templates/controllers/statefulset.yaml | 24 ++-- .../alloy/templates/rbac.yaml | 117 ++++++++++++++++++ .../alloy/templates/service.yaml | 23 ++++ .../alloy/templates/serviceaccount.yaml | 13 ++ .../grafana-agent/templates/configmap.yaml | 42 ------- .../grafana-agent/templates/rbac.yaml | 117 ------------------ .../grafana-agent/templates/service.yaml | 23 ---- .../templates/serviceaccount.yaml | 13 -- .../templates/configmap.yaml | 10 +- .../templates/controllers/daemonset.yaml | 22 ++-- .../custom-config/alloy/templates/rbac.yaml | 117 ++++++++++++++++++ .../alloy/templates/service.yaml | 23 ++++ .../alloy/templates/serviceaccount.yaml | 13 ++ .../grafana-agent/templates/rbac.yaml | 117 ------------------ .../grafana-agent/templates/service.yaml | 23 ---- .../templates/serviceaccount.yaml | 13 -- .../alloy/templates/configmap.yaml | 42 +++++++ .../templates/controllers/daemonset.yaml | 22 ++-- .../default-values/alloy/templates/rbac.yaml | 117 ++++++++++++++++++ .../alloy/templates/service.yaml | 23 ++++ .../alloy/templates/serviceaccount.yaml | 13 ++ .../grafana-agent/templates/configmap.yaml | 42 ------- .../grafana-agent/templates/rbac.yaml | 117 ------------------ .../grafana-agent/templates/service.yaml | 23 ---- .../templates/serviceaccount.yaml | 13 -- .../alloy/templates/configmap.yaml | 42 +++++++ .../templates/controllers/daemonset.yaml | 22 ++-- .../alloy/templates/rbac.yaml | 117 ++++++++++++++++++ .../alloy/templates/service.yaml | 23 ++++ .../alloy/templates/serviceaccount.yaml | 13 ++ .../templates/servicemonitor.yaml | 14 
+-- .../grafana-agent/templates/configmap.yaml | 42 ------- .../grafana-agent/templates/rbac.yaml | 117 ------------------ .../grafana-agent/templates/service.yaml | 23 ---- .../templates/serviceaccount.yaml | 13 -- .../alloy/templates/configmap.yaml | 42 +++++++ .../templates/controllers/daemonset.yaml | 74 +++++++++++ .../alloy/templates/rbac.yaml | 117 ++++++++++++++++++ .../alloy/templates/service.yaml | 23 ++++ .../alloy/templates/serviceaccount.yaml | 13 ++ .../alloy/templates/servicemonitor.yaml | 21 ++++ .../grafana-agent/templates/configmap.yaml | 42 ------- .../templates/controllers/daemonset.yaml | 74 ----------- .../grafana-agent/templates/rbac.yaml | 117 ------------------ .../grafana-agent/templates/service.yaml | 23 ---- .../templates/serviceaccount.yaml | 13 -- .../templates/servicemonitor.yaml | 21 ---- .../envFrom/alloy/templates/configmap.yaml | 42 +++++++ .../templates/controllers/daemonset.yaml | 22 ++-- .../tests/envFrom/alloy/templates/rbac.yaml | 117 ++++++++++++++++++ .../envFrom/alloy/templates/service.yaml | 23 ++++ .../alloy/templates/serviceaccount.yaml | 13 ++ .../grafana-agent/templates/configmap.yaml | 42 ------- .../envFrom/grafana-agent/templates/rbac.yaml | 117 ------------------ .../grafana-agent/templates/service.yaml | 23 ---- .../templates/serviceaccount.yaml | 13 -- .../templates/controllers/daemonset.yaml | 20 +-- .../existing-config/alloy/templates/rbac.yaml | 117 ++++++++++++++++++ .../alloy/templates/service.yaml | 23 ++++ .../alloy/templates/serviceaccount.yaml | 13 ++ .../grafana-agent/templates/rbac.yaml | 117 ------------------ .../grafana-agent/templates/service.yaml | 23 ---- .../templates/serviceaccount.yaml | 13 -- .../extra-env/alloy/templates/configmap.yaml | 42 +++++++ .../templates/controllers/daemonset.yaml | 22 ++-- .../tests/extra-env/alloy/templates/rbac.yaml | 117 ++++++++++++++++++ .../extra-env/alloy/templates/service.yaml | 23 ++++ .../alloy/templates/serviceaccount.yaml | 13 ++ .../grafana-agent/templates/configmap.yaml | 42 ------- .../grafana-agent/templates/rbac.yaml | 117 ------------------ .../grafana-agent/templates/service.yaml | 23 ---- .../templates/serviceaccount.yaml | 13 -- .../alloy/templates/configmap.yaml | 42 +++++++ .../templates/controllers/daemonset.yaml | 22 ++-- .../extra-ports/alloy/templates/rbac.yaml | 117 ++++++++++++++++++ .../templates/service.yaml | 14 +-- .../alloy/templates/serviceaccount.yaml | 13 ++ .../grafana-agent/templates/configmap.yaml | 42 ------- .../grafana-agent/templates/rbac.yaml | 117 ------------------ .../templates/serviceaccount.yaml | 13 -- .../alloy/templates/configmap.yaml | 42 +++++++ .../templates/controllers/daemonset.yaml | 22 ++-- .../templates/ingress.yaml | 12 +- .../faro-ingress/alloy/templates/rbac.yaml | 117 ++++++++++++++++++ .../templates/service.yaml | 14 +-- .../alloy/templates/serviceaccount.yaml | 13 ++ .../grafana-agent/templates/configmap.yaml | 42 ------- .../grafana-agent/templates/rbac.yaml | 117 ------------------ .../templates/serviceaccount.yaml | 13 -- .../alloy/templates/configmap.yaml | 42 +++++++ .../templates/controllers/daemonset.yaml | 22 ++-- .../alloy/templates/rbac.yaml | 117 ++++++++++++++++++ .../alloy/templates/service.yaml | 23 ++++ .../alloy/templates/serviceaccount.yaml | 13 ++ .../grafana-agent/templates/configmap.yaml | 42 ------- .../grafana-agent/templates/rbac.yaml | 117 ------------------ .../grafana-agent/templates/service.yaml | 23 ---- .../templates/serviceaccount.yaml | 13 -- .../alloy/templates/configmap.yaml | 42 +++++++ 
.../templates/controllers/daemonset.yaml | 22 ++-- .../alloy/templates/rbac.yaml | 117 ++++++++++++++++++ .../alloy/templates/service.yaml | 23 ++++ .../alloy/templates/serviceaccount.yaml | 13 ++ .../grafana-agent/templates/configmap.yaml | 42 ------- .../grafana-agent/templates/rbac.yaml | 117 ------------------ .../grafana-agent/templates/service.yaml | 23 ---- .../templates/serviceaccount.yaml | 13 -- .../alloy/templates/configmap.yaml | 42 +++++++ .../templates/controllers/daemonset.yaml | 22 ++-- .../initcontainers/alloy/templates/rbac.yaml | 117 ++++++++++++++++++ .../alloy/templates/service.yaml | 23 ++++ .../alloy/templates/serviceaccount.yaml | 13 ++ .../grafana-agent/templates/configmap.yaml | 42 ------- .../grafana-agent/templates/rbac.yaml | 117 ------------------ .../grafana-agent/templates/service.yaml | 23 ---- .../templates/serviceaccount.yaml | 13 -- .../alloy/templates/configmap.yaml | 42 +++++++ .../templates/controllers/daemonset.yaml | 22 ++-- .../alloy/templates/rbac.yaml | 117 ++++++++++++++++++ .../alloy/templates/service.yaml | 23 ++++ .../alloy/templates/serviceaccount.yaml | 13 ++ .../grafana-agent/templates/configmap.yaml | 42 ------- .../grafana-agent/templates/rbac.yaml | 117 ------------------ .../grafana-agent/templates/service.yaml | 23 ---- .../templates/serviceaccount.yaml | 13 -- .../alloy/templates/configmap.yaml | 42 +++++++ .../templates/controllers/daemonset.yaml | 22 ++-- .../alloy/templates/rbac.yaml | 117 ++++++++++++++++++ .../alloy/templates/service.yaml | 23 ++++ .../alloy/templates/serviceaccount.yaml | 13 ++ .../grafana-agent/templates/configmap.yaml | 42 ------- .../grafana-agent/templates/rbac.yaml | 117 ------------------ .../grafana-agent/templates/service.yaml | 23 ---- .../templates/serviceaccount.yaml | 13 -- .../alloy/templates/configmap.yaml | 42 +++++++ .../templates/controllers/daemonset.yaml | 22 ++-- .../alloy/templates/rbac.yaml | 117 ++++++++++++++++++ .../alloy/templates/service.yaml | 23 ++++ .../alloy/templates/serviceaccount.yaml | 13 ++ .../grafana-agent/templates/configmap.yaml | 42 ------- .../grafana-agent/templates/rbac.yaml | 117 ------------------ .../grafana-agent/templates/service.yaml | 23 ---- .../templates/serviceaccount.yaml | 13 -- .../alloy/templates/configmap.yaml | 42 +++++++ .../templates/controllers/daemonset.yaml | 22 ++-- .../pod_annotations/alloy/templates/rbac.yaml | 117 ++++++++++++++++++ .../alloy/templates/service.yaml | 23 ++++ .../alloy/templates/serviceaccount.yaml | 13 ++ .../grafana-agent/templates/configmap.yaml | 42 ------- .../grafana-agent/templates/rbac.yaml | 117 ------------------ .../grafana-agent/templates/service.yaml | 23 ---- .../templates/serviceaccount.yaml | 13 -- .../sidecars/alloy/templates/configmap.yaml | 42 +++++++ .../templates/controllers/daemonset.yaml | 22 ++-- .../tests/sidecars/alloy/templates/rbac.yaml | 117 ++++++++++++++++++ .../sidecars/alloy/templates/service.yaml | 23 ++++ .../alloy/templates/serviceaccount.yaml | 13 ++ .../grafana-agent/templates/configmap.yaml | 42 ------- .../grafana-agent/templates/rbac.yaml | 117 ------------------ .../grafana-agent/templates/service.yaml | 23 ---- .../templates/serviceaccount.yaml | 13 -- .../alloy/templates/configmap.yaml | 42 +++++++ .../templates/controllers/deployment.yaml | 22 ++-- .../alloy/templates/rbac.yaml | 117 ++++++++++++++++++ .../alloy/templates/service.yaml | 23 ++++ .../alloy/templates/serviceaccount.yaml | 13 ++ .../grafana-agent/templates/configmap.yaml | 42 ------- .../grafana-agent/templates/rbac.yaml 
| 117 ------------------ .../grafana-agent/templates/service.yaml | 23 ---- .../templates/serviceaccount.yaml | 13 -- .../alloy/templates/configmap.yaml | 42 +++++++ .../templates/controllers/daemonset.yaml | 22 ++-- .../with-digests/alloy/templates/rbac.yaml | 117 ++++++++++++++++++ .../with-digests/alloy/templates/service.yaml | 23 ++++ .../alloy/templates/serviceaccount.yaml | 13 ++ .../grafana-agent/templates/configmap.yaml | 42 ------- .../grafana-agent/templates/rbac.yaml | 117 ------------------ .../grafana-agent/templates/service.yaml | 23 ---- .../templates/serviceaccount.yaml | 13 -- 305 files changed, 5157 insertions(+), 5157 deletions(-) rename operations/helm/charts/{grafana-agent => alloy}/.helmignore (100%) rename operations/helm/charts/{grafana-agent => alloy}/CHANGELOG.md (100%) rename operations/helm/charts/{grafana-agent => alloy}/Chart.yaml (89%) rename operations/helm/charts/{grafana-agent => alloy}/README.md (100%) rename operations/helm/charts/{grafana-agent => alloy}/README.md.gotmpl (100%) rename operations/helm/charts/{grafana-agent => alloy}/charts/crds/Chart.yaml (100%) rename operations/helm/charts/{grafana-agent => alloy}/charts/crds/crds/monitoring.grafana.com_podlogs.yaml (100%) rename operations/helm/charts/{grafana-agent => alloy}/ci/additional-serviceaccount-label-values.yaml (100%) rename operations/helm/charts/{grafana-agent => alloy}/ci/clustering-values.yaml (100%) rename operations/helm/charts/{grafana-agent => alloy}/ci/controller-volumes-extra-values.yaml (100%) rename operations/helm/charts/{grafana-agent => alloy}/ci/create-daemonset-hostnetwork-values.yaml (100%) rename operations/helm/charts/{grafana-agent => alloy}/ci/create-daemonset-values.yaml (100%) rename operations/helm/charts/{grafana-agent => alloy}/ci/create-deployment-autoscaling-values.yaml (100%) rename operations/helm/charts/{grafana-agent => alloy}/ci/create-deployment-values.yaml (100%) rename operations/helm/charts/{grafana-agent => alloy}/ci/create-statefulset-autoscaling-values.yaml (100%) rename operations/helm/charts/{grafana-agent => alloy}/ci/create-statefulset-values.yaml (100%) rename operations/helm/charts/{grafana-agent => alloy}/ci/custom-config-values.yaml (100%) rename operations/helm/charts/{grafana-agent => alloy}/ci/default-values-values.yaml (100%) rename operations/helm/charts/{grafana-agent => alloy}/ci/enable-servicemonitor-tls-values.yaml (100%) rename operations/helm/charts/{grafana-agent => alloy}/ci/enable-servicemonitor-values.yaml (100%) rename operations/helm/charts/{grafana-agent => alloy}/ci/envFrom-values.yaml (100%) rename operations/helm/charts/{grafana-agent => alloy}/ci/existing-config-values.yaml (100%) rename operations/helm/charts/{grafana-agent => alloy}/ci/extra-env-values.yaml (100%) rename operations/helm/charts/{grafana-agent => alloy}/ci/extra-ports-values.yaml (100%) rename operations/helm/charts/{grafana-agent => alloy}/ci/faro-ingress-values.yaml (100%) rename operations/helm/charts/{grafana-agent => alloy}/ci/global-image-pullsecrets-values.yaml (100%) rename operations/helm/charts/{grafana-agent => alloy}/ci/global-image-registry-values.yaml (100%) rename operations/helm/charts/{grafana-agent => alloy}/ci/initcontainers-values.yaml (100%) rename operations/helm/charts/{grafana-agent => alloy}/ci/local-image-pullsecrets-values.yaml (100%) rename operations/helm/charts/{grafana-agent => alloy}/ci/local-image-registry-values.yaml (100%) rename operations/helm/charts/{grafana-agent => 
alloy}/ci/nodeselectors-and-tolerations-values.yaml (100%) rename operations/helm/charts/{grafana-agent => alloy}/ci/pod_annotations-values.yaml (100%) rename operations/helm/charts/{grafana-agent => alloy}/ci/sidecars-values.yaml (100%) rename operations/helm/charts/{grafana-agent => alloy}/ci/topologyspreadconstraints-values.yaml (100%) rename operations/helm/charts/{grafana-agent => alloy}/ci/with-digests-values.yaml (100%) rename operations/helm/charts/{grafana-agent => alloy}/config/example.river (100%) rename operations/helm/charts/{grafana-agent => alloy}/templates/NOTES.txt (100%) rename operations/helm/charts/{grafana-agent => alloy}/templates/_config.tpl (100%) rename operations/helm/charts/{grafana-agent => alloy}/templates/_helpers.tpl (100%) rename operations/helm/charts/{grafana-agent => alloy}/templates/cluster_service.yaml (100%) rename operations/helm/charts/{grafana-agent => alloy}/templates/configmap.yaml (100%) rename operations/helm/charts/{grafana-agent => alloy}/templates/containers/_agent.yaml (100%) rename operations/helm/charts/{grafana-agent => alloy}/templates/containers/_watch.yaml (100%) rename operations/helm/charts/{grafana-agent => alloy}/templates/controllers/_pod.yaml (100%) rename operations/helm/charts/{grafana-agent => alloy}/templates/controllers/daemonset.yaml (100%) rename operations/helm/charts/{grafana-agent => alloy}/templates/controllers/deployment.yaml (100%) rename operations/helm/charts/{grafana-agent => alloy}/templates/controllers/statefulset.yaml (100%) rename operations/helm/charts/{grafana-agent => alloy}/templates/hpa.yaml (100%) rename operations/helm/charts/{grafana-agent => alloy}/templates/ingress.yaml (100%) rename operations/helm/charts/{grafana-agent => alloy}/templates/rbac.yaml (100%) rename operations/helm/charts/{grafana-agent => alloy}/templates/service.yaml (100%) rename operations/helm/charts/{grafana-agent => alloy}/templates/serviceaccount.yaml (100%) rename operations/helm/charts/{grafana-agent => alloy}/templates/servicemonitor.yaml (100%) rename operations/helm/charts/{grafana-agent => alloy}/values.yaml (100%) rename operations/helm/tests/additional-serviceaccount-label/{grafana-agent => alloy}/templates/configmap.yaml (77%) rename operations/helm/tests/{create-daemonset/grafana-agent => additional-serviceaccount-label/alloy}/templates/controllers/daemonset.yaml (78%) rename operations/helm/tests/{controller-volumes-extra/grafana-agent => additional-serviceaccount-label/alloy}/templates/rbac.yaml (83%) rename operations/helm/tests/{clustering/grafana-agent => additional-serviceaccount-label/alloy}/templates/service.yaml (51%) create mode 100644 operations/helm/tests/additional-serviceaccount-label/alloy/templates/serviceaccount.yaml delete mode 100644 operations/helm/tests/additional-serviceaccount-label/grafana-agent/templates/serviceaccount.yaml rename operations/helm/tests/clustering/{grafana-agent => alloy}/templates/cluster_service.yaml (64%) rename operations/helm/tests/{controller-volumes-extra/grafana-agent => clustering/alloy}/templates/configmap.yaml (77%) rename operations/helm/tests/clustering/{grafana-agent => alloy}/templates/controllers/statefulset.yaml (76%) rename operations/helm/tests/{create-daemonset-hostnetwork/grafana-agent => clustering/alloy}/templates/rbac.yaml (83%) rename operations/helm/tests/{controller-volumes-extra/grafana-agent => clustering/alloy}/templates/service.yaml (51%) create mode 100644 operations/helm/tests/clustering/alloy/templates/serviceaccount.yaml delete mode 100644 
operations/helm/tests/clustering/grafana-agent/templates/serviceaccount.yaml rename operations/helm/tests/{clustering/grafana-agent => controller-volumes-extra/alloy}/templates/configmap.yaml (77%) rename operations/helm/tests/controller-volumes-extra/{grafana-agent => alloy}/templates/controllers/daemonset.yaml (79%) rename operations/helm/tests/{additional-serviceaccount-label/grafana-agent => controller-volumes-extra/alloy}/templates/rbac.yaml (83%) rename operations/helm/tests/{create-daemonset-hostnetwork/grafana-agent => controller-volumes-extra/alloy}/templates/service.yaml (51%) create mode 100644 operations/helm/tests/controller-volumes-extra/alloy/templates/serviceaccount.yaml delete mode 100644 operations/helm/tests/controller-volumes-extra/grafana-agent/templates/serviceaccount.yaml rename operations/helm/tests/create-daemonset-hostnetwork/{grafana-agent => alloy}/templates/configmap.yaml (77%) rename operations/helm/tests/create-daemonset-hostnetwork/{grafana-agent => alloy}/templates/controllers/daemonset.yaml (78%) rename operations/helm/tests/{clustering/grafana-agent => create-daemonset-hostnetwork/alloy}/templates/rbac.yaml (83%) rename operations/helm/tests/{additional-serviceaccount-label/grafana-agent => create-daemonset-hostnetwork/alloy}/templates/service.yaml (51%) create mode 100644 operations/helm/tests/create-daemonset-hostnetwork/alloy/templates/serviceaccount.yaml delete mode 100644 operations/helm/tests/create-daemonset-hostnetwork/grafana-agent/templates/serviceaccount.yaml create mode 100644 operations/helm/tests/create-daemonset/alloy/templates/configmap.yaml rename operations/helm/tests/{additional-serviceaccount-label/grafana-agent => create-daemonset/alloy}/templates/controllers/daemonset.yaml (78%) create mode 100644 operations/helm/tests/create-daemonset/alloy/templates/rbac.yaml create mode 100644 operations/helm/tests/create-daemonset/alloy/templates/service.yaml create mode 100644 operations/helm/tests/create-daemonset/alloy/templates/serviceaccount.yaml delete mode 100644 operations/helm/tests/create-daemonset/grafana-agent/templates/configmap.yaml delete mode 100644 operations/helm/tests/create-daemonset/grafana-agent/templates/rbac.yaml delete mode 100644 operations/helm/tests/create-daemonset/grafana-agent/templates/service.yaml delete mode 100644 operations/helm/tests/create-daemonset/grafana-agent/templates/serviceaccount.yaml create mode 100644 operations/helm/tests/create-deployment-autoscaling/alloy/templates/configmap.yaml rename operations/helm/tests/create-deployment-autoscaling/{grafana-agent => alloy}/templates/controllers/deployment.yaml (79%) rename operations/helm/tests/create-deployment-autoscaling/{grafana-agent => alloy}/templates/hpa.yaml (82%) create mode 100644 operations/helm/tests/create-deployment-autoscaling/alloy/templates/rbac.yaml create mode 100644 operations/helm/tests/create-deployment-autoscaling/alloy/templates/service.yaml create mode 100644 operations/helm/tests/create-deployment-autoscaling/alloy/templates/serviceaccount.yaml delete mode 100644 operations/helm/tests/create-deployment-autoscaling/grafana-agent/templates/configmap.yaml delete mode 100644 operations/helm/tests/create-deployment-autoscaling/grafana-agent/templates/rbac.yaml delete mode 100644 operations/helm/tests/create-deployment-autoscaling/grafana-agent/templates/service.yaml delete mode 100644 operations/helm/tests/create-deployment-autoscaling/grafana-agent/templates/serviceaccount.yaml create mode 100644 
operations/helm/tests/create-deployment/alloy/templates/configmap.yaml rename operations/helm/tests/create-deployment/{grafana-agent => alloy}/templates/controllers/deployment.yaml (78%) create mode 100644 operations/helm/tests/create-deployment/alloy/templates/rbac.yaml create mode 100644 operations/helm/tests/create-deployment/alloy/templates/service.yaml create mode 100644 operations/helm/tests/create-deployment/alloy/templates/serviceaccount.yaml delete mode 100644 operations/helm/tests/create-deployment/grafana-agent/templates/configmap.yaml delete mode 100644 operations/helm/tests/create-deployment/grafana-agent/templates/rbac.yaml delete mode 100644 operations/helm/tests/create-deployment/grafana-agent/templates/service.yaml delete mode 100644 operations/helm/tests/create-deployment/grafana-agent/templates/serviceaccount.yaml create mode 100644 operations/helm/tests/create-statefulset-autoscaling/alloy/templates/configmap.yaml rename operations/helm/tests/create-statefulset-autoscaling/{grafana-agent => alloy}/templates/controllers/statefulset.yaml (79%) rename operations/helm/tests/create-statefulset-autoscaling/{grafana-agent => alloy}/templates/hpa.yaml (77%) create mode 100644 operations/helm/tests/create-statefulset-autoscaling/alloy/templates/rbac.yaml create mode 100644 operations/helm/tests/create-statefulset-autoscaling/alloy/templates/service.yaml create mode 100644 operations/helm/tests/create-statefulset-autoscaling/alloy/templates/serviceaccount.yaml delete mode 100644 operations/helm/tests/create-statefulset-autoscaling/grafana-agent/templates/configmap.yaml delete mode 100644 operations/helm/tests/create-statefulset-autoscaling/grafana-agent/templates/rbac.yaml delete mode 100644 operations/helm/tests/create-statefulset-autoscaling/grafana-agent/templates/service.yaml delete mode 100644 operations/helm/tests/create-statefulset-autoscaling/grafana-agent/templates/serviceaccount.yaml create mode 100644 operations/helm/tests/create-statefulset/alloy/templates/configmap.yaml rename operations/helm/tests/create-statefulset/{grafana-agent => alloy}/templates/controllers/statefulset.yaml (77%) create mode 100644 operations/helm/tests/create-statefulset/alloy/templates/rbac.yaml create mode 100644 operations/helm/tests/create-statefulset/alloy/templates/service.yaml create mode 100644 operations/helm/tests/create-statefulset/alloy/templates/serviceaccount.yaml delete mode 100644 operations/helm/tests/create-statefulset/grafana-agent/templates/configmap.yaml delete mode 100644 operations/helm/tests/create-statefulset/grafana-agent/templates/rbac.yaml delete mode 100644 operations/helm/tests/create-statefulset/grafana-agent/templates/service.yaml delete mode 100644 operations/helm/tests/create-statefulset/grafana-agent/templates/serviceaccount.yaml rename operations/helm/tests/custom-config/{grafana-agent => alloy}/templates/configmap.yaml (60%) rename operations/helm/tests/{default-values/grafana-agent => custom-config/alloy}/templates/controllers/daemonset.yaml (78%) create mode 100644 operations/helm/tests/custom-config/alloy/templates/rbac.yaml create mode 100644 operations/helm/tests/custom-config/alloy/templates/service.yaml create mode 100644 operations/helm/tests/custom-config/alloy/templates/serviceaccount.yaml delete mode 100644 operations/helm/tests/custom-config/grafana-agent/templates/rbac.yaml delete mode 100644 operations/helm/tests/custom-config/grafana-agent/templates/service.yaml delete mode 100644 
operations/helm/tests/custom-config/grafana-agent/templates/serviceaccount.yaml create mode 100644 operations/helm/tests/default-values/alloy/templates/configmap.yaml rename operations/helm/tests/{custom-config/grafana-agent => default-values/alloy}/templates/controllers/daemonset.yaml (78%) create mode 100644 operations/helm/tests/default-values/alloy/templates/rbac.yaml create mode 100644 operations/helm/tests/default-values/alloy/templates/service.yaml create mode 100644 operations/helm/tests/default-values/alloy/templates/serviceaccount.yaml delete mode 100644 operations/helm/tests/default-values/grafana-agent/templates/configmap.yaml delete mode 100644 operations/helm/tests/default-values/grafana-agent/templates/rbac.yaml delete mode 100644 operations/helm/tests/default-values/grafana-agent/templates/service.yaml delete mode 100644 operations/helm/tests/default-values/grafana-agent/templates/serviceaccount.yaml create mode 100644 operations/helm/tests/enable-servicemonitor-tls/alloy/templates/configmap.yaml rename operations/helm/tests/enable-servicemonitor-tls/{grafana-agent => alloy}/templates/controllers/daemonset.yaml (78%) create mode 100644 operations/helm/tests/enable-servicemonitor-tls/alloy/templates/rbac.yaml create mode 100644 operations/helm/tests/enable-servicemonitor-tls/alloy/templates/service.yaml create mode 100644 operations/helm/tests/enable-servicemonitor-tls/alloy/templates/serviceaccount.yaml rename operations/helm/tests/enable-servicemonitor-tls/{grafana-agent => alloy}/templates/servicemonitor.yaml (52%) delete mode 100644 operations/helm/tests/enable-servicemonitor-tls/grafana-agent/templates/configmap.yaml delete mode 100644 operations/helm/tests/enable-servicemonitor-tls/grafana-agent/templates/rbac.yaml delete mode 100644 operations/helm/tests/enable-servicemonitor-tls/grafana-agent/templates/service.yaml delete mode 100644 operations/helm/tests/enable-servicemonitor-tls/grafana-agent/templates/serviceaccount.yaml create mode 100644 operations/helm/tests/enable-servicemonitor/alloy/templates/configmap.yaml create mode 100644 operations/helm/tests/enable-servicemonitor/alloy/templates/controllers/daemonset.yaml create mode 100644 operations/helm/tests/enable-servicemonitor/alloy/templates/rbac.yaml create mode 100644 operations/helm/tests/enable-servicemonitor/alloy/templates/service.yaml create mode 100644 operations/helm/tests/enable-servicemonitor/alloy/templates/serviceaccount.yaml create mode 100644 operations/helm/tests/enable-servicemonitor/alloy/templates/servicemonitor.yaml delete mode 100644 operations/helm/tests/enable-servicemonitor/grafana-agent/templates/configmap.yaml delete mode 100644 operations/helm/tests/enable-servicemonitor/grafana-agent/templates/controllers/daemonset.yaml delete mode 100644 operations/helm/tests/enable-servicemonitor/grafana-agent/templates/rbac.yaml delete mode 100644 operations/helm/tests/enable-servicemonitor/grafana-agent/templates/service.yaml delete mode 100644 operations/helm/tests/enable-servicemonitor/grafana-agent/templates/serviceaccount.yaml delete mode 100644 operations/helm/tests/enable-servicemonitor/grafana-agent/templates/servicemonitor.yaml create mode 100644 operations/helm/tests/envFrom/alloy/templates/configmap.yaml rename operations/helm/tests/envFrom/{grafana-agent => alloy}/templates/controllers/daemonset.yaml (79%) create mode 100644 operations/helm/tests/envFrom/alloy/templates/rbac.yaml create mode 100644 operations/helm/tests/envFrom/alloy/templates/service.yaml create mode 100644 
operations/helm/tests/envFrom/alloy/templates/serviceaccount.yaml
 delete mode 100644 operations/helm/tests/envFrom/grafana-agent/templates/configmap.yaml
 delete mode 100644 operations/helm/tests/envFrom/grafana-agent/templates/rbac.yaml
 delete mode 100644 operations/helm/tests/envFrom/grafana-agent/templates/service.yaml
 delete mode 100644 operations/helm/tests/envFrom/grafana-agent/templates/serviceaccount.yaml
 rename operations/helm/tests/existing-config/{grafana-agent => alloy}/templates/controllers/daemonset.yaml (80%)
 create mode 100644 operations/helm/tests/existing-config/alloy/templates/rbac.yaml
 create mode 100644 operations/helm/tests/existing-config/alloy/templates/service.yaml
 create mode 100644 operations/helm/tests/existing-config/alloy/templates/serviceaccount.yaml
 delete mode 100644 operations/helm/tests/existing-config/grafana-agent/templates/rbac.yaml
 delete mode 100644 operations/helm/tests/existing-config/grafana-agent/templates/service.yaml
 delete mode 100644 operations/helm/tests/existing-config/grafana-agent/templates/serviceaccount.yaml
 create mode 100644 operations/helm/tests/extra-env/alloy/templates/configmap.yaml
 rename operations/helm/tests/extra-env/{grafana-agent => alloy}/templates/controllers/daemonset.yaml (80%)
 create mode 100644 operations/helm/tests/extra-env/alloy/templates/rbac.yaml
 create mode 100644 operations/helm/tests/extra-env/alloy/templates/service.yaml
 create mode 100644 operations/helm/tests/extra-env/alloy/templates/serviceaccount.yaml
 delete mode 100644 operations/helm/tests/extra-env/grafana-agent/templates/configmap.yaml
 delete mode 100644 operations/helm/tests/extra-env/grafana-agent/templates/rbac.yaml
 delete mode 100644 operations/helm/tests/extra-env/grafana-agent/templates/service.yaml
 delete mode 100644 operations/helm/tests/extra-env/grafana-agent/templates/serviceaccount.yaml
 create mode 100644 operations/helm/tests/extra-ports/alloy/templates/configmap.yaml
 rename operations/helm/tests/extra-ports/{grafana-agent => alloy}/templates/controllers/daemonset.yaml (79%)
 create mode 100644 operations/helm/tests/extra-ports/alloy/templates/rbac.yaml
 rename operations/helm/tests/extra-ports/{grafana-agent => alloy}/templates/service.yaml (57%)
 create mode 100644 operations/helm/tests/extra-ports/alloy/templates/serviceaccount.yaml
 delete mode 100644 operations/helm/tests/extra-ports/grafana-agent/templates/configmap.yaml
 delete mode 100644 operations/helm/tests/extra-ports/grafana-agent/templates/rbac.yaml
 delete mode 100644 operations/helm/tests/extra-ports/grafana-agent/templates/serviceaccount.yaml
 create mode 100644 operations/helm/tests/faro-ingress/alloy/templates/configmap.yaml
 rename operations/helm/tests/faro-ingress/{grafana-agent => alloy}/templates/controllers/daemonset.yaml (79%)
 rename operations/helm/tests/faro-ingress/{grafana-agent => alloy}/templates/ingress.yaml (63%)
 create mode 100644 operations/helm/tests/faro-ingress/alloy/templates/rbac.yaml
 rename operations/helm/tests/faro-ingress/{grafana-agent => alloy}/templates/service.yaml (57%)
 create mode 100644 operations/helm/tests/faro-ingress/alloy/templates/serviceaccount.yaml
 delete mode 100644 operations/helm/tests/faro-ingress/grafana-agent/templates/configmap.yaml
 delete mode 100644 operations/helm/tests/faro-ingress/grafana-agent/templates/rbac.yaml
 delete mode 100644 operations/helm/tests/faro-ingress/grafana-agent/templates/serviceaccount.yaml
 create mode 100644 operations/helm/tests/global-image-pullsecrets/alloy/templates/configmap.yaml
 rename operations/helm/tests/global-image-pullsecrets/{grafana-agent => alloy}/templates/controllers/daemonset.yaml (79%)
 create mode 100644 operations/helm/tests/global-image-pullsecrets/alloy/templates/rbac.yaml
 create mode 100644 operations/helm/tests/global-image-pullsecrets/alloy/templates/service.yaml
 create mode 100644 operations/helm/tests/global-image-pullsecrets/alloy/templates/serviceaccount.yaml
 delete mode 100644 operations/helm/tests/global-image-pullsecrets/grafana-agent/templates/configmap.yaml
 delete mode 100644 operations/helm/tests/global-image-pullsecrets/grafana-agent/templates/rbac.yaml
 delete mode 100644 operations/helm/tests/global-image-pullsecrets/grafana-agent/templates/service.yaml
 delete mode 100644 operations/helm/tests/global-image-pullsecrets/grafana-agent/templates/serviceaccount.yaml
 create mode 100644 operations/helm/tests/global-image-registry/alloy/templates/configmap.yaml
 rename operations/helm/tests/global-image-registry/{grafana-agent => alloy}/templates/controllers/daemonset.yaml (78%)
 create mode 100644 operations/helm/tests/global-image-registry/alloy/templates/rbac.yaml
 create mode 100644 operations/helm/tests/global-image-registry/alloy/templates/service.yaml
 create mode 100644 operations/helm/tests/global-image-registry/alloy/templates/serviceaccount.yaml
 delete mode 100644 operations/helm/tests/global-image-registry/grafana-agent/templates/configmap.yaml
 delete mode 100644 operations/helm/tests/global-image-registry/grafana-agent/templates/rbac.yaml
 delete mode 100644 operations/helm/tests/global-image-registry/grafana-agent/templates/service.yaml
 delete mode 100644 operations/helm/tests/global-image-registry/grafana-agent/templates/serviceaccount.yaml
 create mode 100644 operations/helm/tests/initcontainers/alloy/templates/configmap.yaml
 rename operations/helm/tests/initcontainers/{grafana-agent => alloy}/templates/controllers/daemonset.yaml (83%)
 create mode 100644 operations/helm/tests/initcontainers/alloy/templates/rbac.yaml
 create mode 100644 operations/helm/tests/initcontainers/alloy/templates/service.yaml
 create mode 100644 operations/helm/tests/initcontainers/alloy/templates/serviceaccount.yaml
 delete mode 100644 operations/helm/tests/initcontainers/grafana-agent/templates/configmap.yaml
 delete mode 100644 operations/helm/tests/initcontainers/grafana-agent/templates/rbac.yaml
 delete mode 100644 operations/helm/tests/initcontainers/grafana-agent/templates/service.yaml
 delete mode 100644 operations/helm/tests/initcontainers/grafana-agent/templates/serviceaccount.yaml
 create mode 100644 operations/helm/tests/local-image-pullsecrets/alloy/templates/configmap.yaml
 rename operations/helm/tests/local-image-pullsecrets/{grafana-agent => alloy}/templates/controllers/daemonset.yaml (78%)
 create mode 100644 operations/helm/tests/local-image-pullsecrets/alloy/templates/rbac.yaml
 create mode 100644 operations/helm/tests/local-image-pullsecrets/alloy/templates/service.yaml
 create mode 100644 operations/helm/tests/local-image-pullsecrets/alloy/templates/serviceaccount.yaml
 delete mode 100644 operations/helm/tests/local-image-pullsecrets/grafana-agent/templates/configmap.yaml
 delete mode 100644 operations/helm/tests/local-image-pullsecrets/grafana-agent/templates/rbac.yaml
 delete mode 100644 operations/helm/tests/local-image-pullsecrets/grafana-agent/templates/service.yaml
 delete mode 100644 operations/helm/tests/local-image-pullsecrets/grafana-agent/templates/serviceaccount.yaml
 create mode 100644 operations/helm/tests/local-image-registry/alloy/templates/configmap.yaml
 rename operations/helm/tests/local-image-registry/{grafana-agent => alloy}/templates/controllers/daemonset.yaml (78%)
 create mode 100644 operations/helm/tests/local-image-registry/alloy/templates/rbac.yaml
 create mode 100644 operations/helm/tests/local-image-registry/alloy/templates/service.yaml
 create mode 100644 operations/helm/tests/local-image-registry/alloy/templates/serviceaccount.yaml
 delete mode 100644 operations/helm/tests/local-image-registry/grafana-agent/templates/configmap.yaml
 delete mode 100644 operations/helm/tests/local-image-registry/grafana-agent/templates/rbac.yaml
 delete mode 100644 operations/helm/tests/local-image-registry/grafana-agent/templates/service.yaml
 delete mode 100644 operations/helm/tests/local-image-registry/grafana-agent/templates/serviceaccount.yaml
 create mode 100644 operations/helm/tests/nodeselectors-and-tolerations/alloy/templates/configmap.yaml
 rename operations/helm/tests/nodeselectors-and-tolerations/{grafana-agent => alloy}/templates/controllers/daemonset.yaml (80%)
 create mode 100644 operations/helm/tests/nodeselectors-and-tolerations/alloy/templates/rbac.yaml
 create mode 100644 operations/helm/tests/nodeselectors-and-tolerations/alloy/templates/service.yaml
 create mode 100644 operations/helm/tests/nodeselectors-and-tolerations/alloy/templates/serviceaccount.yaml
 delete mode 100644 operations/helm/tests/nodeselectors-and-tolerations/grafana-agent/templates/configmap.yaml
 delete mode 100644 operations/helm/tests/nodeselectors-and-tolerations/grafana-agent/templates/rbac.yaml
 delete mode 100644 operations/helm/tests/nodeselectors-and-tolerations/grafana-agent/templates/service.yaml
 delete mode 100644 operations/helm/tests/nodeselectors-and-tolerations/grafana-agent/templates/serviceaccount.yaml
 create mode 100644 operations/helm/tests/pod_annotations/alloy/templates/configmap.yaml
 rename operations/helm/tests/pod_annotations/{grafana-agent => alloy}/templates/controllers/daemonset.yaml (78%)
 create mode 100644 operations/helm/tests/pod_annotations/alloy/templates/rbac.yaml
 create mode 100644 operations/helm/tests/pod_annotations/alloy/templates/service.yaml
 create mode 100644 operations/helm/tests/pod_annotations/alloy/templates/serviceaccount.yaml
 delete mode 100644 operations/helm/tests/pod_annotations/grafana-agent/templates/configmap.yaml
 delete mode 100644 operations/helm/tests/pod_annotations/grafana-agent/templates/rbac.yaml
 delete mode 100644 operations/helm/tests/pod_annotations/grafana-agent/templates/service.yaml
 delete mode 100644 operations/helm/tests/pod_annotations/grafana-agent/templates/serviceaccount.yaml
 create mode 100644 operations/helm/tests/sidecars/alloy/templates/configmap.yaml
 rename operations/helm/tests/sidecars/{grafana-agent => alloy}/templates/controllers/daemonset.yaml (83%)
 create mode 100644 operations/helm/tests/sidecars/alloy/templates/rbac.yaml
 create mode 100644 operations/helm/tests/sidecars/alloy/templates/service.yaml
 create mode 100644 operations/helm/tests/sidecars/alloy/templates/serviceaccount.yaml
 delete mode 100644 operations/helm/tests/sidecars/grafana-agent/templates/configmap.yaml
 delete mode 100644 operations/helm/tests/sidecars/grafana-agent/templates/rbac.yaml
 delete mode 100644 operations/helm/tests/sidecars/grafana-agent/templates/service.yaml
 delete mode 100644 operations/helm/tests/sidecars/grafana-agent/templates/serviceaccount.yaml
 create mode 100644
operations/helm/tests/topologyspreadconstraints/alloy/templates/configmap.yaml rename operations/helm/tests/topologyspreadconstraints/{grafana-agent => alloy}/templates/controllers/deployment.yaml (81%) create mode 100644 operations/helm/tests/topologyspreadconstraints/alloy/templates/rbac.yaml create mode 100644 operations/helm/tests/topologyspreadconstraints/alloy/templates/service.yaml create mode 100644 operations/helm/tests/topologyspreadconstraints/alloy/templates/serviceaccount.yaml delete mode 100644 operations/helm/tests/topologyspreadconstraints/grafana-agent/templates/configmap.yaml delete mode 100644 operations/helm/tests/topologyspreadconstraints/grafana-agent/templates/rbac.yaml delete mode 100644 operations/helm/tests/topologyspreadconstraints/grafana-agent/templates/service.yaml delete mode 100644 operations/helm/tests/topologyspreadconstraints/grafana-agent/templates/serviceaccount.yaml create mode 100644 operations/helm/tests/with-digests/alloy/templates/configmap.yaml rename operations/helm/tests/with-digests/{grafana-agent => alloy}/templates/controllers/daemonset.yaml (79%) create mode 100644 operations/helm/tests/with-digests/alloy/templates/rbac.yaml create mode 100644 operations/helm/tests/with-digests/alloy/templates/service.yaml create mode 100644 operations/helm/tests/with-digests/alloy/templates/serviceaccount.yaml delete mode 100644 operations/helm/tests/with-digests/grafana-agent/templates/configmap.yaml delete mode 100644 operations/helm/tests/with-digests/grafana-agent/templates/rbac.yaml delete mode 100644 operations/helm/tests/with-digests/grafana-agent/templates/service.yaml delete mode 100644 operations/helm/tests/with-digests/grafana-agent/templates/serviceaccount.yaml diff --git a/.github/workflows/helm-release.yml.disabled b/.github/workflows/helm-release.yml.disabled index 94caa2fd3f..18e3744506 100644 --- a/.github/workflows/helm-release.yml.disabled +++ b/.github/workflows/helm-release.yml.disabled @@ -130,7 +130,7 @@ jobs: # Note that this creates a release in grafana/helm-charts with a new tag. # The tag name in grafana/helm-charts is -, while the - # tag name for grafana/agent is helm-chart/. + # tag name for grafana/alloy is helm-chart/. - name: Make github release uses: softprops/action-gh-release@v1 with: diff --git a/.github/workflows/helm-test.yml b/.github/workflows/helm-test.yml index 378623eebb..a705d78f23 100644 --- a/.github/workflows/helm-test.yml +++ b/.github/workflows/helm-test.yml @@ -10,7 +10,7 @@ jobs: - name: Regenerate docs run: | docker run --rm \ - -v "$(pwd)/operations/helm/charts/grafana-agent:/helm-docs" \ + -v "$(pwd)/operations/helm/charts/alloy:/helm-docs" \ -u "$(id -u)" \ jnorwood/helm-docs if ! git diff --exit-code; then diff --git a/Makefile b/Makefile index e6d823b3eb..47ce69da76 100644 --- a/Makefile +++ b/Makefile @@ -229,7 +229,7 @@ generate-helm-docs: ifeq ($(USE_CONTAINER),1) $(RERUN_IN_CONTAINER) else - cd operations/helm/charts/grafana-agent && helm-docs + cd operations/helm/charts/alloy && helm-docs endif generate-helm-tests: diff --git a/docs/sources/tasks/configure/configure-kubernetes.md b/docs/sources/tasks/configure/configure-kubernetes.md index 822102f80f..e8709d4056 100644 --- a/docs/sources/tasks/configure/configure-kubernetes.md +++ b/docs/sources/tasks/configure/configure-kubernetes.md @@ -19,7 +19,7 @@ To configure {{< param "PRODUCT_NAME" >}} on Kubernetes, perform the following s 1. 
Run the following command in a terminal to upgrade your {{< param "PRODUCT_NAME" >}} installation: ```shell - helm upgrade RELEASE_NAME grafana/grafana-agent -f VALUES_PATH + helm upgrade RELEASE_NAME grafana/alloy -f VALUES_PATH ``` 1. Replace `RELEASE_NAME` with the name you used for your {{< param "PRODUCT_NAME" >}} installation. @@ -38,12 +38,12 @@ The following is an example snippet of a `kustomization` that disables this beha ```yaml configMapGenerator: - - name: grafana-agent + - name: alloy files: - config.river options: disableNameSuffixHash: true ``` -[values.yaml]: https://raw.githubusercontent.com/grafana/agent/main/operations/helm/charts/grafana-agent/values.yaml -[Helm chart]: https://github.com/grafana/agent/tree/main/operations/helm/charts/grafana-agent +[values.yaml]: https://raw.githubusercontent.com/grafana/alloy/main/operations/helm/charts/alloy/values.yaml +[Helm chart]: https://github.com/grafana/alloy/tree/main/operations/helm/charts/alloy [Kustomize]: https://kubernetes.io/docs/tasks/manage-kubernetes-objects/kustomization/ diff --git a/docs/sources/tasks/migrate/from-operator.md b/docs/sources/tasks/migrate/from-operator.md index 48b0af6cca..c01edd7c03 100644 --- a/docs/sources/tasks/migrate/from-operator.md +++ b/docs/sources/tasks/migrate/from-operator.md @@ -279,7 +279,7 @@ However, all static mode integrations have an equivalent component in the [`prom The [reference documentation][component documentation] should help convert those integrations to their {{< param "PRODUCT_NAME" >}} equivalent. -[default values]: https://github.com/grafana/alloy/blob/main/operations/helm/charts/grafana-agent/values.yaml +[default values]: https://github.com/grafana/alloy/blob/main/operations/helm/charts/alloy/values.yaml [clustering]: ../../../concepts/clustering/ [deployment guide]: ../../../get-started/deploy-alloy diff --git a/internal/component/loki/source/podlogs/internal/apis/monitoring/v1alpha2/doc.go b/internal/component/loki/source/podlogs/internal/apis/monitoring/v1alpha2/doc.go index ec4a8f557d..fbd99f5d4d 100644 --- a/internal/component/loki/source/podlogs/internal/apis/monitoring/v1alpha2/doc.go +++ b/internal/component/loki/source/podlogs/internal/apis/monitoring/v1alpha2/doc.go @@ -2,6 +2,6 @@ // +groupName=monitoring.grafana.com //go:generate controller-gen object paths=. -//go:generate controller-gen crd:crdVersions=v1 paths=. output:crd:dir=../../../../../../../../operations/helm/charts/grafana-agent/crds +//go:generate controller-gen crd:crdVersions=v1 paths=. output:crd:dir=../../../../../../../../operations/helm/charts/alloy/crds package v1alpha2 diff --git a/operations/helm/Makefile b/operations/helm/Makefile index 5cf89398a5..de7f25a59e 100644 --- a/operations/helm/Makefile +++ b/operations/helm/Makefile @@ -1,6 +1,6 @@ # Docs generated by https://github.com/norwoodj/helm-docs docs: - cd charts/grafana-agent && helm-docs + cd charts/alloy && helm-docs rebuild-tests: bash ./scripts/rebuild-tests.sh diff --git a/operations/helm/README.md b/operations/helm/README.md index f6fa836eb6..fb74f072c1 100644 --- a/operations/helm/README.md +++ b/operations/helm/README.md @@ -11,7 +11,7 @@ correctness of the templates emitted by the Helm chart. To regenerate this folder, call `make rebuild-tests` from the root of the repository. `make generate-helm-tests` will iterate through the value.yaml files in -`charts/grafana-agent/ci` and generate each one as a separate directory under `charts/grafana-agent/tests`. 
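The configure-kubernetes.md hunk above only swaps the chart name in the documented upgrade command. As a hedged end-to-end sketch of the workflow that hunk describes (the release name, values path, and `helm repo add` URL are assumptions, not part of the patch):

```shell
# Point Helm at the Grafana chart repository, then upgrade an existing
# release to the renamed chart. Substitute your own release name and values file.
helm repo add grafana https://grafana.github.io/helm-charts   # assumed repo URL
helm repo update
helm upgrade my-release grafana/alloy -f ./my-values.yaml
```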
+`charts/alloy/ci` and generate each one as a separate directory under `charts/alloy/tests`. When modifying the Helm charts, `make rebuild-tests` must be run before submitting a PR, as a linter check will ensure that this directory is diff --git a/operations/helm/charts/grafana-agent/.helmignore b/operations/helm/charts/alloy/.helmignore similarity index 100% rename from operations/helm/charts/grafana-agent/.helmignore rename to operations/helm/charts/alloy/.helmignore diff --git a/operations/helm/charts/grafana-agent/CHANGELOG.md b/operations/helm/charts/alloy/CHANGELOG.md similarity index 100% rename from operations/helm/charts/grafana-agent/CHANGELOG.md rename to operations/helm/charts/alloy/CHANGELOG.md diff --git a/operations/helm/charts/grafana-agent/Chart.yaml b/operations/helm/charts/alloy/Chart.yaml similarity index 89% rename from operations/helm/charts/grafana-agent/Chart.yaml rename to operations/helm/charts/alloy/Chart.yaml index 4fd9bf70e5..9de00f3661 100644 --- a/operations/helm/charts/grafana-agent/Chart.yaml +++ b/operations/helm/charts/alloy/Chart.yaml @@ -1,5 +1,5 @@ apiVersion: v2 -name: grafana-agent +name: alloy description: 'Grafana Alloy' type: application version: 0.36.0 diff --git a/operations/helm/charts/grafana-agent/README.md b/operations/helm/charts/alloy/README.md similarity index 100% rename from operations/helm/charts/grafana-agent/README.md rename to operations/helm/charts/alloy/README.md diff --git a/operations/helm/charts/grafana-agent/README.md.gotmpl b/operations/helm/charts/alloy/README.md.gotmpl similarity index 100% rename from operations/helm/charts/grafana-agent/README.md.gotmpl rename to operations/helm/charts/alloy/README.md.gotmpl diff --git a/operations/helm/charts/grafana-agent/charts/crds/Chart.yaml b/operations/helm/charts/alloy/charts/crds/Chart.yaml similarity index 100% rename from operations/helm/charts/grafana-agent/charts/crds/Chart.yaml rename to operations/helm/charts/alloy/charts/crds/Chart.yaml diff --git a/operations/helm/charts/grafana-agent/charts/crds/crds/monitoring.grafana.com_podlogs.yaml b/operations/helm/charts/alloy/charts/crds/crds/monitoring.grafana.com_podlogs.yaml similarity index 100% rename from operations/helm/charts/grafana-agent/charts/crds/crds/monitoring.grafana.com_podlogs.yaml rename to operations/helm/charts/alloy/charts/crds/crds/monitoring.grafana.com_podlogs.yaml diff --git a/operations/helm/charts/grafana-agent/ci/additional-serviceaccount-label-values.yaml b/operations/helm/charts/alloy/ci/additional-serviceaccount-label-values.yaml similarity index 100% rename from operations/helm/charts/grafana-agent/ci/additional-serviceaccount-label-values.yaml rename to operations/helm/charts/alloy/ci/additional-serviceaccount-label-values.yaml diff --git a/operations/helm/charts/grafana-agent/ci/clustering-values.yaml b/operations/helm/charts/alloy/ci/clustering-values.yaml similarity index 100% rename from operations/helm/charts/grafana-agent/ci/clustering-values.yaml rename to operations/helm/charts/alloy/ci/clustering-values.yaml diff --git a/operations/helm/charts/grafana-agent/ci/controller-volumes-extra-values.yaml b/operations/helm/charts/alloy/ci/controller-volumes-extra-values.yaml similarity index 100% rename from operations/helm/charts/grafana-agent/ci/controller-volumes-extra-values.yaml rename to operations/helm/charts/alloy/ci/controller-volumes-extra-values.yaml diff --git a/operations/helm/charts/grafana-agent/ci/create-daemonset-hostnetwork-values.yaml 
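The Makefile and README hunks above describe the chart's regeneration loop after the `grafana-agent` to `alloy` rename. A minimal sketch of that loop, using only targets and tools named in the diff (invocation order assumed):

```shell
# Rewrite the chart README from README.md.gotmpl, then re-render the golden
# test tree under operations/helm/tests; CI rejects the PR if either is stale.
(cd operations/helm/charts/alloy && helm-docs)
make rebuild-tests
git diff --exit-code   # the same staleness check the helm-test workflow runs
```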
b/operations/helm/charts/alloy/ci/create-daemonset-hostnetwork-values.yaml similarity index 100% rename from operations/helm/charts/grafana-agent/ci/create-daemonset-hostnetwork-values.yaml rename to operations/helm/charts/alloy/ci/create-daemonset-hostnetwork-values.yaml diff --git a/operations/helm/charts/grafana-agent/ci/create-daemonset-values.yaml b/operations/helm/charts/alloy/ci/create-daemonset-values.yaml similarity index 100% rename from operations/helm/charts/grafana-agent/ci/create-daemonset-values.yaml rename to operations/helm/charts/alloy/ci/create-daemonset-values.yaml diff --git a/operations/helm/charts/grafana-agent/ci/create-deployment-autoscaling-values.yaml b/operations/helm/charts/alloy/ci/create-deployment-autoscaling-values.yaml similarity index 100% rename from operations/helm/charts/grafana-agent/ci/create-deployment-autoscaling-values.yaml rename to operations/helm/charts/alloy/ci/create-deployment-autoscaling-values.yaml diff --git a/operations/helm/charts/grafana-agent/ci/create-deployment-values.yaml b/operations/helm/charts/alloy/ci/create-deployment-values.yaml similarity index 100% rename from operations/helm/charts/grafana-agent/ci/create-deployment-values.yaml rename to operations/helm/charts/alloy/ci/create-deployment-values.yaml diff --git a/operations/helm/charts/grafana-agent/ci/create-statefulset-autoscaling-values.yaml b/operations/helm/charts/alloy/ci/create-statefulset-autoscaling-values.yaml similarity index 100% rename from operations/helm/charts/grafana-agent/ci/create-statefulset-autoscaling-values.yaml rename to operations/helm/charts/alloy/ci/create-statefulset-autoscaling-values.yaml diff --git a/operations/helm/charts/grafana-agent/ci/create-statefulset-values.yaml b/operations/helm/charts/alloy/ci/create-statefulset-values.yaml similarity index 100% rename from operations/helm/charts/grafana-agent/ci/create-statefulset-values.yaml rename to operations/helm/charts/alloy/ci/create-statefulset-values.yaml diff --git a/operations/helm/charts/grafana-agent/ci/custom-config-values.yaml b/operations/helm/charts/alloy/ci/custom-config-values.yaml similarity index 100% rename from operations/helm/charts/grafana-agent/ci/custom-config-values.yaml rename to operations/helm/charts/alloy/ci/custom-config-values.yaml diff --git a/operations/helm/charts/grafana-agent/ci/default-values-values.yaml b/operations/helm/charts/alloy/ci/default-values-values.yaml similarity index 100% rename from operations/helm/charts/grafana-agent/ci/default-values-values.yaml rename to operations/helm/charts/alloy/ci/default-values-values.yaml diff --git a/operations/helm/charts/grafana-agent/ci/enable-servicemonitor-tls-values.yaml b/operations/helm/charts/alloy/ci/enable-servicemonitor-tls-values.yaml similarity index 100% rename from operations/helm/charts/grafana-agent/ci/enable-servicemonitor-tls-values.yaml rename to operations/helm/charts/alloy/ci/enable-servicemonitor-tls-values.yaml diff --git a/operations/helm/charts/grafana-agent/ci/enable-servicemonitor-values.yaml b/operations/helm/charts/alloy/ci/enable-servicemonitor-values.yaml similarity index 100% rename from operations/helm/charts/grafana-agent/ci/enable-servicemonitor-values.yaml rename to operations/helm/charts/alloy/ci/enable-servicemonitor-values.yaml diff --git a/operations/helm/charts/grafana-agent/ci/envFrom-values.yaml b/operations/helm/charts/alloy/ci/envFrom-values.yaml similarity index 100% rename from operations/helm/charts/grafana-agent/ci/envFrom-values.yaml rename to 
operations/helm/charts/alloy/ci/envFrom-values.yaml diff --git a/operations/helm/charts/grafana-agent/ci/existing-config-values.yaml b/operations/helm/charts/alloy/ci/existing-config-values.yaml similarity index 100% rename from operations/helm/charts/grafana-agent/ci/existing-config-values.yaml rename to operations/helm/charts/alloy/ci/existing-config-values.yaml diff --git a/operations/helm/charts/grafana-agent/ci/extra-env-values.yaml b/operations/helm/charts/alloy/ci/extra-env-values.yaml similarity index 100% rename from operations/helm/charts/grafana-agent/ci/extra-env-values.yaml rename to operations/helm/charts/alloy/ci/extra-env-values.yaml diff --git a/operations/helm/charts/grafana-agent/ci/extra-ports-values.yaml b/operations/helm/charts/alloy/ci/extra-ports-values.yaml similarity index 100% rename from operations/helm/charts/grafana-agent/ci/extra-ports-values.yaml rename to operations/helm/charts/alloy/ci/extra-ports-values.yaml diff --git a/operations/helm/charts/grafana-agent/ci/faro-ingress-values.yaml b/operations/helm/charts/alloy/ci/faro-ingress-values.yaml similarity index 100% rename from operations/helm/charts/grafana-agent/ci/faro-ingress-values.yaml rename to operations/helm/charts/alloy/ci/faro-ingress-values.yaml diff --git a/operations/helm/charts/grafana-agent/ci/global-image-pullsecrets-values.yaml b/operations/helm/charts/alloy/ci/global-image-pullsecrets-values.yaml similarity index 100% rename from operations/helm/charts/grafana-agent/ci/global-image-pullsecrets-values.yaml rename to operations/helm/charts/alloy/ci/global-image-pullsecrets-values.yaml diff --git a/operations/helm/charts/grafana-agent/ci/global-image-registry-values.yaml b/operations/helm/charts/alloy/ci/global-image-registry-values.yaml similarity index 100% rename from operations/helm/charts/grafana-agent/ci/global-image-registry-values.yaml rename to operations/helm/charts/alloy/ci/global-image-registry-values.yaml diff --git a/operations/helm/charts/grafana-agent/ci/initcontainers-values.yaml b/operations/helm/charts/alloy/ci/initcontainers-values.yaml similarity index 100% rename from operations/helm/charts/grafana-agent/ci/initcontainers-values.yaml rename to operations/helm/charts/alloy/ci/initcontainers-values.yaml diff --git a/operations/helm/charts/grafana-agent/ci/local-image-pullsecrets-values.yaml b/operations/helm/charts/alloy/ci/local-image-pullsecrets-values.yaml similarity index 100% rename from operations/helm/charts/grafana-agent/ci/local-image-pullsecrets-values.yaml rename to operations/helm/charts/alloy/ci/local-image-pullsecrets-values.yaml diff --git a/operations/helm/charts/grafana-agent/ci/local-image-registry-values.yaml b/operations/helm/charts/alloy/ci/local-image-registry-values.yaml similarity index 100% rename from operations/helm/charts/grafana-agent/ci/local-image-registry-values.yaml rename to operations/helm/charts/alloy/ci/local-image-registry-values.yaml diff --git a/operations/helm/charts/grafana-agent/ci/nodeselectors-and-tolerations-values.yaml b/operations/helm/charts/alloy/ci/nodeselectors-and-tolerations-values.yaml similarity index 100% rename from operations/helm/charts/grafana-agent/ci/nodeselectors-and-tolerations-values.yaml rename to operations/helm/charts/alloy/ci/nodeselectors-and-tolerations-values.yaml diff --git a/operations/helm/charts/grafana-agent/ci/pod_annotations-values.yaml b/operations/helm/charts/alloy/ci/pod_annotations-values.yaml similarity index 100% rename from 
operations/helm/charts/grafana-agent/ci/pod_annotations-values.yaml rename to operations/helm/charts/alloy/ci/pod_annotations-values.yaml diff --git a/operations/helm/charts/grafana-agent/ci/sidecars-values.yaml b/operations/helm/charts/alloy/ci/sidecars-values.yaml similarity index 100% rename from operations/helm/charts/grafana-agent/ci/sidecars-values.yaml rename to operations/helm/charts/alloy/ci/sidecars-values.yaml diff --git a/operations/helm/charts/grafana-agent/ci/topologyspreadconstraints-values.yaml b/operations/helm/charts/alloy/ci/topologyspreadconstraints-values.yaml similarity index 100% rename from operations/helm/charts/grafana-agent/ci/topologyspreadconstraints-values.yaml rename to operations/helm/charts/alloy/ci/topologyspreadconstraints-values.yaml diff --git a/operations/helm/charts/grafana-agent/ci/with-digests-values.yaml b/operations/helm/charts/alloy/ci/with-digests-values.yaml similarity index 100% rename from operations/helm/charts/grafana-agent/ci/with-digests-values.yaml rename to operations/helm/charts/alloy/ci/with-digests-values.yaml diff --git a/operations/helm/charts/grafana-agent/config/example.river b/operations/helm/charts/alloy/config/example.river similarity index 100% rename from operations/helm/charts/grafana-agent/config/example.river rename to operations/helm/charts/alloy/config/example.river diff --git a/operations/helm/charts/grafana-agent/templates/NOTES.txt b/operations/helm/charts/alloy/templates/NOTES.txt similarity index 100% rename from operations/helm/charts/grafana-agent/templates/NOTES.txt rename to operations/helm/charts/alloy/templates/NOTES.txt diff --git a/operations/helm/charts/grafana-agent/templates/_config.tpl b/operations/helm/charts/alloy/templates/_config.tpl similarity index 100% rename from operations/helm/charts/grafana-agent/templates/_config.tpl rename to operations/helm/charts/alloy/templates/_config.tpl diff --git a/operations/helm/charts/grafana-agent/templates/_helpers.tpl b/operations/helm/charts/alloy/templates/_helpers.tpl similarity index 100% rename from operations/helm/charts/grafana-agent/templates/_helpers.tpl rename to operations/helm/charts/alloy/templates/_helpers.tpl diff --git a/operations/helm/charts/grafana-agent/templates/cluster_service.yaml b/operations/helm/charts/alloy/templates/cluster_service.yaml similarity index 100% rename from operations/helm/charts/grafana-agent/templates/cluster_service.yaml rename to operations/helm/charts/alloy/templates/cluster_service.yaml diff --git a/operations/helm/charts/grafana-agent/templates/configmap.yaml b/operations/helm/charts/alloy/templates/configmap.yaml similarity index 100% rename from operations/helm/charts/grafana-agent/templates/configmap.yaml rename to operations/helm/charts/alloy/templates/configmap.yaml diff --git a/operations/helm/charts/grafana-agent/templates/containers/_agent.yaml b/operations/helm/charts/alloy/templates/containers/_agent.yaml similarity index 100% rename from operations/helm/charts/grafana-agent/templates/containers/_agent.yaml rename to operations/helm/charts/alloy/templates/containers/_agent.yaml diff --git a/operations/helm/charts/grafana-agent/templates/containers/_watch.yaml b/operations/helm/charts/alloy/templates/containers/_watch.yaml similarity index 100% rename from operations/helm/charts/grafana-agent/templates/containers/_watch.yaml rename to operations/helm/charts/alloy/templates/containers/_watch.yaml diff --git a/operations/helm/charts/grafana-agent/templates/controllers/_pod.yaml 
b/operations/helm/charts/alloy/templates/controllers/_pod.yaml similarity index 100% rename from operations/helm/charts/grafana-agent/templates/controllers/_pod.yaml rename to operations/helm/charts/alloy/templates/controllers/_pod.yaml diff --git a/operations/helm/charts/grafana-agent/templates/controllers/daemonset.yaml b/operations/helm/charts/alloy/templates/controllers/daemonset.yaml similarity index 100% rename from operations/helm/charts/grafana-agent/templates/controllers/daemonset.yaml rename to operations/helm/charts/alloy/templates/controllers/daemonset.yaml diff --git a/operations/helm/charts/grafana-agent/templates/controllers/deployment.yaml b/operations/helm/charts/alloy/templates/controllers/deployment.yaml similarity index 100% rename from operations/helm/charts/grafana-agent/templates/controllers/deployment.yaml rename to operations/helm/charts/alloy/templates/controllers/deployment.yaml diff --git a/operations/helm/charts/grafana-agent/templates/controllers/statefulset.yaml b/operations/helm/charts/alloy/templates/controllers/statefulset.yaml similarity index 100% rename from operations/helm/charts/grafana-agent/templates/controllers/statefulset.yaml rename to operations/helm/charts/alloy/templates/controllers/statefulset.yaml diff --git a/operations/helm/charts/grafana-agent/templates/hpa.yaml b/operations/helm/charts/alloy/templates/hpa.yaml similarity index 100% rename from operations/helm/charts/grafana-agent/templates/hpa.yaml rename to operations/helm/charts/alloy/templates/hpa.yaml diff --git a/operations/helm/charts/grafana-agent/templates/ingress.yaml b/operations/helm/charts/alloy/templates/ingress.yaml similarity index 100% rename from operations/helm/charts/grafana-agent/templates/ingress.yaml rename to operations/helm/charts/alloy/templates/ingress.yaml diff --git a/operations/helm/charts/grafana-agent/templates/rbac.yaml b/operations/helm/charts/alloy/templates/rbac.yaml similarity index 100% rename from operations/helm/charts/grafana-agent/templates/rbac.yaml rename to operations/helm/charts/alloy/templates/rbac.yaml diff --git a/operations/helm/charts/grafana-agent/templates/service.yaml b/operations/helm/charts/alloy/templates/service.yaml similarity index 100% rename from operations/helm/charts/grafana-agent/templates/service.yaml rename to operations/helm/charts/alloy/templates/service.yaml diff --git a/operations/helm/charts/grafana-agent/templates/serviceaccount.yaml b/operations/helm/charts/alloy/templates/serviceaccount.yaml similarity index 100% rename from operations/helm/charts/grafana-agent/templates/serviceaccount.yaml rename to operations/helm/charts/alloy/templates/serviceaccount.yaml diff --git a/operations/helm/charts/grafana-agent/templates/servicemonitor.yaml b/operations/helm/charts/alloy/templates/servicemonitor.yaml similarity index 100% rename from operations/helm/charts/grafana-agent/templates/servicemonitor.yaml rename to operations/helm/charts/alloy/templates/servicemonitor.yaml diff --git a/operations/helm/charts/grafana-agent/values.yaml b/operations/helm/charts/alloy/values.yaml similarity index 100% rename from operations/helm/charts/grafana-agent/values.yaml rename to operations/helm/charts/alloy/values.yaml diff --git a/operations/helm/tests/additional-serviceaccount-label/grafana-agent/templates/configmap.yaml b/operations/helm/tests/additional-serviceaccount-label/alloy/templates/configmap.yaml similarity index 77% rename from operations/helm/tests/additional-serviceaccount-label/grafana-agent/templates/configmap.yaml 
rename to operations/helm/tests/additional-serviceaccount-label/alloy/templates/configmap.yaml index 2fdc6f0117..c9ae77aa1e 100644 --- a/operations/helm/tests/additional-serviceaccount-label/grafana-agent/templates/configmap.yaml +++ b/operations/helm/tests/additional-serviceaccount-label/alloy/templates/configmap.yaml @@ -1,13 +1,13 @@ --- -# Source: grafana-agent/templates/configmap.yaml +# Source: alloy/templates/configmap.yaml apiVersion: v1 kind: ConfigMap metadata: - name: grafana-agent + name: alloy labels: - helm.sh/chart: grafana-agent - app.kubernetes.io/name: grafana-agent - app.kubernetes.io/instance: grafana-agent + helm.sh/chart: alloy + app.kubernetes.io/name: alloy + app.kubernetes.io/instance: alloy app.kubernetes.io/version: "vX.Y.Z" app.kubernetes.io/managed-by: Helm data: diff --git a/operations/helm/tests/create-daemonset/grafana-agent/templates/controllers/daemonset.yaml b/operations/helm/tests/additional-serviceaccount-label/alloy/templates/controllers/daemonset.yaml similarity index 78% rename from operations/helm/tests/create-daemonset/grafana-agent/templates/controllers/daemonset.yaml rename to operations/helm/tests/additional-serviceaccount-label/alloy/templates/controllers/daemonset.yaml index 97d9cbfb9b..fdd2df0969 100644 --- a/operations/helm/tests/create-daemonset/grafana-agent/templates/controllers/daemonset.yaml +++ b/operations/helm/tests/additional-serviceaccount-label/alloy/templates/controllers/daemonset.yaml @@ -1,30 +1,30 @@ --- -# Source: grafana-agent/templates/controllers/daemonset.yaml +# Source: alloy/templates/controllers/daemonset.yaml apiVersion: apps/v1 kind: DaemonSet metadata: - name: grafana-agent + name: alloy labels: - helm.sh/chart: grafana-agent - app.kubernetes.io/name: grafana-agent - app.kubernetes.io/instance: grafana-agent + helm.sh/chart: alloy + app.kubernetes.io/name: alloy + app.kubernetes.io/instance: alloy app.kubernetes.io/version: "vX.Y.Z" app.kubernetes.io/managed-by: Helm spec: minReadySeconds: 10 selector: matchLabels: - app.kubernetes.io/name: grafana-agent - app.kubernetes.io/instance: grafana-agent + app.kubernetes.io/name: alloy + app.kubernetes.io/instance: alloy template: metadata: annotations: kubectl.kubernetes.io/default-container: alloy labels: - app.kubernetes.io/name: grafana-agent - app.kubernetes.io/instance: grafana-agent + app.kubernetes.io/name: alloy + app.kubernetes.io/instance: alloy spec: - serviceAccountName: grafana-agent + serviceAccountName: alloy containers: - name: alloy image: docker.io/grafana/alloy:v0.40.2 @@ -71,4 +71,4 @@ spec: volumes: - name: config configMap: - name: grafana-agent + name: alloy diff --git a/operations/helm/tests/controller-volumes-extra/grafana-agent/templates/rbac.yaml b/operations/helm/tests/additional-serviceaccount-label/alloy/templates/rbac.yaml similarity index 83% rename from operations/helm/tests/controller-volumes-extra/grafana-agent/templates/rbac.yaml rename to operations/helm/tests/additional-serviceaccount-label/alloy/templates/rbac.yaml index 3765583fb6..53ca27544f 100644 --- a/operations/helm/tests/controller-volumes-extra/grafana-agent/templates/rbac.yaml +++ b/operations/helm/tests/additional-serviceaccount-label/alloy/templates/rbac.yaml @@ -1,13 +1,13 @@ --- -# Source: grafana-agent/templates/rbac.yaml +# Source: alloy/templates/rbac.yaml apiVersion: rbac.authorization.k8s.io/v1 kind: ClusterRole metadata: - name: grafana-agent + name: alloy labels: - helm.sh/chart: grafana-agent - app.kubernetes.io/name: grafana-agent - app.kubernetes.io/instance: 
grafana-agent + helm.sh/chart: alloy + app.kubernetes.io/name: alloy + app.kubernetes.io/instance: alloy app.kubernetes.io/version: "vX.Y.Z" app.kubernetes.io/managed-by: Helm rules: @@ -96,22 +96,22 @@ rules: resources: ["replicasets"] verbs: ["get", "list", "watch"] --- -# Source: grafana-agent/templates/rbac.yaml +# Source: alloy/templates/rbac.yaml apiVersion: rbac.authorization.k8s.io/v1 kind: ClusterRoleBinding metadata: - name: grafana-agent + name: alloy labels: - helm.sh/chart: grafana-agent - app.kubernetes.io/name: grafana-agent - app.kubernetes.io/instance: grafana-agent + helm.sh/chart: alloy + app.kubernetes.io/name: alloy + app.kubernetes.io/instance: alloy app.kubernetes.io/version: "vX.Y.Z" app.kubernetes.io/managed-by: Helm roleRef: apiGroup: rbac.authorization.k8s.io kind: ClusterRole - name: grafana-agent + name: alloy subjects: - kind: ServiceAccount - name: grafana-agent + name: alloy namespace: default diff --git a/operations/helm/tests/clustering/grafana-agent/templates/service.yaml b/operations/helm/tests/additional-serviceaccount-label/alloy/templates/service.yaml similarity index 51% rename from operations/helm/tests/clustering/grafana-agent/templates/service.yaml rename to operations/helm/tests/additional-serviceaccount-label/alloy/templates/service.yaml index c98f79428b..34c0608fd3 100644 --- a/operations/helm/tests/clustering/grafana-agent/templates/service.yaml +++ b/operations/helm/tests/additional-serviceaccount-label/alloy/templates/service.yaml @@ -1,20 +1,20 @@ --- -# Source: grafana-agent/templates/service.yaml +# Source: alloy/templates/service.yaml apiVersion: v1 kind: Service metadata: - name: grafana-agent + name: alloy labels: - helm.sh/chart: grafana-agent - app.kubernetes.io/name: grafana-agent - app.kubernetes.io/instance: grafana-agent + helm.sh/chart: alloy + app.kubernetes.io/name: alloy + app.kubernetes.io/instance: alloy app.kubernetes.io/version: "vX.Y.Z" app.kubernetes.io/managed-by: Helm spec: type: ClusterIP selector: - app.kubernetes.io/name: grafana-agent - app.kubernetes.io/instance: grafana-agent + app.kubernetes.io/name: alloy + app.kubernetes.io/instance: alloy internalTrafficPolicy: Cluster ports: - name: http-metrics diff --git a/operations/helm/tests/additional-serviceaccount-label/alloy/templates/serviceaccount.yaml b/operations/helm/tests/additional-serviceaccount-label/alloy/templates/serviceaccount.yaml new file mode 100644 index 0000000000..8c685ec855 --- /dev/null +++ b/operations/helm/tests/additional-serviceaccount-label/alloy/templates/serviceaccount.yaml @@ -0,0 +1,14 @@ +--- +# Source: alloy/templates/serviceaccount.yaml +apiVersion: v1 +kind: ServiceAccount +metadata: + name: alloy + namespace: default + labels: + helm.sh/chart: alloy + app.kubernetes.io/name: alloy + app.kubernetes.io/instance: alloy + app.kubernetes.io/version: "vX.Y.Z" + app.kubernetes.io/managed-by: Helm + test: "true" diff --git a/operations/helm/tests/additional-serviceaccount-label/grafana-agent/templates/serviceaccount.yaml b/operations/helm/tests/additional-serviceaccount-label/grafana-agent/templates/serviceaccount.yaml deleted file mode 100644 index 08eca9f756..0000000000 --- a/operations/helm/tests/additional-serviceaccount-label/grafana-agent/templates/serviceaccount.yaml +++ /dev/null @@ -1,14 +0,0 @@ ---- -# Source: grafana-agent/templates/serviceaccount.yaml -apiVersion: v1 -kind: ServiceAccount -metadata: - name: grafana-agent - namespace: default - labels: - helm.sh/chart: grafana-agent - app.kubernetes.io/name: grafana-agent - 
app.kubernetes.io/instance: grafana-agent - app.kubernetes.io/version: "vX.Y.Z" - app.kubernetes.io/managed-by: Helm - test: "true" diff --git a/operations/helm/tests/clustering/grafana-agent/templates/cluster_service.yaml b/operations/helm/tests/clustering/alloy/templates/cluster_service.yaml similarity index 64% rename from operations/helm/tests/clustering/grafana-agent/templates/cluster_service.yaml rename to operations/helm/tests/clustering/alloy/templates/cluster_service.yaml index fb9f35e867..b7dd21b837 100644 --- a/operations/helm/tests/clustering/grafana-agent/templates/cluster_service.yaml +++ b/operations/helm/tests/clustering/alloy/templates/cluster_service.yaml @@ -1,21 +1,21 @@ --- -# Source: grafana-agent/templates/cluster_service.yaml +# Source: alloy/templates/cluster_service.yaml apiVersion: v1 kind: Service metadata: - name: grafana-agent-cluster + name: alloy-cluster labels: - helm.sh/chart: grafana-agent - app.kubernetes.io/name: grafana-agent - app.kubernetes.io/instance: grafana-agent + helm.sh/chart: alloy + app.kubernetes.io/name: alloy + app.kubernetes.io/instance: alloy app.kubernetes.io/version: "vX.Y.Z" app.kubernetes.io/managed-by: Helm spec: type: ClusterIP clusterIP: 'None' selector: - app.kubernetes.io/name: grafana-agent - app.kubernetes.io/instance: grafana-agent + app.kubernetes.io/name: alloy + app.kubernetes.io/instance: alloy ports: # Do not include the -metrics suffix in the port name, otherwise metrics # can be double-collected with the non-headless Service if it's also diff --git a/operations/helm/tests/controller-volumes-extra/grafana-agent/templates/configmap.yaml b/operations/helm/tests/clustering/alloy/templates/configmap.yaml similarity index 77% rename from operations/helm/tests/controller-volumes-extra/grafana-agent/templates/configmap.yaml rename to operations/helm/tests/clustering/alloy/templates/configmap.yaml index 2fdc6f0117..c9ae77aa1e 100644 --- a/operations/helm/tests/controller-volumes-extra/grafana-agent/templates/configmap.yaml +++ b/operations/helm/tests/clustering/alloy/templates/configmap.yaml @@ -1,13 +1,13 @@ --- -# Source: grafana-agent/templates/configmap.yaml +# Source: alloy/templates/configmap.yaml apiVersion: v1 kind: ConfigMap metadata: - name: grafana-agent + name: alloy labels: - helm.sh/chart: grafana-agent - app.kubernetes.io/name: grafana-agent - app.kubernetes.io/instance: grafana-agent + helm.sh/chart: alloy + app.kubernetes.io/name: alloy + app.kubernetes.io/instance: alloy app.kubernetes.io/version: "vX.Y.Z" app.kubernetes.io/managed-by: Helm data: diff --git a/operations/helm/tests/clustering/grafana-agent/templates/controllers/statefulset.yaml b/operations/helm/tests/clustering/alloy/templates/controllers/statefulset.yaml similarity index 76% rename from operations/helm/tests/clustering/grafana-agent/templates/controllers/statefulset.yaml rename to operations/helm/tests/clustering/alloy/templates/controllers/statefulset.yaml index ba6d8e8ba7..1de3c634c7 100644 --- a/operations/helm/tests/clustering/grafana-agent/templates/controllers/statefulset.yaml +++ b/operations/helm/tests/clustering/alloy/templates/controllers/statefulset.yaml @@ -1,33 +1,33 @@ --- -# Source: grafana-agent/templates/controllers/statefulset.yaml +# Source: alloy/templates/controllers/statefulset.yaml apiVersion: apps/v1 kind: StatefulSet metadata: - name: grafana-agent + name: alloy labels: - helm.sh/chart: grafana-agent - app.kubernetes.io/name: grafana-agent - app.kubernetes.io/instance: grafana-agent + helm.sh/chart: alloy + 
app.kubernetes.io/name: alloy + app.kubernetes.io/instance: alloy app.kubernetes.io/version: "vX.Y.Z" app.kubernetes.io/managed-by: Helm spec: replicas: 3 podManagementPolicy: Parallel minReadySeconds: 10 - serviceName: grafana-agent + serviceName: alloy selector: matchLabels: - app.kubernetes.io/name: grafana-agent - app.kubernetes.io/instance: grafana-agent + app.kubernetes.io/name: alloy + app.kubernetes.io/instance: alloy template: metadata: annotations: kubectl.kubernetes.io/default-container: alloy labels: - app.kubernetes.io/name: grafana-agent - app.kubernetes.io/instance: grafana-agent + app.kubernetes.io/name: alloy + app.kubernetes.io/instance: alloy spec: - serviceAccountName: grafana-agent + serviceAccountName: alloy containers: - name: alloy image: docker.io/grafana/alloy:v0.40.2 @@ -39,7 +39,7 @@ spec: - --server.http.listen-addr=0.0.0.0:80 - --server.http.ui-path-prefix=/ - --cluster.enabled=true - - --cluster.join-addresses=grafana-agent-cluster + - --cluster.join-addresses=alloy-cluster env: - name: AGENT_DEPLOY_MODE value: "helm" @@ -76,4 +76,4 @@ spec: volumes: - name: config configMap: - name: grafana-agent + name: alloy diff --git a/operations/helm/tests/create-daemonset-hostnetwork/grafana-agent/templates/rbac.yaml b/operations/helm/tests/clustering/alloy/templates/rbac.yaml similarity index 83% rename from operations/helm/tests/create-daemonset-hostnetwork/grafana-agent/templates/rbac.yaml rename to operations/helm/tests/clustering/alloy/templates/rbac.yaml index 3765583fb6..53ca27544f 100644 --- a/operations/helm/tests/create-daemonset-hostnetwork/grafana-agent/templates/rbac.yaml +++ b/operations/helm/tests/clustering/alloy/templates/rbac.yaml @@ -1,13 +1,13 @@ --- -# Source: grafana-agent/templates/rbac.yaml +# Source: alloy/templates/rbac.yaml apiVersion: rbac.authorization.k8s.io/v1 kind: ClusterRole metadata: - name: grafana-agent + name: alloy labels: - helm.sh/chart: grafana-agent - app.kubernetes.io/name: grafana-agent - app.kubernetes.io/instance: grafana-agent + helm.sh/chart: alloy + app.kubernetes.io/name: alloy + app.kubernetes.io/instance: alloy app.kubernetes.io/version: "vX.Y.Z" app.kubernetes.io/managed-by: Helm rules: @@ -96,22 +96,22 @@ rules: resources: ["replicasets"] verbs: ["get", "list", "watch"] --- -# Source: grafana-agent/templates/rbac.yaml +# Source: alloy/templates/rbac.yaml apiVersion: rbac.authorization.k8s.io/v1 kind: ClusterRoleBinding metadata: - name: grafana-agent + name: alloy labels: - helm.sh/chart: grafana-agent - app.kubernetes.io/name: grafana-agent - app.kubernetes.io/instance: grafana-agent + helm.sh/chart: alloy + app.kubernetes.io/name: alloy + app.kubernetes.io/instance: alloy app.kubernetes.io/version: "vX.Y.Z" app.kubernetes.io/managed-by: Helm roleRef: apiGroup: rbac.authorization.k8s.io kind: ClusterRole - name: grafana-agent + name: alloy subjects: - kind: ServiceAccount - name: grafana-agent + name: alloy namespace: default diff --git a/operations/helm/tests/controller-volumes-extra/grafana-agent/templates/service.yaml b/operations/helm/tests/clustering/alloy/templates/service.yaml similarity index 51% rename from operations/helm/tests/controller-volumes-extra/grafana-agent/templates/service.yaml rename to operations/helm/tests/clustering/alloy/templates/service.yaml index c98f79428b..34c0608fd3 100644 --- a/operations/helm/tests/controller-volumes-extra/grafana-agent/templates/service.yaml +++ b/operations/helm/tests/clustering/alloy/templates/service.yaml @@ -1,20 +1,20 @@ --- -# Source: 
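The clustering StatefulSet hunk above changes only the join address, from `grafana-agent-cluster` to the renamed headless Service `alloy-cluster`. The container it renders amounts to roughly the following invocation (binary name, subcommand, and config path are assumptions; the flags are taken verbatim from the hunk):

```shell
# Sketch of what the chart's clustered StatefulSet runs: each replica joins
# its peers through the headless alloy-cluster Service.
alloy run /etc/alloy/config.river \
  --server.http.listen-addr=0.0.0.0:80 \
  --server.http.ui-path-prefix=/ \
  --cluster.enabled=true \
  --cluster.join-addresses=alloy-cluster
```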
grafana-agent/templates/service.yaml +# Source: alloy/templates/service.yaml apiVersion: v1 kind: Service metadata: - name: grafana-agent + name: alloy labels: - helm.sh/chart: grafana-agent - app.kubernetes.io/name: grafana-agent - app.kubernetes.io/instance: grafana-agent + helm.sh/chart: alloy + app.kubernetes.io/name: alloy + app.kubernetes.io/instance: alloy app.kubernetes.io/version: "vX.Y.Z" app.kubernetes.io/managed-by: Helm spec: type: ClusterIP selector: - app.kubernetes.io/name: grafana-agent - app.kubernetes.io/instance: grafana-agent + app.kubernetes.io/name: alloy + app.kubernetes.io/instance: alloy internalTrafficPolicy: Cluster ports: - name: http-metrics diff --git a/operations/helm/tests/clustering/alloy/templates/serviceaccount.yaml b/operations/helm/tests/clustering/alloy/templates/serviceaccount.yaml new file mode 100644 index 0000000000..46df991efb --- /dev/null +++ b/operations/helm/tests/clustering/alloy/templates/serviceaccount.yaml @@ -0,0 +1,13 @@ +--- +# Source: alloy/templates/serviceaccount.yaml +apiVersion: v1 +kind: ServiceAccount +metadata: + name: alloy + namespace: default + labels: + helm.sh/chart: alloy + app.kubernetes.io/name: alloy + app.kubernetes.io/instance: alloy + app.kubernetes.io/version: "vX.Y.Z" + app.kubernetes.io/managed-by: Helm diff --git a/operations/helm/tests/clustering/grafana-agent/templates/serviceaccount.yaml b/operations/helm/tests/clustering/grafana-agent/templates/serviceaccount.yaml deleted file mode 100644 index 65d7e0df38..0000000000 --- a/operations/helm/tests/clustering/grafana-agent/templates/serviceaccount.yaml +++ /dev/null @@ -1,13 +0,0 @@ ---- -# Source: grafana-agent/templates/serviceaccount.yaml -apiVersion: v1 -kind: ServiceAccount -metadata: - name: grafana-agent - namespace: default - labels: - helm.sh/chart: grafana-agent - app.kubernetes.io/name: grafana-agent - app.kubernetes.io/instance: grafana-agent - app.kubernetes.io/version: "vX.Y.Z" - app.kubernetes.io/managed-by: Helm diff --git a/operations/helm/tests/clustering/grafana-agent/templates/configmap.yaml b/operations/helm/tests/controller-volumes-extra/alloy/templates/configmap.yaml similarity index 77% rename from operations/helm/tests/clustering/grafana-agent/templates/configmap.yaml rename to operations/helm/tests/controller-volumes-extra/alloy/templates/configmap.yaml index 2fdc6f0117..c9ae77aa1e 100644 --- a/operations/helm/tests/clustering/grafana-agent/templates/configmap.yaml +++ b/operations/helm/tests/controller-volumes-extra/alloy/templates/configmap.yaml @@ -1,13 +1,13 @@ --- -# Source: grafana-agent/templates/configmap.yaml +# Source: alloy/templates/configmap.yaml apiVersion: v1 kind: ConfigMap metadata: - name: grafana-agent + name: alloy labels: - helm.sh/chart: grafana-agent - app.kubernetes.io/name: grafana-agent - app.kubernetes.io/instance: grafana-agent + helm.sh/chart: alloy + app.kubernetes.io/name: alloy + app.kubernetes.io/instance: alloy app.kubernetes.io/version: "vX.Y.Z" app.kubernetes.io/managed-by: Helm data: diff --git a/operations/helm/tests/controller-volumes-extra/grafana-agent/templates/controllers/daemonset.yaml b/operations/helm/tests/controller-volumes-extra/alloy/templates/controllers/daemonset.yaml similarity index 79% rename from operations/helm/tests/controller-volumes-extra/grafana-agent/templates/controllers/daemonset.yaml rename to operations/helm/tests/controller-volumes-extra/alloy/templates/controllers/daemonset.yaml index 727b30100f..67b85b9397 100644 --- 
a/operations/helm/tests/controller-volumes-extra/grafana-agent/templates/controllers/daemonset.yaml +++ b/operations/helm/tests/controller-volumes-extra/alloy/templates/controllers/daemonset.yaml @@ -1,30 +1,30 @@ --- -# Source: grafana-agent/templates/controllers/daemonset.yaml +# Source: alloy/templates/controllers/daemonset.yaml apiVersion: apps/v1 kind: DaemonSet metadata: - name: grafana-agent + name: alloy labels: - helm.sh/chart: grafana-agent - app.kubernetes.io/name: grafana-agent - app.kubernetes.io/instance: grafana-agent + helm.sh/chart: alloy + app.kubernetes.io/name: alloy + app.kubernetes.io/instance: alloy app.kubernetes.io/version: "vX.Y.Z" app.kubernetes.io/managed-by: Helm spec: minReadySeconds: 10 selector: matchLabels: - app.kubernetes.io/name: grafana-agent - app.kubernetes.io/instance: grafana-agent + app.kubernetes.io/name: alloy + app.kubernetes.io/instance: alloy template: metadata: annotations: kubectl.kubernetes.io/default-container: alloy labels: - app.kubernetes.io/name: grafana-agent - app.kubernetes.io/instance: grafana-agent + app.kubernetes.io/name: alloy + app.kubernetes.io/instance: alloy spec: - serviceAccountName: grafana-agent + serviceAccountName: alloy containers: - name: alloy image: docker.io/grafana/alloy:v0.40.2 @@ -74,7 +74,7 @@ spec: volumes: - name: config configMap: - name: grafana-agent + name: alloy - emptyDir: sizeLimit: 500Mi name: cache-volume diff --git a/operations/helm/tests/additional-serviceaccount-label/grafana-agent/templates/rbac.yaml b/operations/helm/tests/controller-volumes-extra/alloy/templates/rbac.yaml similarity index 83% rename from operations/helm/tests/additional-serviceaccount-label/grafana-agent/templates/rbac.yaml rename to operations/helm/tests/controller-volumes-extra/alloy/templates/rbac.yaml index 3765583fb6..53ca27544f 100644 --- a/operations/helm/tests/additional-serviceaccount-label/grafana-agent/templates/rbac.yaml +++ b/operations/helm/tests/controller-volumes-extra/alloy/templates/rbac.yaml @@ -1,13 +1,13 @@ --- -# Source: grafana-agent/templates/rbac.yaml +# Source: alloy/templates/rbac.yaml apiVersion: rbac.authorization.k8s.io/v1 kind: ClusterRole metadata: - name: grafana-agent + name: alloy labels: - helm.sh/chart: grafana-agent - app.kubernetes.io/name: grafana-agent - app.kubernetes.io/instance: grafana-agent + helm.sh/chart: alloy + app.kubernetes.io/name: alloy + app.kubernetes.io/instance: alloy app.kubernetes.io/version: "vX.Y.Z" app.kubernetes.io/managed-by: Helm rules: @@ -96,22 +96,22 @@ rules: resources: ["replicasets"] verbs: ["get", "list", "watch"] --- -# Source: grafana-agent/templates/rbac.yaml +# Source: alloy/templates/rbac.yaml apiVersion: rbac.authorization.k8s.io/v1 kind: ClusterRoleBinding metadata: - name: grafana-agent + name: alloy labels: - helm.sh/chart: grafana-agent - app.kubernetes.io/name: grafana-agent - app.kubernetes.io/instance: grafana-agent + helm.sh/chart: alloy + app.kubernetes.io/name: alloy + app.kubernetes.io/instance: alloy app.kubernetes.io/version: "vX.Y.Z" app.kubernetes.io/managed-by: Helm roleRef: apiGroup: rbac.authorization.k8s.io kind: ClusterRole - name: grafana-agent + name: alloy subjects: - kind: ServiceAccount - name: grafana-agent + name: alloy namespace: default diff --git a/operations/helm/tests/create-daemonset-hostnetwork/grafana-agent/templates/service.yaml b/operations/helm/tests/controller-volumes-extra/alloy/templates/service.yaml similarity index 51% rename from 
operations/helm/tests/create-daemonset-hostnetwork/grafana-agent/templates/service.yaml rename to operations/helm/tests/controller-volumes-extra/alloy/templates/service.yaml index c98f79428b..34c0608fd3 100644 --- a/operations/helm/tests/create-daemonset-hostnetwork/grafana-agent/templates/service.yaml +++ b/operations/helm/tests/controller-volumes-extra/alloy/templates/service.yaml @@ -1,20 +1,20 @@ --- -# Source: grafana-agent/templates/service.yaml +# Source: alloy/templates/service.yaml apiVersion: v1 kind: Service metadata: - name: grafana-agent + name: alloy labels: - helm.sh/chart: grafana-agent - app.kubernetes.io/name: grafana-agent - app.kubernetes.io/instance: grafana-agent + helm.sh/chart: alloy + app.kubernetes.io/name: alloy + app.kubernetes.io/instance: alloy app.kubernetes.io/version: "vX.Y.Z" app.kubernetes.io/managed-by: Helm spec: type: ClusterIP selector: - app.kubernetes.io/name: grafana-agent - app.kubernetes.io/instance: grafana-agent + app.kubernetes.io/name: alloy + app.kubernetes.io/instance: alloy internalTrafficPolicy: Cluster ports: - name: http-metrics diff --git a/operations/helm/tests/controller-volumes-extra/alloy/templates/serviceaccount.yaml b/operations/helm/tests/controller-volumes-extra/alloy/templates/serviceaccount.yaml new file mode 100644 index 0000000000..46df991efb --- /dev/null +++ b/operations/helm/tests/controller-volumes-extra/alloy/templates/serviceaccount.yaml @@ -0,0 +1,13 @@ +--- +# Source: alloy/templates/serviceaccount.yaml +apiVersion: v1 +kind: ServiceAccount +metadata: + name: alloy + namespace: default + labels: + helm.sh/chart: alloy + app.kubernetes.io/name: alloy + app.kubernetes.io/instance: alloy + app.kubernetes.io/version: "vX.Y.Z" + app.kubernetes.io/managed-by: Helm diff --git a/operations/helm/tests/controller-volumes-extra/grafana-agent/templates/serviceaccount.yaml b/operations/helm/tests/controller-volumes-extra/grafana-agent/templates/serviceaccount.yaml deleted file mode 100644 index 65d7e0df38..0000000000 --- a/operations/helm/tests/controller-volumes-extra/grafana-agent/templates/serviceaccount.yaml +++ /dev/null @@ -1,13 +0,0 @@ ---- -# Source: grafana-agent/templates/serviceaccount.yaml -apiVersion: v1 -kind: ServiceAccount -metadata: - name: grafana-agent - namespace: default - labels: - helm.sh/chart: grafana-agent - app.kubernetes.io/name: grafana-agent - app.kubernetes.io/instance: grafana-agent - app.kubernetes.io/version: "vX.Y.Z" - app.kubernetes.io/managed-by: Helm diff --git a/operations/helm/tests/create-daemonset-hostnetwork/grafana-agent/templates/configmap.yaml b/operations/helm/tests/create-daemonset-hostnetwork/alloy/templates/configmap.yaml similarity index 77% rename from operations/helm/tests/create-daemonset-hostnetwork/grafana-agent/templates/configmap.yaml rename to operations/helm/tests/create-daemonset-hostnetwork/alloy/templates/configmap.yaml index 2fdc6f0117..c9ae77aa1e 100644 --- a/operations/helm/tests/create-daemonset-hostnetwork/grafana-agent/templates/configmap.yaml +++ b/operations/helm/tests/create-daemonset-hostnetwork/alloy/templates/configmap.yaml @@ -1,13 +1,13 @@ --- -# Source: grafana-agent/templates/configmap.yaml +# Source: alloy/templates/configmap.yaml apiVersion: v1 kind: ConfigMap metadata: - name: grafana-agent + name: alloy labels: - helm.sh/chart: grafana-agent - app.kubernetes.io/name: grafana-agent - app.kubernetes.io/instance: grafana-agent + helm.sh/chart: alloy + app.kubernetes.io/name: alloy + app.kubernetes.io/instance: alloy app.kubernetes.io/version: 
"vX.Y.Z" app.kubernetes.io/managed-by: Helm data: diff --git a/operations/helm/tests/create-daemonset-hostnetwork/grafana-agent/templates/controllers/daemonset.yaml b/operations/helm/tests/create-daemonset-hostnetwork/alloy/templates/controllers/daemonset.yaml similarity index 78% rename from operations/helm/tests/create-daemonset-hostnetwork/grafana-agent/templates/controllers/daemonset.yaml rename to operations/helm/tests/create-daemonset-hostnetwork/alloy/templates/controllers/daemonset.yaml index 1786dc9e1e..520e373a6c 100644 --- a/operations/helm/tests/create-daemonset-hostnetwork/grafana-agent/templates/controllers/daemonset.yaml +++ b/operations/helm/tests/create-daemonset-hostnetwork/alloy/templates/controllers/daemonset.yaml @@ -1,30 +1,30 @@ --- -# Source: grafana-agent/templates/controllers/daemonset.yaml +# Source: alloy/templates/controllers/daemonset.yaml apiVersion: apps/v1 kind: DaemonSet metadata: - name: grafana-agent + name: alloy labels: - helm.sh/chart: grafana-agent - app.kubernetes.io/name: grafana-agent - app.kubernetes.io/instance: grafana-agent + helm.sh/chart: alloy + app.kubernetes.io/name: alloy + app.kubernetes.io/instance: alloy app.kubernetes.io/version: "vX.Y.Z" app.kubernetes.io/managed-by: Helm spec: minReadySeconds: 10 selector: matchLabels: - app.kubernetes.io/name: grafana-agent - app.kubernetes.io/instance: grafana-agent + app.kubernetes.io/name: alloy + app.kubernetes.io/instance: alloy template: metadata: annotations: kubectl.kubernetes.io/default-container: alloy labels: - app.kubernetes.io/name: grafana-agent - app.kubernetes.io/instance: grafana-agent + app.kubernetes.io/name: alloy + app.kubernetes.io/instance: alloy spec: - serviceAccountName: grafana-agent + serviceAccountName: alloy containers: - name: alloy image: docker.io/grafana/alloy:v0.40.2 @@ -72,4 +72,4 @@ spec: volumes: - name: config configMap: - name: grafana-agent + name: alloy diff --git a/operations/helm/tests/clustering/grafana-agent/templates/rbac.yaml b/operations/helm/tests/create-daemonset-hostnetwork/alloy/templates/rbac.yaml similarity index 83% rename from operations/helm/tests/clustering/grafana-agent/templates/rbac.yaml rename to operations/helm/tests/create-daemonset-hostnetwork/alloy/templates/rbac.yaml index 3765583fb6..53ca27544f 100644 --- a/operations/helm/tests/clustering/grafana-agent/templates/rbac.yaml +++ b/operations/helm/tests/create-daemonset-hostnetwork/alloy/templates/rbac.yaml @@ -1,13 +1,13 @@ --- -# Source: grafana-agent/templates/rbac.yaml +# Source: alloy/templates/rbac.yaml apiVersion: rbac.authorization.k8s.io/v1 kind: ClusterRole metadata: - name: grafana-agent + name: alloy labels: - helm.sh/chart: grafana-agent - app.kubernetes.io/name: grafana-agent - app.kubernetes.io/instance: grafana-agent + helm.sh/chart: alloy + app.kubernetes.io/name: alloy + app.kubernetes.io/instance: alloy app.kubernetes.io/version: "vX.Y.Z" app.kubernetes.io/managed-by: Helm rules: @@ -96,22 +96,22 @@ rules: resources: ["replicasets"] verbs: ["get", "list", "watch"] --- -# Source: grafana-agent/templates/rbac.yaml +# Source: alloy/templates/rbac.yaml apiVersion: rbac.authorization.k8s.io/v1 kind: ClusterRoleBinding metadata: - name: grafana-agent + name: alloy labels: - helm.sh/chart: grafana-agent - app.kubernetes.io/name: grafana-agent - app.kubernetes.io/instance: grafana-agent + helm.sh/chart: alloy + app.kubernetes.io/name: alloy + app.kubernetes.io/instance: alloy app.kubernetes.io/version: "vX.Y.Z" app.kubernetes.io/managed-by: Helm roleRef: apiGroup: 
rbac.authorization.k8s.io kind: ClusterRole - name: grafana-agent + name: alloy subjects: - kind: ServiceAccount - name: grafana-agent + name: alloy namespace: default diff --git a/operations/helm/tests/additional-serviceaccount-label/grafana-agent/templates/service.yaml b/operations/helm/tests/create-daemonset-hostnetwork/alloy/templates/service.yaml similarity index 51% rename from operations/helm/tests/additional-serviceaccount-label/grafana-agent/templates/service.yaml rename to operations/helm/tests/create-daemonset-hostnetwork/alloy/templates/service.yaml index c98f79428b..34c0608fd3 100644 --- a/operations/helm/tests/additional-serviceaccount-label/grafana-agent/templates/service.yaml +++ b/operations/helm/tests/create-daemonset-hostnetwork/alloy/templates/service.yaml @@ -1,20 +1,20 @@ --- -# Source: grafana-agent/templates/service.yaml +# Source: alloy/templates/service.yaml apiVersion: v1 kind: Service metadata: - name: grafana-agent + name: alloy labels: - helm.sh/chart: grafana-agent - app.kubernetes.io/name: grafana-agent - app.kubernetes.io/instance: grafana-agent + helm.sh/chart: alloy + app.kubernetes.io/name: alloy + app.kubernetes.io/instance: alloy app.kubernetes.io/version: "vX.Y.Z" app.kubernetes.io/managed-by: Helm spec: type: ClusterIP selector: - app.kubernetes.io/name: grafana-agent - app.kubernetes.io/instance: grafana-agent + app.kubernetes.io/name: alloy + app.kubernetes.io/instance: alloy internalTrafficPolicy: Cluster ports: - name: http-metrics diff --git a/operations/helm/tests/create-daemonset-hostnetwork/alloy/templates/serviceaccount.yaml b/operations/helm/tests/create-daemonset-hostnetwork/alloy/templates/serviceaccount.yaml new file mode 100644 index 0000000000..46df991efb --- /dev/null +++ b/operations/helm/tests/create-daemonset-hostnetwork/alloy/templates/serviceaccount.yaml @@ -0,0 +1,13 @@ +--- +# Source: alloy/templates/serviceaccount.yaml +apiVersion: v1 +kind: ServiceAccount +metadata: + name: alloy + namespace: default + labels: + helm.sh/chart: alloy + app.kubernetes.io/name: alloy + app.kubernetes.io/instance: alloy + app.kubernetes.io/version: "vX.Y.Z" + app.kubernetes.io/managed-by: Helm diff --git a/operations/helm/tests/create-daemonset-hostnetwork/grafana-agent/templates/serviceaccount.yaml b/operations/helm/tests/create-daemonset-hostnetwork/grafana-agent/templates/serviceaccount.yaml deleted file mode 100644 index 65d7e0df38..0000000000 --- a/operations/helm/tests/create-daemonset-hostnetwork/grafana-agent/templates/serviceaccount.yaml +++ /dev/null @@ -1,13 +0,0 @@ ---- -# Source: grafana-agent/templates/serviceaccount.yaml -apiVersion: v1 -kind: ServiceAccount -metadata: - name: grafana-agent - namespace: default - labels: - helm.sh/chart: grafana-agent - app.kubernetes.io/name: grafana-agent - app.kubernetes.io/instance: grafana-agent - app.kubernetes.io/version: "vX.Y.Z" - app.kubernetes.io/managed-by: Helm diff --git a/operations/helm/tests/create-daemonset/alloy/templates/configmap.yaml b/operations/helm/tests/create-daemonset/alloy/templates/configmap.yaml new file mode 100644 index 0000000000..c9ae77aa1e --- /dev/null +++ b/operations/helm/tests/create-daemonset/alloy/templates/configmap.yaml @@ -0,0 +1,42 @@ +--- +# Source: alloy/templates/configmap.yaml +apiVersion: v1 +kind: ConfigMap +metadata: + name: alloy + labels: + helm.sh/chart: alloy + app.kubernetes.io/name: alloy + app.kubernetes.io/instance: alloy + app.kubernetes.io/version: "vX.Y.Z" + app.kubernetes.io/managed-by: Helm +data: + config.river: |- + logging { + 
level = "info" + format = "logfmt" + } + + discovery.kubernetes "pods" { + role = "pod" + } + + discovery.kubernetes "nodes" { + role = "node" + } + + discovery.kubernetes "services" { + role = "service" + } + + discovery.kubernetes "endpoints" { + role = "endpoints" + } + + discovery.kubernetes "endpointslices" { + role = "endpointslice" + } + + discovery.kubernetes "ingresses" { + role = "ingress" + } diff --git a/operations/helm/tests/additional-serviceaccount-label/grafana-agent/templates/controllers/daemonset.yaml b/operations/helm/tests/create-daemonset/alloy/templates/controllers/daemonset.yaml similarity index 78% rename from operations/helm/tests/additional-serviceaccount-label/grafana-agent/templates/controllers/daemonset.yaml rename to operations/helm/tests/create-daemonset/alloy/templates/controllers/daemonset.yaml index 97d9cbfb9b..fdd2df0969 100644 --- a/operations/helm/tests/additional-serviceaccount-label/grafana-agent/templates/controllers/daemonset.yaml +++ b/operations/helm/tests/create-daemonset/alloy/templates/controllers/daemonset.yaml @@ -1,30 +1,30 @@ --- -# Source: grafana-agent/templates/controllers/daemonset.yaml +# Source: alloy/templates/controllers/daemonset.yaml apiVersion: apps/v1 kind: DaemonSet metadata: - name: grafana-agent + name: alloy labels: - helm.sh/chart: grafana-agent - app.kubernetes.io/name: grafana-agent - app.kubernetes.io/instance: grafana-agent + helm.sh/chart: alloy + app.kubernetes.io/name: alloy + app.kubernetes.io/instance: alloy app.kubernetes.io/version: "vX.Y.Z" app.kubernetes.io/managed-by: Helm spec: minReadySeconds: 10 selector: matchLabels: - app.kubernetes.io/name: grafana-agent - app.kubernetes.io/instance: grafana-agent + app.kubernetes.io/name: alloy + app.kubernetes.io/instance: alloy template: metadata: annotations: kubectl.kubernetes.io/default-container: alloy labels: - app.kubernetes.io/name: grafana-agent - app.kubernetes.io/instance: grafana-agent + app.kubernetes.io/name: alloy + app.kubernetes.io/instance: alloy spec: - serviceAccountName: grafana-agent + serviceAccountName: alloy containers: - name: alloy image: docker.io/grafana/alloy:v0.40.2 @@ -71,4 +71,4 @@ spec: volumes: - name: config configMap: - name: grafana-agent + name: alloy diff --git a/operations/helm/tests/create-daemonset/alloy/templates/rbac.yaml b/operations/helm/tests/create-daemonset/alloy/templates/rbac.yaml new file mode 100644 index 0000000000..53ca27544f --- /dev/null +++ b/operations/helm/tests/create-daemonset/alloy/templates/rbac.yaml @@ -0,0 +1,117 @@ +--- +# Source: alloy/templates/rbac.yaml +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRole +metadata: + name: alloy + labels: + helm.sh/chart: alloy + app.kubernetes.io/name: alloy + app.kubernetes.io/instance: alloy + app.kubernetes.io/version: "vX.Y.Z" + app.kubernetes.io/managed-by: Helm +rules: + # Rules which allow discovery.kubernetes to function. + - apiGroups: + - "" + - "discovery.k8s.io" + - "networking.k8s.io" + resources: + - endpoints + - endpointslices + - ingresses + - nodes + - nodes/proxy + - nodes/metrics + - pods + - services + verbs: + - get + - list + - watch + # Rules which allow loki.source.kubernetes and loki.source.podlogs to work. + - apiGroups: + - "" + resources: + - pods + - pods/log + - namespaces + verbs: + - get + - list + - watch + - apiGroups: + - "monitoring.grafana.com" + resources: + - podlogs + verbs: + - get + - list + - watch + # Rules which allow mimir.rules.kubernetes to work. 
+  - apiGroups: ["monitoring.coreos.com"]
+    resources:
+      - prometheusrules
+    verbs:
+      - get
+      - list
+      - watch
+  - nonResourceURLs:
+      - /metrics
+    verbs:
+      - get
+  # Rules for prometheus.kubernetes.*
+  - apiGroups: ["monitoring.coreos.com"]
+    resources:
+      - podmonitors
+      - servicemonitors
+      - probes
+    verbs:
+      - get
+      - list
+      - watch
+  # Rules which allow eventhandler to work.
+  - apiGroups:
+      - ""
+    resources:
+      - events
+    verbs:
+      - get
+      - list
+      - watch
+  # needed for remote.kubernetes.*
+  - apiGroups: [""]
+    resources:
+      - "configmaps"
+      - "secrets"
+    verbs:
+      - get
+      - list
+      - watch
+  # needed for otelcol.processor.k8sattributes
+  - apiGroups: ["apps"]
+    resources: ["replicasets"]
+    verbs: ["get", "list", "watch"]
+  - apiGroups: ["extensions"]
+    resources: ["replicasets"]
+    verbs: ["get", "list", "watch"]
+---
+# Source: alloy/templates/rbac.yaml
+apiVersion: rbac.authorization.k8s.io/v1
+kind: ClusterRoleBinding
+metadata:
+  name: alloy
+  labels:
+    helm.sh/chart: alloy
+    app.kubernetes.io/name: alloy
+    app.kubernetes.io/instance: alloy
+    app.kubernetes.io/version: "vX.Y.Z"
+    app.kubernetes.io/managed-by: Helm
+roleRef:
+  apiGroup: rbac.authorization.k8s.io
+  kind: ClusterRole
+  name: alloy
+subjects:
+  - kind: ServiceAccount
+    name: alloy
+    namespace: default
diff --git a/operations/helm/tests/create-daemonset/alloy/templates/service.yaml b/operations/helm/tests/create-daemonset/alloy/templates/service.yaml
new file mode 100644
index 0000000000..34c0608fd3
--- /dev/null
+++ b/operations/helm/tests/create-daemonset/alloy/templates/service.yaml
@@ -0,0 +1,23 @@
+---
+# Source: alloy/templates/service.yaml
+apiVersion: v1
+kind: Service
+metadata:
+  name: alloy
+  labels:
+    helm.sh/chart: alloy
+    app.kubernetes.io/name: alloy
+    app.kubernetes.io/instance: alloy
+    app.kubernetes.io/version: "vX.Y.Z"
+    app.kubernetes.io/managed-by: Helm
+spec:
+  type: ClusterIP
+  selector:
+    app.kubernetes.io/name: alloy
+    app.kubernetes.io/instance: alloy
+  internalTrafficPolicy: Cluster
+  ports:
+    - name: http-metrics
+      port: 80
+      targetPort: 80
+      protocol: "TCP"
diff --git a/operations/helm/tests/create-daemonset/alloy/templates/serviceaccount.yaml b/operations/helm/tests/create-daemonset/alloy/templates/serviceaccount.yaml
new file mode 100644
index 0000000000..46df991efb
--- /dev/null
+++ b/operations/helm/tests/create-daemonset/alloy/templates/serviceaccount.yaml
@@ -0,0 +1,13 @@
+---
+# Source: alloy/templates/serviceaccount.yaml
+apiVersion: v1
+kind: ServiceAccount
+metadata:
+  name: alloy
+  namespace: default
+  labels:
+    helm.sh/chart: alloy
+    app.kubernetes.io/name: alloy
+    app.kubernetes.io/instance: alloy
+    app.kubernetes.io/version: "vX.Y.Z"
+    app.kubernetes.io/managed-by: Helm
diff --git a/operations/helm/tests/create-daemonset/grafana-agent/templates/configmap.yaml b/operations/helm/tests/create-daemonset/grafana-agent/templates/configmap.yaml
deleted file mode 100644
index 2fdc6f0117..0000000000
--- a/operations/helm/tests/create-daemonset/grafana-agent/templates/configmap.yaml
+++ /dev/null
@@ -1,42 +0,0 @@
----
-# Source: grafana-agent/templates/configmap.yaml
-apiVersion: v1
-kind: ConfigMap
-metadata:
-  name: grafana-agent
-  labels:
-    helm.sh/chart: grafana-agent
-    app.kubernetes.io/name: grafana-agent
-    app.kubernetes.io/instance: grafana-agent
-    app.kubernetes.io/version: "vX.Y.Z"
-    app.kubernetes.io/managed-by: Helm
-data:
-  config.river: |-
-    logging {
-      level = "info"
-      format = "logfmt"
-    }
-
-    discovery.kubernetes "pods" {
-      role = "pod"
-    }
-
-    discovery.kubernetes "nodes" {
- role = "node" - } - - discovery.kubernetes "services" { - role = "service" - } - - discovery.kubernetes "endpoints" { - role = "endpoints" - } - - discovery.kubernetes "endpointslices" { - role = "endpointslice" - } - - discovery.kubernetes "ingresses" { - role = "ingress" - } diff --git a/operations/helm/tests/create-daemonset/grafana-agent/templates/rbac.yaml b/operations/helm/tests/create-daemonset/grafana-agent/templates/rbac.yaml deleted file mode 100644 index 3765583fb6..0000000000 --- a/operations/helm/tests/create-daemonset/grafana-agent/templates/rbac.yaml +++ /dev/null @@ -1,117 +0,0 @@ ---- -# Source: grafana-agent/templates/rbac.yaml -apiVersion: rbac.authorization.k8s.io/v1 -kind: ClusterRole -metadata: - name: grafana-agent - labels: - helm.sh/chart: grafana-agent - app.kubernetes.io/name: grafana-agent - app.kubernetes.io/instance: grafana-agent - app.kubernetes.io/version: "vX.Y.Z" - app.kubernetes.io/managed-by: Helm -rules: - # Rules which allow discovery.kubernetes to function. - - apiGroups: - - "" - - "discovery.k8s.io" - - "networking.k8s.io" - resources: - - endpoints - - endpointslices - - ingresses - - nodes - - nodes/proxy - - nodes/metrics - - pods - - services - verbs: - - get - - list - - watch - # Rules which allow loki.source.kubernetes and loki.source.podlogs to work. - - apiGroups: - - "" - resources: - - pods - - pods/log - - namespaces - verbs: - - get - - list - - watch - - apiGroups: - - "monitoring.grafana.com" - resources: - - podlogs - verbs: - - get - - list - - watch - # Rules which allow mimir.rules.kubernetes to work. - - apiGroups: ["monitoring.coreos.com"] - resources: - - prometheusrules - verbs: - - get - - list - - watch - - nonResourceURLs: - - /metrics - verbs: - - get - # Rules for prometheus.kubernetes.* - - apiGroups: ["monitoring.coreos.com"] - resources: - - podmonitors - - servicemonitors - - probes - verbs: - - get - - list - - watch - # Rules which allow eventhandler to work. 
-  - apiGroups:
-      - ""
-    resources:
-      - events
-    verbs:
-      - get
-      - list
-      - watch
-  # needed for remote.kubernetes.*
-  - apiGroups: [""]
-    resources:
-      - "configmaps"
-      - "secrets"
-    verbs:
-      - get
-      - list
-      - watch
-  # needed for otelcol.processor.k8sattributes
-  - apiGroups: ["apps"]
-    resources: ["replicasets"]
-    verbs: ["get", "list", "watch"]
-  - apiGroups: ["extensions"]
-    resources: ["replicasets"]
-    verbs: ["get", "list", "watch"]
----
-# Source: grafana-agent/templates/rbac.yaml
-apiVersion: rbac.authorization.k8s.io/v1
-kind: ClusterRoleBinding
-metadata:
-  name: grafana-agent
-  labels:
-    helm.sh/chart: grafana-agent
-    app.kubernetes.io/name: grafana-agent
-    app.kubernetes.io/instance: grafana-agent
-    app.kubernetes.io/version: "vX.Y.Z"
-    app.kubernetes.io/managed-by: Helm
-roleRef:
-  apiGroup: rbac.authorization.k8s.io
-  kind: ClusterRole
-  name: grafana-agent
-subjects:
-  - kind: ServiceAccount
-    name: grafana-agent
-    namespace: default
diff --git a/operations/helm/tests/create-daemonset/grafana-agent/templates/service.yaml b/operations/helm/tests/create-daemonset/grafana-agent/templates/service.yaml
deleted file mode 100644
index c98f79428b..0000000000
--- a/operations/helm/tests/create-daemonset/grafana-agent/templates/service.yaml
+++ /dev/null
@@ -1,23 +0,0 @@
----
-# Source: grafana-agent/templates/service.yaml
-apiVersion: v1
-kind: Service
-metadata:
-  name: grafana-agent
-  labels:
-    helm.sh/chart: grafana-agent
-    app.kubernetes.io/name: grafana-agent
-    app.kubernetes.io/instance: grafana-agent
-    app.kubernetes.io/version: "vX.Y.Z"
-    app.kubernetes.io/managed-by: Helm
-spec:
-  type: ClusterIP
-  selector:
-    app.kubernetes.io/name: grafana-agent
-    app.kubernetes.io/instance: grafana-agent
-  internalTrafficPolicy: Cluster
-  ports:
-    - name: http-metrics
-      port: 80
-      targetPort: 80
-      protocol: "TCP"
diff --git a/operations/helm/tests/create-daemonset/grafana-agent/templates/serviceaccount.yaml b/operations/helm/tests/create-daemonset/grafana-agent/templates/serviceaccount.yaml
deleted file mode 100644
index 65d7e0df38..0000000000
--- a/operations/helm/tests/create-daemonset/grafana-agent/templates/serviceaccount.yaml
+++ /dev/null
@@ -1,13 +0,0 @@
----
-# Source: grafana-agent/templates/serviceaccount.yaml
-apiVersion: v1
-kind: ServiceAccount
-metadata:
-  name: grafana-agent
-  namespace: default
-  labels:
-    helm.sh/chart: grafana-agent
-    app.kubernetes.io/name: grafana-agent
-    app.kubernetes.io/instance: grafana-agent
-    app.kubernetes.io/version: "vX.Y.Z"
-    app.kubernetes.io/managed-by: Helm
diff --git a/operations/helm/tests/create-deployment-autoscaling/alloy/templates/configmap.yaml b/operations/helm/tests/create-deployment-autoscaling/alloy/templates/configmap.yaml
new file mode 100644
index 0000000000..c9ae77aa1e
--- /dev/null
+++ b/operations/helm/tests/create-deployment-autoscaling/alloy/templates/configmap.yaml
@@ -0,0 +1,42 @@
+---
+# Source: alloy/templates/configmap.yaml
+apiVersion: v1
+kind: ConfigMap
+metadata:
+  name: alloy
+  labels:
+    helm.sh/chart: alloy
+    app.kubernetes.io/name: alloy
+    app.kubernetes.io/instance: alloy
+    app.kubernetes.io/version: "vX.Y.Z"
+    app.kubernetes.io/managed-by: Helm
+data:
+  config.river: |-
+    logging {
+      level = "info"
+      format = "logfmt"
+    }
+
+    discovery.kubernetes "pods" {
+      role = "pod"
+    }
+
+    discovery.kubernetes "nodes" {
+      role = "node"
+    }
+
+    discovery.kubernetes "services" {
+      role = "service"
+    }
+
+    discovery.kubernetes "endpoints" {
+      role = "endpoints"
+    }
+
+    discovery.kubernetes "endpointslices" {
+      role = "endpointslice"
+    }
+
+    discovery.kubernetes "ingresses" {
+      role = "ingress"
+    }
diff --git a/operations/helm/tests/create-deployment-autoscaling/grafana-agent/templates/controllers/deployment.yaml b/operations/helm/tests/create-deployment-autoscaling/alloy/templates/controllers/deployment.yaml
similarity index 79%
rename from operations/helm/tests/create-deployment-autoscaling/grafana-agent/templates/controllers/deployment.yaml
rename to operations/helm/tests/create-deployment-autoscaling/alloy/templates/controllers/deployment.yaml
index efd291c67f..9541ab33c3 100644
--- a/operations/helm/tests/create-deployment-autoscaling/grafana-agent/templates/controllers/deployment.yaml
+++ b/operations/helm/tests/create-deployment-autoscaling/alloy/templates/controllers/deployment.yaml
@@ -1,30 +1,30 @@
 ---
-# Source: grafana-agent/templates/controllers/deployment.yaml
+# Source: alloy/templates/controllers/deployment.yaml
 apiVersion: apps/v1
 kind: Deployment
 metadata:
-  name: grafana-agent
+  name: alloy
   labels:
-    helm.sh/chart: grafana-agent
-    app.kubernetes.io/name: grafana-agent
-    app.kubernetes.io/instance: grafana-agent
+    helm.sh/chart: alloy
+    app.kubernetes.io/name: alloy
+    app.kubernetes.io/instance: alloy
     app.kubernetes.io/version: "vX.Y.Z"
     app.kubernetes.io/managed-by: Helm
 spec:
   minReadySeconds: 10
   selector:
     matchLabels:
-      app.kubernetes.io/name: grafana-agent
-      app.kubernetes.io/instance: grafana-agent
+      app.kubernetes.io/name: alloy
+      app.kubernetes.io/instance: alloy
   template:
     metadata:
      annotations:
        kubectl.kubernetes.io/default-container: alloy
      labels:
-        app.kubernetes.io/name: grafana-agent
-        app.kubernetes.io/instance: grafana-agent
+        app.kubernetes.io/name: alloy
+        app.kubernetes.io/instance: alloy
     spec:
-      serviceAccountName: grafana-agent
+      serviceAccountName: alloy
      containers:
        - name: alloy
          image: docker.io/grafana/alloy:v0.40.2
@@ -74,4 +74,4 @@ spec:
       volumes:
         - name: config
           configMap:
-            name: grafana-agent
+            name: alloy
diff --git a/operations/helm/tests/create-deployment-autoscaling/grafana-agent/templates/hpa.yaml b/operations/helm/tests/create-deployment-autoscaling/alloy/templates/hpa.yaml
similarity index 82%
rename from operations/helm/tests/create-deployment-autoscaling/grafana-agent/templates/hpa.yaml
rename to operations/helm/tests/create-deployment-autoscaling/alloy/templates/hpa.yaml
index b181724fe4..7914063972 100644
--- a/operations/helm/tests/create-deployment-autoscaling/grafana-agent/templates/hpa.yaml
+++ b/operations/helm/tests/create-deployment-autoscaling/alloy/templates/hpa.yaml
@@ -1,20 +1,20 @@
 ---
-# Source: grafana-agent/templates/hpa.yaml
+# Source: alloy/templates/hpa.yaml
 apiVersion: autoscaling/v2
 kind: HorizontalPodAutoscaler
 metadata:
-  name: grafana-agent
+  name: alloy
   labels:
-    helm.sh/chart: grafana-agent
-    app.kubernetes.io/name: grafana-agent
-    app.kubernetes.io/instance: grafana-agent
+    helm.sh/chart: alloy
+    app.kubernetes.io/name: alloy
+    app.kubernetes.io/instance: alloy
     app.kubernetes.io/version: "vX.Y.Z"
     app.kubernetes.io/managed-by: Helm
 spec:
   scaleTargetRef:
     apiVersion: apps/v1
     kind: deployment
-    name: grafana-agent
+    name: alloy
   minReplicas: 1
   maxReplicas: 5
   behavior:
diff --git a/operations/helm/tests/create-deployment-autoscaling/alloy/templates/rbac.yaml b/operations/helm/tests/create-deployment-autoscaling/alloy/templates/rbac.yaml
new file mode 100644
index 0000000000..53ca27544f
--- /dev/null
+++ b/operations/helm/tests/create-deployment-autoscaling/alloy/templates/rbac.yaml
@@ -0,0 +1,117 @@
+---
+# Source: alloy/templates/rbac.yaml
+apiVersion: rbac.authorization.k8s.io/v1
+kind: ClusterRole
+metadata:
+  name: alloy
+  labels:
+    helm.sh/chart: alloy
+    app.kubernetes.io/name: alloy
+    app.kubernetes.io/instance: alloy
+    app.kubernetes.io/version: "vX.Y.Z"
+    app.kubernetes.io/managed-by: Helm
+rules:
+  # Rules which allow discovery.kubernetes to function.
+  - apiGroups:
+      - ""
+      - "discovery.k8s.io"
+      - "networking.k8s.io"
+    resources:
+      - endpoints
+      - endpointslices
+      - ingresses
+      - nodes
+      - nodes/proxy
+      - nodes/metrics
+      - pods
+      - services
+    verbs:
+      - get
+      - list
+      - watch
+  # Rules which allow loki.source.kubernetes and loki.source.podlogs to work.
+  - apiGroups:
+      - ""
+    resources:
+      - pods
+      - pods/log
+      - namespaces
+    verbs:
+      - get
+      - list
+      - watch
+  - apiGroups:
+      - "monitoring.grafana.com"
+    resources:
+      - podlogs
+    verbs:
+      - get
+      - list
+      - watch
+  # Rules which allow mimir.rules.kubernetes to work.
+  - apiGroups: ["monitoring.coreos.com"]
+    resources:
+      - prometheusrules
+    verbs:
+      - get
+      - list
+      - watch
+  - nonResourceURLs:
+      - /metrics
+    verbs:
+      - get
+  # Rules for prometheus.kubernetes.*
+  - apiGroups: ["monitoring.coreos.com"]
+    resources:
+      - podmonitors
+      - servicemonitors
+      - probes
+    verbs:
+      - get
+      - list
+      - watch
+  # Rules which allow eventhandler to work.
+  - apiGroups:
+      - ""
+    resources:
+      - events
+    verbs:
+      - get
+      - list
+      - watch
+  # needed for remote.kubernetes.*
+  - apiGroups: [""]
+    resources:
+      - "configmaps"
+      - "secrets"
+    verbs:
+      - get
+      - list
+      - watch
+  # needed for otelcol.processor.k8sattributes
+  - apiGroups: ["apps"]
+    resources: ["replicasets"]
+    verbs: ["get", "list", "watch"]
+  - apiGroups: ["extensions"]
+    resources: ["replicasets"]
+    verbs: ["get", "list", "watch"]
+---
+# Source: alloy/templates/rbac.yaml
+apiVersion: rbac.authorization.k8s.io/v1
+kind: ClusterRoleBinding
+metadata:
+  name: alloy
+  labels:
+    helm.sh/chart: alloy
+    app.kubernetes.io/name: alloy
+    app.kubernetes.io/instance: alloy
+    app.kubernetes.io/version: "vX.Y.Z"
+    app.kubernetes.io/managed-by: Helm
+roleRef:
+  apiGroup: rbac.authorization.k8s.io
+  kind: ClusterRole
+  name: alloy
+subjects:
+  - kind: ServiceAccount
+    name: alloy
+    namespace: default
diff --git a/operations/helm/tests/create-deployment-autoscaling/alloy/templates/service.yaml b/operations/helm/tests/create-deployment-autoscaling/alloy/templates/service.yaml
new file mode 100644
index 0000000000..34c0608fd3
--- /dev/null
+++ b/operations/helm/tests/create-deployment-autoscaling/alloy/templates/service.yaml
@@ -0,0 +1,23 @@
+---
+# Source: alloy/templates/service.yaml
+apiVersion: v1
+kind: Service
+metadata:
+  name: alloy
+  labels:
+    helm.sh/chart: alloy
+    app.kubernetes.io/name: alloy
+    app.kubernetes.io/instance: alloy
+    app.kubernetes.io/version: "vX.Y.Z"
+    app.kubernetes.io/managed-by: Helm
+spec:
+  type: ClusterIP
+  selector:
+    app.kubernetes.io/name: alloy
+    app.kubernetes.io/instance: alloy
+  internalTrafficPolicy: Cluster
+  ports:
+    - name: http-metrics
+      port: 80
+      targetPort: 80
+      protocol: "TCP"
diff --git a/operations/helm/tests/create-deployment-autoscaling/alloy/templates/serviceaccount.yaml b/operations/helm/tests/create-deployment-autoscaling/alloy/templates/serviceaccount.yaml
new file mode 100644
index 0000000000..46df991efb
--- /dev/null
+++ b/operations/helm/tests/create-deployment-autoscaling/alloy/templates/serviceaccount.yaml
@@ -0,0 +1,13 @@
+---
+# Source: alloy/templates/serviceaccount.yaml
+apiVersion: v1
+kind: ServiceAccount
+metadata:
+  name: alloy
+  namespace: default
+  labels:
+    helm.sh/chart: alloy
+    app.kubernetes.io/name: alloy
+    app.kubernetes.io/instance: alloy
+    app.kubernetes.io/version: "vX.Y.Z"
+    app.kubernetes.io/managed-by: Helm
diff --git a/operations/helm/tests/create-deployment-autoscaling/grafana-agent/templates/configmap.yaml b/operations/helm/tests/create-deployment-autoscaling/grafana-agent/templates/configmap.yaml
deleted file mode 100644
index 2fdc6f0117..0000000000
--- a/operations/helm/tests/create-deployment-autoscaling/grafana-agent/templates/configmap.yaml
+++ /dev/null
@@ -1,42 +0,0 @@
----
-# Source: grafana-agent/templates/configmap.yaml
-apiVersion: v1
-kind: ConfigMap
-metadata:
-  name: grafana-agent
-  labels:
-    helm.sh/chart: grafana-agent
-    app.kubernetes.io/name: grafana-agent
-    app.kubernetes.io/instance: grafana-agent
-    app.kubernetes.io/version: "vX.Y.Z"
-    app.kubernetes.io/managed-by: Helm
-data:
-  config.river: |-
-    logging {
-      level = "info"
-      format = "logfmt"
-    }
-
-    discovery.kubernetes "pods" {
-      role = "pod"
-    }
-
-    discovery.kubernetes "nodes" {
-      role = "node"
-    }
-
-    discovery.kubernetes "services" {
-      role = "service"
-    }
-
-    discovery.kubernetes "endpoints" {
-      role = "endpoints"
-    }
-
-    discovery.kubernetes "endpointslices" {
-      role = "endpointslice"
-    }
-
-    discovery.kubernetes "ingresses" {
-      role = "ingress"
-    }
diff --git a/operations/helm/tests/create-deployment-autoscaling/grafana-agent/templates/rbac.yaml b/operations/helm/tests/create-deployment-autoscaling/grafana-agent/templates/rbac.yaml
deleted file mode 100644
index 3765583fb6..0000000000
--- a/operations/helm/tests/create-deployment-autoscaling/grafana-agent/templates/rbac.yaml
+++ /dev/null
@@ -1,117 +0,0 @@
----
-# Source: grafana-agent/templates/rbac.yaml
-apiVersion: rbac.authorization.k8s.io/v1
-kind: ClusterRole
-metadata:
-  name: grafana-agent
-  labels:
-    helm.sh/chart: grafana-agent
-    app.kubernetes.io/name: grafana-agent
-    app.kubernetes.io/instance: grafana-agent
-    app.kubernetes.io/version: "vX.Y.Z"
-    app.kubernetes.io/managed-by: Helm
-rules:
-  # Rules which allow discovery.kubernetes to function.
-  - apiGroups:
-      - ""
-      - "discovery.k8s.io"
-      - "networking.k8s.io"
-    resources:
-      - endpoints
-      - endpointslices
-      - ingresses
-      - nodes
-      - nodes/proxy
-      - nodes/metrics
-      - pods
-      - services
-    verbs:
-      - get
-      - list
-      - watch
-  # Rules which allow loki.source.kubernetes and loki.source.podlogs to work.
-  - apiGroups:
-      - ""
-    resources:
-      - pods
-      - pods/log
-      - namespaces
-    verbs:
-      - get
-      - list
-      - watch
-  - apiGroups:
-      - "monitoring.grafana.com"
-    resources:
-      - podlogs
-    verbs:
-      - get
-      - list
-      - watch
-  # Rules which allow mimir.rules.kubernetes to work.
-  - apiGroups: ["monitoring.coreos.com"]
-    resources:
-      - prometheusrules
-    verbs:
-      - get
-      - list
-      - watch
-  - nonResourceURLs:
-      - /metrics
-    verbs:
-      - get
-  # Rules for prometheus.kubernetes.*
-  - apiGroups: ["monitoring.coreos.com"]
-    resources:
-      - podmonitors
-      - servicemonitors
-      - probes
-    verbs:
-      - get
-      - list
-      - watch
-  # Rules which allow eventhandler to work.
-  - apiGroups:
-      - ""
-    resources:
-      - events
-    verbs:
-      - get
-      - list
-      - watch
-  # needed for remote.kubernetes.*
-  - apiGroups: [""]
-    resources:
-      - "configmaps"
-      - "secrets"
-    verbs:
-      - get
-      - list
-      - watch
-  # needed for otelcol.processor.k8sattributes
-  - apiGroups: ["apps"]
-    resources: ["replicasets"]
-    verbs: ["get", "list", "watch"]
-  - apiGroups: ["extensions"]
-    resources: ["replicasets"]
-    verbs: ["get", "list", "watch"]
----
-# Source: grafana-agent/templates/rbac.yaml
-apiVersion: rbac.authorization.k8s.io/v1
-kind: ClusterRoleBinding
-metadata:
-  name: grafana-agent
-  labels:
-    helm.sh/chart: grafana-agent
-    app.kubernetes.io/name: grafana-agent
-    app.kubernetes.io/instance: grafana-agent
-    app.kubernetes.io/version: "vX.Y.Z"
-    app.kubernetes.io/managed-by: Helm
-roleRef:
-  apiGroup: rbac.authorization.k8s.io
-  kind: ClusterRole
-  name: grafana-agent
-subjects:
-  - kind: ServiceAccount
-    name: grafana-agent
-    namespace: default
diff --git a/operations/helm/tests/create-deployment-autoscaling/grafana-agent/templates/service.yaml b/operations/helm/tests/create-deployment-autoscaling/grafana-agent/templates/service.yaml
deleted file mode 100644
index c98f79428b..0000000000
--- a/operations/helm/tests/create-deployment-autoscaling/grafana-agent/templates/service.yaml
+++ /dev/null
@@ -1,23 +0,0 @@
----
-# Source: grafana-agent/templates/service.yaml
-apiVersion: v1
-kind: Service
-metadata:
-  name: grafana-agent
-  labels:
-    helm.sh/chart: grafana-agent
-    app.kubernetes.io/name: grafana-agent
-    app.kubernetes.io/instance: grafana-agent
-    app.kubernetes.io/version: "vX.Y.Z"
-    app.kubernetes.io/managed-by: Helm
-spec:
-  type: ClusterIP
-  selector:
-    app.kubernetes.io/name: grafana-agent
-    app.kubernetes.io/instance: grafana-agent
-  internalTrafficPolicy: Cluster
-  ports:
-    - name: http-metrics
-      port: 80
-      targetPort: 80
-      protocol: "TCP"
diff --git a/operations/helm/tests/create-deployment-autoscaling/grafana-agent/templates/serviceaccount.yaml b/operations/helm/tests/create-deployment-autoscaling/grafana-agent/templates/serviceaccount.yaml
deleted file mode 100644
index 65d7e0df38..0000000000
--- a/operations/helm/tests/create-deployment-autoscaling/grafana-agent/templates/serviceaccount.yaml
+++ /dev/null
@@ -1,13 +0,0 @@
----
-# Source: grafana-agent/templates/serviceaccount.yaml
-apiVersion: v1
-kind: ServiceAccount
-metadata:
-  name: grafana-agent
-  namespace: default
-  labels:
-    helm.sh/chart: grafana-agent
-    app.kubernetes.io/name: grafana-agent
-    app.kubernetes.io/instance: grafana-agent
-    app.kubernetes.io/version: "vX.Y.Z"
-    app.kubernetes.io/managed-by: Helm
diff --git a/operations/helm/tests/create-deployment/alloy/templates/configmap.yaml b/operations/helm/tests/create-deployment/alloy/templates/configmap.yaml
new file mode 100644
index 0000000000..c9ae77aa1e
--- /dev/null
+++ b/operations/helm/tests/create-deployment/alloy/templates/configmap.yaml
@@ -0,0 +1,42 @@
+---
+# Source: alloy/templates/configmap.yaml
+apiVersion: v1
+kind: ConfigMap
+metadata:
+  name: alloy
+  labels:
+    helm.sh/chart: alloy
+    app.kubernetes.io/name: alloy
+    app.kubernetes.io/instance: alloy
+    app.kubernetes.io/version: "vX.Y.Z"
+    app.kubernetes.io/managed-by: Helm
+data:
+  config.river: |-
+    logging {
+      level = "info"
+      format = "logfmt"
+    }
+
+    discovery.kubernetes "pods" {
+      role = "pod"
+    }
+
+    discovery.kubernetes "nodes" {
+      role = "node"
+    }
+
+    discovery.kubernetes "services" {
+      role = "service"
+    }
+
+    discovery.kubernetes "endpoints" {
+      role = "endpoints"
+    }
+
+    discovery.kubernetes "endpointslices" {
+      role = "endpointslice"
+    }
+
+    discovery.kubernetes "ingresses" {
+      role = "ingress"
+    }
diff --git a/operations/helm/tests/create-deployment/grafana-agent/templates/controllers/deployment.yaml b/operations/helm/tests/create-deployment/alloy/templates/controllers/deployment.yaml
similarity index 78%
rename from operations/helm/tests/create-deployment/grafana-agent/templates/controllers/deployment.yaml
rename to operations/helm/tests/create-deployment/alloy/templates/controllers/deployment.yaml
index a0bde9d5bc..2c7d54aed8 100644
--- a/operations/helm/tests/create-deployment/grafana-agent/templates/controllers/deployment.yaml
+++ b/operations/helm/tests/create-deployment/alloy/templates/controllers/deployment.yaml
@@ -1,13 +1,13 @@
 ---
-# Source: grafana-agent/templates/controllers/deployment.yaml
+# Source: alloy/templates/controllers/deployment.yaml
 apiVersion: apps/v1
 kind: Deployment
 metadata:
-  name: grafana-agent
+  name: alloy
   labels:
-    helm.sh/chart: grafana-agent
-    app.kubernetes.io/name: grafana-agent
-    app.kubernetes.io/instance: grafana-agent
+    helm.sh/chart: alloy
+    app.kubernetes.io/name: alloy
+    app.kubernetes.io/instance: alloy
     app.kubernetes.io/version: "vX.Y.Z"
     app.kubernetes.io/managed-by: Helm
 spec:
@@ -15,17 +15,17 @@ spec:
   minReadySeconds: 10
   selector:
     matchLabels:
-      app.kubernetes.io/name: grafana-agent
-      app.kubernetes.io/instance: grafana-agent
+      app.kubernetes.io/name: alloy
+      app.kubernetes.io/instance: alloy
   template:
     metadata:
       annotations:
         kubectl.kubernetes.io/default-container: alloy
       labels:
-        app.kubernetes.io/name: grafana-agent
-        app.kubernetes.io/instance: grafana-agent
+        app.kubernetes.io/name: alloy
+        app.kubernetes.io/instance: alloy
     spec:
-      serviceAccountName: grafana-agent
+      serviceAccountName: alloy
       containers:
         - name: alloy
           image: docker.io/grafana/alloy:v0.40.2
@@ -72,4 +72,4 @@ spec:
       volumes:
         - name: config
           configMap:
-            name: grafana-agent
+            name: alloy
diff --git a/operations/helm/tests/create-deployment/alloy/templates/rbac.yaml b/operations/helm/tests/create-deployment/alloy/templates/rbac.yaml
new file mode 100644
index 0000000000..53ca27544f
--- /dev/null
+++ b/operations/helm/tests/create-deployment/alloy/templates/rbac.yaml
@@ -0,0 +1,117 @@
+---
+# Source: alloy/templates/rbac.yaml
+apiVersion: rbac.authorization.k8s.io/v1
+kind: ClusterRole
+metadata:
+  name: alloy
+  labels:
+    helm.sh/chart: alloy
+    app.kubernetes.io/name: alloy
+    app.kubernetes.io/instance: alloy
+    app.kubernetes.io/version: "vX.Y.Z"
+    app.kubernetes.io/managed-by: Helm
+rules:
+  # Rules which allow discovery.kubernetes to function.
+  - apiGroups:
+      - ""
+      - "discovery.k8s.io"
+      - "networking.k8s.io"
+    resources:
+      - endpoints
+      - endpointslices
+      - ingresses
+      - nodes
+      - nodes/proxy
+      - nodes/metrics
+      - pods
+      - services
+    verbs:
+      - get
+      - list
+      - watch
+  # Rules which allow loki.source.kubernetes and loki.source.podlogs to work.
+  - apiGroups:
+      - ""
+    resources:
+      - pods
+      - pods/log
+      - namespaces
+    verbs:
+      - get
+      - list
+      - watch
+  - apiGroups:
+      - "monitoring.grafana.com"
+    resources:
+      - podlogs
+    verbs:
+      - get
+      - list
+      - watch
+  # Rules which allow mimir.rules.kubernetes to work.
+  - apiGroups: ["monitoring.coreos.com"]
+    resources:
+      - prometheusrules
+    verbs:
+      - get
+      - list
+      - watch
+  - nonResourceURLs:
+      - /metrics
+    verbs:
+      - get
+  # Rules for prometheus.kubernetes.*
+  - apiGroups: ["monitoring.coreos.com"]
+    resources:
+      - podmonitors
+      - servicemonitors
+      - probes
+    verbs:
+      - get
+      - list
+      - watch
+  # Rules which allow eventhandler to work.
+  - apiGroups:
+      - ""
+    resources:
+      - events
+    verbs:
+      - get
+      - list
+      - watch
+  # needed for remote.kubernetes.*
+  - apiGroups: [""]
+    resources:
+      - "configmaps"
+      - "secrets"
+    verbs:
+      - get
+      - list
+      - watch
+  # needed for otelcol.processor.k8sattributes
+  - apiGroups: ["apps"]
+    resources: ["replicasets"]
+    verbs: ["get", "list", "watch"]
+  - apiGroups: ["extensions"]
+    resources: ["replicasets"]
+    verbs: ["get", "list", "watch"]
+---
+# Source: alloy/templates/rbac.yaml
+apiVersion: rbac.authorization.k8s.io/v1
+kind: ClusterRoleBinding
+metadata:
+  name: alloy
+  labels:
+    helm.sh/chart: alloy
+    app.kubernetes.io/name: alloy
+    app.kubernetes.io/instance: alloy
+    app.kubernetes.io/version: "vX.Y.Z"
+    app.kubernetes.io/managed-by: Helm
+roleRef:
+  apiGroup: rbac.authorization.k8s.io
+  kind: ClusterRole
+  name: alloy
+subjects:
+  - kind: ServiceAccount
+    name: alloy
+    namespace: default
diff --git a/operations/helm/tests/create-deployment/alloy/templates/service.yaml b/operations/helm/tests/create-deployment/alloy/templates/service.yaml
new file mode 100644
index 0000000000..34c0608fd3
--- /dev/null
+++ b/operations/helm/tests/create-deployment/alloy/templates/service.yaml
@@ -0,0 +1,23 @@
+---
+# Source: alloy/templates/service.yaml
+apiVersion: v1
+kind: Service
+metadata:
+  name: alloy
+  labels:
+    helm.sh/chart: alloy
+    app.kubernetes.io/name: alloy
+    app.kubernetes.io/instance: alloy
+    app.kubernetes.io/version: "vX.Y.Z"
+    app.kubernetes.io/managed-by: Helm
+spec:
+  type: ClusterIP
+  selector:
+    app.kubernetes.io/name: alloy
+    app.kubernetes.io/instance: alloy
+  internalTrafficPolicy: Cluster
+  ports:
+    - name: http-metrics
+      port: 80
+      targetPort: 80
+      protocol: "TCP"
diff --git a/operations/helm/tests/create-deployment/alloy/templates/serviceaccount.yaml b/operations/helm/tests/create-deployment/alloy/templates/serviceaccount.yaml
new file mode 100644
index 0000000000..46df991efb
--- /dev/null
+++ b/operations/helm/tests/create-deployment/alloy/templates/serviceaccount.yaml
@@ -0,0 +1,13 @@
+---
+# Source: alloy/templates/serviceaccount.yaml
+apiVersion: v1
+kind: ServiceAccount
+metadata:
+  name: alloy
+  namespace: default
+  labels:
+    helm.sh/chart: alloy
+    app.kubernetes.io/name: alloy
+    app.kubernetes.io/instance: alloy
+    app.kubernetes.io/version: "vX.Y.Z"
+    app.kubernetes.io/managed-by: Helm
diff --git a/operations/helm/tests/create-deployment/grafana-agent/templates/configmap.yaml b/operations/helm/tests/create-deployment/grafana-agent/templates/configmap.yaml
deleted file mode 100644
index 2fdc6f0117..0000000000
--- a/operations/helm/tests/create-deployment/grafana-agent/templates/configmap.yaml
+++ /dev/null
@@ -1,42 +0,0 @@
----
-# Source: grafana-agent/templates/configmap.yaml
-apiVersion: v1
-kind: ConfigMap
-metadata:
-  name: grafana-agent
-  labels:
-    helm.sh/chart: grafana-agent
-    app.kubernetes.io/name: grafana-agent
-    app.kubernetes.io/instance: grafana-agent
-    app.kubernetes.io/version: "vX.Y.Z"
-    app.kubernetes.io/managed-by: Helm
-data:
-  config.river: |-
-    logging {
-      level = "info"
-      format = "logfmt"
-    }
-
-    discovery.kubernetes "pods" {
-      role = "pod"
-    }
-
-    discovery.kubernetes "nodes" {
-      role = "node"
-    }
-
-    discovery.kubernetes "services" {
-      role = "service"
-    }
-
-    discovery.kubernetes "endpoints" {
-      role = "endpoints"
-    }
-
-    discovery.kubernetes "endpointslices" {
-      role = "endpointslice"
-    }
-
-    discovery.kubernetes "ingresses" {
-      role = "ingress"
-    }
diff --git a/operations/helm/tests/create-deployment/grafana-agent/templates/rbac.yaml b/operations/helm/tests/create-deployment/grafana-agent/templates/rbac.yaml
deleted file mode 100644
index 3765583fb6..0000000000
--- a/operations/helm/tests/create-deployment/grafana-agent/templates/rbac.yaml
+++ /dev/null
@@ -1,117 +0,0 @@
----
-# Source: grafana-agent/templates/rbac.yaml
-apiVersion: rbac.authorization.k8s.io/v1
-kind: ClusterRole
-metadata:
-  name: grafana-agent
-  labels:
-    helm.sh/chart: grafana-agent
-    app.kubernetes.io/name: grafana-agent
-    app.kubernetes.io/instance: grafana-agent
-    app.kubernetes.io/version: "vX.Y.Z"
-    app.kubernetes.io/managed-by: Helm
-rules:
-  # Rules which allow discovery.kubernetes to function.
-  - apiGroups:
-      - ""
-      - "discovery.k8s.io"
-      - "networking.k8s.io"
-    resources:
-      - endpoints
-      - endpointslices
-      - ingresses
-      - nodes
-      - nodes/proxy
-      - nodes/metrics
-      - pods
-      - services
-    verbs:
-      - get
-      - list
-      - watch
-  # Rules which allow loki.source.kubernetes and loki.source.podlogs to work.
-  - apiGroups:
-      - ""
-    resources:
-      - pods
-      - pods/log
-      - namespaces
-    verbs:
-      - get
-      - list
-      - watch
-  - apiGroups:
-      - "monitoring.grafana.com"
-    resources:
-      - podlogs
-    verbs:
-      - get
-      - list
-      - watch
-  # Rules which allow mimir.rules.kubernetes to work.
-  - apiGroups: ["monitoring.coreos.com"]
-    resources:
-      - prometheusrules
-    verbs:
-      - get
-      - list
-      - watch
-  - nonResourceURLs:
-      - /metrics
-    verbs:
-      - get
-  # Rules for prometheus.kubernetes.*
-  - apiGroups: ["monitoring.coreos.com"]
-    resources:
-      - podmonitors
-      - servicemonitors
-      - probes
-    verbs:
-      - get
-      - list
-      - watch
-  # Rules which allow eventhandler to work.
-  - apiGroups:
-      - ""
-    resources:
-      - events
-    verbs:
-      - get
-      - list
-      - watch
-  # needed for remote.kubernetes.*
-  - apiGroups: [""]
-    resources:
-      - "configmaps"
-      - "secrets"
-    verbs:
-      - get
-      - list
-      - watch
-  # needed for otelcol.processor.k8sattributes
-  - apiGroups: ["apps"]
-    resources: ["replicasets"]
-    verbs: ["get", "list", "watch"]
-  - apiGroups: ["extensions"]
-    resources: ["replicasets"]
-    verbs: ["get", "list", "watch"]
----
-# Source: grafana-agent/templates/rbac.yaml
-apiVersion: rbac.authorization.k8s.io/v1
-kind: ClusterRoleBinding
-metadata:
-  name: grafana-agent
-  labels:
-    helm.sh/chart: grafana-agent
-    app.kubernetes.io/name: grafana-agent
-    app.kubernetes.io/instance: grafana-agent
-    app.kubernetes.io/version: "vX.Y.Z"
-    app.kubernetes.io/managed-by: Helm
-roleRef:
-  apiGroup: rbac.authorization.k8s.io
-  kind: ClusterRole
-  name: grafana-agent
-subjects:
-  - kind: ServiceAccount
-    name: grafana-agent
-    namespace: default
diff --git a/operations/helm/tests/create-deployment/grafana-agent/templates/service.yaml b/operations/helm/tests/create-deployment/grafana-agent/templates/service.yaml
deleted file mode 100644
index c98f79428b..0000000000
--- a/operations/helm/tests/create-deployment/grafana-agent/templates/service.yaml
+++ /dev/null
@@ -1,23 +0,0 @@
----
-# Source: grafana-agent/templates/service.yaml
-apiVersion: v1
-kind: Service
-metadata:
-  name: grafana-agent
-  labels:
-    helm.sh/chart: grafana-agent
-    app.kubernetes.io/name: grafana-agent
-    app.kubernetes.io/instance: grafana-agent
-    app.kubernetes.io/version: "vX.Y.Z"
-    app.kubernetes.io/managed-by: Helm
-spec:
-  type: ClusterIP
-  selector:
-    app.kubernetes.io/name: grafana-agent
-    app.kubernetes.io/instance: grafana-agent
-  internalTrafficPolicy: Cluster
-  ports:
-    - name: http-metrics
-      port: 80
-      targetPort: 80
-      protocol: "TCP"
diff --git a/operations/helm/tests/create-deployment/grafana-agent/templates/serviceaccount.yaml b/operations/helm/tests/create-deployment/grafana-agent/templates/serviceaccount.yaml
deleted file mode 100644
index 65d7e0df38..0000000000
--- a/operations/helm/tests/create-deployment/grafana-agent/templates/serviceaccount.yaml
+++ /dev/null
@@ -1,13 +0,0 @@
----
-# Source: grafana-agent/templates/serviceaccount.yaml
-apiVersion: v1
-kind: ServiceAccount
-metadata:
-  name: grafana-agent
-  namespace: default
-  labels:
-    helm.sh/chart: grafana-agent
-    app.kubernetes.io/name: grafana-agent
-    app.kubernetes.io/instance: grafana-agent
-    app.kubernetes.io/version: "vX.Y.Z"
-    app.kubernetes.io/managed-by: Helm
diff --git a/operations/helm/tests/create-statefulset-autoscaling/alloy/templates/configmap.yaml b/operations/helm/tests/create-statefulset-autoscaling/alloy/templates/configmap.yaml
new file mode 100644
index 0000000000..c9ae77aa1e
--- /dev/null
+++ b/operations/helm/tests/create-statefulset-autoscaling/alloy/templates/configmap.yaml
@@ -0,0 +1,42 @@
+---
+# Source: alloy/templates/configmap.yaml
+apiVersion: v1
+kind: ConfigMap
+metadata:
+  name: alloy
+  labels:
+    helm.sh/chart: alloy
+    app.kubernetes.io/name: alloy
+    app.kubernetes.io/instance: alloy
+    app.kubernetes.io/version: "vX.Y.Z"
+    app.kubernetes.io/managed-by: Helm
+data:
+  config.river: |-
+    logging {
+      level = "info"
+      format = "logfmt"
+    }
+
+    discovery.kubernetes "pods" {
+      role = "pod"
+    }
+
+    discovery.kubernetes "nodes" {
+      role = "node"
+    }
+
+    discovery.kubernetes "services" {
+      role = "service"
+    }
+
+    discovery.kubernetes "endpoints" {
+      role = "endpoints"
+    }
+
+    discovery.kubernetes "endpointslices" {
+      role = "endpointslice"
+    }
+
+    discovery.kubernetes "ingresses" {
+      role = "ingress"
+    }
diff --git a/operations/helm/tests/create-statefulset-autoscaling/grafana-agent/templates/controllers/statefulset.yaml b/operations/helm/tests/create-statefulset-autoscaling/alloy/templates/controllers/statefulset.yaml
similarity index 79%
rename from operations/helm/tests/create-statefulset-autoscaling/grafana-agent/templates/controllers/statefulset.yaml
rename to operations/helm/tests/create-statefulset-autoscaling/alloy/templates/controllers/statefulset.yaml
index 084e210b31..4306d0d521 100644
--- a/operations/helm/tests/create-statefulset-autoscaling/grafana-agent/templates/controllers/statefulset.yaml
+++ b/operations/helm/tests/create-statefulset-autoscaling/alloy/templates/controllers/statefulset.yaml
@@ -1,32 +1,32 @@
 ---
-# Source: grafana-agent/templates/controllers/statefulset.yaml
+# Source: alloy/templates/controllers/statefulset.yaml
 apiVersion: apps/v1
 kind: StatefulSet
 metadata:
-  name: grafana-agent
+  name: alloy
   labels:
-    helm.sh/chart: grafana-agent
-    app.kubernetes.io/name: grafana-agent
-    app.kubernetes.io/instance: grafana-agent
+    helm.sh/chart: alloy
+    app.kubernetes.io/name: alloy
+    app.kubernetes.io/instance: alloy
     app.kubernetes.io/version: "vX.Y.Z"
     app.kubernetes.io/managed-by: Helm
 spec:
   podManagementPolicy: Parallel
   minReadySeconds: 10
-  serviceName: grafana-agent
+  serviceName: alloy
   selector:
     matchLabels:
-      app.kubernetes.io/name: grafana-agent
-      app.kubernetes.io/instance: grafana-agent
+      app.kubernetes.io/name: alloy
+      app.kubernetes.io/instance: alloy
   template:
     metadata:
       annotations:
         kubectl.kubernetes.io/default-container: alloy
       labels:
-        app.kubernetes.io/name: grafana-agent
-        app.kubernetes.io/instance: grafana-agent
+        app.kubernetes.io/name: alloy
+        app.kubernetes.io/instance: alloy
     spec:
-      serviceAccountName: grafana-agent
+      serviceAccountName: alloy
       containers:
         - name: alloy
           image: docker.io/grafana/alloy:v0.40.2
@@ -76,7 +76,7 @@ spec:
       volumes:
         - name: config
           configMap:
-            name: grafana-agent
+            name: alloy
   persistentVolumeClaimRetentionPolicy:
     whenDeleted: Delete
     whenScaled: Delete
diff --git a/operations/helm/tests/create-statefulset-autoscaling/grafana-agent/templates/hpa.yaml b/operations/helm/tests/create-statefulset-autoscaling/alloy/templates/hpa.yaml
similarity index 77%
rename from operations/helm/tests/create-statefulset-autoscaling/grafana-agent/templates/hpa.yaml
rename to operations/helm/tests/create-statefulset-autoscaling/alloy/templates/hpa.yaml
index a6674c612a..2dd9b01343 100644
--- a/operations/helm/tests/create-statefulset-autoscaling/grafana-agent/templates/hpa.yaml
+++ b/operations/helm/tests/create-statefulset-autoscaling/alloy/templates/hpa.yaml
@@ -1,20 +1,20 @@
 ---
-# Source: grafana-agent/templates/hpa.yaml
+# Source: alloy/templates/hpa.yaml
 apiVersion: autoscaling/v2
 kind: HorizontalPodAutoscaler
 metadata:
-  name: grafana-agent
+  name: alloy
   labels:
-    helm.sh/chart: grafana-agent
-    app.kubernetes.io/name: grafana-agent
-    app.kubernetes.io/instance: grafana-agent
+    helm.sh/chart: alloy
+    app.kubernetes.io/name: alloy
+    app.kubernetes.io/instance: alloy
     app.kubernetes.io/version: "vX.Y.Z"
     app.kubernetes.io/managed-by: Helm
 spec:
   scaleTargetRef:
     apiVersion: apps/v1
     kind: statefulset
-    name: grafana-agent
+    name: alloy
   minReplicas: 1
   maxReplicas: 5
   behavior:
diff --git a/operations/helm/tests/create-statefulset-autoscaling/alloy/templates/rbac.yaml b/operations/helm/tests/create-statefulset-autoscaling/alloy/templates/rbac.yaml
new file mode 100644
index 0000000000..53ca27544f
--- /dev/null
+++ b/operations/helm/tests/create-statefulset-autoscaling/alloy/templates/rbac.yaml
@@ -0,0 +1,117 @@
+---
+# Source: alloy/templates/rbac.yaml
+apiVersion: rbac.authorization.k8s.io/v1
+kind: ClusterRole
+metadata:
+  name: alloy
+  labels:
+    helm.sh/chart: alloy
+    app.kubernetes.io/name: alloy
+    app.kubernetes.io/instance: alloy
+    app.kubernetes.io/version: "vX.Y.Z"
+    app.kubernetes.io/managed-by: Helm
+rules:
+  # Rules which allow discovery.kubernetes to function.
+  - apiGroups:
+      - ""
+      - "discovery.k8s.io"
+      - "networking.k8s.io"
+    resources:
+      - endpoints
+      - endpointslices
+      - ingresses
+      - nodes
+      - nodes/proxy
+      - nodes/metrics
+      - pods
+      - services
+    verbs:
+      - get
+      - list
+      - watch
+  # Rules which allow loki.source.kubernetes and loki.source.podlogs to work.
+  - apiGroups:
+      - ""
+    resources:
+      - pods
+      - pods/log
+      - namespaces
+    verbs:
+      - get
+      - list
+      - watch
+  - apiGroups:
+      - "monitoring.grafana.com"
+    resources:
+      - podlogs
+    verbs:
+      - get
+      - list
+      - watch
+  # Rules which allow mimir.rules.kubernetes to work.
+  - apiGroups: ["monitoring.coreos.com"]
+    resources:
+      - prometheusrules
+    verbs:
+      - get
+      - list
+      - watch
+  - nonResourceURLs:
+      - /metrics
+    verbs:
+      - get
+  # Rules for prometheus.kubernetes.*
+  - apiGroups: ["monitoring.coreos.com"]
+    resources:
+      - podmonitors
+      - servicemonitors
+      - probes
+    verbs:
+      - get
+      - list
+      - watch
+  # Rules which allow eventhandler to work.
+  - apiGroups:
+      - ""
+    resources:
+      - events
+    verbs:
+      - get
+      - list
+      - watch
+  # needed for remote.kubernetes.*
+  - apiGroups: [""]
+    resources:
+      - "configmaps"
+      - "secrets"
+    verbs:
+      - get
+      - list
+      - watch
+  # needed for otelcol.processor.k8sattributes
+  - apiGroups: ["apps"]
+    resources: ["replicasets"]
+    verbs: ["get", "list", "watch"]
+  - apiGroups: ["extensions"]
+    resources: ["replicasets"]
+    verbs: ["get", "list", "watch"]
+---
+# Source: alloy/templates/rbac.yaml
+apiVersion: rbac.authorization.k8s.io/v1
+kind: ClusterRoleBinding
+metadata:
+  name: alloy
+  labels:
+    helm.sh/chart: alloy
+    app.kubernetes.io/name: alloy
+    app.kubernetes.io/instance: alloy
+    app.kubernetes.io/version: "vX.Y.Z"
+    app.kubernetes.io/managed-by: Helm
+roleRef:
+  apiGroup: rbac.authorization.k8s.io
+  kind: ClusterRole
+  name: alloy
+subjects:
+  - kind: ServiceAccount
+    name: alloy
+    namespace: default
diff --git a/operations/helm/tests/create-statefulset-autoscaling/alloy/templates/service.yaml b/operations/helm/tests/create-statefulset-autoscaling/alloy/templates/service.yaml
new file mode 100644
index 0000000000..34c0608fd3
--- /dev/null
+++ b/operations/helm/tests/create-statefulset-autoscaling/alloy/templates/service.yaml
@@ -0,0 +1,23 @@
+---
+# Source: alloy/templates/service.yaml
+apiVersion: v1
+kind: Service
+metadata:
+  name: alloy
+  labels:
+    helm.sh/chart: alloy
+    app.kubernetes.io/name: alloy
+    app.kubernetes.io/instance: alloy
+    app.kubernetes.io/version: "vX.Y.Z"
+    app.kubernetes.io/managed-by: Helm
+spec:
+  type: ClusterIP
+  selector:
+    app.kubernetes.io/name: alloy
+    app.kubernetes.io/instance: alloy
+  internalTrafficPolicy: Cluster
+  ports:
+    - name: http-metrics
+      port: 80
+      targetPort: 80
+      protocol: "TCP"
diff --git a/operations/helm/tests/create-statefulset-autoscaling/alloy/templates/serviceaccount.yaml b/operations/helm/tests/create-statefulset-autoscaling/alloy/templates/serviceaccount.yaml
new file mode 100644
index 0000000000..46df991efb
--- /dev/null
+++ b/operations/helm/tests/create-statefulset-autoscaling/alloy/templates/serviceaccount.yaml
@@ -0,0 +1,13 @@
+---
+# Source: alloy/templates/serviceaccount.yaml
+apiVersion: v1
+kind: ServiceAccount
+metadata:
+  name: alloy
+  namespace: default
+  labels:
+    helm.sh/chart: alloy
+    app.kubernetes.io/name: alloy
+    app.kubernetes.io/instance: alloy
+    app.kubernetes.io/version: "vX.Y.Z"
+    app.kubernetes.io/managed-by: Helm
diff --git a/operations/helm/tests/create-statefulset-autoscaling/grafana-agent/templates/configmap.yaml b/operations/helm/tests/create-statefulset-autoscaling/grafana-agent/templates/configmap.yaml
deleted file mode 100644
index 2fdc6f0117..0000000000
--- a/operations/helm/tests/create-statefulset-autoscaling/grafana-agent/templates/configmap.yaml
+++ /dev/null
@@ -1,42 +0,0 @@
----
-# Source: grafana-agent/templates/configmap.yaml
-apiVersion: v1
-kind: ConfigMap
-metadata:
-  name: grafana-agent
-  labels:
-    helm.sh/chart: grafana-agent
-    app.kubernetes.io/name: grafana-agent
-    app.kubernetes.io/instance: grafana-agent
-    app.kubernetes.io/version: "vX.Y.Z"
-    app.kubernetes.io/managed-by: Helm
-data:
-  config.river: |-
-    logging {
-      level = "info"
-      format = "logfmt"
-    }
-
-    discovery.kubernetes "pods" {
-      role = "pod"
-    }
-
-    discovery.kubernetes "nodes" {
-      role = "node"
-    }
-
-    discovery.kubernetes "services" {
-      role = "service"
-    }
-
-    discovery.kubernetes "endpoints" {
-      role = "endpoints"
-    }
-
-    discovery.kubernetes "endpointslices" {
-      role = "endpointslice"
-    }
-
-    discovery.kubernetes "ingresses" {
-      role = "ingress"
-    }
diff --git a/operations/helm/tests/create-statefulset-autoscaling/grafana-agent/templates/rbac.yaml b/operations/helm/tests/create-statefulset-autoscaling/grafana-agent/templates/rbac.yaml
deleted file mode 100644
index 3765583fb6..0000000000
--- a/operations/helm/tests/create-statefulset-autoscaling/grafana-agent/templates/rbac.yaml
+++ /dev/null
@@ -1,117 +0,0 @@
----
-# Source: grafana-agent/templates/rbac.yaml
-apiVersion: rbac.authorization.k8s.io/v1
-kind: ClusterRole
-metadata:
-  name: grafana-agent
-  labels:
-    helm.sh/chart: grafana-agent
-    app.kubernetes.io/name: grafana-agent
-    app.kubernetes.io/instance: grafana-agent
-    app.kubernetes.io/version: "vX.Y.Z"
-    app.kubernetes.io/managed-by: Helm
-rules:
-  # Rules which allow discovery.kubernetes to function.
-  - apiGroups:
-      - ""
-      - "discovery.k8s.io"
-      - "networking.k8s.io"
-    resources:
-      - endpoints
-      - endpointslices
-      - ingresses
-      - nodes
-      - nodes/proxy
-      - nodes/metrics
-      - pods
-      - services
-    verbs:
-      - get
-      - list
-      - watch
-  # Rules which allow loki.source.kubernetes and loki.source.podlogs to work.
-  - apiGroups:
-      - ""
-    resources:
-      - pods
-      - pods/log
-      - namespaces
-    verbs:
-      - get
-      - list
-      - watch
-  - apiGroups:
-      - "monitoring.grafana.com"
-    resources:
-      - podlogs
-    verbs:
-      - get
-      - list
-      - watch
-  # Rules which allow mimir.rules.kubernetes to work.
-  - apiGroups: ["monitoring.coreos.com"]
-    resources:
-      - prometheusrules
-    verbs:
-      - get
-      - list
-      - watch
-  - nonResourceURLs:
-      - /metrics
-    verbs:
-      - get
-  # Rules for prometheus.kubernetes.*
-  - apiGroups: ["monitoring.coreos.com"]
-    resources:
-      - podmonitors
-      - servicemonitors
-      - probes
-    verbs:
-      - get
-      - list
-      - watch
-  # Rules which allow eventhandler to work.
-  - apiGroups:
-      - ""
-    resources:
-      - events
-    verbs:
-      - get
-      - list
-      - watch
-  # needed for remote.kubernetes.*
-  - apiGroups: [""]
-    resources:
-      - "configmaps"
-      - "secrets"
-    verbs:
-      - get
-      - list
-      - watch
-  # needed for otelcol.processor.k8sattributes
-  - apiGroups: ["apps"]
-    resources: ["replicasets"]
-    verbs: ["get", "list", "watch"]
-  - apiGroups: ["extensions"]
-    resources: ["replicasets"]
-    verbs: ["get", "list", "watch"]
----
-# Source: grafana-agent/templates/rbac.yaml
-apiVersion: rbac.authorization.k8s.io/v1
-kind: ClusterRoleBinding
-metadata:
-  name: grafana-agent
-  labels:
-    helm.sh/chart: grafana-agent
-    app.kubernetes.io/name: grafana-agent
-    app.kubernetes.io/instance: grafana-agent
-    app.kubernetes.io/version: "vX.Y.Z"
-    app.kubernetes.io/managed-by: Helm
-roleRef:
-  apiGroup: rbac.authorization.k8s.io
-  kind: ClusterRole
-  name: grafana-agent
-subjects:
-  - kind: ServiceAccount
-    name: grafana-agent
-    namespace: default
diff --git a/operations/helm/tests/create-statefulset-autoscaling/grafana-agent/templates/service.yaml b/operations/helm/tests/create-statefulset-autoscaling/grafana-agent/templates/service.yaml
deleted file mode 100644
index c98f79428b..0000000000
--- a/operations/helm/tests/create-statefulset-autoscaling/grafana-agent/templates/service.yaml
+++ /dev/null
@@ -1,23 +0,0 @@
----
-# Source: grafana-agent/templates/service.yaml
-apiVersion: v1
-kind: Service
-metadata:
-  name: grafana-agent
-  labels:
-    helm.sh/chart: grafana-agent
-    app.kubernetes.io/name: grafana-agent
-    app.kubernetes.io/instance: grafana-agent
-    app.kubernetes.io/version: "vX.Y.Z"
-    app.kubernetes.io/managed-by: Helm
-spec:
-  type: ClusterIP
-  selector:
-    app.kubernetes.io/name: grafana-agent
-    app.kubernetes.io/instance: grafana-agent
-  internalTrafficPolicy: Cluster
-  ports:
-    - name: http-metrics
-      port: 80
-      targetPort: 80
-      protocol: "TCP"
diff --git a/operations/helm/tests/create-statefulset-autoscaling/grafana-agent/templates/serviceaccount.yaml b/operations/helm/tests/create-statefulset-autoscaling/grafana-agent/templates/serviceaccount.yaml
deleted file mode 100644
index 65d7e0df38..0000000000
--- a/operations/helm/tests/create-statefulset-autoscaling/grafana-agent/templates/serviceaccount.yaml
+++ /dev/null
@@ -1,13 +0,0 @@
----
-# Source: grafana-agent/templates/serviceaccount.yaml
-apiVersion: v1
-kind: ServiceAccount
-metadata:
-  name: grafana-agent
-  namespace: default
-  labels:
-    helm.sh/chart: grafana-agent
-    app.kubernetes.io/name: grafana-agent
-    app.kubernetes.io/instance: grafana-agent
-    app.kubernetes.io/version: "vX.Y.Z"
-    app.kubernetes.io/managed-by: Helm
diff --git a/operations/helm/tests/create-statefulset/alloy/templates/configmap.yaml b/operations/helm/tests/create-statefulset/alloy/templates/configmap.yaml
new file mode 100644
index 0000000000..c9ae77aa1e
--- /dev/null
+++ b/operations/helm/tests/create-statefulset/alloy/templates/configmap.yaml
@@ -0,0 +1,42 @@
+---
+# Source: alloy/templates/configmap.yaml
+apiVersion: v1
+kind: ConfigMap
+metadata:
+  name: alloy
+  labels:
+    helm.sh/chart: alloy
+    app.kubernetes.io/name: alloy
+    app.kubernetes.io/instance: alloy
+    app.kubernetes.io/version: "vX.Y.Z"
+    app.kubernetes.io/managed-by: Helm
+data:
+  config.river: |-
+    logging {
+      level = "info"
+      format = "logfmt"
+    }
+
+    discovery.kubernetes "pods" {
+      role = "pod"
+    }
+
+    discovery.kubernetes "nodes" {
+      role = "node"
+    }
+
+    discovery.kubernetes "services" {
+      role = "service"
+    }
+
+    discovery.kubernetes "endpoints" {
+      role = "endpoints"
+    }
+
+    discovery.kubernetes "endpointslices" {
+      role = "endpointslice"
+    }
+
+    discovery.kubernetes "ingresses" {
+      role = "ingress"
+    }
diff --git a/operations/helm/tests/create-statefulset/grafana-agent/templates/controllers/statefulset.yaml b/operations/helm/tests/create-statefulset/alloy/templates/controllers/statefulset.yaml
similarity index 77%
rename from operations/helm/tests/create-statefulset/grafana-agent/templates/controllers/statefulset.yaml
rename to operations/helm/tests/create-statefulset/alloy/templates/controllers/statefulset.yaml
index 823bd087b6..1ee176d9d9 100644
--- a/operations/helm/tests/create-statefulset/grafana-agent/templates/controllers/statefulset.yaml
+++ b/operations/helm/tests/create-statefulset/alloy/templates/controllers/statefulset.yaml
@@ -1,33 +1,33 @@
 ---
-# Source: grafana-agent/templates/controllers/statefulset.yaml
+# Source: alloy/templates/controllers/statefulset.yaml
 apiVersion: apps/v1
 kind: StatefulSet
 metadata:
-  name: grafana-agent
+  name: alloy
   labels:
-    helm.sh/chart: grafana-agent
-    app.kubernetes.io/name: grafana-agent
-    app.kubernetes.io/instance: grafana-agent
+    helm.sh/chart: alloy
+    app.kubernetes.io/name: alloy
+    app.kubernetes.io/instance: alloy
     app.kubernetes.io/version: "vX.Y.Z"
     app.kubernetes.io/managed-by: Helm
 spec:
   replicas: 1
   podManagementPolicy: Parallel
   minReadySeconds: 10
-  serviceName: grafana-agent
+  serviceName: alloy
   selector:
     matchLabels:
-      app.kubernetes.io/name: grafana-agent
-      app.kubernetes.io/instance: grafana-agent
+      app.kubernetes.io/name: alloy
+      app.kubernetes.io/instance: alloy
   template:
     metadata:
       annotations:
         kubectl.kubernetes.io/default-container: alloy
       labels:
-        app.kubernetes.io/name: grafana-agent
-        app.kubernetes.io/instance: grafana-agent
+        app.kubernetes.io/name: alloy
+        app.kubernetes.io/instance: alloy
     spec:
-      serviceAccountName: grafana-agent
+      serviceAccountName: alloy
       containers:
         - name: alloy
           image: docker.io/grafana/alloy:v0.40.2
@@ -74,4 +74,4 @@ spec:
       volumes:
         - name: config
           configMap:
-            name: grafana-agent
+            name: alloy
diff --git a/operations/helm/tests/create-statefulset/alloy/templates/rbac.yaml b/operations/helm/tests/create-statefulset/alloy/templates/rbac.yaml
new file mode 100644
index 0000000000..53ca27544f
--- /dev/null
+++ b/operations/helm/tests/create-statefulset/alloy/templates/rbac.yaml
@@ -0,0 +1,117 @@
+---
+# Source: alloy/templates/rbac.yaml
+apiVersion: rbac.authorization.k8s.io/v1
+kind: ClusterRole
+metadata:
+  name: alloy
+  labels:
+    helm.sh/chart: alloy
+    app.kubernetes.io/name: alloy
+    app.kubernetes.io/instance: alloy
+    app.kubernetes.io/version: "vX.Y.Z"
+    app.kubernetes.io/managed-by: Helm
+rules:
+  # Rules which allow discovery.kubernetes to function.
+  - apiGroups:
+      - ""
+      - "discovery.k8s.io"
+      - "networking.k8s.io"
+    resources:
+      - endpoints
+      - endpointslices
+      - ingresses
+      - nodes
+      - nodes/proxy
+      - nodes/metrics
+      - pods
+      - services
+    verbs:
+      - get
+      - list
+      - watch
+  # Rules which allow loki.source.kubernetes and loki.source.podlogs to work.
+  - apiGroups:
+      - ""
+    resources:
+      - pods
+      - pods/log
+      - namespaces
+    verbs:
+      - get
+      - list
+      - watch
+  - apiGroups:
+      - "monitoring.grafana.com"
+    resources:
+      - podlogs
+    verbs:
+      - get
+      - list
+      - watch
+  # Rules which allow mimir.rules.kubernetes to work.
+  - apiGroups: ["monitoring.coreos.com"]
+    resources:
+      - prometheusrules
+    verbs:
+      - get
+      - list
+      - watch
+  - nonResourceURLs:
+      - /metrics
+    verbs:
+      - get
+  # Rules for prometheus.kubernetes.*
+  - apiGroups: ["monitoring.coreos.com"]
+    resources:
+      - podmonitors
+      - servicemonitors
+      - probes
+    verbs:
+      - get
+      - list
+      - watch
+  # Rules which allow eventhandler to work.
+  - apiGroups:
+      - ""
+    resources:
+      - events
+    verbs:
+      - get
+      - list
+      - watch
+  # needed for remote.kubernetes.*
+  - apiGroups: [""]
+    resources:
+      - "configmaps"
+      - "secrets"
+    verbs:
+      - get
+      - list
+      - watch
+  # needed for otelcol.processor.k8sattributes
+  - apiGroups: ["apps"]
+    resources: ["replicasets"]
+    verbs: ["get", "list", "watch"]
+  - apiGroups: ["extensions"]
+    resources: ["replicasets"]
+    verbs: ["get", "list", "watch"]
+---
+# Source: alloy/templates/rbac.yaml
+apiVersion: rbac.authorization.k8s.io/v1
+kind: ClusterRoleBinding
+metadata:
+  name: alloy
+  labels:
+    helm.sh/chart: alloy
+    app.kubernetes.io/name: alloy
+    app.kubernetes.io/instance: alloy
+    app.kubernetes.io/version: "vX.Y.Z"
+    app.kubernetes.io/managed-by: Helm
+roleRef:
+  apiGroup: rbac.authorization.k8s.io
+  kind: ClusterRole
+  name: alloy
+subjects:
+  - kind: ServiceAccount
+    name: alloy
+    namespace: default
diff --git a/operations/helm/tests/create-statefulset/alloy/templates/service.yaml b/operations/helm/tests/create-statefulset/alloy/templates/service.yaml
new file mode 100644
index 0000000000..34c0608fd3
--- /dev/null
+++ b/operations/helm/tests/create-statefulset/alloy/templates/service.yaml
@@ -0,0 +1,23 @@
+---
+# Source: alloy/templates/service.yaml
+apiVersion: v1
+kind: Service
+metadata:
+  name: alloy
+  labels:
+    helm.sh/chart: alloy
+    app.kubernetes.io/name: alloy
+    app.kubernetes.io/instance: alloy
+    app.kubernetes.io/version: "vX.Y.Z"
+    app.kubernetes.io/managed-by: Helm
+spec:
+  type: ClusterIP
+  selector:
+    app.kubernetes.io/name: alloy
+    app.kubernetes.io/instance: alloy
+  internalTrafficPolicy: Cluster
+  ports:
+    - name: http-metrics
+      port: 80
+      targetPort: 80
+      protocol: "TCP"
diff --git a/operations/helm/tests/create-statefulset/alloy/templates/serviceaccount.yaml b/operations/helm/tests/create-statefulset/alloy/templates/serviceaccount.yaml
new file mode 100644
index 0000000000..46df991efb
--- /dev/null
+++ b/operations/helm/tests/create-statefulset/alloy/templates/serviceaccount.yaml
@@ -0,0 +1,13 @@
+---
+# Source: alloy/templates/serviceaccount.yaml
+apiVersion: v1
+kind: ServiceAccount
+metadata:
+  name: alloy
+  namespace: default
+  labels:
+    helm.sh/chart: alloy
+    app.kubernetes.io/name: alloy
+    app.kubernetes.io/instance: alloy
+    app.kubernetes.io/version: "vX.Y.Z"
+    app.kubernetes.io/managed-by: Helm
diff --git a/operations/helm/tests/create-statefulset/grafana-agent/templates/configmap.yaml b/operations/helm/tests/create-statefulset/grafana-agent/templates/configmap.yaml
deleted file mode 100644
index 2fdc6f0117..0000000000
--- a/operations/helm/tests/create-statefulset/grafana-agent/templates/configmap.yaml
+++ /dev/null
@@ -1,42 +0,0 @@
----
-# Source: grafana-agent/templates/configmap.yaml
-apiVersion: v1
-kind: ConfigMap
-metadata:
-  name: grafana-agent
-  labels:
-    helm.sh/chart: grafana-agent
-    app.kubernetes.io/name: grafana-agent
-    app.kubernetes.io/instance: grafana-agent
-    app.kubernetes.io/version: "vX.Y.Z"
-    app.kubernetes.io/managed-by: Helm
-data:
-  config.river: |-
-    logging {
-      level = "info"
-      format = "logfmt"
-    }
-
-    discovery.kubernetes "pods" {
-      role = "pod"
-    }
-
-    discovery.kubernetes "nodes" {
-      role = "node"
-    }
-
-    discovery.kubernetes "services" {
-      role = "service"
-    }
-
-    discovery.kubernetes "endpoints" {
-      role = "endpoints"
-    }
-
-    discovery.kubernetes "endpointslices" {
-      role = "endpointslice"
-    }
-
-    discovery.kubernetes "ingresses" {
-      role = "ingress"
-    }
diff --git a/operations/helm/tests/create-statefulset/grafana-agent/templates/rbac.yaml b/operations/helm/tests/create-statefulset/grafana-agent/templates/rbac.yaml
deleted file mode 100644
index 3765583fb6..0000000000
--- a/operations/helm/tests/create-statefulset/grafana-agent/templates/rbac.yaml
+++ /dev/null
@@ -1,117 +0,0 @@
----
-# Source: grafana-agent/templates/rbac.yaml
-apiVersion: rbac.authorization.k8s.io/v1
-kind: ClusterRole
-metadata:
-  name: grafana-agent
-  labels:
-    helm.sh/chart: grafana-agent
-    app.kubernetes.io/name: grafana-agent
-    app.kubernetes.io/instance: grafana-agent
-    app.kubernetes.io/version: "vX.Y.Z"
-    app.kubernetes.io/managed-by: Helm
-rules:
-  # Rules which allow discovery.kubernetes to function.
-  - apiGroups:
-      - ""
-      - "discovery.k8s.io"
-      - "networking.k8s.io"
-    resources:
-      - endpoints
-      - endpointslices
-      - ingresses
-      - nodes
-      - nodes/proxy
-      - nodes/metrics
-      - pods
-      - services
-    verbs:
-      - get
-      - list
-      - watch
-  # Rules which allow loki.source.kubernetes and loki.source.podlogs to work.
-  - apiGroups:
-      - ""
-    resources:
-      - pods
-      - pods/log
-      - namespaces
-    verbs:
-      - get
-      - list
-      - watch
-  - apiGroups:
-      - "monitoring.grafana.com"
-    resources:
-      - podlogs
-    verbs:
-      - get
-      - list
-      - watch
-  # Rules which allow mimir.rules.kubernetes to work.
-  - apiGroups: ["monitoring.coreos.com"]
-    resources:
-      - prometheusrules
-    verbs:
-      - get
-      - list
-      - watch
-  - nonResourceURLs:
-      - /metrics
-    verbs:
-      - get
-  # Rules for prometheus.kubernetes.*
-  - apiGroups: ["monitoring.coreos.com"]
-    resources:
-      - podmonitors
-      - servicemonitors
-      - probes
-    verbs:
-      - get
-      - list
-      - watch
-  # Rules which allow eventhandler to work.
- - apiGroups: - - "" - resources: - - events - verbs: - - get - - list - - watch - # needed for remote.kubernetes.* - - apiGroups: [""] - resources: - - "configmaps" - - "secrets" - verbs: - - get - - list - - watch - # needed for otelcol.processor.k8sattributes - - apiGroups: ["apps"] - resources: ["replicasets"] - verbs: ["get", "list", "watch"] - - apiGroups: ["extensions"] - resources: ["replicasets"] - verbs: ["get", "list", "watch"] ---- -# Source: grafana-agent/templates/rbac.yaml -apiVersion: rbac.authorization.k8s.io/v1 -kind: ClusterRoleBinding -metadata: - name: grafana-agent - labels: - helm.sh/chart: grafana-agent - app.kubernetes.io/name: grafana-agent - app.kubernetes.io/instance: grafana-agent - app.kubernetes.io/version: "vX.Y.Z" - app.kubernetes.io/managed-by: Helm -roleRef: - apiGroup: rbac.authorization.k8s.io - kind: ClusterRole - name: grafana-agent -subjects: - - kind: ServiceAccount - name: grafana-agent - namespace: default diff --git a/operations/helm/tests/create-statefulset/grafana-agent/templates/service.yaml b/operations/helm/tests/create-statefulset/grafana-agent/templates/service.yaml deleted file mode 100644 index c98f79428b..0000000000 --- a/operations/helm/tests/create-statefulset/grafana-agent/templates/service.yaml +++ /dev/null @@ -1,23 +0,0 @@ ---- -# Source: grafana-agent/templates/service.yaml -apiVersion: v1 -kind: Service -metadata: - name: grafana-agent - labels: - helm.sh/chart: grafana-agent - app.kubernetes.io/name: grafana-agent - app.kubernetes.io/instance: grafana-agent - app.kubernetes.io/version: "vX.Y.Z" - app.kubernetes.io/managed-by: Helm -spec: - type: ClusterIP - selector: - app.kubernetes.io/name: grafana-agent - app.kubernetes.io/instance: grafana-agent - internalTrafficPolicy: Cluster - ports: - - name: http-metrics - port: 80 - targetPort: 80 - protocol: "TCP" diff --git a/operations/helm/tests/create-statefulset/grafana-agent/templates/serviceaccount.yaml b/operations/helm/tests/create-statefulset/grafana-agent/templates/serviceaccount.yaml deleted file mode 100644 index 65d7e0df38..0000000000 --- a/operations/helm/tests/create-statefulset/grafana-agent/templates/serviceaccount.yaml +++ /dev/null @@ -1,13 +0,0 @@ ---- -# Source: grafana-agent/templates/serviceaccount.yaml -apiVersion: v1 -kind: ServiceAccount -metadata: - name: grafana-agent - namespace: default - labels: - helm.sh/chart: grafana-agent - app.kubernetes.io/name: grafana-agent - app.kubernetes.io/instance: grafana-agent - app.kubernetes.io/version: "vX.Y.Z" - app.kubernetes.io/managed-by: Helm diff --git a/operations/helm/tests/custom-config/grafana-agent/templates/configmap.yaml b/operations/helm/tests/custom-config/alloy/templates/configmap.yaml similarity index 60% rename from operations/helm/tests/custom-config/grafana-agent/templates/configmap.yaml rename to operations/helm/tests/custom-config/alloy/templates/configmap.yaml index 1b7aeff62f..17d19ad116 100644 --- a/operations/helm/tests/custom-config/grafana-agent/templates/configmap.yaml +++ b/operations/helm/tests/custom-config/alloy/templates/configmap.yaml @@ -1,13 +1,13 @@ --- -# Source: grafana-agent/templates/configmap.yaml +# Source: alloy/templates/configmap.yaml apiVersion: v1 kind: ConfigMap metadata: - name: grafana-agent + name: alloy labels: - helm.sh/chart: grafana-agent - app.kubernetes.io/name: grafana-agent - app.kubernetes.io/instance: grafana-agent + helm.sh/chart: alloy + app.kubernetes.io/name: alloy + app.kubernetes.io/instance: alloy app.kubernetes.io/version: "vX.Y.Z" 
app.kubernetes.io/managed-by: Helm data: diff --git a/operations/helm/tests/default-values/grafana-agent/templates/controllers/daemonset.yaml b/operations/helm/tests/custom-config/alloy/templates/controllers/daemonset.yaml similarity index 78% rename from operations/helm/tests/default-values/grafana-agent/templates/controllers/daemonset.yaml rename to operations/helm/tests/custom-config/alloy/templates/controllers/daemonset.yaml index 97d9cbfb9b..fdd2df0969 100644 --- a/operations/helm/tests/default-values/grafana-agent/templates/controllers/daemonset.yaml +++ b/operations/helm/tests/custom-config/alloy/templates/controllers/daemonset.yaml @@ -1,30 +1,30 @@ --- -# Source: grafana-agent/templates/controllers/daemonset.yaml +# Source: alloy/templates/controllers/daemonset.yaml apiVersion: apps/v1 kind: DaemonSet metadata: - name: grafana-agent + name: alloy labels: - helm.sh/chart: grafana-agent - app.kubernetes.io/name: grafana-agent - app.kubernetes.io/instance: grafana-agent + helm.sh/chart: alloy + app.kubernetes.io/name: alloy + app.kubernetes.io/instance: alloy app.kubernetes.io/version: "vX.Y.Z" app.kubernetes.io/managed-by: Helm spec: minReadySeconds: 10 selector: matchLabels: - app.kubernetes.io/name: grafana-agent - app.kubernetes.io/instance: grafana-agent + app.kubernetes.io/name: alloy + app.kubernetes.io/instance: alloy template: metadata: annotations: kubectl.kubernetes.io/default-container: alloy labels: - app.kubernetes.io/name: grafana-agent - app.kubernetes.io/instance: grafana-agent + app.kubernetes.io/name: alloy + app.kubernetes.io/instance: alloy spec: - serviceAccountName: grafana-agent + serviceAccountName: alloy containers: - name: alloy image: docker.io/grafana/alloy:v0.40.2 @@ -71,4 +71,4 @@ spec: volumes: - name: config configMap: - name: grafana-agent + name: alloy diff --git a/operations/helm/tests/custom-config/alloy/templates/rbac.yaml b/operations/helm/tests/custom-config/alloy/templates/rbac.yaml new file mode 100644 index 0000000000..53ca27544f --- /dev/null +++ b/operations/helm/tests/custom-config/alloy/templates/rbac.yaml @@ -0,0 +1,117 @@ +--- +# Source: alloy/templates/rbac.yaml +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRole +metadata: + name: alloy + labels: + helm.sh/chart: alloy + app.kubernetes.io/name: alloy + app.kubernetes.io/instance: alloy + app.kubernetes.io/version: "vX.Y.Z" + app.kubernetes.io/managed-by: Helm +rules: + # Rules which allow discovery.kubernetes to function. + - apiGroups: + - "" + - "discovery.k8s.io" + - "networking.k8s.io" + resources: + - endpoints + - endpointslices + - ingresses + - nodes + - nodes/proxy + - nodes/metrics + - pods + - services + verbs: + - get + - list + - watch + # Rules which allow loki.source.kubernetes and loki.source.podlogs to work. + - apiGroups: + - "" + resources: + - pods + - pods/log + - namespaces + verbs: + - get + - list + - watch + - apiGroups: + - "monitoring.grafana.com" + resources: + - podlogs + verbs: + - get + - list + - watch + # Rules which allow mimir.rules.kubernetes to work. + - apiGroups: ["monitoring.coreos.com"] + resources: + - prometheusrules + verbs: + - get + - list + - watch + - nonResourceURLs: + - /metrics + verbs: + - get + # Rules for prometheus.kubernetes.* + - apiGroups: ["monitoring.coreos.com"] + resources: + - podmonitors + - servicemonitors + - probes + verbs: + - get + - list + - watch + # Rules which allow eventhandler to work. 
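+  # In flow mode, the events access below also backs
+  # loki.source.kubernetes_events. A minimal sketch; the loki.write
+  # component is assumed, not part of this chart's defaults:
+  #
+  #   loki.source.kubernetes_events "cluster" {
+  #     forward_to = [loki.write.default.receiver]
+  #   }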
+ - apiGroups: + - "" + resources: + - events + verbs: + - get + - list + - watch + # needed for remote.kubernetes.* + - apiGroups: [""] + resources: + - "configmaps" + - "secrets" + verbs: + - get + - list + - watch + # needed for otelcol.processor.k8sattributes + - apiGroups: ["apps"] + resources: ["replicasets"] + verbs: ["get", "list", "watch"] + - apiGroups: ["extensions"] + resources: ["replicasets"] + verbs: ["get", "list", "watch"] +--- +# Source: alloy/templates/rbac.yaml +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRoleBinding +metadata: + name: alloy + labels: + helm.sh/chart: alloy + app.kubernetes.io/name: alloy + app.kubernetes.io/instance: alloy + app.kubernetes.io/version: "vX.Y.Z" + app.kubernetes.io/managed-by: Helm +roleRef: + apiGroup: rbac.authorization.k8s.io + kind: ClusterRole + name: alloy +subjects: + - kind: ServiceAccount + name: alloy + namespace: default diff --git a/operations/helm/tests/custom-config/alloy/templates/service.yaml b/operations/helm/tests/custom-config/alloy/templates/service.yaml new file mode 100644 index 0000000000..34c0608fd3 --- /dev/null +++ b/operations/helm/tests/custom-config/alloy/templates/service.yaml @@ -0,0 +1,23 @@ +--- +# Source: alloy/templates/service.yaml +apiVersion: v1 +kind: Service +metadata: + name: alloy + labels: + helm.sh/chart: alloy + app.kubernetes.io/name: alloy + app.kubernetes.io/instance: alloy + app.kubernetes.io/version: "vX.Y.Z" + app.kubernetes.io/managed-by: Helm +spec: + type: ClusterIP + selector: + app.kubernetes.io/name: alloy + app.kubernetes.io/instance: alloy + internalTrafficPolicy: Cluster + ports: + - name: http-metrics + port: 80 + targetPort: 80 + protocol: "TCP" diff --git a/operations/helm/tests/custom-config/alloy/templates/serviceaccount.yaml b/operations/helm/tests/custom-config/alloy/templates/serviceaccount.yaml new file mode 100644 index 0000000000..46df991efb --- /dev/null +++ b/operations/helm/tests/custom-config/alloy/templates/serviceaccount.yaml @@ -0,0 +1,13 @@ +--- +# Source: alloy/templates/serviceaccount.yaml +apiVersion: v1 +kind: ServiceAccount +metadata: + name: alloy + namespace: default + labels: + helm.sh/chart: alloy + app.kubernetes.io/name: alloy + app.kubernetes.io/instance: alloy + app.kubernetes.io/version: "vX.Y.Z" + app.kubernetes.io/managed-by: Helm diff --git a/operations/helm/tests/custom-config/grafana-agent/templates/rbac.yaml b/operations/helm/tests/custom-config/grafana-agent/templates/rbac.yaml deleted file mode 100644 index 3765583fb6..0000000000 --- a/operations/helm/tests/custom-config/grafana-agent/templates/rbac.yaml +++ /dev/null @@ -1,117 +0,0 @@ ---- -# Source: grafana-agent/templates/rbac.yaml -apiVersion: rbac.authorization.k8s.io/v1 -kind: ClusterRole -metadata: - name: grafana-agent - labels: - helm.sh/chart: grafana-agent - app.kubernetes.io/name: grafana-agent - app.kubernetes.io/instance: grafana-agent - app.kubernetes.io/version: "vX.Y.Z" - app.kubernetes.io/managed-by: Helm -rules: - # Rules which allow discovery.kubernetes to function. - - apiGroups: - - "" - - "discovery.k8s.io" - - "networking.k8s.io" - resources: - - endpoints - - endpointslices - - ingresses - - nodes - - nodes/proxy - - nodes/metrics - - pods - - services - verbs: - - get - - list - - watch - # Rules which allow loki.source.kubernetes and loki.source.podlogs to work. 
- - apiGroups: - - "" - resources: - - pods - - pods/log - - namespaces - verbs: - - get - - list - - watch - - apiGroups: - - "monitoring.grafana.com" - resources: - - podlogs - verbs: - - get - - list - - watch - # Rules which allow mimir.rules.kubernetes to work. - - apiGroups: ["monitoring.coreos.com"] - resources: - - prometheusrules - verbs: - - get - - list - - watch - - nonResourceURLs: - - /metrics - verbs: - - get - # Rules for prometheus.kubernetes.* - - apiGroups: ["monitoring.coreos.com"] - resources: - - podmonitors - - servicemonitors - - probes - verbs: - - get - - list - - watch - # Rules which allow eventhandler to work. - - apiGroups: - - "" - resources: - - events - verbs: - - get - - list - - watch - # needed for remote.kubernetes.* - - apiGroups: [""] - resources: - - "configmaps" - - "secrets" - verbs: - - get - - list - - watch - # needed for otelcol.processor.k8sattributes - - apiGroups: ["apps"] - resources: ["replicasets"] - verbs: ["get", "list", "watch"] - - apiGroups: ["extensions"] - resources: ["replicasets"] - verbs: ["get", "list", "watch"] ---- -# Source: grafana-agent/templates/rbac.yaml -apiVersion: rbac.authorization.k8s.io/v1 -kind: ClusterRoleBinding -metadata: - name: grafana-agent - labels: - helm.sh/chart: grafana-agent - app.kubernetes.io/name: grafana-agent - app.kubernetes.io/instance: grafana-agent - app.kubernetes.io/version: "vX.Y.Z" - app.kubernetes.io/managed-by: Helm -roleRef: - apiGroup: rbac.authorization.k8s.io - kind: ClusterRole - name: grafana-agent -subjects: - - kind: ServiceAccount - name: grafana-agent - namespace: default diff --git a/operations/helm/tests/custom-config/grafana-agent/templates/service.yaml b/operations/helm/tests/custom-config/grafana-agent/templates/service.yaml deleted file mode 100644 index c98f79428b..0000000000 --- a/operations/helm/tests/custom-config/grafana-agent/templates/service.yaml +++ /dev/null @@ -1,23 +0,0 @@ ---- -# Source: grafana-agent/templates/service.yaml -apiVersion: v1 -kind: Service -metadata: - name: grafana-agent - labels: - helm.sh/chart: grafana-agent - app.kubernetes.io/name: grafana-agent - app.kubernetes.io/instance: grafana-agent - app.kubernetes.io/version: "vX.Y.Z" - app.kubernetes.io/managed-by: Helm -spec: - type: ClusterIP - selector: - app.kubernetes.io/name: grafana-agent - app.kubernetes.io/instance: grafana-agent - internalTrafficPolicy: Cluster - ports: - - name: http-metrics - port: 80 - targetPort: 80 - protocol: "TCP" diff --git a/operations/helm/tests/custom-config/grafana-agent/templates/serviceaccount.yaml b/operations/helm/tests/custom-config/grafana-agent/templates/serviceaccount.yaml deleted file mode 100644 index 65d7e0df38..0000000000 --- a/operations/helm/tests/custom-config/grafana-agent/templates/serviceaccount.yaml +++ /dev/null @@ -1,13 +0,0 @@ ---- -# Source: grafana-agent/templates/serviceaccount.yaml -apiVersion: v1 -kind: ServiceAccount -metadata: - name: grafana-agent - namespace: default - labels: - helm.sh/chart: grafana-agent - app.kubernetes.io/name: grafana-agent - app.kubernetes.io/instance: grafana-agent - app.kubernetes.io/version: "vX.Y.Z" - app.kubernetes.io/managed-by: Helm diff --git a/operations/helm/tests/default-values/alloy/templates/configmap.yaml b/operations/helm/tests/default-values/alloy/templates/configmap.yaml new file mode 100644 index 0000000000..c9ae77aa1e --- /dev/null +++ b/operations/helm/tests/default-values/alloy/templates/configmap.yaml @@ -0,0 +1,42 @@ +--- +# Source: alloy/templates/configmap.yaml +apiVersion: v1 
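+# The config.river payload below only declares discovery.kubernetes
+# components; nothing consumes their targets by default. A hedged sketch
+# of a typical consumer (the remote_write component is assumed, not part
+# of these defaults):
+#
+#   prometheus.scrape "pods" {
+#     targets    = discovery.kubernetes.pods.targets
+#     forward_to = [prometheus.remote_write.default.receiver]
+#   }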
+kind: ConfigMap +metadata: + name: alloy + labels: + helm.sh/chart: alloy + app.kubernetes.io/name: alloy + app.kubernetes.io/instance: alloy + app.kubernetes.io/version: "vX.Y.Z" + app.kubernetes.io/managed-by: Helm +data: + config.river: |- + logging { + level = "info" + format = "logfmt" + } + + discovery.kubernetes "pods" { + role = "pod" + } + + discovery.kubernetes "nodes" { + role = "node" + } + + discovery.kubernetes "services" { + role = "service" + } + + discovery.kubernetes "endpoints" { + role = "endpoints" + } + + discovery.kubernetes "endpointslices" { + role = "endpointslice" + } + + discovery.kubernetes "ingresses" { + role = "ingress" + } diff --git a/operations/helm/tests/custom-config/grafana-agent/templates/controllers/daemonset.yaml b/operations/helm/tests/default-values/alloy/templates/controllers/daemonset.yaml similarity index 78% rename from operations/helm/tests/custom-config/grafana-agent/templates/controllers/daemonset.yaml rename to operations/helm/tests/default-values/alloy/templates/controllers/daemonset.yaml index 97d9cbfb9b..fdd2df0969 100644 --- a/operations/helm/tests/custom-config/grafana-agent/templates/controllers/daemonset.yaml +++ b/operations/helm/tests/default-values/alloy/templates/controllers/daemonset.yaml @@ -1,30 +1,30 @@ --- -# Source: grafana-agent/templates/controllers/daemonset.yaml +# Source: alloy/templates/controllers/daemonset.yaml apiVersion: apps/v1 kind: DaemonSet metadata: - name: grafana-agent + name: alloy labels: - helm.sh/chart: grafana-agent - app.kubernetes.io/name: grafana-agent - app.kubernetes.io/instance: grafana-agent + helm.sh/chart: alloy + app.kubernetes.io/name: alloy + app.kubernetes.io/instance: alloy app.kubernetes.io/version: "vX.Y.Z" app.kubernetes.io/managed-by: Helm spec: minReadySeconds: 10 selector: matchLabels: - app.kubernetes.io/name: grafana-agent - app.kubernetes.io/instance: grafana-agent + app.kubernetes.io/name: alloy + app.kubernetes.io/instance: alloy template: metadata: annotations: kubectl.kubernetes.io/default-container: alloy labels: - app.kubernetes.io/name: grafana-agent - app.kubernetes.io/instance: grafana-agent + app.kubernetes.io/name: alloy + app.kubernetes.io/instance: alloy spec: - serviceAccountName: grafana-agent + serviceAccountName: alloy containers: - name: alloy image: docker.io/grafana/alloy:v0.40.2 @@ -71,4 +71,4 @@ spec: volumes: - name: config configMap: - name: grafana-agent + name: alloy diff --git a/operations/helm/tests/default-values/alloy/templates/rbac.yaml b/operations/helm/tests/default-values/alloy/templates/rbac.yaml new file mode 100644 index 0000000000..53ca27544f --- /dev/null +++ b/operations/helm/tests/default-values/alloy/templates/rbac.yaml @@ -0,0 +1,117 @@ +--- +# Source: alloy/templates/rbac.yaml +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRole +metadata: + name: alloy + labels: + helm.sh/chart: alloy + app.kubernetes.io/name: alloy + app.kubernetes.io/instance: alloy + app.kubernetes.io/version: "vX.Y.Z" + app.kubernetes.io/managed-by: Helm +rules: + # Rules which allow discovery.kubernetes to function. + - apiGroups: + - "" + - "discovery.k8s.io" + - "networking.k8s.io" + resources: + - endpoints + - endpointslices + - ingresses + - nodes + - nodes/proxy + - nodes/metrics + - pods + - services + verbs: + - get + - list + - watch + # Rules which allow loki.source.kubernetes and loki.source.podlogs to work. 
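+  # A minimal River sketch of a component relying on the pods/log access
+  # granted below (the loki.write component is assumed, not configured by
+  # this chart):
+  #
+  #   loki.source.kubernetes "pods" {
+  #     targets    = discovery.kubernetes.pods.targets
+  #     forward_to = [loki.write.default.receiver]
+  #   }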
+ - apiGroups: + - "" + resources: + - pods + - pods/log + - namespaces + verbs: + - get + - list + - watch + - apiGroups: + - "monitoring.grafana.com" + resources: + - podlogs + verbs: + - get + - list + - watch + # Rules which allow mimir.rules.kubernetes to work. + - apiGroups: ["monitoring.coreos.com"] + resources: + - prometheusrules + verbs: + - get + - list + - watch + - nonResourceURLs: + - /metrics + verbs: + - get + # Rules for prometheus.kubernetes.* + - apiGroups: ["monitoring.coreos.com"] + resources: + - podmonitors + - servicemonitors + - probes + verbs: + - get + - list + - watch + # Rules which allow eventhandler to work. + - apiGroups: + - "" + resources: + - events + verbs: + - get + - list + - watch + # needed for remote.kubernetes.* + - apiGroups: [""] + resources: + - "configmaps" + - "secrets" + verbs: + - get + - list + - watch + # needed for otelcol.processor.k8sattributes + - apiGroups: ["apps"] + resources: ["replicasets"] + verbs: ["get", "list", "watch"] + - apiGroups: ["extensions"] + resources: ["replicasets"] + verbs: ["get", "list", "watch"] +--- +# Source: alloy/templates/rbac.yaml +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRoleBinding +metadata: + name: alloy + labels: + helm.sh/chart: alloy + app.kubernetes.io/name: alloy + app.kubernetes.io/instance: alloy + app.kubernetes.io/version: "vX.Y.Z" + app.kubernetes.io/managed-by: Helm +roleRef: + apiGroup: rbac.authorization.k8s.io + kind: ClusterRole + name: alloy +subjects: + - kind: ServiceAccount + name: alloy + namespace: default diff --git a/operations/helm/tests/default-values/alloy/templates/service.yaml b/operations/helm/tests/default-values/alloy/templates/service.yaml new file mode 100644 index 0000000000..34c0608fd3 --- /dev/null +++ b/operations/helm/tests/default-values/alloy/templates/service.yaml @@ -0,0 +1,23 @@ +--- +# Source: alloy/templates/service.yaml +apiVersion: v1 +kind: Service +metadata: + name: alloy + labels: + helm.sh/chart: alloy + app.kubernetes.io/name: alloy + app.kubernetes.io/instance: alloy + app.kubernetes.io/version: "vX.Y.Z" + app.kubernetes.io/managed-by: Helm +spec: + type: ClusterIP + selector: + app.kubernetes.io/name: alloy + app.kubernetes.io/instance: alloy + internalTrafficPolicy: Cluster + ports: + - name: http-metrics + port: 80 + targetPort: 80 + protocol: "TCP" diff --git a/operations/helm/tests/default-values/alloy/templates/serviceaccount.yaml b/operations/helm/tests/default-values/alloy/templates/serviceaccount.yaml new file mode 100644 index 0000000000..46df991efb --- /dev/null +++ b/operations/helm/tests/default-values/alloy/templates/serviceaccount.yaml @@ -0,0 +1,13 @@ +--- +# Source: alloy/templates/serviceaccount.yaml +apiVersion: v1 +kind: ServiceAccount +metadata: + name: alloy + namespace: default + labels: + helm.sh/chart: alloy + app.kubernetes.io/name: alloy + app.kubernetes.io/instance: alloy + app.kubernetes.io/version: "vX.Y.Z" + app.kubernetes.io/managed-by: Helm diff --git a/operations/helm/tests/default-values/grafana-agent/templates/configmap.yaml b/operations/helm/tests/default-values/grafana-agent/templates/configmap.yaml deleted file mode 100644 index 2fdc6f0117..0000000000 --- a/operations/helm/tests/default-values/grafana-agent/templates/configmap.yaml +++ /dev/null @@ -1,42 +0,0 @@ ---- -# Source: grafana-agent/templates/configmap.yaml -apiVersion: v1 -kind: ConfigMap -metadata: - name: grafana-agent - labels: - helm.sh/chart: grafana-agent - app.kubernetes.io/name: grafana-agent - app.kubernetes.io/instance: 
grafana-agent - app.kubernetes.io/version: "vX.Y.Z" - app.kubernetes.io/managed-by: Helm -data: - config.river: |- - logging { - level = "info" - format = "logfmt" - } - - discovery.kubernetes "pods" { - role = "pod" - } - - discovery.kubernetes "nodes" { - role = "node" - } - - discovery.kubernetes "services" { - role = "service" - } - - discovery.kubernetes "endpoints" { - role = "endpoints" - } - - discovery.kubernetes "endpointslices" { - role = "endpointslice" - } - - discovery.kubernetes "ingresses" { - role = "ingress" - } diff --git a/operations/helm/tests/default-values/grafana-agent/templates/rbac.yaml b/operations/helm/tests/default-values/grafana-agent/templates/rbac.yaml deleted file mode 100644 index 3765583fb6..0000000000 --- a/operations/helm/tests/default-values/grafana-agent/templates/rbac.yaml +++ /dev/null @@ -1,117 +0,0 @@ ---- -# Source: grafana-agent/templates/rbac.yaml -apiVersion: rbac.authorization.k8s.io/v1 -kind: ClusterRole -metadata: - name: grafana-agent - labels: - helm.sh/chart: grafana-agent - app.kubernetes.io/name: grafana-agent - app.kubernetes.io/instance: grafana-agent - app.kubernetes.io/version: "vX.Y.Z" - app.kubernetes.io/managed-by: Helm -rules: - # Rules which allow discovery.kubernetes to function. - - apiGroups: - - "" - - "discovery.k8s.io" - - "networking.k8s.io" - resources: - - endpoints - - endpointslices - - ingresses - - nodes - - nodes/proxy - - nodes/metrics - - pods - - services - verbs: - - get - - list - - watch - # Rules which allow loki.source.kubernetes and loki.source.podlogs to work. - - apiGroups: - - "" - resources: - - pods - - pods/log - - namespaces - verbs: - - get - - list - - watch - - apiGroups: - - "monitoring.grafana.com" - resources: - - podlogs - verbs: - - get - - list - - watch - # Rules which allow mimir.rules.kubernetes to work. - - apiGroups: ["monitoring.coreos.com"] - resources: - - prometheusrules - verbs: - - get - - list - - watch - - nonResourceURLs: - - /metrics - verbs: - - get - # Rules for prometheus.kubernetes.* - - apiGroups: ["monitoring.coreos.com"] - resources: - - podmonitors - - servicemonitors - - probes - verbs: - - get - - list - - watch - # Rules which allow eventhandler to work. 
- - apiGroups: - - "" - resources: - - events - verbs: - - get - - list - - watch - # needed for remote.kubernetes.* - - apiGroups: [""] - resources: - - "configmaps" - - "secrets" - verbs: - - get - - list - - watch - # needed for otelcol.processor.k8sattributes - - apiGroups: ["apps"] - resources: ["replicasets"] - verbs: ["get", "list", "watch"] - - apiGroups: ["extensions"] - resources: ["replicasets"] - verbs: ["get", "list", "watch"] ---- -# Source: grafana-agent/templates/rbac.yaml -apiVersion: rbac.authorization.k8s.io/v1 -kind: ClusterRoleBinding -metadata: - name: grafana-agent - labels: - helm.sh/chart: grafana-agent - app.kubernetes.io/name: grafana-agent - app.kubernetes.io/instance: grafana-agent - app.kubernetes.io/version: "vX.Y.Z" - app.kubernetes.io/managed-by: Helm -roleRef: - apiGroup: rbac.authorization.k8s.io - kind: ClusterRole - name: grafana-agent -subjects: - - kind: ServiceAccount - name: grafana-agent - namespace: default diff --git a/operations/helm/tests/default-values/grafana-agent/templates/service.yaml b/operations/helm/tests/default-values/grafana-agent/templates/service.yaml deleted file mode 100644 index c98f79428b..0000000000 --- a/operations/helm/tests/default-values/grafana-agent/templates/service.yaml +++ /dev/null @@ -1,23 +0,0 @@ ---- -# Source: grafana-agent/templates/service.yaml -apiVersion: v1 -kind: Service -metadata: - name: grafana-agent - labels: - helm.sh/chart: grafana-agent - app.kubernetes.io/name: grafana-agent - app.kubernetes.io/instance: grafana-agent - app.kubernetes.io/version: "vX.Y.Z" - app.kubernetes.io/managed-by: Helm -spec: - type: ClusterIP - selector: - app.kubernetes.io/name: grafana-agent - app.kubernetes.io/instance: grafana-agent - internalTrafficPolicy: Cluster - ports: - - name: http-metrics - port: 80 - targetPort: 80 - protocol: "TCP" diff --git a/operations/helm/tests/default-values/grafana-agent/templates/serviceaccount.yaml b/operations/helm/tests/default-values/grafana-agent/templates/serviceaccount.yaml deleted file mode 100644 index 65d7e0df38..0000000000 --- a/operations/helm/tests/default-values/grafana-agent/templates/serviceaccount.yaml +++ /dev/null @@ -1,13 +0,0 @@ ---- -# Source: grafana-agent/templates/serviceaccount.yaml -apiVersion: v1 -kind: ServiceAccount -metadata: - name: grafana-agent - namespace: default - labels: - helm.sh/chart: grafana-agent - app.kubernetes.io/name: grafana-agent - app.kubernetes.io/instance: grafana-agent - app.kubernetes.io/version: "vX.Y.Z" - app.kubernetes.io/managed-by: Helm diff --git a/operations/helm/tests/enable-servicemonitor-tls/alloy/templates/configmap.yaml b/operations/helm/tests/enable-servicemonitor-tls/alloy/templates/configmap.yaml new file mode 100644 index 0000000000..c9ae77aa1e --- /dev/null +++ b/operations/helm/tests/enable-servicemonitor-tls/alloy/templates/configmap.yaml @@ -0,0 +1,42 @@ +--- +# Source: alloy/templates/configmap.yaml +apiVersion: v1 +kind: ConfigMap +metadata: + name: alloy + labels: + helm.sh/chart: alloy + app.kubernetes.io/name: alloy + app.kubernetes.io/instance: alloy + app.kubernetes.io/version: "vX.Y.Z" + app.kubernetes.io/managed-by: Helm +data: + config.river: |- + logging { + level = "info" + format = "logfmt" + } + + discovery.kubernetes "pods" { + role = "pod" + } + + discovery.kubernetes "nodes" { + role = "node" + } + + discovery.kubernetes "services" { + role = "service" + } + + discovery.kubernetes "endpoints" { + role = "endpoints" + } + + discovery.kubernetes "endpointslices" { + role = "endpointslice" + } + + 
discovery.kubernetes "ingresses" { + role = "ingress" + } diff --git a/operations/helm/tests/enable-servicemonitor-tls/grafana-agent/templates/controllers/daemonset.yaml b/operations/helm/tests/enable-servicemonitor-tls/alloy/templates/controllers/daemonset.yaml similarity index 78% rename from operations/helm/tests/enable-servicemonitor-tls/grafana-agent/templates/controllers/daemonset.yaml rename to operations/helm/tests/enable-servicemonitor-tls/alloy/templates/controllers/daemonset.yaml index 10c6c433d7..72fc14e915 100644 --- a/operations/helm/tests/enable-servicemonitor-tls/grafana-agent/templates/controllers/daemonset.yaml +++ b/operations/helm/tests/enable-servicemonitor-tls/alloy/templates/controllers/daemonset.yaml @@ -1,30 +1,30 @@ --- -# Source: grafana-agent/templates/controllers/daemonset.yaml +# Source: alloy/templates/controllers/daemonset.yaml apiVersion: apps/v1 kind: DaemonSet metadata: - name: grafana-agent + name: alloy labels: - helm.sh/chart: grafana-agent - app.kubernetes.io/name: grafana-agent - app.kubernetes.io/instance: grafana-agent + helm.sh/chart: alloy + app.kubernetes.io/name: alloy + app.kubernetes.io/instance: alloy app.kubernetes.io/version: "vX.Y.Z" app.kubernetes.io/managed-by: Helm spec: minReadySeconds: 10 selector: matchLabels: - app.kubernetes.io/name: grafana-agent - app.kubernetes.io/instance: grafana-agent + app.kubernetes.io/name: alloy + app.kubernetes.io/instance: alloy template: metadata: annotations: kubectl.kubernetes.io/default-container: alloy labels: - app.kubernetes.io/name: grafana-agent - app.kubernetes.io/instance: grafana-agent + app.kubernetes.io/name: alloy + app.kubernetes.io/instance: alloy spec: - serviceAccountName: grafana-agent + serviceAccountName: alloy containers: - name: alloy image: docker.io/grafana/alloy:v0.40.2 @@ -71,4 +71,4 @@ spec: volumes: - name: config configMap: - name: grafana-agent + name: alloy diff --git a/operations/helm/tests/enable-servicemonitor-tls/alloy/templates/rbac.yaml b/operations/helm/tests/enable-servicemonitor-tls/alloy/templates/rbac.yaml new file mode 100644 index 0000000000..53ca27544f --- /dev/null +++ b/operations/helm/tests/enable-servicemonitor-tls/alloy/templates/rbac.yaml @@ -0,0 +1,117 @@ +--- +# Source: alloy/templates/rbac.yaml +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRole +metadata: + name: alloy + labels: + helm.sh/chart: alloy + app.kubernetes.io/name: alloy + app.kubernetes.io/instance: alloy + app.kubernetes.io/version: "vX.Y.Z" + app.kubernetes.io/managed-by: Helm +rules: + # Rules which allow discovery.kubernetes to function. + - apiGroups: + - "" + - "discovery.k8s.io" + - "networking.k8s.io" + resources: + - endpoints + - endpointslices + - ingresses + - nodes + - nodes/proxy + - nodes/metrics + - pods + - services + verbs: + - get + - list + - watch + # Rules which allow loki.source.kubernetes and loki.source.podlogs to work. + - apiGroups: + - "" + resources: + - pods + - pods/log + - namespaces + verbs: + - get + - list + - watch + - apiGroups: + - "monitoring.grafana.com" + resources: + - podlogs + verbs: + - get + - list + - watch + # Rules which allow mimir.rules.kubernetes to work. 
+ - apiGroups: ["monitoring.coreos.com"] + resources: + - prometheusrules + verbs: + - get + - list + - watch + - nonResourceURLs: + - /metrics + verbs: + - get + # Rules for prometheus.kubernetes.* + - apiGroups: ["monitoring.coreos.com"] + resources: + - podmonitors + - servicemonitors + - probes + verbs: + - get + - list + - watch + # Rules which allow eventhandler to work. + - apiGroups: + - "" + resources: + - events + verbs: + - get + - list + - watch + # needed for remote.kubernetes.* + - apiGroups: [""] + resources: + - "configmaps" + - "secrets" + verbs: + - get + - list + - watch + # needed for otelcol.processor.k8sattributes + - apiGroups: ["apps"] + resources: ["replicasets"] + verbs: ["get", "list", "watch"] + - apiGroups: ["extensions"] + resources: ["replicasets"] + verbs: ["get", "list", "watch"] +--- +# Source: alloy/templates/rbac.yaml +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRoleBinding +metadata: + name: alloy + labels: + helm.sh/chart: alloy + app.kubernetes.io/name: alloy + app.kubernetes.io/instance: alloy + app.kubernetes.io/version: "vX.Y.Z" + app.kubernetes.io/managed-by: Helm +roleRef: + apiGroup: rbac.authorization.k8s.io + kind: ClusterRole + name: alloy +subjects: + - kind: ServiceAccount + name: alloy + namespace: default diff --git a/operations/helm/tests/enable-servicemonitor-tls/alloy/templates/service.yaml b/operations/helm/tests/enable-servicemonitor-tls/alloy/templates/service.yaml new file mode 100644 index 0000000000..34c0608fd3 --- /dev/null +++ b/operations/helm/tests/enable-servicemonitor-tls/alloy/templates/service.yaml @@ -0,0 +1,23 @@ +--- +# Source: alloy/templates/service.yaml +apiVersion: v1 +kind: Service +metadata: + name: alloy + labels: + helm.sh/chart: alloy + app.kubernetes.io/name: alloy + app.kubernetes.io/instance: alloy + app.kubernetes.io/version: "vX.Y.Z" + app.kubernetes.io/managed-by: Helm +spec: + type: ClusterIP + selector: + app.kubernetes.io/name: alloy + app.kubernetes.io/instance: alloy + internalTrafficPolicy: Cluster + ports: + - name: http-metrics + port: 80 + targetPort: 80 + protocol: "TCP" diff --git a/operations/helm/tests/enable-servicemonitor-tls/alloy/templates/serviceaccount.yaml b/operations/helm/tests/enable-servicemonitor-tls/alloy/templates/serviceaccount.yaml new file mode 100644 index 0000000000..46df991efb --- /dev/null +++ b/operations/helm/tests/enable-servicemonitor-tls/alloy/templates/serviceaccount.yaml @@ -0,0 +1,13 @@ +--- +# Source: alloy/templates/serviceaccount.yaml +apiVersion: v1 +kind: ServiceAccount +metadata: + name: alloy + namespace: default + labels: + helm.sh/chart: alloy + app.kubernetes.io/name: alloy + app.kubernetes.io/instance: alloy + app.kubernetes.io/version: "vX.Y.Z" + app.kubernetes.io/managed-by: Helm diff --git a/operations/helm/tests/enable-servicemonitor-tls/grafana-agent/templates/servicemonitor.yaml b/operations/helm/tests/enable-servicemonitor-tls/alloy/templates/servicemonitor.yaml similarity index 52% rename from operations/helm/tests/enable-servicemonitor-tls/grafana-agent/templates/servicemonitor.yaml rename to operations/helm/tests/enable-servicemonitor-tls/alloy/templates/servicemonitor.yaml index 3312cb651c..ac88625439 100644 --- a/operations/helm/tests/enable-servicemonitor-tls/grafana-agent/templates/servicemonitor.yaml +++ b/operations/helm/tests/enable-servicemonitor-tls/alloy/templates/servicemonitor.yaml @@ -1,13 +1,13 @@ --- -# Source: grafana-agent/templates/servicemonitor.yaml +# Source: alloy/templates/servicemonitor.yaml apiVersion: 
monitoring.coreos.com/v1 kind: ServiceMonitor metadata: - name: grafana-agent + name: alloy labels: - helm.sh/chart: grafana-agent - app.kubernetes.io/name: grafana-agent - app.kubernetes.io/instance: grafana-agent + helm.sh/chart: alloy + app.kubernetes.io/name: alloy + app.kubernetes.io/instance: alloy app.kubernetes.io/version: "vX.Y.Z" app.kubernetes.io/managed-by: Helm spec: @@ -19,5 +19,5 @@ spec: insecureSkipVerify: true selector: matchLabels: - app.kubernetes.io/name: grafana-agent - app.kubernetes.io/instance: grafana-agent + app.kubernetes.io/name: alloy + app.kubernetes.io/instance: alloy diff --git a/operations/helm/tests/enable-servicemonitor-tls/grafana-agent/templates/configmap.yaml b/operations/helm/tests/enable-servicemonitor-tls/grafana-agent/templates/configmap.yaml deleted file mode 100644 index 2fdc6f0117..0000000000 --- a/operations/helm/tests/enable-servicemonitor-tls/grafana-agent/templates/configmap.yaml +++ /dev/null @@ -1,42 +0,0 @@ ---- -# Source: grafana-agent/templates/configmap.yaml -apiVersion: v1 -kind: ConfigMap -metadata: - name: grafana-agent - labels: - helm.sh/chart: grafana-agent - app.kubernetes.io/name: grafana-agent - app.kubernetes.io/instance: grafana-agent - app.kubernetes.io/version: "vX.Y.Z" - app.kubernetes.io/managed-by: Helm -data: - config.river: |- - logging { - level = "info" - format = "logfmt" - } - - discovery.kubernetes "pods" { - role = "pod" - } - - discovery.kubernetes "nodes" { - role = "node" - } - - discovery.kubernetes "services" { - role = "service" - } - - discovery.kubernetes "endpoints" { - role = "endpoints" - } - - discovery.kubernetes "endpointslices" { - role = "endpointslice" - } - - discovery.kubernetes "ingresses" { - role = "ingress" - } diff --git a/operations/helm/tests/enable-servicemonitor-tls/grafana-agent/templates/rbac.yaml b/operations/helm/tests/enable-servicemonitor-tls/grafana-agent/templates/rbac.yaml deleted file mode 100644 index 3765583fb6..0000000000 --- a/operations/helm/tests/enable-servicemonitor-tls/grafana-agent/templates/rbac.yaml +++ /dev/null @@ -1,117 +0,0 @@ ---- -# Source: grafana-agent/templates/rbac.yaml -apiVersion: rbac.authorization.k8s.io/v1 -kind: ClusterRole -metadata: - name: grafana-agent - labels: - helm.sh/chart: grafana-agent - app.kubernetes.io/name: grafana-agent - app.kubernetes.io/instance: grafana-agent - app.kubernetes.io/version: "vX.Y.Z" - app.kubernetes.io/managed-by: Helm -rules: - # Rules which allow discovery.kubernetes to function. - - apiGroups: - - "" - - "discovery.k8s.io" - - "networking.k8s.io" - resources: - - endpoints - - endpointslices - - ingresses - - nodes - - nodes/proxy - - nodes/metrics - - pods - - services - verbs: - - get - - list - - watch - # Rules which allow loki.source.kubernetes and loki.source.podlogs to work. - - apiGroups: - - "" - resources: - - pods - - pods/log - - namespaces - verbs: - - get - - list - - watch - - apiGroups: - - "monitoring.grafana.com" - resources: - - podlogs - verbs: - - get - - list - - watch - # Rules which allow mimir.rules.kubernetes to work. - - apiGroups: ["monitoring.coreos.com"] - resources: - - prometheusrules - verbs: - - get - - list - - watch - - nonResourceURLs: - - /metrics - verbs: - - get - # Rules for prometheus.kubernetes.* - - apiGroups: ["monitoring.coreos.com"] - resources: - - podmonitors - - servicemonitors - - probes - verbs: - - get - - list - - watch - # Rules which allow eventhandler to work. 
- - apiGroups: - - "" - resources: - - events - verbs: - - get - - list - - watch - # needed for remote.kubernetes.* - - apiGroups: [""] - resources: - - "configmaps" - - "secrets" - verbs: - - get - - list - - watch - # needed for otelcol.processor.k8sattributes - - apiGroups: ["apps"] - resources: ["replicasets"] - verbs: ["get", "list", "watch"] - - apiGroups: ["extensions"] - resources: ["replicasets"] - verbs: ["get", "list", "watch"] ---- -# Source: grafana-agent/templates/rbac.yaml -apiVersion: rbac.authorization.k8s.io/v1 -kind: ClusterRoleBinding -metadata: - name: grafana-agent - labels: - helm.sh/chart: grafana-agent - app.kubernetes.io/name: grafana-agent - app.kubernetes.io/instance: grafana-agent - app.kubernetes.io/version: "vX.Y.Z" - app.kubernetes.io/managed-by: Helm -roleRef: - apiGroup: rbac.authorization.k8s.io - kind: ClusterRole - name: grafana-agent -subjects: - - kind: ServiceAccount - name: grafana-agent - namespace: default diff --git a/operations/helm/tests/enable-servicemonitor-tls/grafana-agent/templates/service.yaml b/operations/helm/tests/enable-servicemonitor-tls/grafana-agent/templates/service.yaml deleted file mode 100644 index c98f79428b..0000000000 --- a/operations/helm/tests/enable-servicemonitor-tls/grafana-agent/templates/service.yaml +++ /dev/null @@ -1,23 +0,0 @@ ---- -# Source: grafana-agent/templates/service.yaml -apiVersion: v1 -kind: Service -metadata: - name: grafana-agent - labels: - helm.sh/chart: grafana-agent - app.kubernetes.io/name: grafana-agent - app.kubernetes.io/instance: grafana-agent - app.kubernetes.io/version: "vX.Y.Z" - app.kubernetes.io/managed-by: Helm -spec: - type: ClusterIP - selector: - app.kubernetes.io/name: grafana-agent - app.kubernetes.io/instance: grafana-agent - internalTrafficPolicy: Cluster - ports: - - name: http-metrics - port: 80 - targetPort: 80 - protocol: "TCP" diff --git a/operations/helm/tests/enable-servicemonitor-tls/grafana-agent/templates/serviceaccount.yaml b/operations/helm/tests/enable-servicemonitor-tls/grafana-agent/templates/serviceaccount.yaml deleted file mode 100644 index 65d7e0df38..0000000000 --- a/operations/helm/tests/enable-servicemonitor-tls/grafana-agent/templates/serviceaccount.yaml +++ /dev/null @@ -1,13 +0,0 @@ ---- -# Source: grafana-agent/templates/serviceaccount.yaml -apiVersion: v1 -kind: ServiceAccount -metadata: - name: grafana-agent - namespace: default - labels: - helm.sh/chart: grafana-agent - app.kubernetes.io/name: grafana-agent - app.kubernetes.io/instance: grafana-agent - app.kubernetes.io/version: "vX.Y.Z" - app.kubernetes.io/managed-by: Helm diff --git a/operations/helm/tests/enable-servicemonitor/alloy/templates/configmap.yaml b/operations/helm/tests/enable-servicemonitor/alloy/templates/configmap.yaml new file mode 100644 index 0000000000..c9ae77aa1e --- /dev/null +++ b/operations/helm/tests/enable-servicemonitor/alloy/templates/configmap.yaml @@ -0,0 +1,42 @@ +--- +# Source: alloy/templates/configmap.yaml +apiVersion: v1 +kind: ConfigMap +metadata: + name: alloy + labels: + helm.sh/chart: alloy + app.kubernetes.io/name: alloy + app.kubernetes.io/instance: alloy + app.kubernetes.io/version: "vX.Y.Z" + app.kubernetes.io/managed-by: Helm +data: + config.river: |- + logging { + level = "info" + format = "logfmt" + } + + discovery.kubernetes "pods" { + role = "pod" + } + + discovery.kubernetes "nodes" { + role = "node" + } + + discovery.kubernetes "services" { + role = "service" + } + + discovery.kubernetes "endpoints" { + role = "endpoints" + } + + discovery.kubernetes 
"endpointslices" { + role = "endpointslice" + } + + discovery.kubernetes "ingresses" { + role = "ingress" + } diff --git a/operations/helm/tests/enable-servicemonitor/alloy/templates/controllers/daemonset.yaml b/operations/helm/tests/enable-servicemonitor/alloy/templates/controllers/daemonset.yaml new file mode 100644 index 0000000000..fdd2df0969 --- /dev/null +++ b/operations/helm/tests/enable-servicemonitor/alloy/templates/controllers/daemonset.yaml @@ -0,0 +1,74 @@ +--- +# Source: alloy/templates/controllers/daemonset.yaml +apiVersion: apps/v1 +kind: DaemonSet +metadata: + name: alloy + labels: + helm.sh/chart: alloy + app.kubernetes.io/name: alloy + app.kubernetes.io/instance: alloy + app.kubernetes.io/version: "vX.Y.Z" + app.kubernetes.io/managed-by: Helm +spec: + minReadySeconds: 10 + selector: + matchLabels: + app.kubernetes.io/name: alloy + app.kubernetes.io/instance: alloy + template: + metadata: + annotations: + kubectl.kubernetes.io/default-container: alloy + labels: + app.kubernetes.io/name: alloy + app.kubernetes.io/instance: alloy + spec: + serviceAccountName: alloy + containers: + - name: alloy + image: docker.io/grafana/alloy:v0.40.2 + imagePullPolicy: IfNotPresent + args: + - run + - /etc/alloy/config.river + - --storage.path=/tmp/alloy + - --server.http.listen-addr=0.0.0.0:80 + - --server.http.ui-path-prefix=/ + env: + - name: AGENT_DEPLOY_MODE + value: "helm" + - name: HOSTNAME + valueFrom: + fieldRef: + fieldPath: spec.nodeName + ports: + - containerPort: 80 + name: http-metrics + readinessProbe: + httpGet: + path: /-/ready + port: 80 + scheme: HTTP + initialDelaySeconds: 10 + timeoutSeconds: 1 + volumeMounts: + - name: config + mountPath: /etc/alloy + - name: config-reloader + image: ghcr.io/jimmidyson/configmap-reload:v0.12.0 + args: + - --volume-dir=/etc/alloy + - --webhook-url=http://localhost:80/-/reload + volumeMounts: + - name: config + mountPath: /etc/alloy + resources: + requests: + cpu: 1m + memory: 5Mi + dnsPolicy: ClusterFirst + volumes: + - name: config + configMap: + name: alloy diff --git a/operations/helm/tests/enable-servicemonitor/alloy/templates/rbac.yaml b/operations/helm/tests/enable-servicemonitor/alloy/templates/rbac.yaml new file mode 100644 index 0000000000..53ca27544f --- /dev/null +++ b/operations/helm/tests/enable-servicemonitor/alloy/templates/rbac.yaml @@ -0,0 +1,117 @@ +--- +# Source: alloy/templates/rbac.yaml +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRole +metadata: + name: alloy + labels: + helm.sh/chart: alloy + app.kubernetes.io/name: alloy + app.kubernetes.io/instance: alloy + app.kubernetes.io/version: "vX.Y.Z" + app.kubernetes.io/managed-by: Helm +rules: + # Rules which allow discovery.kubernetes to function. + - apiGroups: + - "" + - "discovery.k8s.io" + - "networking.k8s.io" + resources: + - endpoints + - endpointslices + - ingresses + - nodes + - nodes/proxy + - nodes/metrics + - pods + - services + verbs: + - get + - list + - watch + # Rules which allow loki.source.kubernetes and loki.source.podlogs to work. + - apiGroups: + - "" + resources: + - pods + - pods/log + - namespaces + verbs: + - get + - list + - watch + - apiGroups: + - "monitoring.grafana.com" + resources: + - podlogs + verbs: + - get + - list + - watch + # Rules which allow mimir.rules.kubernetes to work. 
+ - apiGroups: ["monitoring.coreos.com"] + resources: + - prometheusrules + verbs: + - get + - list + - watch + - nonResourceURLs: + - /metrics + verbs: + - get + # Rules for prometheus.kubernetes.* + - apiGroups: ["monitoring.coreos.com"] + resources: + - podmonitors + - servicemonitors + - probes + verbs: + - get + - list + - watch + # Rules which allow eventhandler to work. + - apiGroups: + - "" + resources: + - events + verbs: + - get + - list + - watch + # needed for remote.kubernetes.* + - apiGroups: [""] + resources: + - "configmaps" + - "secrets" + verbs: + - get + - list + - watch + # needed for otelcol.processor.k8sattributes + - apiGroups: ["apps"] + resources: ["replicasets"] + verbs: ["get", "list", "watch"] + - apiGroups: ["extensions"] + resources: ["replicasets"] + verbs: ["get", "list", "watch"] +--- +# Source: alloy/templates/rbac.yaml +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRoleBinding +metadata: + name: alloy + labels: + helm.sh/chart: alloy + app.kubernetes.io/name: alloy + app.kubernetes.io/instance: alloy + app.kubernetes.io/version: "vX.Y.Z" + app.kubernetes.io/managed-by: Helm +roleRef: + apiGroup: rbac.authorization.k8s.io + kind: ClusterRole + name: alloy +subjects: + - kind: ServiceAccount + name: alloy + namespace: default diff --git a/operations/helm/tests/enable-servicemonitor/alloy/templates/service.yaml b/operations/helm/tests/enable-servicemonitor/alloy/templates/service.yaml new file mode 100644 index 0000000000..34c0608fd3 --- /dev/null +++ b/operations/helm/tests/enable-servicemonitor/alloy/templates/service.yaml @@ -0,0 +1,23 @@ +--- +# Source: alloy/templates/service.yaml +apiVersion: v1 +kind: Service +metadata: + name: alloy + labels: + helm.sh/chart: alloy + app.kubernetes.io/name: alloy + app.kubernetes.io/instance: alloy + app.kubernetes.io/version: "vX.Y.Z" + app.kubernetes.io/managed-by: Helm +spec: + type: ClusterIP + selector: + app.kubernetes.io/name: alloy + app.kubernetes.io/instance: alloy + internalTrafficPolicy: Cluster + ports: + - name: http-metrics + port: 80 + targetPort: 80 + protocol: "TCP" diff --git a/operations/helm/tests/enable-servicemonitor/alloy/templates/serviceaccount.yaml b/operations/helm/tests/enable-servicemonitor/alloy/templates/serviceaccount.yaml new file mode 100644 index 0000000000..46df991efb --- /dev/null +++ b/operations/helm/tests/enable-servicemonitor/alloy/templates/serviceaccount.yaml @@ -0,0 +1,13 @@ +--- +# Source: alloy/templates/serviceaccount.yaml +apiVersion: v1 +kind: ServiceAccount +metadata: + name: alloy + namespace: default + labels: + helm.sh/chart: alloy + app.kubernetes.io/name: alloy + app.kubernetes.io/instance: alloy + app.kubernetes.io/version: "vX.Y.Z" + app.kubernetes.io/managed-by: Helm diff --git a/operations/helm/tests/enable-servicemonitor/alloy/templates/servicemonitor.yaml b/operations/helm/tests/enable-servicemonitor/alloy/templates/servicemonitor.yaml new file mode 100644 index 0000000000..0437bb1ef1 --- /dev/null +++ b/operations/helm/tests/enable-servicemonitor/alloy/templates/servicemonitor.yaml @@ -0,0 +1,21 @@ +--- +# Source: alloy/templates/servicemonitor.yaml +apiVersion: monitoring.coreos.com/v1 +kind: ServiceMonitor +metadata: + name: alloy + labels: + helm.sh/chart: alloy + app.kubernetes.io/name: alloy + app.kubernetes.io/instance: alloy + app.kubernetes.io/version: "vX.Y.Z" + app.kubernetes.io/managed-by: Helm +spec: + endpoints: + - port: http-metrics + scheme: http + honorLabels: true + selector: + matchLabels: + app.kubernetes.io/name: alloy + 
app.kubernetes.io/instance: alloy diff --git a/operations/helm/tests/enable-servicemonitor/grafana-agent/templates/configmap.yaml b/operations/helm/tests/enable-servicemonitor/grafana-agent/templates/configmap.yaml deleted file mode 100644 index 2fdc6f0117..0000000000 --- a/operations/helm/tests/enable-servicemonitor/grafana-agent/templates/configmap.yaml +++ /dev/null @@ -1,42 +0,0 @@ ---- -# Source: grafana-agent/templates/configmap.yaml -apiVersion: v1 -kind: ConfigMap -metadata: - name: grafana-agent - labels: - helm.sh/chart: grafana-agent - app.kubernetes.io/name: grafana-agent - app.kubernetes.io/instance: grafana-agent - app.kubernetes.io/version: "vX.Y.Z" - app.kubernetes.io/managed-by: Helm -data: - config.river: |- - logging { - level = "info" - format = "logfmt" - } - - discovery.kubernetes "pods" { - role = "pod" - } - - discovery.kubernetes "nodes" { - role = "node" - } - - discovery.kubernetes "services" { - role = "service" - } - - discovery.kubernetes "endpoints" { - role = "endpoints" - } - - discovery.kubernetes "endpointslices" { - role = "endpointslice" - } - - discovery.kubernetes "ingresses" { - role = "ingress" - } diff --git a/operations/helm/tests/enable-servicemonitor/grafana-agent/templates/controllers/daemonset.yaml b/operations/helm/tests/enable-servicemonitor/grafana-agent/templates/controllers/daemonset.yaml deleted file mode 100644 index 97d9cbfb9b..0000000000 --- a/operations/helm/tests/enable-servicemonitor/grafana-agent/templates/controllers/daemonset.yaml +++ /dev/null @@ -1,74 +0,0 @@ ---- -# Source: grafana-agent/templates/controllers/daemonset.yaml -apiVersion: apps/v1 -kind: DaemonSet -metadata: - name: grafana-agent - labels: - helm.sh/chart: grafana-agent - app.kubernetes.io/name: grafana-agent - app.kubernetes.io/instance: grafana-agent - app.kubernetes.io/version: "vX.Y.Z" - app.kubernetes.io/managed-by: Helm -spec: - minReadySeconds: 10 - selector: - matchLabels: - app.kubernetes.io/name: grafana-agent - app.kubernetes.io/instance: grafana-agent - template: - metadata: - annotations: - kubectl.kubernetes.io/default-container: alloy - labels: - app.kubernetes.io/name: grafana-agent - app.kubernetes.io/instance: grafana-agent - spec: - serviceAccountName: grafana-agent - containers: - - name: alloy - image: docker.io/grafana/alloy:v0.40.2 - imagePullPolicy: IfNotPresent - args: - - run - - /etc/alloy/config.river - - --storage.path=/tmp/alloy - - --server.http.listen-addr=0.0.0.0:80 - - --server.http.ui-path-prefix=/ - env: - - name: AGENT_DEPLOY_MODE - value: "helm" - - name: HOSTNAME - valueFrom: - fieldRef: - fieldPath: spec.nodeName - ports: - - containerPort: 80 - name: http-metrics - readinessProbe: - httpGet: - path: /-/ready - port: 80 - scheme: HTTP - initialDelaySeconds: 10 - timeoutSeconds: 1 - volumeMounts: - - name: config - mountPath: /etc/alloy - - name: config-reloader - image: ghcr.io/jimmidyson/configmap-reload:v0.12.0 - args: - - --volume-dir=/etc/alloy - - --webhook-url=http://localhost:80/-/reload - volumeMounts: - - name: config - mountPath: /etc/alloy - resources: - requests: - cpu: 1m - memory: 5Mi - dnsPolicy: ClusterFirst - volumes: - - name: config - configMap: - name: grafana-agent diff --git a/operations/helm/tests/enable-servicemonitor/grafana-agent/templates/rbac.yaml b/operations/helm/tests/enable-servicemonitor/grafana-agent/templates/rbac.yaml deleted file mode 100644 index 3765583fb6..0000000000 --- a/operations/helm/tests/enable-servicemonitor/grafana-agent/templates/rbac.yaml +++ /dev/null @@ -1,117 +0,0 @@ 
----
-# Source: grafana-agent/templates/rbac.yaml
-apiVersion: rbac.authorization.k8s.io/v1
-kind: ClusterRole
-metadata:
-  name: grafana-agent
-  labels:
-    helm.sh/chart: grafana-agent
-    app.kubernetes.io/name: grafana-agent
-    app.kubernetes.io/instance: grafana-agent
-    app.kubernetes.io/version: "vX.Y.Z"
-    app.kubernetes.io/managed-by: Helm
-rules:
-  # Rules which allow discovery.kubernetes to function.
-  - apiGroups:
-      - ""
-      - "discovery.k8s.io"
-      - "networking.k8s.io"
-    resources:
-      - endpoints
-      - endpointslices
-      - ingresses
-      - nodes
-      - nodes/proxy
-      - nodes/metrics
-      - pods
-      - services
-    verbs:
-      - get
-      - list
-      - watch
-  # Rules which allow loki.source.kubernetes and loki.source.podlogs to work.
-  - apiGroups:
-      - ""
-    resources:
-      - pods
-      - pods/log
-      - namespaces
-    verbs:
-      - get
-      - list
-      - watch
-  - apiGroups:
-      - "monitoring.grafana.com"
-    resources:
-      - podlogs
-    verbs:
-      - get
-      - list
-      - watch
-  # Rules which allow mimir.rules.kubernetes to work.
-  - apiGroups: ["monitoring.coreos.com"]
-    resources:
-      - prometheusrules
-    verbs:
-      - get
-      - list
-      - watch
-  - nonResourceURLs:
-      - /metrics
-    verbs:
-      - get
-  # Rules for prometheus.kubernetes.*
-  - apiGroups: ["monitoring.coreos.com"]
-    resources:
-      - podmonitors
-      - servicemonitors
-      - probes
-    verbs:
-      - get
-      - list
-      - watch
-  # Rules which allow eventhandler to work.
-  - apiGroups:
-      - ""
-    resources:
-      - events
-    verbs:
-      - get
-      - list
-      - watch
-  # needed for remote.kubernetes.*
-  - apiGroups: [""]
-    resources:
-      - "configmaps"
-      - "secrets"
-    verbs:
-      - get
-      - list
-      - watch
-  # needed for otelcol.processor.k8sattributes
-  - apiGroups: ["apps"]
-    resources: ["replicasets"]
-    verbs: ["get", "list", "watch"]
-  - apiGroups: ["extensions"]
-    resources: ["replicasets"]
-    verbs: ["get", "list", "watch"]
----
-# Source: grafana-agent/templates/rbac.yaml
-apiVersion: rbac.authorization.k8s.io/v1
-kind: ClusterRoleBinding
-metadata:
-  name: grafana-agent
-  labels:
-    helm.sh/chart: grafana-agent
-    app.kubernetes.io/name: grafana-agent
-    app.kubernetes.io/instance: grafana-agent
-    app.kubernetes.io/version: "vX.Y.Z"
-    app.kubernetes.io/managed-by: Helm
-roleRef:
-  apiGroup: rbac.authorization.k8s.io
-  kind: ClusterRole
-  name: grafana-agent
-subjects:
-  - kind: ServiceAccount
-    name: grafana-agent
-    namespace: default
diff --git a/operations/helm/tests/enable-servicemonitor/grafana-agent/templates/service.yaml b/operations/helm/tests/enable-servicemonitor/grafana-agent/templates/service.yaml
deleted file mode 100644
index c98f79428b..0000000000
--- a/operations/helm/tests/enable-servicemonitor/grafana-agent/templates/service.yaml
+++ /dev/null
@@ -1,23 +0,0 @@
----
-# Source: grafana-agent/templates/service.yaml
-apiVersion: v1
-kind: Service
-metadata:
-  name: grafana-agent
-  labels:
-    helm.sh/chart: grafana-agent
-    app.kubernetes.io/name: grafana-agent
-    app.kubernetes.io/instance: grafana-agent
-    app.kubernetes.io/version: "vX.Y.Z"
-    app.kubernetes.io/managed-by: Helm
-spec:
-  type: ClusterIP
-  selector:
-    app.kubernetes.io/name: grafana-agent
-    app.kubernetes.io/instance: grafana-agent
-  internalTrafficPolicy: Cluster
-  ports:
-    - name: http-metrics
-      port: 80
-      targetPort: 80
-      protocol: "TCP"
diff --git a/operations/helm/tests/enable-servicemonitor/grafana-agent/templates/serviceaccount.yaml b/operations/helm/tests/enable-servicemonitor/grafana-agent/templates/serviceaccount.yaml
deleted file mode 100644
index 65d7e0df38..0000000000
--- a/operations/helm/tests/enable-servicemonitor/grafana-agent/templates/serviceaccount.yaml
+++ /dev/null
@@ -1,13 +0,0 @@
----
-# Source: grafana-agent/templates/serviceaccount.yaml
-apiVersion: v1
-kind: ServiceAccount
-metadata:
-  name: grafana-agent
-  namespace: default
-  labels:
-    helm.sh/chart: grafana-agent
-    app.kubernetes.io/name: grafana-agent
-    app.kubernetes.io/instance: grafana-agent
-    app.kubernetes.io/version: "vX.Y.Z"
-    app.kubernetes.io/managed-by: Helm
diff --git a/operations/helm/tests/enable-servicemonitor/grafana-agent/templates/servicemonitor.yaml b/operations/helm/tests/enable-servicemonitor/grafana-agent/templates/servicemonitor.yaml
deleted file mode 100644
index 4a5a3e6bc4..0000000000
--- a/operations/helm/tests/enable-servicemonitor/grafana-agent/templates/servicemonitor.yaml
+++ /dev/null
@@ -1,21 +0,0 @@
----
-# Source: grafana-agent/templates/servicemonitor.yaml
-apiVersion: monitoring.coreos.com/v1
-kind: ServiceMonitor
-metadata:
-  name: grafana-agent
-  labels:
-    helm.sh/chart: grafana-agent
-    app.kubernetes.io/name: grafana-agent
-    app.kubernetes.io/instance: grafana-agent
-    app.kubernetes.io/version: "vX.Y.Z"
-    app.kubernetes.io/managed-by: Helm
-spec:
-  endpoints:
-    - port: http-metrics
-      scheme: http
-      honorLabels: true
-  selector:
-    matchLabels:
-      app.kubernetes.io/name: grafana-agent
-      app.kubernetes.io/instance: grafana-agent
diff --git a/operations/helm/tests/envFrom/alloy/templates/configmap.yaml b/operations/helm/tests/envFrom/alloy/templates/configmap.yaml
new file mode 100644
index 0000000000..c9ae77aa1e
--- /dev/null
+++ b/operations/helm/tests/envFrom/alloy/templates/configmap.yaml
@@ -0,0 +1,42 @@
+---
+# Source: alloy/templates/configmap.yaml
+apiVersion: v1
+kind: ConfigMap
+metadata:
+  name: alloy
+  labels:
+    helm.sh/chart: alloy
+    app.kubernetes.io/name: alloy
+    app.kubernetes.io/instance: alloy
+    app.kubernetes.io/version: "vX.Y.Z"
+    app.kubernetes.io/managed-by: Helm
+data:
+  config.river: |-
+    logging {
+      level = "info"
+      format = "logfmt"
+    }
+
+    discovery.kubernetes "pods" {
+      role = "pod"
+    }
+
+    discovery.kubernetes "nodes" {
+      role = "node"
+    }
+
+    discovery.kubernetes "services" {
+      role = "service"
+    }
+
+    discovery.kubernetes "endpoints" {
+      role = "endpoints"
+    }
+
+    discovery.kubernetes "endpointslices" {
+      role = "endpointslice"
+    }
+
+    discovery.kubernetes "ingresses" {
+      role = "ingress"
+    }
diff --git a/operations/helm/tests/envFrom/grafana-agent/templates/controllers/daemonset.yaml b/operations/helm/tests/envFrom/alloy/templates/controllers/daemonset.yaml
similarity index 79%
rename from operations/helm/tests/envFrom/grafana-agent/templates/controllers/daemonset.yaml
rename to operations/helm/tests/envFrom/alloy/templates/controllers/daemonset.yaml
index 5fc992ce56..b210d6a202 100644
--- a/operations/helm/tests/envFrom/grafana-agent/templates/controllers/daemonset.yaml
+++ b/operations/helm/tests/envFrom/alloy/templates/controllers/daemonset.yaml
@@ -1,30 +1,30 @@
 ---
-# Source: grafana-agent/templates/controllers/daemonset.yaml
+# Source: alloy/templates/controllers/daemonset.yaml
 apiVersion: apps/v1
 kind: DaemonSet
 metadata:
-  name: grafana-agent
+  name: alloy
   labels:
-    helm.sh/chart: grafana-agent
-    app.kubernetes.io/name: grafana-agent
-    app.kubernetes.io/instance: grafana-agent
+    helm.sh/chart: alloy
+    app.kubernetes.io/name: alloy
+    app.kubernetes.io/instance: alloy
     app.kubernetes.io/version: "vX.Y.Z"
     app.kubernetes.io/managed-by: Helm
 spec:
   minReadySeconds: 10
   selector:
     matchLabels:
-      app.kubernetes.io/name: grafana-agent
-      app.kubernetes.io/instance: grafana-agent
+      app.kubernetes.io/name: alloy
+      app.kubernetes.io/instance: alloy
   template:
     metadata:
       annotations:
         kubectl.kubernetes.io/default-container: alloy
       labels:
-        app.kubernetes.io/name: grafana-agent
-        app.kubernetes.io/instance: grafana-agent
+        app.kubernetes.io/name: alloy
+        app.kubernetes.io/instance: alloy
     spec:
-      serviceAccountName: grafana-agent
+      serviceAccountName: alloy
       containers:
         - name: alloy
           image: docker.io/grafana/alloy:v0.40.2
@@ -74,4 +74,4 @@ spec:
       volumes:
         - name: config
           configMap:
-            name: grafana-agent
+            name: alloy
diff --git a/operations/helm/tests/envFrom/alloy/templates/rbac.yaml b/operations/helm/tests/envFrom/alloy/templates/rbac.yaml
new file mode 100644
index 0000000000..53ca27544f
--- /dev/null
+++ b/operations/helm/tests/envFrom/alloy/templates/rbac.yaml
@@ -0,0 +1,117 @@
+---
+# Source: alloy/templates/rbac.yaml
+apiVersion: rbac.authorization.k8s.io/v1
+kind: ClusterRole
+metadata:
+  name: alloy
+  labels:
+    helm.sh/chart: alloy
+    app.kubernetes.io/name: alloy
+    app.kubernetes.io/instance: alloy
+    app.kubernetes.io/version: "vX.Y.Z"
+    app.kubernetes.io/managed-by: Helm
+rules:
+  # Rules which allow discovery.kubernetes to function.
+  - apiGroups:
+      - ""
+      - "discovery.k8s.io"
+      - "networking.k8s.io"
+    resources:
+      - endpoints
+      - endpointslices
+      - ingresses
+      - nodes
+      - nodes/proxy
+      - nodes/metrics
+      - pods
+      - services
+    verbs:
+      - get
+      - list
+      - watch
+  # Rules which allow loki.source.kubernetes and loki.source.podlogs to work.
+  - apiGroups:
+      - ""
+    resources:
+      - pods
+      - pods/log
+      - namespaces
+    verbs:
+      - get
+      - list
+      - watch
+  - apiGroups:
+      - "monitoring.grafana.com"
+    resources:
+      - podlogs
+    verbs:
+      - get
+      - list
+      - watch
+  # Rules which allow mimir.rules.kubernetes to work.
+  - apiGroups: ["monitoring.coreos.com"]
+    resources:
+      - prometheusrules
+    verbs:
+      - get
+      - list
+      - watch
+  - nonResourceURLs:
+      - /metrics
+    verbs:
+      - get
+  # Rules for prometheus.kubernetes.*
+  - apiGroups: ["monitoring.coreos.com"]
+    resources:
+      - podmonitors
+      - servicemonitors
+      - probes
+    verbs:
+      - get
+      - list
+      - watch
+  # Rules which allow eventhandler to work.
+  - apiGroups:
+      - ""
+    resources:
+      - events
+    verbs:
+      - get
+      - list
+      - watch
+  # needed for remote.kubernetes.*
+  - apiGroups: [""]
+    resources:
+      - "configmaps"
+      - "secrets"
+    verbs:
+      - get
+      - list
+      - watch
+  # needed for otelcol.processor.k8sattributes
+  - apiGroups: ["apps"]
+    resources: ["replicasets"]
+    verbs: ["get", "list", "watch"]
+  - apiGroups: ["extensions"]
+    resources: ["replicasets"]
+    verbs: ["get", "list", "watch"]
+---
+# Source: alloy/templates/rbac.yaml
+apiVersion: rbac.authorization.k8s.io/v1
+kind: ClusterRoleBinding
+metadata:
+  name: alloy
+  labels:
+    helm.sh/chart: alloy
+    app.kubernetes.io/name: alloy
+    app.kubernetes.io/instance: alloy
+    app.kubernetes.io/version: "vX.Y.Z"
+    app.kubernetes.io/managed-by: Helm
+roleRef:
+  apiGroup: rbac.authorization.k8s.io
+  kind: ClusterRole
+  name: alloy
+subjects:
+  - kind: ServiceAccount
+    name: alloy
+    namespace: default
diff --git a/operations/helm/tests/envFrom/alloy/templates/service.yaml b/operations/helm/tests/envFrom/alloy/templates/service.yaml
new file mode 100644
index 0000000000..34c0608fd3
--- /dev/null
+++ b/operations/helm/tests/envFrom/alloy/templates/service.yaml
@@ -0,0 +1,23 @@
+---
+# Source: alloy/templates/service.yaml
+apiVersion: v1
+kind: Service
+metadata:
+  name: alloy
+  labels:
+    helm.sh/chart: alloy
+    app.kubernetes.io/name: alloy
+    app.kubernetes.io/instance: alloy
+    app.kubernetes.io/version: "vX.Y.Z"
+    app.kubernetes.io/managed-by: Helm
+spec:
+  type: ClusterIP
+  selector:
+    app.kubernetes.io/name: alloy
+    app.kubernetes.io/instance: alloy
+  internalTrafficPolicy: Cluster
+  ports:
+    - name: http-metrics
+      port: 80
+      targetPort: 80
+      protocol: "TCP"
diff --git a/operations/helm/tests/envFrom/alloy/templates/serviceaccount.yaml b/operations/helm/tests/envFrom/alloy/templates/serviceaccount.yaml
new file mode 100644
index 0000000000..46df991efb
--- /dev/null
+++ b/operations/helm/tests/envFrom/alloy/templates/serviceaccount.yaml
@@ -0,0 +1,13 @@
+---
+# Source: alloy/templates/serviceaccount.yaml
+apiVersion: v1
+kind: ServiceAccount
+metadata:
+  name: alloy
+  namespace: default
+  labels:
+    helm.sh/chart: alloy
+    app.kubernetes.io/name: alloy
+    app.kubernetes.io/instance: alloy
+    app.kubernetes.io/version: "vX.Y.Z"
+    app.kubernetes.io/managed-by: Helm
diff --git a/operations/helm/tests/envFrom/grafana-agent/templates/configmap.yaml b/operations/helm/tests/envFrom/grafana-agent/templates/configmap.yaml
deleted file mode 100644
index 2fdc6f0117..0000000000
--- a/operations/helm/tests/envFrom/grafana-agent/templates/configmap.yaml
+++ /dev/null
@@ -1,42 +0,0 @@
----
-# Source: grafana-agent/templates/configmap.yaml
-apiVersion: v1
-kind: ConfigMap
-metadata:
-  name: grafana-agent
-  labels:
-    helm.sh/chart: grafana-agent
-    app.kubernetes.io/name: grafana-agent
-    app.kubernetes.io/instance: grafana-agent
-    app.kubernetes.io/version: "vX.Y.Z"
-    app.kubernetes.io/managed-by: Helm
-data:
-  config.river: |-
-    logging {
-      level = "info"
-      format = "logfmt"
-    }
-
-    discovery.kubernetes "pods" {
-      role = "pod"
-    }
-
-    discovery.kubernetes "nodes" {
-      role = "node"
-    }
-
-    discovery.kubernetes "services" {
-      role = "service"
-    }
-
-    discovery.kubernetes "endpoints" {
-      role = "endpoints"
-    }
-
-    discovery.kubernetes "endpointslices" {
-      role = "endpointslice"
-    }
-
-    discovery.kubernetes "ingresses" {
-      role = "ingress"
-    }
diff --git a/operations/helm/tests/envFrom/grafana-agent/templates/rbac.yaml b/operations/helm/tests/envFrom/grafana-agent/templates/rbac.yaml
deleted file mode 100644
index 3765583fb6..0000000000
--- a/operations/helm/tests/envFrom/grafana-agent/templates/rbac.yaml
+++ /dev/null
@@ -1,117 +0,0 @@
----
-# Source: grafana-agent/templates/rbac.yaml
-apiVersion: rbac.authorization.k8s.io/v1
-kind: ClusterRole
-metadata:
-  name: grafana-agent
-  labels:
-    helm.sh/chart: grafana-agent
-    app.kubernetes.io/name: grafana-agent
-    app.kubernetes.io/instance: grafana-agent
-    app.kubernetes.io/version: "vX.Y.Z"
-    app.kubernetes.io/managed-by: Helm
-rules:
-  # Rules which allow discovery.kubernetes to function.
-  - apiGroups:
-      - ""
-      - "discovery.k8s.io"
-      - "networking.k8s.io"
-    resources:
-      - endpoints
-      - endpointslices
-      - ingresses
-      - nodes
-      - nodes/proxy
-      - nodes/metrics
-      - pods
-      - services
-    verbs:
-      - get
-      - list
-      - watch
-  # Rules which allow loki.source.kubernetes and loki.source.podlogs to work.
-  - apiGroups:
-      - ""
-    resources:
-      - pods
-      - pods/log
-      - namespaces
-    verbs:
-      - get
-      - list
-      - watch
-  - apiGroups:
-      - "monitoring.grafana.com"
-    resources:
-      - podlogs
-    verbs:
-      - get
-      - list
-      - watch
-  # Rules which allow mimir.rules.kubernetes to work.
-  - apiGroups: ["monitoring.coreos.com"]
-    resources:
-      - prometheusrules
-    verbs:
-      - get
-      - list
-      - watch
-  - nonResourceURLs:
-      - /metrics
-    verbs:
-      - get
-  # Rules for prometheus.kubernetes.*
-  - apiGroups: ["monitoring.coreos.com"]
-    resources:
-      - podmonitors
-      - servicemonitors
-      - probes
-    verbs:
-      - get
-      - list
-      - watch
-  # Rules which allow eventhandler to work.
-  - apiGroups:
-      - ""
-    resources:
-      - events
-    verbs:
-      - get
-      - list
-      - watch
-  # needed for remote.kubernetes.*
-  - apiGroups: [""]
-    resources:
-      - "configmaps"
-      - "secrets"
-    verbs:
-      - get
-      - list
-      - watch
-  # needed for otelcol.processor.k8sattributes
-  - apiGroups: ["apps"]
-    resources: ["replicasets"]
-    verbs: ["get", "list", "watch"]
-  - apiGroups: ["extensions"]
-    resources: ["replicasets"]
-    verbs: ["get", "list", "watch"]
----
-# Source: grafana-agent/templates/rbac.yaml
-apiVersion: rbac.authorization.k8s.io/v1
-kind: ClusterRoleBinding
-metadata:
-  name: grafana-agent
-  labels:
-    helm.sh/chart: grafana-agent
-    app.kubernetes.io/name: grafana-agent
-    app.kubernetes.io/instance: grafana-agent
-    app.kubernetes.io/version: "vX.Y.Z"
-    app.kubernetes.io/managed-by: Helm
-roleRef:
-  apiGroup: rbac.authorization.k8s.io
-  kind: ClusterRole
-  name: grafana-agent
-subjects:
-  - kind: ServiceAccount
-    name: grafana-agent
-    namespace: default
diff --git a/operations/helm/tests/envFrom/grafana-agent/templates/service.yaml b/operations/helm/tests/envFrom/grafana-agent/templates/service.yaml
deleted file mode 100644
index c98f79428b..0000000000
--- a/operations/helm/tests/envFrom/grafana-agent/templates/service.yaml
+++ /dev/null
@@ -1,23 +0,0 @@
----
-# Source: grafana-agent/templates/service.yaml
-apiVersion: v1
-kind: Service
-metadata:
-  name: grafana-agent
-  labels:
-    helm.sh/chart: grafana-agent
-    app.kubernetes.io/name: grafana-agent
-    app.kubernetes.io/instance: grafana-agent
-    app.kubernetes.io/version: "vX.Y.Z"
-    app.kubernetes.io/managed-by: Helm
-spec:
-  type: ClusterIP
-  selector:
-    app.kubernetes.io/name: grafana-agent
-    app.kubernetes.io/instance: grafana-agent
-  internalTrafficPolicy: Cluster
-  ports:
-    - name: http-metrics
-      port: 80
-      targetPort: 80
-      protocol: "TCP"
diff --git a/operations/helm/tests/envFrom/grafana-agent/templates/serviceaccount.yaml b/operations/helm/tests/envFrom/grafana-agent/templates/serviceaccount.yaml
deleted file mode 100644
index 65d7e0df38..0000000000
--- a/operations/helm/tests/envFrom/grafana-agent/templates/serviceaccount.yaml
+++ /dev/null
@@ -1,13 +0,0 @@
----
-# Source: grafana-agent/templates/serviceaccount.yaml
-apiVersion: v1
-kind: ServiceAccount
-metadata:
-  name: grafana-agent
-  namespace: default
-  labels:
-    helm.sh/chart: grafana-agent
-    app.kubernetes.io/name: grafana-agent
-    app.kubernetes.io/instance: grafana-agent
-    app.kubernetes.io/version: "vX.Y.Z"
-    app.kubernetes.io/managed-by: Helm
diff --git a/operations/helm/tests/existing-config/grafana-agent/templates/controllers/daemonset.yaml b/operations/helm/tests/existing-config/alloy/templates/controllers/daemonset.yaml
similarity index 80%
rename from operations/helm/tests/existing-config/grafana-agent/templates/controllers/daemonset.yaml
rename to operations/helm/tests/existing-config/alloy/templates/controllers/daemonset.yaml
index a32b52ae04..d5da2f01bb 100644
--- a/operations/helm/tests/existing-config/grafana-agent/templates/controllers/daemonset.yaml
+++ b/operations/helm/tests/existing-config/alloy/templates/controllers/daemonset.yaml
@@ -1,30 +1,30 @@
 ---
-# Source: grafana-agent/templates/controllers/daemonset.yaml
+# Source: alloy/templates/controllers/daemonset.yaml
 apiVersion: apps/v1
 kind: DaemonSet
 metadata:
-  name: grafana-agent
+  name: alloy
   labels:
-    helm.sh/chart: grafana-agent
-    app.kubernetes.io/name: grafana-agent
-    app.kubernetes.io/instance: grafana-agent
+    helm.sh/chart: alloy
+    app.kubernetes.io/name: alloy
+    app.kubernetes.io/instance: alloy
     app.kubernetes.io/version: "vX.Y.Z"
     app.kubernetes.io/managed-by: Helm
 spec:
   minReadySeconds: 10
   selector:
     matchLabels:
-      app.kubernetes.io/name: grafana-agent
-      app.kubernetes.io/instance: grafana-agent
+      app.kubernetes.io/name: alloy
+      app.kubernetes.io/instance: alloy
   template:
     metadata:
      annotations:
        kubectl.kubernetes.io/default-container: alloy
      labels:
-        app.kubernetes.io/name: grafana-agent
-        app.kubernetes.io/instance: grafana-agent
+        app.kubernetes.io/name: alloy
+        app.kubernetes.io/instance: alloy
     spec:
-      serviceAccountName: grafana-agent
+      serviceAccountName: alloy
      containers:
        - name: alloy
          image: docker.io/grafana/alloy:v0.40.2
diff --git a/operations/helm/tests/existing-config/alloy/templates/rbac.yaml b/operations/helm/tests/existing-config/alloy/templates/rbac.yaml
new file mode 100644
index 0000000000..53ca27544f
--- /dev/null
+++ b/operations/helm/tests/existing-config/alloy/templates/rbac.yaml
@@ -0,0 +1,117 @@
+---
+# Source: alloy/templates/rbac.yaml
+apiVersion: rbac.authorization.k8s.io/v1
+kind: ClusterRole
+metadata:
+  name: alloy
+  labels:
+    helm.sh/chart: alloy
+    app.kubernetes.io/name: alloy
+    app.kubernetes.io/instance: alloy
+    app.kubernetes.io/version: "vX.Y.Z"
+    app.kubernetes.io/managed-by: Helm
+rules:
+  # Rules which allow discovery.kubernetes to function.
+  - apiGroups:
+      - ""
+      - "discovery.k8s.io"
+      - "networking.k8s.io"
+    resources:
+      - endpoints
+      - endpointslices
+      - ingresses
+      - nodes
+      - nodes/proxy
+      - nodes/metrics
+      - pods
+      - services
+    verbs:
+      - get
+      - list
+      - watch
+  # Rules which allow loki.source.kubernetes and loki.source.podlogs to work.
+  - apiGroups:
+      - ""
+    resources:
+      - pods
+      - pods/log
+      - namespaces
+    verbs:
+      - get
+      - list
+      - watch
+  - apiGroups:
+      - "monitoring.grafana.com"
+    resources:
+      - podlogs
+    verbs:
+      - get
+      - list
+      - watch
+  # Rules which allow mimir.rules.kubernetes to work.
+  - apiGroups: ["monitoring.coreos.com"]
+    resources:
+      - prometheusrules
+    verbs:
+      - get
+      - list
+      - watch
+  - nonResourceURLs:
+      - /metrics
+    verbs:
+      - get
+  # Rules for prometheus.kubernetes.*
+  - apiGroups: ["monitoring.coreos.com"]
+    resources:
+      - podmonitors
+      - servicemonitors
+      - probes
+    verbs:
+      - get
+      - list
+      - watch
+  # Rules which allow eventhandler to work.
+  - apiGroups:
+      - ""
+    resources:
+      - events
+    verbs:
+      - get
+      - list
+      - watch
+  # needed for remote.kubernetes.*
+  - apiGroups: [""]
+    resources:
+      - "configmaps"
+      - "secrets"
+    verbs:
+      - get
+      - list
+      - watch
+  # needed for otelcol.processor.k8sattributes
+  - apiGroups: ["apps"]
+    resources: ["replicasets"]
+    verbs: ["get", "list", "watch"]
+  - apiGroups: ["extensions"]
+    resources: ["replicasets"]
+    verbs: ["get", "list", "watch"]
+---
+# Source: alloy/templates/rbac.yaml
+apiVersion: rbac.authorization.k8s.io/v1
+kind: ClusterRoleBinding
+metadata:
+  name: alloy
+  labels:
+    helm.sh/chart: alloy
+    app.kubernetes.io/name: alloy
+    app.kubernetes.io/instance: alloy
+    app.kubernetes.io/version: "vX.Y.Z"
+    app.kubernetes.io/managed-by: Helm
+roleRef:
+  apiGroup: rbac.authorization.k8s.io
+  kind: ClusterRole
+  name: alloy
+subjects:
+  - kind: ServiceAccount
+    name: alloy
+    namespace: default
diff --git a/operations/helm/tests/existing-config/alloy/templates/service.yaml b/operations/helm/tests/existing-config/alloy/templates/service.yaml
new file mode 100644
index 0000000000..34c0608fd3
--- /dev/null
+++ b/operations/helm/tests/existing-config/alloy/templates/service.yaml
@@ -0,0 +1,23 @@
+---
+# Source: alloy/templates/service.yaml
+apiVersion: v1
+kind: Service
+metadata:
+  name: alloy
+  labels:
+    helm.sh/chart: alloy
+    app.kubernetes.io/name: alloy
+    app.kubernetes.io/instance: alloy
+    app.kubernetes.io/version: "vX.Y.Z"
+    app.kubernetes.io/managed-by: Helm
+spec:
+  type: ClusterIP
+  selector:
+    app.kubernetes.io/name: alloy
+    app.kubernetes.io/instance: alloy
+  internalTrafficPolicy: Cluster
+  ports:
+    - name: http-metrics
+      port: 80
+      targetPort: 80
+      protocol: "TCP"
diff --git a/operations/helm/tests/existing-config/alloy/templates/serviceaccount.yaml b/operations/helm/tests/existing-config/alloy/templates/serviceaccount.yaml
new file mode 100644
index 0000000000..46df991efb
--- /dev/null
+++ b/operations/helm/tests/existing-config/alloy/templates/serviceaccount.yaml
@@ -0,0 +1,13 @@
+---
+# Source: alloy/templates/serviceaccount.yaml
+apiVersion: v1
+kind: ServiceAccount
+metadata:
+  name: alloy
+  namespace: default
+  labels:
+    helm.sh/chart: alloy
+    app.kubernetes.io/name: alloy
+    app.kubernetes.io/instance: alloy
+    app.kubernetes.io/version: "vX.Y.Z"
+    app.kubernetes.io/managed-by: Helm
diff --git a/operations/helm/tests/existing-config/grafana-agent/templates/rbac.yaml b/operations/helm/tests/existing-config/grafana-agent/templates/rbac.yaml
deleted file mode 100644
index 3765583fb6..0000000000
--- a/operations/helm/tests/existing-config/grafana-agent/templates/rbac.yaml
+++ /dev/null
@@ -1,117 +0,0 @@
----
-# Source: grafana-agent/templates/rbac.yaml
-apiVersion: rbac.authorization.k8s.io/v1
-kind: ClusterRole
-metadata:
-  name: grafana-agent
-  labels:
-    helm.sh/chart: grafana-agent
-    app.kubernetes.io/name: grafana-agent
-    app.kubernetes.io/instance: grafana-agent
-    app.kubernetes.io/version: "vX.Y.Z"
-    app.kubernetes.io/managed-by: Helm
-rules:
-  # Rules which allow discovery.kubernetes to function.
-  - apiGroups:
-      - ""
-      - "discovery.k8s.io"
-      - "networking.k8s.io"
-    resources:
-      - endpoints
-      - endpointslices
-      - ingresses
-      - nodes
-      - nodes/proxy
-      - nodes/metrics
-      - pods
-      - services
-    verbs:
-      - get
-      - list
-      - watch
-  # Rules which allow loki.source.kubernetes and loki.source.podlogs to work.
-  - apiGroups:
-      - ""
-    resources:
-      - pods
-      - pods/log
-      - namespaces
-    verbs:
-      - get
-      - list
-      - watch
-  - apiGroups:
-      - "monitoring.grafana.com"
-    resources:
-      - podlogs
-    verbs:
-      - get
-      - list
-      - watch
-  # Rules which allow mimir.rules.kubernetes to work.
-  - apiGroups: ["monitoring.coreos.com"]
-    resources:
-      - prometheusrules
-    verbs:
-      - get
-      - list
-      - watch
-  - nonResourceURLs:
-      - /metrics
-    verbs:
-      - get
-  # Rules for prometheus.kubernetes.*
-  - apiGroups: ["monitoring.coreos.com"]
-    resources:
-      - podmonitors
-      - servicemonitors
-      - probes
-    verbs:
-      - get
-      - list
-      - watch
-  # Rules which allow eventhandler to work.
-  - apiGroups:
-      - ""
-    resources:
-      - events
-    verbs:
-      - get
-      - list
-      - watch
-  # needed for remote.kubernetes.*
-  - apiGroups: [""]
-    resources:
-      - "configmaps"
-      - "secrets"
-    verbs:
-      - get
-      - list
-      - watch
-  # needed for otelcol.processor.k8sattributes
-  - apiGroups: ["apps"]
-    resources: ["replicasets"]
-    verbs: ["get", "list", "watch"]
-  - apiGroups: ["extensions"]
-    resources: ["replicasets"]
-    verbs: ["get", "list", "watch"]
----
-# Source: grafana-agent/templates/rbac.yaml
-apiVersion: rbac.authorization.k8s.io/v1
-kind: ClusterRoleBinding
-metadata:
-  name: grafana-agent
-  labels:
-    helm.sh/chart: grafana-agent
-    app.kubernetes.io/name: grafana-agent
-    app.kubernetes.io/instance: grafana-agent
-    app.kubernetes.io/version: "vX.Y.Z"
-    app.kubernetes.io/managed-by: Helm
-roleRef:
-  apiGroup: rbac.authorization.k8s.io
-  kind: ClusterRole
-  name: grafana-agent
-subjects:
-  - kind: ServiceAccount
-    name: grafana-agent
-    namespace: default
diff --git a/operations/helm/tests/existing-config/grafana-agent/templates/service.yaml b/operations/helm/tests/existing-config/grafana-agent/templates/service.yaml
deleted file mode 100644
index c98f79428b..0000000000
--- a/operations/helm/tests/existing-config/grafana-agent/templates/service.yaml
+++ /dev/null
@@ -1,23 +0,0 @@
----
-# Source: grafana-agent/templates/service.yaml
-apiVersion: v1
-kind: Service
-metadata:
-  name: grafana-agent
-  labels:
-    helm.sh/chart: grafana-agent
-    app.kubernetes.io/name: grafana-agent
-    app.kubernetes.io/instance: grafana-agent
-    app.kubernetes.io/version: "vX.Y.Z"
-    app.kubernetes.io/managed-by: Helm
-spec:
-  type: ClusterIP
-  selector:
-    app.kubernetes.io/name: grafana-agent
-    app.kubernetes.io/instance: grafana-agent
-  internalTrafficPolicy: Cluster
-  ports:
-    - name: http-metrics
-      port: 80
-      targetPort: 80
-      protocol: "TCP"
diff --git a/operations/helm/tests/existing-config/grafana-agent/templates/serviceaccount.yaml b/operations/helm/tests/existing-config/grafana-agent/templates/serviceaccount.yaml
deleted file mode 100644
index 65d7e0df38..0000000000
--- a/operations/helm/tests/existing-config/grafana-agent/templates/serviceaccount.yaml
+++ /dev/null
@@ -1,13 +0,0 @@
----
-# Source: grafana-agent/templates/serviceaccount.yaml
-apiVersion: v1
-kind: ServiceAccount
-metadata:
-  name: grafana-agent
-  namespace: default
-  labels:
-    helm.sh/chart: grafana-agent
-    app.kubernetes.io/name: grafana-agent
-    app.kubernetes.io/instance: grafana-agent
-    app.kubernetes.io/version: "vX.Y.Z"
-    app.kubernetes.io/managed-by: Helm
diff --git a/operations/helm/tests/extra-env/alloy/templates/configmap.yaml b/operations/helm/tests/extra-env/alloy/templates/configmap.yaml
new file mode 100644
index 0000000000..c9ae77aa1e
--- /dev/null
+++ b/operations/helm/tests/extra-env/alloy/templates/configmap.yaml
@@ -0,0 +1,42 @@
+---
+# Source: alloy/templates/configmap.yaml
+apiVersion: v1
+kind: ConfigMap
+metadata:
+  name: alloy
+  labels:
+    helm.sh/chart: alloy
+    app.kubernetes.io/name: alloy
+    app.kubernetes.io/instance: alloy
+    app.kubernetes.io/version: "vX.Y.Z"
+    app.kubernetes.io/managed-by: Helm
+data:
+  config.river: |-
+    logging {
+      level = "info"
+      format = "logfmt"
+    }
+
+    discovery.kubernetes "pods" {
+      role = "pod"
+    }
+
+    discovery.kubernetes "nodes" {
+      role = "node"
+    }
+
+    discovery.kubernetes "services" {
+      role = "service"
+    }
+
+    discovery.kubernetes "endpoints" {
+      role = "endpoints"
+    }
+
+    discovery.kubernetes "endpointslices" {
+      role = "endpointslice"
+    }
+
+    discovery.kubernetes "ingresses" {
+      role = "ingress"
+    }
diff --git a/operations/helm/tests/extra-env/grafana-agent/templates/controllers/daemonset.yaml b/operations/helm/tests/extra-env/alloy/templates/controllers/daemonset.yaml
similarity index 80%
rename from operations/helm/tests/extra-env/grafana-agent/templates/controllers/daemonset.yaml
rename to operations/helm/tests/extra-env/alloy/templates/controllers/daemonset.yaml
index b0eacff778..ba538213a4 100644
--- a/operations/helm/tests/extra-env/grafana-agent/templates/controllers/daemonset.yaml
+++ b/operations/helm/tests/extra-env/alloy/templates/controllers/daemonset.yaml
@@ -1,30 +1,30 @@
 ---
-# Source: grafana-agent/templates/controllers/daemonset.yaml
+# Source: alloy/templates/controllers/daemonset.yaml
 apiVersion: apps/v1
 kind: DaemonSet
 metadata:
-  name: grafana-agent
+  name: alloy
   labels:
-    helm.sh/chart: grafana-agent
-    app.kubernetes.io/name: grafana-agent
-    app.kubernetes.io/instance: grafana-agent
+    helm.sh/chart: alloy
+    app.kubernetes.io/name: alloy
+    app.kubernetes.io/instance: alloy
     app.kubernetes.io/version: "vX.Y.Z"
     app.kubernetes.io/managed-by: Helm
 spec:
   minReadySeconds: 10
   selector:
     matchLabels:
-      app.kubernetes.io/name: grafana-agent
-      app.kubernetes.io/instance: grafana-agent
+      app.kubernetes.io/name: alloy
+      app.kubernetes.io/instance: alloy
   template:
     metadata:
      annotations:
        kubectl.kubernetes.io/default-container: alloy
      labels:
-        app.kubernetes.io/name: grafana-agent
-        app.kubernetes.io/instance: grafana-agent
+        app.kubernetes.io/name: alloy
+        app.kubernetes.io/instance: alloy
     spec:
-      serviceAccountName: grafana-agent
+      serviceAccountName: alloy
      containers:
        - name: alloy
          image: docker.io/grafana/alloy:v0.40.2
@@ -80,4 +80,4 @@ spec:
      volumes:
        - name: config
          configMap:
-            name: grafana-agent
+            name: alloy
diff --git a/operations/helm/tests/extra-env/alloy/templates/rbac.yaml b/operations/helm/tests/extra-env/alloy/templates/rbac.yaml
new file mode 100644
index 0000000000..53ca27544f
--- /dev/null
+++ b/operations/helm/tests/extra-env/alloy/templates/rbac.yaml
@@ -0,0 +1,117 @@
+---
+# Source: alloy/templates/rbac.yaml
+apiVersion: rbac.authorization.k8s.io/v1
+kind: ClusterRole
+metadata:
+  name: alloy
+  labels:
+    helm.sh/chart: alloy
+    app.kubernetes.io/name: alloy
+    app.kubernetes.io/instance: alloy
+    app.kubernetes.io/version: "vX.Y.Z"
+    app.kubernetes.io/managed-by: Helm
+rules:
+  # Rules which allow discovery.kubernetes to function.
+  - apiGroups:
+      - ""
+      - "discovery.k8s.io"
+      - "networking.k8s.io"
+    resources:
+      - endpoints
+      - endpointslices
+      - ingresses
+      - nodes
+      - nodes/proxy
+      - nodes/metrics
+      - pods
+      - services
+    verbs:
+      - get
+      - list
+      - watch
+  # Rules which allow loki.source.kubernetes and loki.source.podlogs to work.
+  - apiGroups:
+      - ""
+    resources:
+      - pods
+      - pods/log
+      - namespaces
+    verbs:
+      - get
+      - list
+      - watch
+  - apiGroups:
+      - "monitoring.grafana.com"
+    resources:
+      - podlogs
+    verbs:
+      - get
+      - list
+      - watch
+  # Rules which allow mimir.rules.kubernetes to work.
+  - apiGroups: ["monitoring.coreos.com"]
+    resources:
+      - prometheusrules
+    verbs:
+      - get
+      - list
+      - watch
+  - nonResourceURLs:
+      - /metrics
+    verbs:
+      - get
+  # Rules for prometheus.kubernetes.*
+  - apiGroups: ["monitoring.coreos.com"]
+    resources:
+      - podmonitors
+      - servicemonitors
+      - probes
+    verbs:
+      - get
+      - list
+      - watch
+  # Rules which allow eventhandler to work.
+  - apiGroups:
+      - ""
+    resources:
+      - events
+    verbs:
+      - get
+      - list
+      - watch
+  # needed for remote.kubernetes.*
+  - apiGroups: [""]
+    resources:
+      - "configmaps"
+      - "secrets"
+    verbs:
+      - get
+      - list
+      - watch
+  # needed for otelcol.processor.k8sattributes
+  - apiGroups: ["apps"]
+    resources: ["replicasets"]
+    verbs: ["get", "list", "watch"]
+  - apiGroups: ["extensions"]
+    resources: ["replicasets"]
+    verbs: ["get", "list", "watch"]
+---
+# Source: alloy/templates/rbac.yaml
+apiVersion: rbac.authorization.k8s.io/v1
+kind: ClusterRoleBinding
+metadata:
+  name: alloy
+  labels:
+    helm.sh/chart: alloy
+    app.kubernetes.io/name: alloy
+    app.kubernetes.io/instance: alloy
+    app.kubernetes.io/version: "vX.Y.Z"
+    app.kubernetes.io/managed-by: Helm
+roleRef:
+  apiGroup: rbac.authorization.k8s.io
+  kind: ClusterRole
+  name: alloy
+subjects:
+  - kind: ServiceAccount
+    name: alloy
+    namespace: default
diff --git a/operations/helm/tests/extra-env/alloy/templates/service.yaml b/operations/helm/tests/extra-env/alloy/templates/service.yaml
new file mode 100644
index 0000000000..34c0608fd3
--- /dev/null
+++ b/operations/helm/tests/extra-env/alloy/templates/service.yaml
@@ -0,0 +1,23 @@
+---
+# Source: alloy/templates/service.yaml
+apiVersion: v1
+kind: Service
+metadata:
+  name: alloy
+  labels:
+    helm.sh/chart: alloy
+    app.kubernetes.io/name: alloy
+    app.kubernetes.io/instance: alloy
+    app.kubernetes.io/version: "vX.Y.Z"
+    app.kubernetes.io/managed-by: Helm
+spec:
+  type: ClusterIP
+  selector:
+    app.kubernetes.io/name: alloy
+    app.kubernetes.io/instance: alloy
+  internalTrafficPolicy: Cluster
+  ports:
+    - name: http-metrics
+      port: 80
+      targetPort: 80
+      protocol: "TCP"
diff --git a/operations/helm/tests/extra-env/alloy/templates/serviceaccount.yaml b/operations/helm/tests/extra-env/alloy/templates/serviceaccount.yaml
new file mode 100644
index 0000000000..46df991efb
--- /dev/null
+++ b/operations/helm/tests/extra-env/alloy/templates/serviceaccount.yaml
@@ -0,0 +1,13 @@
+---
+# Source: alloy/templates/serviceaccount.yaml
+apiVersion: v1
+kind: ServiceAccount
+metadata:
+  name: alloy
+  namespace: default
+  labels:
+    helm.sh/chart: alloy
+    app.kubernetes.io/name: alloy
+    app.kubernetes.io/instance: alloy
+    app.kubernetes.io/version: "vX.Y.Z"
+    app.kubernetes.io/managed-by: Helm
diff --git a/operations/helm/tests/extra-env/grafana-agent/templates/configmap.yaml b/operations/helm/tests/extra-env/grafana-agent/templates/configmap.yaml
deleted file mode 100644
index 2fdc6f0117..0000000000
--- a/operations/helm/tests/extra-env/grafana-agent/templates/configmap.yaml
+++ /dev/null
@@ -1,42 +0,0 @@
----
-# Source: grafana-agent/templates/configmap.yaml
-apiVersion: v1
-kind: ConfigMap
-metadata:
-  name: grafana-agent
-  labels:
-    helm.sh/chart: grafana-agent
-    app.kubernetes.io/name: grafana-agent
-    app.kubernetes.io/instance: grafana-agent
-    app.kubernetes.io/version: "vX.Y.Z"
-    app.kubernetes.io/managed-by: Helm
-data:
-  config.river: |-
-    logging {
-      level = "info"
-      format = "logfmt"
-    }
-
-    discovery.kubernetes "pods" {
-      role = "pod"
-    }
-
-    discovery.kubernetes "nodes" {
-      role = "node"
-    }
-
-    discovery.kubernetes "services" {
-      role = "service"
-    }
-
-    discovery.kubernetes "endpoints" {
-      role = "endpoints"
-    }
-
-    discovery.kubernetes "endpointslices" {
-      role = "endpointslice"
-    }
-
-    discovery.kubernetes "ingresses" {
-      role = "ingress"
-    }
diff --git a/operations/helm/tests/extra-env/grafana-agent/templates/rbac.yaml b/operations/helm/tests/extra-env/grafana-agent/templates/rbac.yaml
deleted file mode 100644
index 3765583fb6..0000000000
--- a/operations/helm/tests/extra-env/grafana-agent/templates/rbac.yaml
+++ /dev/null
@@ -1,117 +0,0 @@
----
-# Source: grafana-agent/templates/rbac.yaml
-apiVersion: rbac.authorization.k8s.io/v1
-kind: ClusterRole
-metadata:
-  name: grafana-agent
-  labels:
-    helm.sh/chart: grafana-agent
-    app.kubernetes.io/name: grafana-agent
-    app.kubernetes.io/instance: grafana-agent
-    app.kubernetes.io/version: "vX.Y.Z"
-    app.kubernetes.io/managed-by: Helm
-rules:
-  # Rules which allow discovery.kubernetes to function.
-  - apiGroups:
-      - ""
-      - "discovery.k8s.io"
-      - "networking.k8s.io"
-    resources:
-      - endpoints
-      - endpointslices
-      - ingresses
-      - nodes
-      - nodes/proxy
-      - nodes/metrics
-      - pods
-      - services
-    verbs:
-      - get
-      - list
-      - watch
-  # Rules which allow loki.source.kubernetes and loki.source.podlogs to work.
-  - apiGroups:
-      - ""
-    resources:
-      - pods
-      - pods/log
-      - namespaces
-    verbs:
-      - get
-      - list
-      - watch
-  - apiGroups:
-      - "monitoring.grafana.com"
-    resources:
-      - podlogs
-    verbs:
-      - get
-      - list
-      - watch
-  # Rules which allow mimir.rules.kubernetes to work.
-  - apiGroups: ["monitoring.coreos.com"]
-    resources:
-      - prometheusrules
-    verbs:
-      - get
-      - list
-      - watch
-  - nonResourceURLs:
-      - /metrics
-    verbs:
-      - get
-  # Rules for prometheus.kubernetes.*
-  - apiGroups: ["monitoring.coreos.com"]
-    resources:
-      - podmonitors
-      - servicemonitors
-      - probes
-    verbs:
-      - get
-      - list
-      - watch
-  # Rules which allow eventhandler to work.
-  - apiGroups:
-      - ""
-    resources:
-      - events
-    verbs:
-      - get
-      - list
-      - watch
-  # needed for remote.kubernetes.*
-  - apiGroups: [""]
-    resources:
-      - "configmaps"
-      - "secrets"
-    verbs:
-      - get
-      - list
-      - watch
-  # needed for otelcol.processor.k8sattributes
-  - apiGroups: ["apps"]
-    resources: ["replicasets"]
-    verbs: ["get", "list", "watch"]
-  - apiGroups: ["extensions"]
-    resources: ["replicasets"]
-    verbs: ["get", "list", "watch"]
----
-# Source: grafana-agent/templates/rbac.yaml
-apiVersion: rbac.authorization.k8s.io/v1
-kind: ClusterRoleBinding
-metadata:
-  name: grafana-agent
-  labels:
-    helm.sh/chart: grafana-agent
-    app.kubernetes.io/name: grafana-agent
-    app.kubernetes.io/instance: grafana-agent
-    app.kubernetes.io/version: "vX.Y.Z"
-    app.kubernetes.io/managed-by: Helm
-roleRef:
-  apiGroup: rbac.authorization.k8s.io
-  kind: ClusterRole
-  name: grafana-agent
-subjects:
-  - kind: ServiceAccount
-    name: grafana-agent
-    namespace: default
diff --git a/operations/helm/tests/extra-env/grafana-agent/templates/service.yaml b/operations/helm/tests/extra-env/grafana-agent/templates/service.yaml
deleted file mode 100644
index c98f79428b..0000000000
--- a/operations/helm/tests/extra-env/grafana-agent/templates/service.yaml
+++ /dev/null
@@ -1,23 +0,0 @@
----
-# Source: grafana-agent/templates/service.yaml
-apiVersion: v1
-kind: Service
-metadata:
-  name: grafana-agent
-  labels:
-    helm.sh/chart: grafana-agent
-    app.kubernetes.io/name: grafana-agent
-    app.kubernetes.io/instance: grafana-agent
-    app.kubernetes.io/version: "vX.Y.Z"
-    app.kubernetes.io/managed-by: Helm
-spec:
-  type: ClusterIP
-  selector:
-    app.kubernetes.io/name: grafana-agent
-    app.kubernetes.io/instance: grafana-agent
-  internalTrafficPolicy: Cluster
-  ports:
-    - name: http-metrics
-      port: 80
-      targetPort: 80
-      protocol: "TCP"
diff --git a/operations/helm/tests/extra-env/grafana-agent/templates/serviceaccount.yaml b/operations/helm/tests/extra-env/grafana-agent/templates/serviceaccount.yaml
deleted file mode 100644
index 65d7e0df38..0000000000
--- a/operations/helm/tests/extra-env/grafana-agent/templates/serviceaccount.yaml
+++ /dev/null
@@ -1,13 +0,0 @@
----
-# Source: grafana-agent/templates/serviceaccount.yaml
-apiVersion: v1
-kind: ServiceAccount
-metadata:
-  name: grafana-agent
-  namespace: default
-  labels:
-    helm.sh/chart: grafana-agent
-    app.kubernetes.io/name: grafana-agent
-    app.kubernetes.io/instance: grafana-agent
-    app.kubernetes.io/version: "vX.Y.Z"
-    app.kubernetes.io/managed-by: Helm
diff --git a/operations/helm/tests/extra-ports/alloy/templates/configmap.yaml b/operations/helm/tests/extra-ports/alloy/templates/configmap.yaml
new file mode 100644
index 0000000000..c9ae77aa1e
--- /dev/null
+++ b/operations/helm/tests/extra-ports/alloy/templates/configmap.yaml
@@ -0,0 +1,42 @@
+---
+# Source: alloy/templates/configmap.yaml
+apiVersion: v1
+kind: ConfigMap
+metadata:
+  name: alloy
+  labels:
+    helm.sh/chart: alloy
+    app.kubernetes.io/name: alloy
+    app.kubernetes.io/instance: alloy
+    app.kubernetes.io/version: "vX.Y.Z"
+    app.kubernetes.io/managed-by: Helm
+data:
+  config.river: |-
+    logging {
+      level = "info"
+      format = "logfmt"
+    }
+
+    discovery.kubernetes "pods" {
+      role = "pod"
+    }
+
+    discovery.kubernetes "nodes" {
+      role = "node"
+    }
+
+    discovery.kubernetes "services" {
+      role = "service"
+    }
+
+    discovery.kubernetes "endpoints" {
+      role = "endpoints"
+    }
+
+    discovery.kubernetes "endpointslices" {
+      role = "endpointslice"
+    }
+
+    discovery.kubernetes "ingresses" {
+      role = "ingress"
+    }
diff --git a/operations/helm/tests/extra-ports/grafana-agent/templates/controllers/daemonset.yaml b/operations/helm/tests/extra-ports/alloy/templates/controllers/daemonset.yaml
similarity index 79%
rename from operations/helm/tests/extra-ports/grafana-agent/templates/controllers/daemonset.yaml
rename to operations/helm/tests/extra-ports/alloy/templates/controllers/daemonset.yaml
index d01572ae7d..b68ed7e8fd 100644
--- a/operations/helm/tests/extra-ports/grafana-agent/templates/controllers/daemonset.yaml
+++ b/operations/helm/tests/extra-ports/alloy/templates/controllers/daemonset.yaml
@@ -1,30 +1,30 @@
 ---
-# Source: grafana-agent/templates/controllers/daemonset.yaml
+# Source: alloy/templates/controllers/daemonset.yaml
 apiVersion: apps/v1
 kind: DaemonSet
 metadata:
-  name: grafana-agent
+  name: alloy
   labels:
-    helm.sh/chart: grafana-agent
-    app.kubernetes.io/name: grafana-agent
-    app.kubernetes.io/instance: grafana-agent
+    helm.sh/chart: alloy
+    app.kubernetes.io/name: alloy
+    app.kubernetes.io/instance: alloy
     app.kubernetes.io/version: "vX.Y.Z"
     app.kubernetes.io/managed-by: Helm
 spec:
   minReadySeconds: 10
   selector:
     matchLabels:
-      app.kubernetes.io/name: grafana-agent
-      app.kubernetes.io/instance: grafana-agent
+      app.kubernetes.io/name: alloy
+      app.kubernetes.io/instance: alloy
   template:
     metadata:
      annotations:
        kubectl.kubernetes.io/default-container: alloy
      labels:
-        app.kubernetes.io/name: grafana-agent
-        app.kubernetes.io/instance: grafana-agent
+        app.kubernetes.io/name: alloy
+        app.kubernetes.io/instance: alloy
     spec:
-      serviceAccountName: grafana-agent
+      serviceAccountName: alloy
      containers:
        - name: alloy
          image: docker.io/grafana/alloy:v0.40.2
@@ -74,4 +74,4 @@ spec:
      volumes:
        - name: config
          configMap:
-            name: grafana-agent
+            name: alloy
diff --git a/operations/helm/tests/extra-ports/alloy/templates/rbac.yaml b/operations/helm/tests/extra-ports/alloy/templates/rbac.yaml
new file mode 100644
index 0000000000..53ca27544f
--- /dev/null
+++ b/operations/helm/tests/extra-ports/alloy/templates/rbac.yaml
@@ -0,0 +1,117 @@
+---
+# Source: alloy/templates/rbac.yaml
+apiVersion: rbac.authorization.k8s.io/v1
+kind: ClusterRole
+metadata:
+  name: alloy
+  labels:
+    helm.sh/chart: alloy
+    app.kubernetes.io/name: alloy
+    app.kubernetes.io/instance: alloy
+    app.kubernetes.io/version: "vX.Y.Z"
+    app.kubernetes.io/managed-by: Helm
+rules:
+  # Rules which allow discovery.kubernetes to function.
+  - apiGroups:
+      - ""
+      - "discovery.k8s.io"
+      - "networking.k8s.io"
+    resources:
+      - endpoints
+      - endpointslices
+      - ingresses
+      - nodes
+      - nodes/proxy
+      - nodes/metrics
+      - pods
+      - services
+    verbs:
+      - get
+      - list
+      - watch
+  # Rules which allow loki.source.kubernetes and loki.source.podlogs to work.
+  - apiGroups:
+      - ""
+    resources:
+      - pods
+      - pods/log
+      - namespaces
+    verbs:
+      - get
+      - list
+      - watch
+  - apiGroups:
+      - "monitoring.grafana.com"
+    resources:
+      - podlogs
+    verbs:
+      - get
+      - list
+      - watch
+  # Rules which allow mimir.rules.kubernetes to work.
+  - apiGroups: ["monitoring.coreos.com"]
+    resources:
+      - prometheusrules
+    verbs:
+      - get
+      - list
+      - watch
+  - nonResourceURLs:
+      - /metrics
+    verbs:
+      - get
+  # Rules for prometheus.kubernetes.*
+  - apiGroups: ["monitoring.coreos.com"]
+    resources:
+      - podmonitors
+      - servicemonitors
+      - probes
+    verbs:
+      - get
+      - list
+      - watch
+  # Rules which allow eventhandler to work.
+  - apiGroups:
+      - ""
+    resources:
+      - events
+    verbs:
+      - get
+      - list
+      - watch
+  # needed for remote.kubernetes.*
+  - apiGroups: [""]
+    resources:
+      - "configmaps"
+      - "secrets"
+    verbs:
+      - get
+      - list
+      - watch
+  # needed for otelcol.processor.k8sattributes
+  - apiGroups: ["apps"]
+    resources: ["replicasets"]
+    verbs: ["get", "list", "watch"]
+  - apiGroups: ["extensions"]
+    resources: ["replicasets"]
+    verbs: ["get", "list", "watch"]
+---
+# Source: alloy/templates/rbac.yaml
+apiVersion: rbac.authorization.k8s.io/v1
+kind: ClusterRoleBinding
+metadata:
+  name: alloy
+  labels:
+    helm.sh/chart: alloy
+    app.kubernetes.io/name: alloy
+    app.kubernetes.io/instance: alloy
+    app.kubernetes.io/version: "vX.Y.Z"
+    app.kubernetes.io/managed-by: Helm
+roleRef:
+  apiGroup: rbac.authorization.k8s.io
+  kind: ClusterRole
+  name: alloy
+subjects:
+  - kind: ServiceAccount
+    name: alloy
+    namespace: default
diff --git a/operations/helm/tests/extra-ports/grafana-agent/templates/service.yaml b/operations/helm/tests/extra-ports/alloy/templates/service.yaml
similarity index 57%
rename from operations/helm/tests/extra-ports/grafana-agent/templates/service.yaml
rename to operations/helm/tests/extra-ports/alloy/templates/service.yaml
index 89acaacaad..e4fc0125bb 100644
--- a/operations/helm/tests/extra-ports/grafana-agent/templates/service.yaml
+++ b/operations/helm/tests/extra-ports/alloy/templates/service.yaml
@@ -1,20 +1,20 @@
 ---
-# Source: grafana-agent/templates/service.yaml
+# Source: alloy/templates/service.yaml
 apiVersion: v1
 kind: Service
 metadata:
-  name: grafana-agent
+  name: alloy
   labels:
-    helm.sh/chart: grafana-agent
-    app.kubernetes.io/name: grafana-agent
-    app.kubernetes.io/instance: grafana-agent
+    helm.sh/chart: alloy
+    app.kubernetes.io/name: alloy
+    app.kubernetes.io/instance: alloy
     app.kubernetes.io/version: "vX.Y.Z"
     app.kubernetes.io/managed-by: Helm
 spec:
   type: ClusterIP
   selector:
-    app.kubernetes.io/name: grafana-agent
-    app.kubernetes.io/instance: grafana-agent
+    app.kubernetes.io/name: alloy
+    app.kubernetes.io/instance: alloy
   internalTrafficPolicy: Cluster
   ports:
     - name: http-metrics
diff --git a/operations/helm/tests/extra-ports/alloy/templates/serviceaccount.yaml b/operations/helm/tests/extra-ports/alloy/templates/serviceaccount.yaml
new file mode 100644
index 0000000000..46df991efb
--- /dev/null
+++ b/operations/helm/tests/extra-ports/alloy/templates/serviceaccount.yaml
@@ -0,0 +1,13 @@
+---
+# Source: alloy/templates/serviceaccount.yaml
+apiVersion: v1
+kind: ServiceAccount
+metadata:
+  name: alloy
+  namespace: default
+  labels:
+    helm.sh/chart: alloy
+    app.kubernetes.io/name: alloy
+    app.kubernetes.io/instance: alloy
+    app.kubernetes.io/version: "vX.Y.Z"
+    app.kubernetes.io/managed-by: Helm
diff --git a/operations/helm/tests/extra-ports/grafana-agent/templates/configmap.yaml b/operations/helm/tests/extra-ports/grafana-agent/templates/configmap.yaml
deleted file mode 100644
index 2fdc6f0117..0000000000
--- a/operations/helm/tests/extra-ports/grafana-agent/templates/configmap.yaml
+++ /dev/null
@@ -1,42 +0,0 @@
----
-# Source: grafana-agent/templates/configmap.yaml
-apiVersion: v1
-kind: ConfigMap
-metadata:
-  name: grafana-agent
-  labels:
-    helm.sh/chart: grafana-agent
-    app.kubernetes.io/name: grafana-agent
-    app.kubernetes.io/instance: grafana-agent
-    app.kubernetes.io/version: "vX.Y.Z"
-    app.kubernetes.io/managed-by: Helm
-data:
-  config.river: |-
-    logging {
-      level = "info"
-      format = "logfmt"
-    }
-
-    discovery.kubernetes "pods" {
-      role = "pod"
-    }
-
-    discovery.kubernetes "nodes" {
-      role = "node"
-    }
-
-    discovery.kubernetes "services" {
-      role = "service"
-    }
-
-    discovery.kubernetes "endpoints" {
-      role = "endpoints"
-    }
-
-    discovery.kubernetes "endpointslices" {
-      role = "endpointslice"
-    }
-
-    discovery.kubernetes "ingresses" {
-      role = "ingress"
-    }
diff --git a/operations/helm/tests/extra-ports/grafana-agent/templates/rbac.yaml b/operations/helm/tests/extra-ports/grafana-agent/templates/rbac.yaml
deleted file mode 100644
index 3765583fb6..0000000000
--- a/operations/helm/tests/extra-ports/grafana-agent/templates/rbac.yaml
+++ /dev/null
@@ -1,117 +0,0 @@
----
-# Source: grafana-agent/templates/rbac.yaml
-apiVersion: rbac.authorization.k8s.io/v1
-kind: ClusterRole
-metadata:
-  name: grafana-agent
-  labels:
-    helm.sh/chart: grafana-agent
-    app.kubernetes.io/name: grafana-agent
-    app.kubernetes.io/instance: grafana-agent
-    app.kubernetes.io/version: "vX.Y.Z"
-    app.kubernetes.io/managed-by: Helm
-rules:
-  # Rules which allow discovery.kubernetes to function.
-  - apiGroups:
-      - ""
-      - "discovery.k8s.io"
-      - "networking.k8s.io"
-    resources:
-      - endpoints
-      - endpointslices
-      - ingresses
-      - nodes
-      - nodes/proxy
-      - nodes/metrics
-      - pods
-      - services
-    verbs:
-      - get
-      - list
-      - watch
-  # Rules which allow loki.source.kubernetes and loki.source.podlogs to work.
-  - apiGroups:
-      - ""
-    resources:
-      - pods
-      - pods/log
-      - namespaces
-    verbs:
-      - get
-      - list
-      - watch
-  - apiGroups:
-      - "monitoring.grafana.com"
-    resources:
-      - podlogs
-    verbs:
-      - get
-      - list
-      - watch
-  # Rules which allow mimir.rules.kubernetes to work.
-  - apiGroups: ["monitoring.coreos.com"]
-    resources:
-      - prometheusrules
-    verbs:
-      - get
-      - list
-      - watch
-  - nonResourceURLs:
-      - /metrics
-    verbs:
-      - get
-  # Rules for prometheus.kubernetes.*
-  - apiGroups: ["monitoring.coreos.com"]
-    resources:
-      - podmonitors
-      - servicemonitors
-      - probes
-    verbs:
-      - get
-      - list
-      - watch
-  # Rules which allow eventhandler to work.
-  - apiGroups:
-      - ""
-    resources:
-      - events
-    verbs:
-      - get
-      - list
-      - watch
-  # needed for remote.kubernetes.*
-  - apiGroups: [""]
-    resources:
-      - "configmaps"
-      - "secrets"
-    verbs:
-      - get
-      - list
-      - watch
-  # needed for otelcol.processor.k8sattributes
-  - apiGroups: ["apps"]
-    resources: ["replicasets"]
-    verbs: ["get", "list", "watch"]
-  - apiGroups: ["extensions"]
-    resources: ["replicasets"]
-    verbs: ["get", "list", "watch"]
----
-# Source: grafana-agent/templates/rbac.yaml
-apiVersion: rbac.authorization.k8s.io/v1
-kind: ClusterRoleBinding
-metadata:
-  name: grafana-agent
-  labels:
-    helm.sh/chart: grafana-agent
-    app.kubernetes.io/name: grafana-agent
-    app.kubernetes.io/instance: grafana-agent
-    app.kubernetes.io/version: "vX.Y.Z"
-    app.kubernetes.io/managed-by: Helm
-roleRef:
-  apiGroup: rbac.authorization.k8s.io
-  kind: ClusterRole
-  name: grafana-agent
-subjects:
-  - kind: ServiceAccount
-    name: grafana-agent
-    namespace: default
diff --git a/operations/helm/tests/extra-ports/grafana-agent/templates/serviceaccount.yaml b/operations/helm/tests/extra-ports/grafana-agent/templates/serviceaccount.yaml
deleted file mode 100644
index 65d7e0df38..0000000000
--- a/operations/helm/tests/extra-ports/grafana-agent/templates/serviceaccount.yaml
+++ /dev/null
@@ -1,13 +0,0 @@
----
-# Source: grafana-agent/templates/serviceaccount.yaml
-apiVersion: v1
-kind: ServiceAccount
-metadata:
-  name: grafana-agent
-  namespace: default
-  labels:
-    helm.sh/chart: grafana-agent
-    app.kubernetes.io/name: grafana-agent
-    app.kubernetes.io/instance: grafana-agent
-    app.kubernetes.io/version: "vX.Y.Z"
-    app.kubernetes.io/managed-by: Helm
diff --git a/operations/helm/tests/faro-ingress/alloy/templates/configmap.yaml b/operations/helm/tests/faro-ingress/alloy/templates/configmap.yaml
new file mode 100644
index 0000000000..c9ae77aa1e
--- /dev/null
+++ b/operations/helm/tests/faro-ingress/alloy/templates/configmap.yaml
@@ -0,0 +1,42 @@
+---
+# Source: alloy/templates/configmap.yaml
+apiVersion: v1
+kind: ConfigMap
+metadata:
+  name: alloy
+  labels:
+    helm.sh/chart: alloy
+    app.kubernetes.io/name: alloy
+    app.kubernetes.io/instance: alloy
+    app.kubernetes.io/version: "vX.Y.Z"
+    app.kubernetes.io/managed-by: Helm
+data:
+  config.river: |-
+    logging {
+      level = "info"
+      format = "logfmt"
+    }
+
+    discovery.kubernetes "pods" {
+      role = "pod"
+    }
+
+    discovery.kubernetes "nodes" {
+      role = "node"
+    }
+
+    discovery.kubernetes "services" {
+      role = "service"
+    }
+
+    discovery.kubernetes "endpoints" {
+      role = "endpoints"
+    }
+
+    discovery.kubernetes "endpointslices" {
+      role = "endpointslice"
+    }
+
+    discovery.kubernetes "ingresses" {
+      role = "ingress"
+    }
diff --git a/operations/helm/tests/faro-ingress/grafana-agent/templates/controllers/daemonset.yaml b/operations/helm/tests/faro-ingress/alloy/templates/controllers/daemonset.yaml
similarity index 79%
rename from operations/helm/tests/faro-ingress/grafana-agent/templates/controllers/daemonset.yaml
rename to operations/helm/tests/faro-ingress/alloy/templates/controllers/daemonset.yaml
index c755dd92e6..232baf4cd0 100644
--- a/operations/helm/tests/faro-ingress/grafana-agent/templates/controllers/daemonset.yaml
+++ b/operations/helm/tests/faro-ingress/alloy/templates/controllers/daemonset.yaml
@@ -1,30 +1,30 @@
 ---
-# Source: grafana-agent/templates/controllers/daemonset.yaml
+# Source: alloy/templates/controllers/daemonset.yaml
 apiVersion: apps/v1
 kind: DaemonSet
 metadata:
-  name: grafana-agent
+  name: alloy
   labels:
-    helm.sh/chart: grafana-agent
-    app.kubernetes.io/name: grafana-agent
-    app.kubernetes.io/instance: grafana-agent
+    helm.sh/chart: alloy
+    app.kubernetes.io/name: alloy
+    app.kubernetes.io/instance: alloy
     app.kubernetes.io/version: "vX.Y.Z"
     app.kubernetes.io/managed-by: Helm
 spec:
   minReadySeconds: 10
   selector:
     matchLabels:
-      app.kubernetes.io/name: grafana-agent
-      app.kubernetes.io/instance: grafana-agent
+      app.kubernetes.io/name: alloy
+      app.kubernetes.io/instance: alloy
   template:
     metadata:
      annotations:
        kubectl.kubernetes.io/default-container: alloy
      labels:
-        app.kubernetes.io/name: grafana-agent
-        app.kubernetes.io/instance: grafana-agent
+        app.kubernetes.io/name: alloy
+        app.kubernetes.io/instance: alloy
     spec:
-      serviceAccountName: grafana-agent
+      serviceAccountName: alloy
      containers:
        - name: alloy
          image: docker.io/grafana/alloy:v0.40.2
@@ -74,4 +74,4 @@ spec:
      volumes:
        - name: config
          configMap:
-            name: grafana-agent
+            name: alloy
diff --git a/operations/helm/tests/faro-ingress/grafana-agent/templates/ingress.yaml b/operations/helm/tests/faro-ingress/alloy/templates/ingress.yaml
similarity index 63%
rename from operations/helm/tests/faro-ingress/grafana-agent/templates/ingress.yaml
rename to operations/helm/tests/faro-ingress/alloy/templates/ingress.yaml
index dfd9a0b9ba..7c4b32c460 100644
--- a/operations/helm/tests/faro-ingress/grafana-agent/templates/ingress.yaml
+++ b/operations/helm/tests/faro-ingress/alloy/templates/ingress.yaml
@@ -1,14 +1,14 @@
 ---
-# Source: grafana-agent/templates/ingress.yaml
+# Source: alloy/templates/ingress.yaml
 apiVersion: networking.k8s.io/v1
 kind: Ingress
 metadata:
-  name: grafana-agent
+  name: alloy
   namespace: default
   labels:
-    helm.sh/chart: grafana-agent
-    app.kubernetes.io/name: grafana-agent
-    app.kubernetes.io/instance: grafana-agent
+    helm.sh/chart: alloy
+    app.kubernetes.io/name: alloy
+    app.kubernetes.io/instance: alloy
     app.kubernetes.io/version: "vX.Y.Z"
     app.kubernetes.io/managed-by: Helm
 spec:
@@ -20,6 +20,6 @@ spec:
           pathType: Prefix
           backend:
             service:
-              name: grafana-agent
+              name: alloy
              port:
                number: 12347
diff --git a/operations/helm/tests/faro-ingress/alloy/templates/rbac.yaml b/operations/helm/tests/faro-ingress/alloy/templates/rbac.yaml
new file mode 100644
index 0000000000..53ca27544f
--- /dev/null
+++ b/operations/helm/tests/faro-ingress/alloy/templates/rbac.yaml
@@ -0,0 +1,117 @@
+---
+# Source: alloy/templates/rbac.yaml
+apiVersion: rbac.authorization.k8s.io/v1
+kind: ClusterRole
+metadata:
+  name: alloy
+  labels:
+    helm.sh/chart: alloy
+    app.kubernetes.io/name: alloy
+    app.kubernetes.io/instance: alloy
+    app.kubernetes.io/version: "vX.Y.Z"
+    app.kubernetes.io/managed-by: Helm
+rules:
+  # Rules which allow discovery.kubernetes to function.
+  - apiGroups:
+      - ""
+      - "discovery.k8s.io"
+      - "networking.k8s.io"
+    resources:
+      - endpoints
+      - endpointslices
+      - ingresses
+      - nodes
+      - nodes/proxy
+      - nodes/metrics
+      - pods
+      - services
+    verbs:
+      - get
+      - list
+      - watch
+  # Rules which allow loki.source.kubernetes and loki.source.podlogs to work.
+  - apiGroups:
+      - ""
+    resources:
+      - pods
+      - pods/log
+      - namespaces
+    verbs:
+      - get
+      - list
+      - watch
+  - apiGroups:
+      - "monitoring.grafana.com"
+    resources:
+      - podlogs
+    verbs:
+      - get
+      - list
+      - watch
+  # Rules which allow mimir.rules.kubernetes to work.
+  - apiGroups: ["monitoring.coreos.com"]
+    resources:
+      - prometheusrules
+    verbs:
+      - get
+      - list
+      - watch
+  - nonResourceURLs:
+      - /metrics
+    verbs:
+      - get
+  # Rules for prometheus.kubernetes.*
+  - apiGroups: ["monitoring.coreos.com"]
+    resources:
+      - podmonitors
+      - servicemonitors
+      - probes
+    verbs:
+      - get
+      - list
+      - watch
+  # Rules which allow eventhandler to work.
+  - apiGroups:
+      - ""
+    resources:
+      - events
+    verbs:
+      - get
+      - list
+      - watch
+  # needed for remote.kubernetes.*
+  - apiGroups: [""]
+    resources:
+      - "configmaps"
+      - "secrets"
+    verbs:
+      - get
+      - list
+      - watch
+  # needed for otelcol.processor.k8sattributes
+  - apiGroups: ["apps"]
+    resources: ["replicasets"]
+    verbs: ["get", "list", "watch"]
+  - apiGroups: ["extensions"]
+    resources: ["replicasets"]
+    verbs: ["get", "list", "watch"]
+---
+# Source: alloy/templates/rbac.yaml
+apiVersion: rbac.authorization.k8s.io/v1
+kind: ClusterRoleBinding
+metadata:
+  name: alloy
+  labels:
+    helm.sh/chart: alloy
+    app.kubernetes.io/name: alloy
+    app.kubernetes.io/instance: alloy
+    app.kubernetes.io/version: "vX.Y.Z"
+    app.kubernetes.io/managed-by: Helm
+roleRef:
+  apiGroup: rbac.authorization.k8s.io
+  kind: ClusterRole
+  name: alloy
+subjects:
+  - kind: ServiceAccount
+    name: alloy
+    namespace: default
diff --git a/operations/helm/tests/faro-ingress/grafana-agent/templates/service.yaml b/operations/helm/tests/faro-ingress/alloy/templates/service.yaml
similarity index 57%
rename from operations/helm/tests/faro-ingress/grafana-agent/templates/service.yaml
rename to operations/helm/tests/faro-ingress/alloy/templates/service.yaml
index 46474326df..18c045acff 100644
--- a/operations/helm/tests/faro-ingress/grafana-agent/templates/service.yaml
+++ b/operations/helm/tests/faro-ingress/alloy/templates/service.yaml
@@ -1,20 +1,20 @@
 ---
-# Source: grafana-agent/templates/service.yaml
+# Source: alloy/templates/service.yaml
 apiVersion: v1
 kind: Service
 metadata:
-  name: grafana-agent
+  name: alloy
   labels:
-    helm.sh/chart: grafana-agent
-    app.kubernetes.io/name: grafana-agent
-    app.kubernetes.io/instance: grafana-agent
+    helm.sh/chart: alloy
+    app.kubernetes.io/name: alloy
+    app.kubernetes.io/instance: alloy
     app.kubernetes.io/version: "vX.Y.Z"
     app.kubernetes.io/managed-by: Helm
 spec:
   type: ClusterIP
   selector:
-    app.kubernetes.io/name: grafana-agent
-    app.kubernetes.io/instance: grafana-agent
+    app.kubernetes.io/name: alloy
+    app.kubernetes.io/instance: alloy
   internalTrafficPolicy: Cluster
   ports:
     - name: http-metrics
diff --git a/operations/helm/tests/faro-ingress/alloy/templates/serviceaccount.yaml b/operations/helm/tests/faro-ingress/alloy/templates/serviceaccount.yaml
new file mode 100644
index 0000000000..46df991efb
--- /dev/null
+++ b/operations/helm/tests/faro-ingress/alloy/templates/serviceaccount.yaml
@@ -0,0 +1,13 @@
+---
+# Source: alloy/templates/serviceaccount.yaml
+apiVersion: v1
+kind: ServiceAccount
+metadata:
+  name: alloy
+  namespace: default
+  labels:
+    helm.sh/chart: alloy
+    app.kubernetes.io/name: alloy
+    app.kubernetes.io/instance: alloy
+    app.kubernetes.io/version: "vX.Y.Z"
+    app.kubernetes.io/managed-by: Helm
diff --git a/operations/helm/tests/faro-ingress/grafana-agent/templates/configmap.yaml b/operations/helm/tests/faro-ingress/grafana-agent/templates/configmap.yaml
deleted file mode 100644
index 2fdc6f0117..0000000000
--- a/operations/helm/tests/faro-ingress/grafana-agent/templates/configmap.yaml
+++ /dev/null
@@ -1,42 +0,0 @@
----
-# Source: grafana-agent/templates/configmap.yaml
-apiVersion: v1
-kind: ConfigMap
-metadata:
-  name: grafana-agent
-  labels:
-    helm.sh/chart: grafana-agent
-    app.kubernetes.io/name: grafana-agent
-    app.kubernetes.io/instance: grafana-agent
-    app.kubernetes.io/version: "vX.Y.Z"
-    app.kubernetes.io/managed-by: Helm
-data:
-  config.river: |-
-    logging {
-      level = "info"
-      format = "logfmt"
-    }
-
-    discovery.kubernetes "pods" {
-      role = "pod"
-    }
-
-    discovery.kubernetes "nodes" {
-      role = "node"
-    }
-
-    discovery.kubernetes "services" {
-      role = "service"
-    }
-
-    discovery.kubernetes "endpoints" {
-      role = "endpoints"
-    }
-
-    discovery.kubernetes "endpointslices" {
-      role = "endpointslice"
-    }
-
-    discovery.kubernetes "ingresses" {
-      role = "ingress"
-    }
diff --git a/operations/helm/tests/faro-ingress/grafana-agent/templates/rbac.yaml b/operations/helm/tests/faro-ingress/grafana-agent/templates/rbac.yaml
deleted file mode 100644
index 3765583fb6..0000000000
--- a/operations/helm/tests/faro-ingress/grafana-agent/templates/rbac.yaml
+++ /dev/null
@@ -1,117 +0,0 @@
----
-# Source: grafana-agent/templates/rbac.yaml
-apiVersion: rbac.authorization.k8s.io/v1
-kind: ClusterRole
-metadata:
-  name: grafana-agent
-  labels:
-    helm.sh/chart: grafana-agent
-    app.kubernetes.io/name: grafana-agent
-    app.kubernetes.io/instance: grafana-agent
-    app.kubernetes.io/version: "vX.Y.Z"
-    app.kubernetes.io/managed-by: Helm
-rules:
-  # Rules which allow discovery.kubernetes to function.
-  - apiGroups:
-      - ""
-      - "discovery.k8s.io"
-      - "networking.k8s.io"
-    resources:
-      - endpoints
-      - endpointslices
-      - ingresses
-      - nodes
-      - nodes/proxy
-      - nodes/metrics
-      - pods
-      - services
-    verbs:
-      - get
-      - list
-      - watch
-  # Rules which allow loki.source.kubernetes and loki.source.podlogs to work.
-  - apiGroups:
-      - ""
-    resources:
-      - pods
-      - pods/log
-      - namespaces
-    verbs:
-      - get
-      - list
-      - watch
-  - apiGroups:
-      - "monitoring.grafana.com"
-    resources:
-      - podlogs
-    verbs:
-      - get
-      - list
-      - watch
-  # Rules which allow mimir.rules.kubernetes to work.
-  - apiGroups: ["monitoring.coreos.com"]
-    resources:
-      - prometheusrules
-    verbs:
-      - get
-      - list
-      - watch
-  - nonResourceURLs:
-      - /metrics
-    verbs:
-      - get
-  # Rules for prometheus.kubernetes.*
-  - apiGroups: ["monitoring.coreos.com"]
-    resources:
-      - podmonitors
-      - servicemonitors
-      - probes
-    verbs:
-      - get
-      - list
-      - watch
-  # Rules which allow eventhandler to work.
-  - apiGroups:
-      - ""
-    resources:
-      - events
-    verbs:
-      - get
-      - list
-      - watch
-  # needed for remote.kubernetes.*
-  - apiGroups: [""]
-    resources:
-      - "configmaps"
-      - "secrets"
-    verbs:
-      - get
-      - list
-      - watch
-  # needed for otelcol.processor.k8sattributes
-  - apiGroups: ["apps"]
-    resources: ["replicasets"]
-    verbs: ["get", "list", "watch"]
-  - apiGroups: ["extensions"]
-    resources: ["replicasets"]
-    verbs: ["get", "list", "watch"]
----
-# Source: grafana-agent/templates/rbac.yaml
-apiVersion: rbac.authorization.k8s.io/v1
-kind: ClusterRoleBinding
-metadata:
-  name: grafana-agent
-  labels:
-    helm.sh/chart: grafana-agent
-    app.kubernetes.io/name: grafana-agent
-    app.kubernetes.io/instance: grafana-agent
-    app.kubernetes.io/version: "vX.Y.Z"
-    app.kubernetes.io/managed-by: Helm
-roleRef:
-  apiGroup: rbac.authorization.k8s.io
-  kind: ClusterRole
-  name: grafana-agent
-subjects:
-  - kind: ServiceAccount
-    name: grafana-agent
-    namespace: default
diff --git a/operations/helm/tests/faro-ingress/grafana-agent/templates/serviceaccount.yaml b/operations/helm/tests/faro-ingress/grafana-agent/templates/serviceaccount.yaml
deleted file mode 100644
index 65d7e0df38..0000000000
--- a/operations/helm/tests/faro-ingress/grafana-agent/templates/serviceaccount.yaml
+++ /dev/null
@@ -1,13 +0,0 @@
----
-# Source: grafana-agent/templates/serviceaccount.yaml
-apiVersion: v1
-kind: ServiceAccount
-metadata:
-  name: grafana-agent
-  namespace: default
-  labels:
-    helm.sh/chart: grafana-agent
-    app.kubernetes.io/name: grafana-agent
-    app.kubernetes.io/instance: grafana-agent
-    app.kubernetes.io/version: "vX.Y.Z"
-    app.kubernetes.io/managed-by: Helm
diff --git a/operations/helm/tests/global-image-pullsecrets/alloy/templates/configmap.yaml b/operations/helm/tests/global-image-pullsecrets/alloy/templates/configmap.yaml
new file mode 100644
index 0000000000..c9ae77aa1e
--- /dev/null
+++ b/operations/helm/tests/global-image-pullsecrets/alloy/templates/configmap.yaml
@@ -0,0 +1,42 @@
+---
+# Source: alloy/templates/configmap.yaml
+apiVersion: v1
+kind: ConfigMap
+metadata:
+  name: alloy
+  labels:
+    helm.sh/chart: alloy
+    app.kubernetes.io/name: alloy
+    app.kubernetes.io/instance: alloy
+    app.kubernetes.io/version: "vX.Y.Z"
+    app.kubernetes.io/managed-by: Helm
+data:
+  config.river: |-
+    logging {
+      level = "info"
+      format = "logfmt"
+    }
+
+    discovery.kubernetes "pods" {
+      role = "pod"
+    }
+
+    discovery.kubernetes "nodes" {
+      role = "node"
+    }
+
+    discovery.kubernetes "services" {
+      role = "service"
+    }
+
+    discovery.kubernetes "endpoints" {
+      role = "endpoints"
+    }
+
+    discovery.kubernetes "endpointslices" {
+      role = "endpointslice"
+    }
+
+    discovery.kubernetes "ingresses" {
+      role = "ingress"
+    }
diff --git a/operations/helm/tests/global-image-pullsecrets/grafana-agent/templates/controllers/daemonset.yaml b/operations/helm/tests/global-image-pullsecrets/alloy/templates/controllers/daemonset.yaml
similarity index 79%
rename from operations/helm/tests/global-image-pullsecrets/grafana-agent/templates/controllers/daemonset.yaml
rename to operations/helm/tests/global-image-pullsecrets/alloy/templates/controllers/daemonset.yaml
index bf6d4190b7..2dbc3aa049 100644
--- a/operations/helm/tests/global-image-pullsecrets/grafana-agent/templates/controllers/daemonset.yaml
+++ b/operations/helm/tests/global-image-pullsecrets/alloy/templates/controllers/daemonset.yaml
@@ -1,33 +1,33 @@
 ---
-# Source: grafana-agent/templates/controllers/daemonset.yaml
+# Source: alloy/templates/controllers/daemonset.yaml
 apiVersion: apps/v1
kind: DaemonSet metadata: - name: grafana-agent + name: alloy labels: - helm.sh/chart: grafana-agent - app.kubernetes.io/name: grafana-agent - app.kubernetes.io/instance: grafana-agent + helm.sh/chart: alloy + app.kubernetes.io/name: alloy + app.kubernetes.io/instance: alloy app.kubernetes.io/version: "vX.Y.Z" app.kubernetes.io/managed-by: Helm spec: minReadySeconds: 10 selector: matchLabels: - app.kubernetes.io/name: grafana-agent - app.kubernetes.io/instance: grafana-agent + app.kubernetes.io/name: alloy + app.kubernetes.io/instance: alloy template: metadata: annotations: kubectl.kubernetes.io/default-container: alloy labels: - app.kubernetes.io/name: grafana-agent - app.kubernetes.io/instance: grafana-agent + app.kubernetes.io/name: alloy + app.kubernetes.io/instance: alloy spec: securityContext: runAsGroup: 1000 runAsUser: 1000 - serviceAccountName: grafana-agent + serviceAccountName: alloy imagePullSecrets: - name: global-cred containers: @@ -76,4 +76,4 @@ spec: volumes: - name: config configMap: - name: grafana-agent + name: alloy diff --git a/operations/helm/tests/global-image-pullsecrets/alloy/templates/rbac.yaml b/operations/helm/tests/global-image-pullsecrets/alloy/templates/rbac.yaml new file mode 100644 index 0000000000..53ca27544f --- /dev/null +++ b/operations/helm/tests/global-image-pullsecrets/alloy/templates/rbac.yaml @@ -0,0 +1,117 @@ +--- +# Source: alloy/templates/rbac.yaml +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRole +metadata: + name: alloy + labels: + helm.sh/chart: alloy + app.kubernetes.io/name: alloy + app.kubernetes.io/instance: alloy + app.kubernetes.io/version: "vX.Y.Z" + app.kubernetes.io/managed-by: Helm +rules: + # Rules which allow discovery.kubernetes to function. + - apiGroups: + - "" + - "discovery.k8s.io" + - "networking.k8s.io" + resources: + - endpoints + - endpointslices + - ingresses + - nodes + - nodes/proxy + - nodes/metrics + - pods + - services + verbs: + - get + - list + - watch + # Rules which allow loki.source.kubernetes and loki.source.podlogs to work. + - apiGroups: + - "" + resources: + - pods + - pods/log + - namespaces + verbs: + - get + - list + - watch + - apiGroups: + - "monitoring.grafana.com" + resources: + - podlogs + verbs: + - get + - list + - watch + # Rules which allow mimir.rules.kubernetes to work. + - apiGroups: ["monitoring.coreos.com"] + resources: + - prometheusrules + verbs: + - get + - list + - watch + - nonResourceURLs: + - /metrics + verbs: + - get + # Rules for prometheus.kubernetes.* + - apiGroups: ["monitoring.coreos.com"] + resources: + - podmonitors + - servicemonitors + - probes + verbs: + - get + - list + - watch + # Rules which allow eventhandler to work. 
+ - apiGroups: + - "" + resources: + - events + verbs: + - get + - list + - watch + # needed for remote.kubernetes.* + - apiGroups: [""] + resources: + - "configmaps" + - "secrets" + verbs: + - get + - list + - watch + # needed for otelcol.processor.k8sattributes + - apiGroups: ["apps"] + resources: ["replicasets"] + verbs: ["get", "list", "watch"] + - apiGroups: ["extensions"] + resources: ["replicasets"] + verbs: ["get", "list", "watch"] +--- +# Source: alloy/templates/rbac.yaml +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRoleBinding +metadata: + name: alloy + labels: + helm.sh/chart: alloy + app.kubernetes.io/name: alloy + app.kubernetes.io/instance: alloy + app.kubernetes.io/version: "vX.Y.Z" + app.kubernetes.io/managed-by: Helm +roleRef: + apiGroup: rbac.authorization.k8s.io + kind: ClusterRole + name: alloy +subjects: + - kind: ServiceAccount + name: alloy + namespace: default diff --git a/operations/helm/tests/global-image-pullsecrets/alloy/templates/service.yaml b/operations/helm/tests/global-image-pullsecrets/alloy/templates/service.yaml new file mode 100644 index 0000000000..34c0608fd3 --- /dev/null +++ b/operations/helm/tests/global-image-pullsecrets/alloy/templates/service.yaml @@ -0,0 +1,23 @@ +--- +# Source: alloy/templates/service.yaml +apiVersion: v1 +kind: Service +metadata: + name: alloy + labels: + helm.sh/chart: alloy + app.kubernetes.io/name: alloy + app.kubernetes.io/instance: alloy + app.kubernetes.io/version: "vX.Y.Z" + app.kubernetes.io/managed-by: Helm +spec: + type: ClusterIP + selector: + app.kubernetes.io/name: alloy + app.kubernetes.io/instance: alloy + internalTrafficPolicy: Cluster + ports: + - name: http-metrics + port: 80 + targetPort: 80 + protocol: "TCP" diff --git a/operations/helm/tests/global-image-pullsecrets/alloy/templates/serviceaccount.yaml b/operations/helm/tests/global-image-pullsecrets/alloy/templates/serviceaccount.yaml new file mode 100644 index 0000000000..46df991efb --- /dev/null +++ b/operations/helm/tests/global-image-pullsecrets/alloy/templates/serviceaccount.yaml @@ -0,0 +1,13 @@ +--- +# Source: alloy/templates/serviceaccount.yaml +apiVersion: v1 +kind: ServiceAccount +metadata: + name: alloy + namespace: default + labels: + helm.sh/chart: alloy + app.kubernetes.io/name: alloy + app.kubernetes.io/instance: alloy + app.kubernetes.io/version: "vX.Y.Z" + app.kubernetes.io/managed-by: Helm diff --git a/operations/helm/tests/global-image-pullsecrets/grafana-agent/templates/configmap.yaml b/operations/helm/tests/global-image-pullsecrets/grafana-agent/templates/configmap.yaml deleted file mode 100644 index 2fdc6f0117..0000000000 --- a/operations/helm/tests/global-image-pullsecrets/grafana-agent/templates/configmap.yaml +++ /dev/null @@ -1,42 +0,0 @@ ---- -# Source: grafana-agent/templates/configmap.yaml -apiVersion: v1 -kind: ConfigMap -metadata: - name: grafana-agent - labels: - helm.sh/chart: grafana-agent - app.kubernetes.io/name: grafana-agent - app.kubernetes.io/instance: grafana-agent - app.kubernetes.io/version: "vX.Y.Z" - app.kubernetes.io/managed-by: Helm -data: - config.river: |- - logging { - level = "info" - format = "logfmt" - } - - discovery.kubernetes "pods" { - role = "pod" - } - - discovery.kubernetes "nodes" { - role = "node" - } - - discovery.kubernetes "services" { - role = "service" - } - - discovery.kubernetes "endpoints" { - role = "endpoints" - } - - discovery.kubernetes "endpointslices" { - role = "endpointslice" - } - - discovery.kubernetes "ingresses" { - role = "ingress" - } diff --git 
a/operations/helm/tests/global-image-pullsecrets/grafana-agent/templates/rbac.yaml b/operations/helm/tests/global-image-pullsecrets/grafana-agent/templates/rbac.yaml deleted file mode 100644 index 3765583fb6..0000000000 --- a/operations/helm/tests/global-image-pullsecrets/grafana-agent/templates/rbac.yaml +++ /dev/null @@ -1,117 +0,0 @@ ---- -# Source: grafana-agent/templates/rbac.yaml -apiVersion: rbac.authorization.k8s.io/v1 -kind: ClusterRole -metadata: - name: grafana-agent - labels: - helm.sh/chart: grafana-agent - app.kubernetes.io/name: grafana-agent - app.kubernetes.io/instance: grafana-agent - app.kubernetes.io/version: "vX.Y.Z" - app.kubernetes.io/managed-by: Helm -rules: - # Rules which allow discovery.kubernetes to function. - - apiGroups: - - "" - - "discovery.k8s.io" - - "networking.k8s.io" - resources: - - endpoints - - endpointslices - - ingresses - - nodes - - nodes/proxy - - nodes/metrics - - pods - - services - verbs: - - get - - list - - watch - # Rules which allow loki.source.kubernetes and loki.source.podlogs to work. - - apiGroups: - - "" - resources: - - pods - - pods/log - - namespaces - verbs: - - get - - list - - watch - - apiGroups: - - "monitoring.grafana.com" - resources: - - podlogs - verbs: - - get - - list - - watch - # Rules which allow mimir.rules.kubernetes to work. - - apiGroups: ["monitoring.coreos.com"] - resources: - - prometheusrules - verbs: - - get - - list - - watch - - nonResourceURLs: - - /metrics - verbs: - - get - # Rules for prometheus.kubernetes.* - - apiGroups: ["monitoring.coreos.com"] - resources: - - podmonitors - - servicemonitors - - probes - verbs: - - get - - list - - watch - # Rules which allow eventhandler to work. - - apiGroups: - - "" - resources: - - events - verbs: - - get - - list - - watch - # needed for remote.kubernetes.* - - apiGroups: [""] - resources: - - "configmaps" - - "secrets" - verbs: - - get - - list - - watch - # needed for otelcol.processor.k8sattributes - - apiGroups: ["apps"] - resources: ["replicasets"] - verbs: ["get", "list", "watch"] - - apiGroups: ["extensions"] - resources: ["replicasets"] - verbs: ["get", "list", "watch"] ---- -# Source: grafana-agent/templates/rbac.yaml -apiVersion: rbac.authorization.k8s.io/v1 -kind: ClusterRoleBinding -metadata: - name: grafana-agent - labels: - helm.sh/chart: grafana-agent - app.kubernetes.io/name: grafana-agent - app.kubernetes.io/instance: grafana-agent - app.kubernetes.io/version: "vX.Y.Z" - app.kubernetes.io/managed-by: Helm -roleRef: - apiGroup: rbac.authorization.k8s.io - kind: ClusterRole - name: grafana-agent -subjects: - - kind: ServiceAccount - name: grafana-agent - namespace: default diff --git a/operations/helm/tests/global-image-pullsecrets/grafana-agent/templates/service.yaml b/operations/helm/tests/global-image-pullsecrets/grafana-agent/templates/service.yaml deleted file mode 100644 index c98f79428b..0000000000 --- a/operations/helm/tests/global-image-pullsecrets/grafana-agent/templates/service.yaml +++ /dev/null @@ -1,23 +0,0 @@ ---- -# Source: grafana-agent/templates/service.yaml -apiVersion: v1 -kind: Service -metadata: - name: grafana-agent - labels: - helm.sh/chart: grafana-agent - app.kubernetes.io/name: grafana-agent - app.kubernetes.io/instance: grafana-agent - app.kubernetes.io/version: "vX.Y.Z" - app.kubernetes.io/managed-by: Helm -spec: - type: ClusterIP - selector: - app.kubernetes.io/name: grafana-agent - app.kubernetes.io/instance: grafana-agent - internalTrafficPolicy: Cluster - ports: - - name: http-metrics - port: 80 - targetPort: 
80 - protocol: "TCP" diff --git a/operations/helm/tests/global-image-pullsecrets/grafana-agent/templates/serviceaccount.yaml b/operations/helm/tests/global-image-pullsecrets/grafana-agent/templates/serviceaccount.yaml deleted file mode 100644 index 65d7e0df38..0000000000 --- a/operations/helm/tests/global-image-pullsecrets/grafana-agent/templates/serviceaccount.yaml +++ /dev/null @@ -1,13 +0,0 @@ ---- -# Source: grafana-agent/templates/serviceaccount.yaml -apiVersion: v1 -kind: ServiceAccount -metadata: - name: grafana-agent - namespace: default - labels: - helm.sh/chart: grafana-agent - app.kubernetes.io/name: grafana-agent - app.kubernetes.io/instance: grafana-agent - app.kubernetes.io/version: "vX.Y.Z" - app.kubernetes.io/managed-by: Helm diff --git a/operations/helm/tests/global-image-registry/alloy/templates/configmap.yaml b/operations/helm/tests/global-image-registry/alloy/templates/configmap.yaml new file mode 100644 index 0000000000..c9ae77aa1e --- /dev/null +++ b/operations/helm/tests/global-image-registry/alloy/templates/configmap.yaml @@ -0,0 +1,42 @@ +--- +# Source: alloy/templates/configmap.yaml +apiVersion: v1 +kind: ConfigMap +metadata: + name: alloy + labels: + helm.sh/chart: alloy + app.kubernetes.io/name: alloy + app.kubernetes.io/instance: alloy + app.kubernetes.io/version: "vX.Y.Z" + app.kubernetes.io/managed-by: Helm +data: + config.river: |- + logging { + level = "info" + format = "logfmt" + } + + discovery.kubernetes "pods" { + role = "pod" + } + + discovery.kubernetes "nodes" { + role = "node" + } + + discovery.kubernetes "services" { + role = "service" + } + + discovery.kubernetes "endpoints" { + role = "endpoints" + } + + discovery.kubernetes "endpointslices" { + role = "endpointslice" + } + + discovery.kubernetes "ingresses" { + role = "ingress" + } diff --git a/operations/helm/tests/global-image-registry/grafana-agent/templates/controllers/daemonset.yaml b/operations/helm/tests/global-image-registry/alloy/templates/controllers/daemonset.yaml similarity index 78% rename from operations/helm/tests/global-image-registry/grafana-agent/templates/controllers/daemonset.yaml rename to operations/helm/tests/global-image-registry/alloy/templates/controllers/daemonset.yaml index 2f3ed25026..c066fd7be5 100644 --- a/operations/helm/tests/global-image-registry/grafana-agent/templates/controllers/daemonset.yaml +++ b/operations/helm/tests/global-image-registry/alloy/templates/controllers/daemonset.yaml @@ -1,30 +1,30 @@ --- -# Source: grafana-agent/templates/controllers/daemonset.yaml +# Source: alloy/templates/controllers/daemonset.yaml apiVersion: apps/v1 kind: DaemonSet metadata: - name: grafana-agent + name: alloy labels: - helm.sh/chart: grafana-agent - app.kubernetes.io/name: grafana-agent - app.kubernetes.io/instance: grafana-agent + helm.sh/chart: alloy + app.kubernetes.io/name: alloy + app.kubernetes.io/instance: alloy app.kubernetes.io/version: "vX.Y.Z" app.kubernetes.io/managed-by: Helm spec: minReadySeconds: 10 selector: matchLabels: - app.kubernetes.io/name: grafana-agent - app.kubernetes.io/instance: grafana-agent + app.kubernetes.io/name: alloy + app.kubernetes.io/instance: alloy template: metadata: annotations: kubectl.kubernetes.io/default-container: alloy labels: - app.kubernetes.io/name: grafana-agent - app.kubernetes.io/instance: grafana-agent + app.kubernetes.io/name: alloy + app.kubernetes.io/instance: alloy spec: - serviceAccountName: grafana-agent + serviceAccountName: alloy containers: - name: alloy image: quay.io/grafana/alloy:v0.40.2 @@ -71,4 +71,4 
@@ spec: volumes: - name: config configMap: - name: grafana-agent + name: alloy diff --git a/operations/helm/tests/global-image-registry/alloy/templates/rbac.yaml b/operations/helm/tests/global-image-registry/alloy/templates/rbac.yaml new file mode 100644 index 0000000000..53ca27544f --- /dev/null +++ b/operations/helm/tests/global-image-registry/alloy/templates/rbac.yaml @@ -0,0 +1,117 @@ +--- +# Source: alloy/templates/rbac.yaml +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRole +metadata: + name: alloy + labels: + helm.sh/chart: alloy + app.kubernetes.io/name: alloy + app.kubernetes.io/instance: alloy + app.kubernetes.io/version: "vX.Y.Z" + app.kubernetes.io/managed-by: Helm +rules: + # Rules which allow discovery.kubernetes to function. + - apiGroups: + - "" + - "discovery.k8s.io" + - "networking.k8s.io" + resources: + - endpoints + - endpointslices + - ingresses + - nodes + - nodes/proxy + - nodes/metrics + - pods + - services + verbs: + - get + - list + - watch + # Rules which allow loki.source.kubernetes and loki.source.podlogs to work. + - apiGroups: + - "" + resources: + - pods + - pods/log + - namespaces + verbs: + - get + - list + - watch + - apiGroups: + - "monitoring.grafana.com" + resources: + - podlogs + verbs: + - get + - list + - watch + # Rules which allow mimir.rules.kubernetes to work. + - apiGroups: ["monitoring.coreos.com"] + resources: + - prometheusrules + verbs: + - get + - list + - watch + - nonResourceURLs: + - /metrics + verbs: + - get + # Rules for prometheus.kubernetes.* + - apiGroups: ["monitoring.coreos.com"] + resources: + - podmonitors + - servicemonitors + - probes + verbs: + - get + - list + - watch + # Rules which allow eventhandler to work. + - apiGroups: + - "" + resources: + - events + verbs: + - get + - list + - watch + # needed for remote.kubernetes.* + - apiGroups: [""] + resources: + - "configmaps" + - "secrets" + verbs: + - get + - list + - watch + # needed for otelcol.processor.k8sattributes + - apiGroups: ["apps"] + resources: ["replicasets"] + verbs: ["get", "list", "watch"] + - apiGroups: ["extensions"] + resources: ["replicasets"] + verbs: ["get", "list", "watch"] +--- +# Source: alloy/templates/rbac.yaml +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRoleBinding +metadata: + name: alloy + labels: + helm.sh/chart: alloy + app.kubernetes.io/name: alloy + app.kubernetes.io/instance: alloy + app.kubernetes.io/version: "vX.Y.Z" + app.kubernetes.io/managed-by: Helm +roleRef: + apiGroup: rbac.authorization.k8s.io + kind: ClusterRole + name: alloy +subjects: + - kind: ServiceAccount + name: alloy + namespace: default diff --git a/operations/helm/tests/global-image-registry/alloy/templates/service.yaml b/operations/helm/tests/global-image-registry/alloy/templates/service.yaml new file mode 100644 index 0000000000..34c0608fd3 --- /dev/null +++ b/operations/helm/tests/global-image-registry/alloy/templates/service.yaml @@ -0,0 +1,23 @@ +--- +# Source: alloy/templates/service.yaml +apiVersion: v1 +kind: Service +metadata: + name: alloy + labels: + helm.sh/chart: alloy + app.kubernetes.io/name: alloy + app.kubernetes.io/instance: alloy + app.kubernetes.io/version: "vX.Y.Z" + app.kubernetes.io/managed-by: Helm +spec: + type: ClusterIP + selector: + app.kubernetes.io/name: alloy + app.kubernetes.io/instance: alloy + internalTrafficPolicy: Cluster + ports: + - name: http-metrics + port: 80 + targetPort: 80 + protocol: "TCP" diff --git a/operations/helm/tests/global-image-registry/alloy/templates/serviceaccount.yaml 
b/operations/helm/tests/global-image-registry/alloy/templates/serviceaccount.yaml new file mode 100644 index 0000000000..46df991efb --- /dev/null +++ b/operations/helm/tests/global-image-registry/alloy/templates/serviceaccount.yaml @@ -0,0 +1,13 @@ +--- +# Source: alloy/templates/serviceaccount.yaml +apiVersion: v1 +kind: ServiceAccount +metadata: + name: alloy + namespace: default + labels: + helm.sh/chart: alloy + app.kubernetes.io/name: alloy + app.kubernetes.io/instance: alloy + app.kubernetes.io/version: "vX.Y.Z" + app.kubernetes.io/managed-by: Helm diff --git a/operations/helm/tests/global-image-registry/grafana-agent/templates/configmap.yaml b/operations/helm/tests/global-image-registry/grafana-agent/templates/configmap.yaml deleted file mode 100644 index 2fdc6f0117..0000000000 --- a/operations/helm/tests/global-image-registry/grafana-agent/templates/configmap.yaml +++ /dev/null @@ -1,42 +0,0 @@ ---- -# Source: grafana-agent/templates/configmap.yaml -apiVersion: v1 -kind: ConfigMap -metadata: - name: grafana-agent - labels: - helm.sh/chart: grafana-agent - app.kubernetes.io/name: grafana-agent - app.kubernetes.io/instance: grafana-agent - app.kubernetes.io/version: "vX.Y.Z" - app.kubernetes.io/managed-by: Helm -data: - config.river: |- - logging { - level = "info" - format = "logfmt" - } - - discovery.kubernetes "pods" { - role = "pod" - } - - discovery.kubernetes "nodes" { - role = "node" - } - - discovery.kubernetes "services" { - role = "service" - } - - discovery.kubernetes "endpoints" { - role = "endpoints" - } - - discovery.kubernetes "endpointslices" { - role = "endpointslice" - } - - discovery.kubernetes "ingresses" { - role = "ingress" - } diff --git a/operations/helm/tests/global-image-registry/grafana-agent/templates/rbac.yaml b/operations/helm/tests/global-image-registry/grafana-agent/templates/rbac.yaml deleted file mode 100644 index 3765583fb6..0000000000 --- a/operations/helm/tests/global-image-registry/grafana-agent/templates/rbac.yaml +++ /dev/null @@ -1,117 +0,0 @@ ---- -# Source: grafana-agent/templates/rbac.yaml -apiVersion: rbac.authorization.k8s.io/v1 -kind: ClusterRole -metadata: - name: grafana-agent - labels: - helm.sh/chart: grafana-agent - app.kubernetes.io/name: grafana-agent - app.kubernetes.io/instance: grafana-agent - app.kubernetes.io/version: "vX.Y.Z" - app.kubernetes.io/managed-by: Helm -rules: - # Rules which allow discovery.kubernetes to function. - - apiGroups: - - "" - - "discovery.k8s.io" - - "networking.k8s.io" - resources: - - endpoints - - endpointslices - - ingresses - - nodes - - nodes/proxy - - nodes/metrics - - pods - - services - verbs: - - get - - list - - watch - # Rules which allow loki.source.kubernetes and loki.source.podlogs to work. - - apiGroups: - - "" - resources: - - pods - - pods/log - - namespaces - verbs: - - get - - list - - watch - - apiGroups: - - "monitoring.grafana.com" - resources: - - podlogs - verbs: - - get - - list - - watch - # Rules which allow mimir.rules.kubernetes to work. - - apiGroups: ["monitoring.coreos.com"] - resources: - - prometheusrules - verbs: - - get - - list - - watch - - nonResourceURLs: - - /metrics - verbs: - - get - # Rules for prometheus.kubernetes.* - - apiGroups: ["monitoring.coreos.com"] - resources: - - podmonitors - - servicemonitors - - probes - verbs: - - get - - list - - watch - # Rules which allow eventhandler to work. 
- - apiGroups: - - "" - resources: - - events - verbs: - - get - - list - - watch - # needed for remote.kubernetes.* - - apiGroups: [""] - resources: - - "configmaps" - - "secrets" - verbs: - - get - - list - - watch - # needed for otelcol.processor.k8sattributes - - apiGroups: ["apps"] - resources: ["replicasets"] - verbs: ["get", "list", "watch"] - - apiGroups: ["extensions"] - resources: ["replicasets"] - verbs: ["get", "list", "watch"] ---- -# Source: grafana-agent/templates/rbac.yaml -apiVersion: rbac.authorization.k8s.io/v1 -kind: ClusterRoleBinding -metadata: - name: grafana-agent - labels: - helm.sh/chart: grafana-agent - app.kubernetes.io/name: grafana-agent - app.kubernetes.io/instance: grafana-agent - app.kubernetes.io/version: "vX.Y.Z" - app.kubernetes.io/managed-by: Helm -roleRef: - apiGroup: rbac.authorization.k8s.io - kind: ClusterRole - name: grafana-agent -subjects: - - kind: ServiceAccount - name: grafana-agent - namespace: default diff --git a/operations/helm/tests/global-image-registry/grafana-agent/templates/service.yaml b/operations/helm/tests/global-image-registry/grafana-agent/templates/service.yaml deleted file mode 100644 index c98f79428b..0000000000 --- a/operations/helm/tests/global-image-registry/grafana-agent/templates/service.yaml +++ /dev/null @@ -1,23 +0,0 @@ ---- -# Source: grafana-agent/templates/service.yaml -apiVersion: v1 -kind: Service -metadata: - name: grafana-agent - labels: - helm.sh/chart: grafana-agent - app.kubernetes.io/name: grafana-agent - app.kubernetes.io/instance: grafana-agent - app.kubernetes.io/version: "vX.Y.Z" - app.kubernetes.io/managed-by: Helm -spec: - type: ClusterIP - selector: - app.kubernetes.io/name: grafana-agent - app.kubernetes.io/instance: grafana-agent - internalTrafficPolicy: Cluster - ports: - - name: http-metrics - port: 80 - targetPort: 80 - protocol: "TCP" diff --git a/operations/helm/tests/global-image-registry/grafana-agent/templates/serviceaccount.yaml b/operations/helm/tests/global-image-registry/grafana-agent/templates/serviceaccount.yaml deleted file mode 100644 index 65d7e0df38..0000000000 --- a/operations/helm/tests/global-image-registry/grafana-agent/templates/serviceaccount.yaml +++ /dev/null @@ -1,13 +0,0 @@ ---- -# Source: grafana-agent/templates/serviceaccount.yaml -apiVersion: v1 -kind: ServiceAccount -metadata: - name: grafana-agent - namespace: default - labels: - helm.sh/chart: grafana-agent - app.kubernetes.io/name: grafana-agent - app.kubernetes.io/instance: grafana-agent - app.kubernetes.io/version: "vX.Y.Z" - app.kubernetes.io/managed-by: Helm diff --git a/operations/helm/tests/initcontainers/alloy/templates/configmap.yaml b/operations/helm/tests/initcontainers/alloy/templates/configmap.yaml new file mode 100644 index 0000000000..c9ae77aa1e --- /dev/null +++ b/operations/helm/tests/initcontainers/alloy/templates/configmap.yaml @@ -0,0 +1,42 @@ +--- +# Source: alloy/templates/configmap.yaml +apiVersion: v1 +kind: ConfigMap +metadata: + name: alloy + labels: + helm.sh/chart: alloy + app.kubernetes.io/name: alloy + app.kubernetes.io/instance: alloy + app.kubernetes.io/version: "vX.Y.Z" + app.kubernetes.io/managed-by: Helm +data: + config.river: |- + logging { + level = "info" + format = "logfmt" + } + + discovery.kubernetes "pods" { + role = "pod" + } + + discovery.kubernetes "nodes" { + role = "node" + } + + discovery.kubernetes "services" { + role = "service" + } + + discovery.kubernetes "endpoints" { + role = "endpoints" + } + + discovery.kubernetes "endpointslices" { + role = "endpointslice" + 
} + + discovery.kubernetes "ingresses" { + role = "ingress" + } diff --git a/operations/helm/tests/initcontainers/grafana-agent/templates/controllers/daemonset.yaml b/operations/helm/tests/initcontainers/alloy/templates/controllers/daemonset.yaml similarity index 83% rename from operations/helm/tests/initcontainers/grafana-agent/templates/controllers/daemonset.yaml rename to operations/helm/tests/initcontainers/alloy/templates/controllers/daemonset.yaml index a6e441ea11..69ac09525d 100644 --- a/operations/helm/tests/initcontainers/grafana-agent/templates/controllers/daemonset.yaml +++ b/operations/helm/tests/initcontainers/alloy/templates/controllers/daemonset.yaml @@ -1,30 +1,30 @@ --- -# Source: grafana-agent/templates/controllers/daemonset.yaml +# Source: alloy/templates/controllers/daemonset.yaml apiVersion: apps/v1 kind: DaemonSet metadata: - name: grafana-agent + name: alloy labels: - helm.sh/chart: grafana-agent - app.kubernetes.io/name: grafana-agent - app.kubernetes.io/instance: grafana-agent + helm.sh/chart: alloy + app.kubernetes.io/name: alloy + app.kubernetes.io/instance: alloy app.kubernetes.io/version: "vX.Y.Z" app.kubernetes.io/managed-by: Helm spec: minReadySeconds: 10 selector: matchLabels: - app.kubernetes.io/name: grafana-agent - app.kubernetes.io/instance: grafana-agent + app.kubernetes.io/name: alloy + app.kubernetes.io/instance: alloy template: metadata: annotations: kubectl.kubernetes.io/default-container: alloy labels: - app.kubernetes.io/name: grafana-agent - app.kubernetes.io/instance: grafana-agent + app.kubernetes.io/name: alloy + app.kubernetes.io/instance: alloy spec: - serviceAccountName: grafana-agent + serviceAccountName: alloy initContainers: - env: - name: GEOIPUPDATE_ACCOUNT_ID @@ -92,6 +92,6 @@ spec: volumes: - name: config configMap: - name: grafana-agent + name: alloy - mountPath: /etc/geoip name: geoip diff --git a/operations/helm/tests/initcontainers/alloy/templates/rbac.yaml b/operations/helm/tests/initcontainers/alloy/templates/rbac.yaml new file mode 100644 index 0000000000..53ca27544f --- /dev/null +++ b/operations/helm/tests/initcontainers/alloy/templates/rbac.yaml @@ -0,0 +1,117 @@ +--- +# Source: alloy/templates/rbac.yaml +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRole +metadata: + name: alloy + labels: + helm.sh/chart: alloy + app.kubernetes.io/name: alloy + app.kubernetes.io/instance: alloy + app.kubernetes.io/version: "vX.Y.Z" + app.kubernetes.io/managed-by: Helm +rules: + # Rules which allow discovery.kubernetes to function. + - apiGroups: + - "" + - "discovery.k8s.io" + - "networking.k8s.io" + resources: + - endpoints + - endpointslices + - ingresses + - nodes + - nodes/proxy + - nodes/metrics + - pods + - services + verbs: + - get + - list + - watch + # Rules which allow loki.source.kubernetes and loki.source.podlogs to work. + - apiGroups: + - "" + resources: + - pods + - pods/log + - namespaces + verbs: + - get + - list + - watch + - apiGroups: + - "monitoring.grafana.com" + resources: + - podlogs + verbs: + - get + - list + - watch + # Rules which allow mimir.rules.kubernetes to work. + - apiGroups: ["monitoring.coreos.com"] + resources: + - prometheusrules + verbs: + - get + - list + - watch + - nonResourceURLs: + - /metrics + verbs: + - get + # Rules for prometheus.kubernetes.* + - apiGroups: ["monitoring.coreos.com"] + resources: + - podmonitors + - servicemonitors + - probes + verbs: + - get + - list + - watch + # Rules which allow eventhandler to work. 
+ - apiGroups: + - "" + resources: + - events + verbs: + - get + - list + - watch + # needed for remote.kubernetes.* + - apiGroups: [""] + resources: + - "configmaps" + - "secrets" + verbs: + - get + - list + - watch + # needed for otelcol.processor.k8sattributes + - apiGroups: ["apps"] + resources: ["replicasets"] + verbs: ["get", "list", "watch"] + - apiGroups: ["extensions"] + resources: ["replicasets"] + verbs: ["get", "list", "watch"] +--- +# Source: alloy/templates/rbac.yaml +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRoleBinding +metadata: + name: alloy + labels: + helm.sh/chart: alloy + app.kubernetes.io/name: alloy + app.kubernetes.io/instance: alloy + app.kubernetes.io/version: "vX.Y.Z" + app.kubernetes.io/managed-by: Helm +roleRef: + apiGroup: rbac.authorization.k8s.io + kind: ClusterRole + name: alloy +subjects: + - kind: ServiceAccount + name: alloy + namespace: default diff --git a/operations/helm/tests/initcontainers/alloy/templates/service.yaml b/operations/helm/tests/initcontainers/alloy/templates/service.yaml new file mode 100644 index 0000000000..34c0608fd3 --- /dev/null +++ b/operations/helm/tests/initcontainers/alloy/templates/service.yaml @@ -0,0 +1,23 @@ +--- +# Source: alloy/templates/service.yaml +apiVersion: v1 +kind: Service +metadata: + name: alloy + labels: + helm.sh/chart: alloy + app.kubernetes.io/name: alloy + app.kubernetes.io/instance: alloy + app.kubernetes.io/version: "vX.Y.Z" + app.kubernetes.io/managed-by: Helm +spec: + type: ClusterIP + selector: + app.kubernetes.io/name: alloy + app.kubernetes.io/instance: alloy + internalTrafficPolicy: Cluster + ports: + - name: http-metrics + port: 80 + targetPort: 80 + protocol: "TCP" diff --git a/operations/helm/tests/initcontainers/alloy/templates/serviceaccount.yaml b/operations/helm/tests/initcontainers/alloy/templates/serviceaccount.yaml new file mode 100644 index 0000000000..46df991efb --- /dev/null +++ b/operations/helm/tests/initcontainers/alloy/templates/serviceaccount.yaml @@ -0,0 +1,13 @@ +--- +# Source: alloy/templates/serviceaccount.yaml +apiVersion: v1 +kind: ServiceAccount +metadata: + name: alloy + namespace: default + labels: + helm.sh/chart: alloy + app.kubernetes.io/name: alloy + app.kubernetes.io/instance: alloy + app.kubernetes.io/version: "vX.Y.Z" + app.kubernetes.io/managed-by: Helm diff --git a/operations/helm/tests/initcontainers/grafana-agent/templates/configmap.yaml b/operations/helm/tests/initcontainers/grafana-agent/templates/configmap.yaml deleted file mode 100644 index 2fdc6f0117..0000000000 --- a/operations/helm/tests/initcontainers/grafana-agent/templates/configmap.yaml +++ /dev/null @@ -1,42 +0,0 @@ ---- -# Source: grafana-agent/templates/configmap.yaml -apiVersion: v1 -kind: ConfigMap -metadata: - name: grafana-agent - labels: - helm.sh/chart: grafana-agent - app.kubernetes.io/name: grafana-agent - app.kubernetes.io/instance: grafana-agent - app.kubernetes.io/version: "vX.Y.Z" - app.kubernetes.io/managed-by: Helm -data: - config.river: |- - logging { - level = "info" - format = "logfmt" - } - - discovery.kubernetes "pods" { - role = "pod" - } - - discovery.kubernetes "nodes" { - role = "node" - } - - discovery.kubernetes "services" { - role = "service" - } - - discovery.kubernetes "endpoints" { - role = "endpoints" - } - - discovery.kubernetes "endpointslices" { - role = "endpointslice" - } - - discovery.kubernetes "ingresses" { - role = "ingress" - } diff --git a/operations/helm/tests/initcontainers/grafana-agent/templates/rbac.yaml 
b/operations/helm/tests/initcontainers/grafana-agent/templates/rbac.yaml deleted file mode 100644 index 3765583fb6..0000000000 --- a/operations/helm/tests/initcontainers/grafana-agent/templates/rbac.yaml +++ /dev/null @@ -1,117 +0,0 @@ ---- -# Source: grafana-agent/templates/rbac.yaml -apiVersion: rbac.authorization.k8s.io/v1 -kind: ClusterRole -metadata: - name: grafana-agent - labels: - helm.sh/chart: grafana-agent - app.kubernetes.io/name: grafana-agent - app.kubernetes.io/instance: grafana-agent - app.kubernetes.io/version: "vX.Y.Z" - app.kubernetes.io/managed-by: Helm -rules: - # Rules which allow discovery.kubernetes to function. - - apiGroups: - - "" - - "discovery.k8s.io" - - "networking.k8s.io" - resources: - - endpoints - - endpointslices - - ingresses - - nodes - - nodes/proxy - - nodes/metrics - - pods - - services - verbs: - - get - - list - - watch - # Rules which allow loki.source.kubernetes and loki.source.podlogs to work. - - apiGroups: - - "" - resources: - - pods - - pods/log - - namespaces - verbs: - - get - - list - - watch - - apiGroups: - - "monitoring.grafana.com" - resources: - - podlogs - verbs: - - get - - list - - watch - # Rules which allow mimir.rules.kubernetes to work. - - apiGroups: ["monitoring.coreos.com"] - resources: - - prometheusrules - verbs: - - get - - list - - watch - - nonResourceURLs: - - /metrics - verbs: - - get - # Rules for prometheus.kubernetes.* - - apiGroups: ["monitoring.coreos.com"] - resources: - - podmonitors - - servicemonitors - - probes - verbs: - - get - - list - - watch - # Rules which allow eventhandler to work. - - apiGroups: - - "" - resources: - - events - verbs: - - get - - list - - watch - # needed for remote.kubernetes.* - - apiGroups: [""] - resources: - - "configmaps" - - "secrets" - verbs: - - get - - list - - watch - # needed for otelcol.processor.k8sattributes - - apiGroups: ["apps"] - resources: ["replicasets"] - verbs: ["get", "list", "watch"] - - apiGroups: ["extensions"] - resources: ["replicasets"] - verbs: ["get", "list", "watch"] ---- -# Source: grafana-agent/templates/rbac.yaml -apiVersion: rbac.authorization.k8s.io/v1 -kind: ClusterRoleBinding -metadata: - name: grafana-agent - labels: - helm.sh/chart: grafana-agent - app.kubernetes.io/name: grafana-agent - app.kubernetes.io/instance: grafana-agent - app.kubernetes.io/version: "vX.Y.Z" - app.kubernetes.io/managed-by: Helm -roleRef: - apiGroup: rbac.authorization.k8s.io - kind: ClusterRole - name: grafana-agent -subjects: - - kind: ServiceAccount - name: grafana-agent - namespace: default diff --git a/operations/helm/tests/initcontainers/grafana-agent/templates/service.yaml b/operations/helm/tests/initcontainers/grafana-agent/templates/service.yaml deleted file mode 100644 index c98f79428b..0000000000 --- a/operations/helm/tests/initcontainers/grafana-agent/templates/service.yaml +++ /dev/null @@ -1,23 +0,0 @@ ---- -# Source: grafana-agent/templates/service.yaml -apiVersion: v1 -kind: Service -metadata: - name: grafana-agent - labels: - helm.sh/chart: grafana-agent - app.kubernetes.io/name: grafana-agent - app.kubernetes.io/instance: grafana-agent - app.kubernetes.io/version: "vX.Y.Z" - app.kubernetes.io/managed-by: Helm -spec: - type: ClusterIP - selector: - app.kubernetes.io/name: grafana-agent - app.kubernetes.io/instance: grafana-agent - internalTrafficPolicy: Cluster - ports: - - name: http-metrics - port: 80 - targetPort: 80 - protocol: "TCP" diff --git a/operations/helm/tests/initcontainers/grafana-agent/templates/serviceaccount.yaml 
b/operations/helm/tests/initcontainers/grafana-agent/templates/serviceaccount.yaml deleted file mode 100644 index 65d7e0df38..0000000000 --- a/operations/helm/tests/initcontainers/grafana-agent/templates/serviceaccount.yaml +++ /dev/null @@ -1,13 +0,0 @@ ---- -# Source: grafana-agent/templates/serviceaccount.yaml -apiVersion: v1 -kind: ServiceAccount -metadata: - name: grafana-agent - namespace: default - labels: - helm.sh/chart: grafana-agent - app.kubernetes.io/name: grafana-agent - app.kubernetes.io/instance: grafana-agent - app.kubernetes.io/version: "vX.Y.Z" - app.kubernetes.io/managed-by: Helm diff --git a/operations/helm/tests/local-image-pullsecrets/alloy/templates/configmap.yaml b/operations/helm/tests/local-image-pullsecrets/alloy/templates/configmap.yaml new file mode 100644 index 0000000000..c9ae77aa1e --- /dev/null +++ b/operations/helm/tests/local-image-pullsecrets/alloy/templates/configmap.yaml @@ -0,0 +1,42 @@ +--- +# Source: alloy/templates/configmap.yaml +apiVersion: v1 +kind: ConfigMap +metadata: + name: alloy + labels: + helm.sh/chart: alloy + app.kubernetes.io/name: alloy + app.kubernetes.io/instance: alloy + app.kubernetes.io/version: "vX.Y.Z" + app.kubernetes.io/managed-by: Helm +data: + config.river: |- + logging { + level = "info" + format = "logfmt" + } + + discovery.kubernetes "pods" { + role = "pod" + } + + discovery.kubernetes "nodes" { + role = "node" + } + + discovery.kubernetes "services" { + role = "service" + } + + discovery.kubernetes "endpoints" { + role = "endpoints" + } + + discovery.kubernetes "endpointslices" { + role = "endpointslice" + } + + discovery.kubernetes "ingresses" { + role = "ingress" + } diff --git a/operations/helm/tests/local-image-pullsecrets/grafana-agent/templates/controllers/daemonset.yaml b/operations/helm/tests/local-image-pullsecrets/alloy/templates/controllers/daemonset.yaml similarity index 78% rename from operations/helm/tests/local-image-pullsecrets/grafana-agent/templates/controllers/daemonset.yaml rename to operations/helm/tests/local-image-pullsecrets/alloy/templates/controllers/daemonset.yaml index fe2e40bf2a..83f29f0ce3 100644 --- a/operations/helm/tests/local-image-pullsecrets/grafana-agent/templates/controllers/daemonset.yaml +++ b/operations/helm/tests/local-image-pullsecrets/alloy/templates/controllers/daemonset.yaml @@ -1,30 +1,30 @@ --- -# Source: grafana-agent/templates/controllers/daemonset.yaml +# Source: alloy/templates/controllers/daemonset.yaml apiVersion: apps/v1 kind: DaemonSet metadata: - name: grafana-agent + name: alloy labels: - helm.sh/chart: grafana-agent - app.kubernetes.io/name: grafana-agent - app.kubernetes.io/instance: grafana-agent + helm.sh/chart: alloy + app.kubernetes.io/name: alloy + app.kubernetes.io/instance: alloy app.kubernetes.io/version: "vX.Y.Z" app.kubernetes.io/managed-by: Helm spec: minReadySeconds: 10 selector: matchLabels: - app.kubernetes.io/name: grafana-agent - app.kubernetes.io/instance: grafana-agent + app.kubernetes.io/name: alloy + app.kubernetes.io/instance: alloy template: metadata: annotations: kubectl.kubernetes.io/default-container: alloy labels: - app.kubernetes.io/name: grafana-agent - app.kubernetes.io/instance: grafana-agent + app.kubernetes.io/name: alloy + app.kubernetes.io/instance: alloy spec: - serviceAccountName: grafana-agent + serviceAccountName: alloy imagePullSecrets: - name: local-cred containers: @@ -73,4 +73,4 @@ spec: volumes: - name: config configMap: - name: grafana-agent + name: alloy diff --git 
a/operations/helm/tests/local-image-pullsecrets/alloy/templates/rbac.yaml b/operations/helm/tests/local-image-pullsecrets/alloy/templates/rbac.yaml new file mode 100644 index 0000000000..53ca27544f --- /dev/null +++ b/operations/helm/tests/local-image-pullsecrets/alloy/templates/rbac.yaml @@ -0,0 +1,117 @@ +--- +# Source: alloy/templates/rbac.yaml +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRole +metadata: + name: alloy + labels: + helm.sh/chart: alloy + app.kubernetes.io/name: alloy + app.kubernetes.io/instance: alloy + app.kubernetes.io/version: "vX.Y.Z" + app.kubernetes.io/managed-by: Helm +rules: + # Rules which allow discovery.kubernetes to function. + - apiGroups: + - "" + - "discovery.k8s.io" + - "networking.k8s.io" + resources: + - endpoints + - endpointslices + - ingresses + - nodes + - nodes/proxy + - nodes/metrics + - pods + - services + verbs: + - get + - list + - watch + # Rules which allow loki.source.kubernetes and loki.source.podlogs to work. + - apiGroups: + - "" + resources: + - pods + - pods/log + - namespaces + verbs: + - get + - list + - watch + - apiGroups: + - "monitoring.grafana.com" + resources: + - podlogs + verbs: + - get + - list + - watch + # Rules which allow mimir.rules.kubernetes to work. + - apiGroups: ["monitoring.coreos.com"] + resources: + - prometheusrules + verbs: + - get + - list + - watch + - nonResourceURLs: + - /metrics + verbs: + - get + # Rules for prometheus.kubernetes.* + - apiGroups: ["monitoring.coreos.com"] + resources: + - podmonitors + - servicemonitors + - probes + verbs: + - get + - list + - watch + # Rules which allow eventhandler to work. + - apiGroups: + - "" + resources: + - events + verbs: + - get + - list + - watch + # needed for remote.kubernetes.* + - apiGroups: [""] + resources: + - "configmaps" + - "secrets" + verbs: + - get + - list + - watch + # needed for otelcol.processor.k8sattributes + - apiGroups: ["apps"] + resources: ["replicasets"] + verbs: ["get", "list", "watch"] + - apiGroups: ["extensions"] + resources: ["replicasets"] + verbs: ["get", "list", "watch"] +--- +# Source: alloy/templates/rbac.yaml +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRoleBinding +metadata: + name: alloy + labels: + helm.sh/chart: alloy + app.kubernetes.io/name: alloy + app.kubernetes.io/instance: alloy + app.kubernetes.io/version: "vX.Y.Z" + app.kubernetes.io/managed-by: Helm +roleRef: + apiGroup: rbac.authorization.k8s.io + kind: ClusterRole + name: alloy +subjects: + - kind: ServiceAccount + name: alloy + namespace: default diff --git a/operations/helm/tests/local-image-pullsecrets/alloy/templates/service.yaml b/operations/helm/tests/local-image-pullsecrets/alloy/templates/service.yaml new file mode 100644 index 0000000000..34c0608fd3 --- /dev/null +++ b/operations/helm/tests/local-image-pullsecrets/alloy/templates/service.yaml @@ -0,0 +1,23 @@ +--- +# Source: alloy/templates/service.yaml +apiVersion: v1 +kind: Service +metadata: + name: alloy + labels: + helm.sh/chart: alloy + app.kubernetes.io/name: alloy + app.kubernetes.io/instance: alloy + app.kubernetes.io/version: "vX.Y.Z" + app.kubernetes.io/managed-by: Helm +spec: + type: ClusterIP + selector: + app.kubernetes.io/name: alloy + app.kubernetes.io/instance: alloy + internalTrafficPolicy: Cluster + ports: + - name: http-metrics + port: 80 + targetPort: 80 + protocol: "TCP" diff --git a/operations/helm/tests/local-image-pullsecrets/alloy/templates/serviceaccount.yaml b/operations/helm/tests/local-image-pullsecrets/alloy/templates/serviceaccount.yaml new file mode 
100644 index 0000000000..46df991efb --- /dev/null +++ b/operations/helm/tests/local-image-pullsecrets/alloy/templates/serviceaccount.yaml @@ -0,0 +1,13 @@ +--- +# Source: alloy/templates/serviceaccount.yaml +apiVersion: v1 +kind: ServiceAccount +metadata: + name: alloy + namespace: default + labels: + helm.sh/chart: alloy + app.kubernetes.io/name: alloy + app.kubernetes.io/instance: alloy + app.kubernetes.io/version: "vX.Y.Z" + app.kubernetes.io/managed-by: Helm diff --git a/operations/helm/tests/local-image-pullsecrets/grafana-agent/templates/configmap.yaml b/operations/helm/tests/local-image-pullsecrets/grafana-agent/templates/configmap.yaml deleted file mode 100644 index 2fdc6f0117..0000000000 --- a/operations/helm/tests/local-image-pullsecrets/grafana-agent/templates/configmap.yaml +++ /dev/null @@ -1,42 +0,0 @@ ---- -# Source: grafana-agent/templates/configmap.yaml -apiVersion: v1 -kind: ConfigMap -metadata: - name: grafana-agent - labels: - helm.sh/chart: grafana-agent - app.kubernetes.io/name: grafana-agent - app.kubernetes.io/instance: grafana-agent - app.kubernetes.io/version: "vX.Y.Z" - app.kubernetes.io/managed-by: Helm -data: - config.river: |- - logging { - level = "info" - format = "logfmt" - } - - discovery.kubernetes "pods" { - role = "pod" - } - - discovery.kubernetes "nodes" { - role = "node" - } - - discovery.kubernetes "services" { - role = "service" - } - - discovery.kubernetes "endpoints" { - role = "endpoints" - } - - discovery.kubernetes "endpointslices" { - role = "endpointslice" - } - - discovery.kubernetes "ingresses" { - role = "ingress" - } diff --git a/operations/helm/tests/local-image-pullsecrets/grafana-agent/templates/rbac.yaml b/operations/helm/tests/local-image-pullsecrets/grafana-agent/templates/rbac.yaml deleted file mode 100644 index 3765583fb6..0000000000 --- a/operations/helm/tests/local-image-pullsecrets/grafana-agent/templates/rbac.yaml +++ /dev/null @@ -1,117 +0,0 @@ ---- -# Source: grafana-agent/templates/rbac.yaml -apiVersion: rbac.authorization.k8s.io/v1 -kind: ClusterRole -metadata: - name: grafana-agent - labels: - helm.sh/chart: grafana-agent - app.kubernetes.io/name: grafana-agent - app.kubernetes.io/instance: grafana-agent - app.kubernetes.io/version: "vX.Y.Z" - app.kubernetes.io/managed-by: Helm -rules: - # Rules which allow discovery.kubernetes to function. - - apiGroups: - - "" - - "discovery.k8s.io" - - "networking.k8s.io" - resources: - - endpoints - - endpointslices - - ingresses - - nodes - - nodes/proxy - - nodes/metrics - - pods - - services - verbs: - - get - - list - - watch - # Rules which allow loki.source.kubernetes and loki.source.podlogs to work. - - apiGroups: - - "" - resources: - - pods - - pods/log - - namespaces - verbs: - - get - - list - - watch - - apiGroups: - - "monitoring.grafana.com" - resources: - - podlogs - verbs: - - get - - list - - watch - # Rules which allow mimir.rules.kubernetes to work. - - apiGroups: ["monitoring.coreos.com"] - resources: - - prometheusrules - verbs: - - get - - list - - watch - - nonResourceURLs: - - /metrics - verbs: - - get - # Rules for prometheus.kubernetes.* - - apiGroups: ["monitoring.coreos.com"] - resources: - - podmonitors - - servicemonitors - - probes - verbs: - - get - - list - - watch - # Rules which allow eventhandler to work. 
- - apiGroups: - - "" - resources: - - events - verbs: - - get - - list - - watch - # needed for remote.kubernetes.* - - apiGroups: [""] - resources: - - "configmaps" - - "secrets" - verbs: - - get - - list - - watch - # needed for otelcol.processor.k8sattributes - - apiGroups: ["apps"] - resources: ["replicasets"] - verbs: ["get", "list", "watch"] - - apiGroups: ["extensions"] - resources: ["replicasets"] - verbs: ["get", "list", "watch"] ---- -# Source: grafana-agent/templates/rbac.yaml -apiVersion: rbac.authorization.k8s.io/v1 -kind: ClusterRoleBinding -metadata: - name: grafana-agent - labels: - helm.sh/chart: grafana-agent - app.kubernetes.io/name: grafana-agent - app.kubernetes.io/instance: grafana-agent - app.kubernetes.io/version: "vX.Y.Z" - app.kubernetes.io/managed-by: Helm -roleRef: - apiGroup: rbac.authorization.k8s.io - kind: ClusterRole - name: grafana-agent -subjects: - - kind: ServiceAccount - name: grafana-agent - namespace: default diff --git a/operations/helm/tests/local-image-pullsecrets/grafana-agent/templates/service.yaml b/operations/helm/tests/local-image-pullsecrets/grafana-agent/templates/service.yaml deleted file mode 100644 index c98f79428b..0000000000 --- a/operations/helm/tests/local-image-pullsecrets/grafana-agent/templates/service.yaml +++ /dev/null @@ -1,23 +0,0 @@ ---- -# Source: grafana-agent/templates/service.yaml -apiVersion: v1 -kind: Service -metadata: - name: grafana-agent - labels: - helm.sh/chart: grafana-agent - app.kubernetes.io/name: grafana-agent - app.kubernetes.io/instance: grafana-agent - app.kubernetes.io/version: "vX.Y.Z" - app.kubernetes.io/managed-by: Helm -spec: - type: ClusterIP - selector: - app.kubernetes.io/name: grafana-agent - app.kubernetes.io/instance: grafana-agent - internalTrafficPolicy: Cluster - ports: - - name: http-metrics - port: 80 - targetPort: 80 - protocol: "TCP" diff --git a/operations/helm/tests/local-image-pullsecrets/grafana-agent/templates/serviceaccount.yaml b/operations/helm/tests/local-image-pullsecrets/grafana-agent/templates/serviceaccount.yaml deleted file mode 100644 index 65d7e0df38..0000000000 --- a/operations/helm/tests/local-image-pullsecrets/grafana-agent/templates/serviceaccount.yaml +++ /dev/null @@ -1,13 +0,0 @@ ---- -# Source: grafana-agent/templates/serviceaccount.yaml -apiVersion: v1 -kind: ServiceAccount -metadata: - name: grafana-agent - namespace: default - labels: - helm.sh/chart: grafana-agent - app.kubernetes.io/name: grafana-agent - app.kubernetes.io/instance: grafana-agent - app.kubernetes.io/version: "vX.Y.Z" - app.kubernetes.io/managed-by: Helm diff --git a/operations/helm/tests/local-image-registry/alloy/templates/configmap.yaml b/operations/helm/tests/local-image-registry/alloy/templates/configmap.yaml new file mode 100644 index 0000000000..c9ae77aa1e --- /dev/null +++ b/operations/helm/tests/local-image-registry/alloy/templates/configmap.yaml @@ -0,0 +1,42 @@ +--- +# Source: alloy/templates/configmap.yaml +apiVersion: v1 +kind: ConfigMap +metadata: + name: alloy + labels: + helm.sh/chart: alloy + app.kubernetes.io/name: alloy + app.kubernetes.io/instance: alloy + app.kubernetes.io/version: "vX.Y.Z" + app.kubernetes.io/managed-by: Helm +data: + config.river: |- + logging { + level = "info" + format = "logfmt" + } + + discovery.kubernetes "pods" { + role = "pod" + } + + discovery.kubernetes "nodes" { + role = "node" + } + + discovery.kubernetes "services" { + role = "service" + } + + discovery.kubernetes "endpoints" { + role = "endpoints" + } + + discovery.kubernetes "endpointslices" 
{ + role = "endpointslice" + } + + discovery.kubernetes "ingresses" { + role = "ingress" + } diff --git a/operations/helm/tests/local-image-registry/grafana-agent/templates/controllers/daemonset.yaml b/operations/helm/tests/local-image-registry/alloy/templates/controllers/daemonset.yaml similarity index 78% rename from operations/helm/tests/local-image-registry/grafana-agent/templates/controllers/daemonset.yaml rename to operations/helm/tests/local-image-registry/alloy/templates/controllers/daemonset.yaml index 2f3ed25026..c066fd7be5 100644 --- a/operations/helm/tests/local-image-registry/grafana-agent/templates/controllers/daemonset.yaml +++ b/operations/helm/tests/local-image-registry/alloy/templates/controllers/daemonset.yaml @@ -1,30 +1,30 @@ --- -# Source: grafana-agent/templates/controllers/daemonset.yaml +# Source: alloy/templates/controllers/daemonset.yaml apiVersion: apps/v1 kind: DaemonSet metadata: - name: grafana-agent + name: alloy labels: - helm.sh/chart: grafana-agent - app.kubernetes.io/name: grafana-agent - app.kubernetes.io/instance: grafana-agent + helm.sh/chart: alloy + app.kubernetes.io/name: alloy + app.kubernetes.io/instance: alloy app.kubernetes.io/version: "vX.Y.Z" app.kubernetes.io/managed-by: Helm spec: minReadySeconds: 10 selector: matchLabels: - app.kubernetes.io/name: grafana-agent - app.kubernetes.io/instance: grafana-agent + app.kubernetes.io/name: alloy + app.kubernetes.io/instance: alloy template: metadata: annotations: kubectl.kubernetes.io/default-container: alloy labels: - app.kubernetes.io/name: grafana-agent - app.kubernetes.io/instance: grafana-agent + app.kubernetes.io/name: alloy + app.kubernetes.io/instance: alloy spec: - serviceAccountName: grafana-agent + serviceAccountName: alloy containers: - name: alloy image: quay.io/grafana/alloy:v0.40.2 @@ -71,4 +71,4 @@ spec: volumes: - name: config configMap: - name: grafana-agent + name: alloy diff --git a/operations/helm/tests/local-image-registry/alloy/templates/rbac.yaml b/operations/helm/tests/local-image-registry/alloy/templates/rbac.yaml new file mode 100644 index 0000000000..53ca27544f --- /dev/null +++ b/operations/helm/tests/local-image-registry/alloy/templates/rbac.yaml @@ -0,0 +1,117 @@ +--- +# Source: alloy/templates/rbac.yaml +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRole +metadata: + name: alloy + labels: + helm.sh/chart: alloy + app.kubernetes.io/name: alloy + app.kubernetes.io/instance: alloy + app.kubernetes.io/version: "vX.Y.Z" + app.kubernetes.io/managed-by: Helm +rules: + # Rules which allow discovery.kubernetes to function. + - apiGroups: + - "" + - "discovery.k8s.io" + - "networking.k8s.io" + resources: + - endpoints + - endpointslices + - ingresses + - nodes + - nodes/proxy + - nodes/metrics + - pods + - services + verbs: + - get + - list + - watch + # Rules which allow loki.source.kubernetes and loki.source.podlogs to work. + - apiGroups: + - "" + resources: + - pods + - pods/log + - namespaces + verbs: + - get + - list + - watch + - apiGroups: + - "monitoring.grafana.com" + resources: + - podlogs + verbs: + - get + - list + - watch + # Rules which allow mimir.rules.kubernetes to work. + - apiGroups: ["monitoring.coreos.com"] + resources: + - prometheusrules + verbs: + - get + - list + - watch + - nonResourceURLs: + - /metrics + verbs: + - get + # Rules for prometheus.kubernetes.* + - apiGroups: ["monitoring.coreos.com"] + resources: + - podmonitors + - servicemonitors + - probes + verbs: + - get + - list + - watch + # Rules which allow eventhandler to work. 
+ - apiGroups: + - "" + resources: + - events + verbs: + - get + - list + - watch + # needed for remote.kubernetes.* + - apiGroups: [""] + resources: + - "configmaps" + - "secrets" + verbs: + - get + - list + - watch + # needed for otelcol.processor.k8sattributes + - apiGroups: ["apps"] + resources: ["replicasets"] + verbs: ["get", "list", "watch"] + - apiGroups: ["extensions"] + resources: ["replicasets"] + verbs: ["get", "list", "watch"] +--- +# Source: alloy/templates/rbac.yaml +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRoleBinding +metadata: + name: alloy + labels: + helm.sh/chart: alloy + app.kubernetes.io/name: alloy + app.kubernetes.io/instance: alloy + app.kubernetes.io/version: "vX.Y.Z" + app.kubernetes.io/managed-by: Helm +roleRef: + apiGroup: rbac.authorization.k8s.io + kind: ClusterRole + name: alloy +subjects: + - kind: ServiceAccount + name: alloy + namespace: default diff --git a/operations/helm/tests/local-image-registry/alloy/templates/service.yaml b/operations/helm/tests/local-image-registry/alloy/templates/service.yaml new file mode 100644 index 0000000000..34c0608fd3 --- /dev/null +++ b/operations/helm/tests/local-image-registry/alloy/templates/service.yaml @@ -0,0 +1,23 @@ +--- +# Source: alloy/templates/service.yaml +apiVersion: v1 +kind: Service +metadata: + name: alloy + labels: + helm.sh/chart: alloy + app.kubernetes.io/name: alloy + app.kubernetes.io/instance: alloy + app.kubernetes.io/version: "vX.Y.Z" + app.kubernetes.io/managed-by: Helm +spec: + type: ClusterIP + selector: + app.kubernetes.io/name: alloy + app.kubernetes.io/instance: alloy + internalTrafficPolicy: Cluster + ports: + - name: http-metrics + port: 80 + targetPort: 80 + protocol: "TCP" diff --git a/operations/helm/tests/local-image-registry/alloy/templates/serviceaccount.yaml b/operations/helm/tests/local-image-registry/alloy/templates/serviceaccount.yaml new file mode 100644 index 0000000000..46df991efb --- /dev/null +++ b/operations/helm/tests/local-image-registry/alloy/templates/serviceaccount.yaml @@ -0,0 +1,13 @@ +--- +# Source: alloy/templates/serviceaccount.yaml +apiVersion: v1 +kind: ServiceAccount +metadata: + name: alloy + namespace: default + labels: + helm.sh/chart: alloy + app.kubernetes.io/name: alloy + app.kubernetes.io/instance: alloy + app.kubernetes.io/version: "vX.Y.Z" + app.kubernetes.io/managed-by: Helm diff --git a/operations/helm/tests/local-image-registry/grafana-agent/templates/configmap.yaml b/operations/helm/tests/local-image-registry/grafana-agent/templates/configmap.yaml deleted file mode 100644 index 2fdc6f0117..0000000000 --- a/operations/helm/tests/local-image-registry/grafana-agent/templates/configmap.yaml +++ /dev/null @@ -1,42 +0,0 @@ ---- -# Source: grafana-agent/templates/configmap.yaml -apiVersion: v1 -kind: ConfigMap -metadata: - name: grafana-agent - labels: - helm.sh/chart: grafana-agent - app.kubernetes.io/name: grafana-agent - app.kubernetes.io/instance: grafana-agent - app.kubernetes.io/version: "vX.Y.Z" - app.kubernetes.io/managed-by: Helm -data: - config.river: |- - logging { - level = "info" - format = "logfmt" - } - - discovery.kubernetes "pods" { - role = "pod" - } - - discovery.kubernetes "nodes" { - role = "node" - } - - discovery.kubernetes "services" { - role = "service" - } - - discovery.kubernetes "endpoints" { - role = "endpoints" - } - - discovery.kubernetes "endpointslices" { - role = "endpointslice" - } - - discovery.kubernetes "ingresses" { - role = "ingress" - } diff --git 
a/operations/helm/tests/local-image-registry/grafana-agent/templates/rbac.yaml b/operations/helm/tests/local-image-registry/grafana-agent/templates/rbac.yaml deleted file mode 100644 index 3765583fb6..0000000000 --- a/operations/helm/tests/local-image-registry/grafana-agent/templates/rbac.yaml +++ /dev/null @@ -1,117 +0,0 @@ ---- -# Source: grafana-agent/templates/rbac.yaml -apiVersion: rbac.authorization.k8s.io/v1 -kind: ClusterRole -metadata: - name: grafana-agent - labels: - helm.sh/chart: grafana-agent - app.kubernetes.io/name: grafana-agent - app.kubernetes.io/instance: grafana-agent - app.kubernetes.io/version: "vX.Y.Z" - app.kubernetes.io/managed-by: Helm -rules: - # Rules which allow discovery.kubernetes to function. - - apiGroups: - - "" - - "discovery.k8s.io" - - "networking.k8s.io" - resources: - - endpoints - - endpointslices - - ingresses - - nodes - - nodes/proxy - - nodes/metrics - - pods - - services - verbs: - - get - - list - - watch - # Rules which allow loki.source.kubernetes and loki.source.podlogs to work. - - apiGroups: - - "" - resources: - - pods - - pods/log - - namespaces - verbs: - - get - - list - - watch - - apiGroups: - - "monitoring.grafana.com" - resources: - - podlogs - verbs: - - get - - list - - watch - # Rules which allow mimir.rules.kubernetes to work. - - apiGroups: ["monitoring.coreos.com"] - resources: - - prometheusrules - verbs: - - get - - list - - watch - - nonResourceURLs: - - /metrics - verbs: - - get - # Rules for prometheus.kubernetes.* - - apiGroups: ["monitoring.coreos.com"] - resources: - - podmonitors - - servicemonitors - - probes - verbs: - - get - - list - - watch - # Rules which allow eventhandler to work. - - apiGroups: - - "" - resources: - - events - verbs: - - get - - list - - watch - # needed for remote.kubernetes.* - - apiGroups: [""] - resources: - - "configmaps" - - "secrets" - verbs: - - get - - list - - watch - # needed for otelcol.processor.k8sattributes - - apiGroups: ["apps"] - resources: ["replicasets"] - verbs: ["get", "list", "watch"] - - apiGroups: ["extensions"] - resources: ["replicasets"] - verbs: ["get", "list", "watch"] ---- -# Source: grafana-agent/templates/rbac.yaml -apiVersion: rbac.authorization.k8s.io/v1 -kind: ClusterRoleBinding -metadata: - name: grafana-agent - labels: - helm.sh/chart: grafana-agent - app.kubernetes.io/name: grafana-agent - app.kubernetes.io/instance: grafana-agent - app.kubernetes.io/version: "vX.Y.Z" - app.kubernetes.io/managed-by: Helm -roleRef: - apiGroup: rbac.authorization.k8s.io - kind: ClusterRole - name: grafana-agent -subjects: - - kind: ServiceAccount - name: grafana-agent - namespace: default diff --git a/operations/helm/tests/local-image-registry/grafana-agent/templates/service.yaml b/operations/helm/tests/local-image-registry/grafana-agent/templates/service.yaml deleted file mode 100644 index c98f79428b..0000000000 --- a/operations/helm/tests/local-image-registry/grafana-agent/templates/service.yaml +++ /dev/null @@ -1,23 +0,0 @@ ---- -# Source: grafana-agent/templates/service.yaml -apiVersion: v1 -kind: Service -metadata: - name: grafana-agent - labels: - helm.sh/chart: grafana-agent - app.kubernetes.io/name: grafana-agent - app.kubernetes.io/instance: grafana-agent - app.kubernetes.io/version: "vX.Y.Z" - app.kubernetes.io/managed-by: Helm -spec: - type: ClusterIP - selector: - app.kubernetes.io/name: grafana-agent - app.kubernetes.io/instance: grafana-agent - internalTrafficPolicy: Cluster - ports: - - name: http-metrics - port: 80 - targetPort: 80 - protocol: "TCP" diff 
--git a/operations/helm/tests/local-image-registry/grafana-agent/templates/serviceaccount.yaml b/operations/helm/tests/local-image-registry/grafana-agent/templates/serviceaccount.yaml deleted file mode 100644 index 65d7e0df38..0000000000 --- a/operations/helm/tests/local-image-registry/grafana-agent/templates/serviceaccount.yaml +++ /dev/null @@ -1,13 +0,0 @@ ---- -# Source: grafana-agent/templates/serviceaccount.yaml -apiVersion: v1 -kind: ServiceAccount -metadata: - name: grafana-agent - namespace: default - labels: - helm.sh/chart: grafana-agent - app.kubernetes.io/name: grafana-agent - app.kubernetes.io/instance: grafana-agent - app.kubernetes.io/version: "vX.Y.Z" - app.kubernetes.io/managed-by: Helm diff --git a/operations/helm/tests/nodeselectors-and-tolerations/alloy/templates/configmap.yaml b/operations/helm/tests/nodeselectors-and-tolerations/alloy/templates/configmap.yaml new file mode 100644 index 0000000000..c9ae77aa1e --- /dev/null +++ b/operations/helm/tests/nodeselectors-and-tolerations/alloy/templates/configmap.yaml @@ -0,0 +1,42 @@ +--- +# Source: alloy/templates/configmap.yaml +apiVersion: v1 +kind: ConfigMap +metadata: + name: alloy + labels: + helm.sh/chart: alloy + app.kubernetes.io/name: alloy + app.kubernetes.io/instance: alloy + app.kubernetes.io/version: "vX.Y.Z" + app.kubernetes.io/managed-by: Helm +data: + config.river: |- + logging { + level = "info" + format = "logfmt" + } + + discovery.kubernetes "pods" { + role = "pod" + } + + discovery.kubernetes "nodes" { + role = "node" + } + + discovery.kubernetes "services" { + role = "service" + } + + discovery.kubernetes "endpoints" { + role = "endpoints" + } + + discovery.kubernetes "endpointslices" { + role = "endpointslice" + } + + discovery.kubernetes "ingresses" { + role = "ingress" + } diff --git a/operations/helm/tests/nodeselectors-and-tolerations/grafana-agent/templates/controllers/daemonset.yaml b/operations/helm/tests/nodeselectors-and-tolerations/alloy/templates/controllers/daemonset.yaml similarity index 80% rename from operations/helm/tests/nodeselectors-and-tolerations/grafana-agent/templates/controllers/daemonset.yaml rename to operations/helm/tests/nodeselectors-and-tolerations/alloy/templates/controllers/daemonset.yaml index 8fdfbd71c9..32b03746c8 100644 --- a/operations/helm/tests/nodeselectors-and-tolerations/grafana-agent/templates/controllers/daemonset.yaml +++ b/operations/helm/tests/nodeselectors-and-tolerations/alloy/templates/controllers/daemonset.yaml @@ -1,30 +1,30 @@ --- -# Source: grafana-agent/templates/controllers/daemonset.yaml +# Source: alloy/templates/controllers/daemonset.yaml apiVersion: apps/v1 kind: DaemonSet metadata: - name: grafana-agent + name: alloy labels: - helm.sh/chart: grafana-agent - app.kubernetes.io/name: grafana-agent - app.kubernetes.io/instance: grafana-agent + helm.sh/chart: alloy + app.kubernetes.io/name: alloy + app.kubernetes.io/instance: alloy app.kubernetes.io/version: "vX.Y.Z" app.kubernetes.io/managed-by: Helm spec: minReadySeconds: 10 selector: matchLabels: - app.kubernetes.io/name: grafana-agent - app.kubernetes.io/instance: grafana-agent + app.kubernetes.io/name: alloy + app.kubernetes.io/instance: alloy template: metadata: annotations: kubectl.kubernetes.io/default-container: alloy labels: - app.kubernetes.io/name: grafana-agent - app.kubernetes.io/instance: grafana-agent + app.kubernetes.io/name: alloy + app.kubernetes.io/instance: alloy spec: - serviceAccountName: grafana-agent + serviceAccountName: alloy containers: - name: alloy image: 
docker.io/grafana/alloy:v0.40.2 @@ -81,4 +81,4 @@ spec: volumes: - name: config configMap: - name: grafana-agent + name: alloy diff --git a/operations/helm/tests/nodeselectors-and-tolerations/alloy/templates/rbac.yaml b/operations/helm/tests/nodeselectors-and-tolerations/alloy/templates/rbac.yaml new file mode 100644 index 0000000000..53ca27544f --- /dev/null +++ b/operations/helm/tests/nodeselectors-and-tolerations/alloy/templates/rbac.yaml @@ -0,0 +1,117 @@ +--- +# Source: alloy/templates/rbac.yaml +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRole +metadata: + name: alloy + labels: + helm.sh/chart: alloy + app.kubernetes.io/name: alloy + app.kubernetes.io/instance: alloy + app.kubernetes.io/version: "vX.Y.Z" + app.kubernetes.io/managed-by: Helm +rules: + # Rules which allow discovery.kubernetes to function. + - apiGroups: + - "" + - "discovery.k8s.io" + - "networking.k8s.io" + resources: + - endpoints + - endpointslices + - ingresses + - nodes + - nodes/proxy + - nodes/metrics + - pods + - services + verbs: + - get + - list + - watch + # Rules which allow loki.source.kubernetes and loki.source.podlogs to work. + - apiGroups: + - "" + resources: + - pods + - pods/log + - namespaces + verbs: + - get + - list + - watch + - apiGroups: + - "monitoring.grafana.com" + resources: + - podlogs + verbs: + - get + - list + - watch + # Rules which allow mimir.rules.kubernetes to work. + - apiGroups: ["monitoring.coreos.com"] + resources: + - prometheusrules + verbs: + - get + - list + - watch + - nonResourceURLs: + - /metrics + verbs: + - get + # Rules for prometheus.kubernetes.* + - apiGroups: ["monitoring.coreos.com"] + resources: + - podmonitors + - servicemonitors + - probes + verbs: + - get + - list + - watch + # Rules which allow eventhandler to work. 
+ - apiGroups: + - "" + resources: + - events + verbs: + - get + - list + - watch + # needed for remote.kubernetes.* + - apiGroups: [""] + resources: + - "configmaps" + - "secrets" + verbs: + - get + - list + - watch + # needed for otelcol.processor.k8sattributes + - apiGroups: ["apps"] + resources: ["replicasets"] + verbs: ["get", "list", "watch"] + - apiGroups: ["extensions"] + resources: ["replicasets"] + verbs: ["get", "list", "watch"] +--- +# Source: alloy/templates/rbac.yaml +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRoleBinding +metadata: + name: alloy + labels: + helm.sh/chart: alloy + app.kubernetes.io/name: alloy + app.kubernetes.io/instance: alloy + app.kubernetes.io/version: "vX.Y.Z" + app.kubernetes.io/managed-by: Helm +roleRef: + apiGroup: rbac.authorization.k8s.io + kind: ClusterRole + name: alloy +subjects: + - kind: ServiceAccount + name: alloy + namespace: default diff --git a/operations/helm/tests/nodeselectors-and-tolerations/alloy/templates/service.yaml b/operations/helm/tests/nodeselectors-and-tolerations/alloy/templates/service.yaml new file mode 100644 index 0000000000..34c0608fd3 --- /dev/null +++ b/operations/helm/tests/nodeselectors-and-tolerations/alloy/templates/service.yaml @@ -0,0 +1,23 @@ +--- +# Source: alloy/templates/service.yaml +apiVersion: v1 +kind: Service +metadata: + name: alloy + labels: + helm.sh/chart: alloy + app.kubernetes.io/name: alloy + app.kubernetes.io/instance: alloy + app.kubernetes.io/version: "vX.Y.Z" + app.kubernetes.io/managed-by: Helm +spec: + type: ClusterIP + selector: + app.kubernetes.io/name: alloy + app.kubernetes.io/instance: alloy + internalTrafficPolicy: Cluster + ports: + - name: http-metrics + port: 80 + targetPort: 80 + protocol: "TCP" diff --git a/operations/helm/tests/nodeselectors-and-tolerations/alloy/templates/serviceaccount.yaml b/operations/helm/tests/nodeselectors-and-tolerations/alloy/templates/serviceaccount.yaml new file mode 100644 index 0000000000..46df991efb --- /dev/null +++ b/operations/helm/tests/nodeselectors-and-tolerations/alloy/templates/serviceaccount.yaml @@ -0,0 +1,13 @@ +--- +# Source: alloy/templates/serviceaccount.yaml +apiVersion: v1 +kind: ServiceAccount +metadata: + name: alloy + namespace: default + labels: + helm.sh/chart: alloy + app.kubernetes.io/name: alloy + app.kubernetes.io/instance: alloy + app.kubernetes.io/version: "vX.Y.Z" + app.kubernetes.io/managed-by: Helm diff --git a/operations/helm/tests/nodeselectors-and-tolerations/grafana-agent/templates/configmap.yaml b/operations/helm/tests/nodeselectors-and-tolerations/grafana-agent/templates/configmap.yaml deleted file mode 100644 index 2fdc6f0117..0000000000 --- a/operations/helm/tests/nodeselectors-and-tolerations/grafana-agent/templates/configmap.yaml +++ /dev/null @@ -1,42 +0,0 @@ ---- -# Source: grafana-agent/templates/configmap.yaml -apiVersion: v1 -kind: ConfigMap -metadata: - name: grafana-agent - labels: - helm.sh/chart: grafana-agent - app.kubernetes.io/name: grafana-agent - app.kubernetes.io/instance: grafana-agent - app.kubernetes.io/version: "vX.Y.Z" - app.kubernetes.io/managed-by: Helm -data: - config.river: |- - logging { - level = "info" - format = "logfmt" - } - - discovery.kubernetes "pods" { - role = "pod" - } - - discovery.kubernetes "nodes" { - role = "node" - } - - discovery.kubernetes "services" { - role = "service" - } - - discovery.kubernetes "endpoints" { - role = "endpoints" - } - - discovery.kubernetes "endpointslices" { - role = "endpointslice" - } - - discovery.kubernetes "ingresses" { - role 
= "ingress" - } diff --git a/operations/helm/tests/nodeselectors-and-tolerations/grafana-agent/templates/rbac.yaml b/operations/helm/tests/nodeselectors-and-tolerations/grafana-agent/templates/rbac.yaml deleted file mode 100644 index 3765583fb6..0000000000 --- a/operations/helm/tests/nodeselectors-and-tolerations/grafana-agent/templates/rbac.yaml +++ /dev/null @@ -1,117 +0,0 @@ ---- -# Source: grafana-agent/templates/rbac.yaml -apiVersion: rbac.authorization.k8s.io/v1 -kind: ClusterRole -metadata: - name: grafana-agent - labels: - helm.sh/chart: grafana-agent - app.kubernetes.io/name: grafana-agent - app.kubernetes.io/instance: grafana-agent - app.kubernetes.io/version: "vX.Y.Z" - app.kubernetes.io/managed-by: Helm -rules: - # Rules which allow discovery.kubernetes to function. - - apiGroups: - - "" - - "discovery.k8s.io" - - "networking.k8s.io" - resources: - - endpoints - - endpointslices - - ingresses - - nodes - - nodes/proxy - - nodes/metrics - - pods - - services - verbs: - - get - - list - - watch - # Rules which allow loki.source.kubernetes and loki.source.podlogs to work. - - apiGroups: - - "" - resources: - - pods - - pods/log - - namespaces - verbs: - - get - - list - - watch - - apiGroups: - - "monitoring.grafana.com" - resources: - - podlogs - verbs: - - get - - list - - watch - # Rules which allow mimir.rules.kubernetes to work. - - apiGroups: ["monitoring.coreos.com"] - resources: - - prometheusrules - verbs: - - get - - list - - watch - - nonResourceURLs: - - /metrics - verbs: - - get - # Rules for prometheus.kubernetes.* - - apiGroups: ["monitoring.coreos.com"] - resources: - - podmonitors - - servicemonitors - - probes - verbs: - - get - - list - - watch - # Rules which allow eventhandler to work. - - apiGroups: - - "" - resources: - - events - verbs: - - get - - list - - watch - # needed for remote.kubernetes.* - - apiGroups: [""] - resources: - - "configmaps" - - "secrets" - verbs: - - get - - list - - watch - # needed for otelcol.processor.k8sattributes - - apiGroups: ["apps"] - resources: ["replicasets"] - verbs: ["get", "list", "watch"] - - apiGroups: ["extensions"] - resources: ["replicasets"] - verbs: ["get", "list", "watch"] ---- -# Source: grafana-agent/templates/rbac.yaml -apiVersion: rbac.authorization.k8s.io/v1 -kind: ClusterRoleBinding -metadata: - name: grafana-agent - labels: - helm.sh/chart: grafana-agent - app.kubernetes.io/name: grafana-agent - app.kubernetes.io/instance: grafana-agent - app.kubernetes.io/version: "vX.Y.Z" - app.kubernetes.io/managed-by: Helm -roleRef: - apiGroup: rbac.authorization.k8s.io - kind: ClusterRole - name: grafana-agent -subjects: - - kind: ServiceAccount - name: grafana-agent - namespace: default diff --git a/operations/helm/tests/nodeselectors-and-tolerations/grafana-agent/templates/service.yaml b/operations/helm/tests/nodeselectors-and-tolerations/grafana-agent/templates/service.yaml deleted file mode 100644 index c98f79428b..0000000000 --- a/operations/helm/tests/nodeselectors-and-tolerations/grafana-agent/templates/service.yaml +++ /dev/null @@ -1,23 +0,0 @@ ---- -# Source: grafana-agent/templates/service.yaml -apiVersion: v1 -kind: Service -metadata: - name: grafana-agent - labels: - helm.sh/chart: grafana-agent - app.kubernetes.io/name: grafana-agent - app.kubernetes.io/instance: grafana-agent - app.kubernetes.io/version: "vX.Y.Z" - app.kubernetes.io/managed-by: Helm -spec: - type: ClusterIP - selector: - app.kubernetes.io/name: grafana-agent - app.kubernetes.io/instance: grafana-agent - internalTrafficPolicy: Cluster - 
ports: - - name: http-metrics - port: 80 - targetPort: 80 - protocol: "TCP" diff --git a/operations/helm/tests/nodeselectors-and-tolerations/grafana-agent/templates/serviceaccount.yaml b/operations/helm/tests/nodeselectors-and-tolerations/grafana-agent/templates/serviceaccount.yaml deleted file mode 100644 index 65d7e0df38..0000000000 --- a/operations/helm/tests/nodeselectors-and-tolerations/grafana-agent/templates/serviceaccount.yaml +++ /dev/null @@ -1,13 +0,0 @@ ---- -# Source: grafana-agent/templates/serviceaccount.yaml -apiVersion: v1 -kind: ServiceAccount -metadata: - name: grafana-agent - namespace: default - labels: - helm.sh/chart: grafana-agent - app.kubernetes.io/name: grafana-agent - app.kubernetes.io/instance: grafana-agent - app.kubernetes.io/version: "vX.Y.Z" - app.kubernetes.io/managed-by: Helm diff --git a/operations/helm/tests/pod_annotations/alloy/templates/configmap.yaml b/operations/helm/tests/pod_annotations/alloy/templates/configmap.yaml new file mode 100644 index 0000000000..c9ae77aa1e --- /dev/null +++ b/operations/helm/tests/pod_annotations/alloy/templates/configmap.yaml @@ -0,0 +1,42 @@ +--- +# Source: alloy/templates/configmap.yaml +apiVersion: v1 +kind: ConfigMap +metadata: + name: alloy + labels: + helm.sh/chart: alloy + app.kubernetes.io/name: alloy + app.kubernetes.io/instance: alloy + app.kubernetes.io/version: "vX.Y.Z" + app.kubernetes.io/managed-by: Helm +data: + config.river: |- + logging { + level = "info" + format = "logfmt" + } + + discovery.kubernetes "pods" { + role = "pod" + } + + discovery.kubernetes "nodes" { + role = "node" + } + + discovery.kubernetes "services" { + role = "service" + } + + discovery.kubernetes "endpoints" { + role = "endpoints" + } + + discovery.kubernetes "endpointslices" { + role = "endpointslice" + } + + discovery.kubernetes "ingresses" { + role = "ingress" + } diff --git a/operations/helm/tests/pod_annotations/grafana-agent/templates/controllers/daemonset.yaml b/operations/helm/tests/pod_annotations/alloy/templates/controllers/daemonset.yaml similarity index 78% rename from operations/helm/tests/pod_annotations/grafana-agent/templates/controllers/daemonset.yaml rename to operations/helm/tests/pod_annotations/alloy/templates/controllers/daemonset.yaml index 0cdcbbe73a..557600ede9 100644 --- a/operations/helm/tests/pod_annotations/grafana-agent/templates/controllers/daemonset.yaml +++ b/operations/helm/tests/pod_annotations/alloy/templates/controllers/daemonset.yaml @@ -1,31 +1,31 @@ --- -# Source: grafana-agent/templates/controllers/daemonset.yaml +# Source: alloy/templates/controllers/daemonset.yaml apiVersion: apps/v1 kind: DaemonSet metadata: - name: grafana-agent + name: alloy labels: - helm.sh/chart: grafana-agent - app.kubernetes.io/name: grafana-agent - app.kubernetes.io/instance: grafana-agent + helm.sh/chart: alloy + app.kubernetes.io/name: alloy + app.kubernetes.io/instance: alloy app.kubernetes.io/version: "vX.Y.Z" app.kubernetes.io/managed-by: Helm spec: minReadySeconds: 10 selector: matchLabels: - app.kubernetes.io/name: grafana-agent - app.kubernetes.io/instance: grafana-agent + app.kubernetes.io/name: alloy + app.kubernetes.io/instance: alloy template: metadata: annotations: kubectl.kubernetes.io/default-container: alloy testAnnotationKey: testAnnotationValue labels: - app.kubernetes.io/name: grafana-agent - app.kubernetes.io/instance: grafana-agent + app.kubernetes.io/name: alloy + app.kubernetes.io/instance: alloy spec: - serviceAccountName: grafana-agent + serviceAccountName: alloy containers: - name: 
alloy image: docker.io/grafana/alloy:v0.40.2 @@ -72,4 +72,4 @@ spec: volumes: - name: config configMap: - name: grafana-agent + name: alloy diff --git a/operations/helm/tests/pod_annotations/alloy/templates/rbac.yaml b/operations/helm/tests/pod_annotations/alloy/templates/rbac.yaml new file mode 100644 index 0000000000..53ca27544f --- /dev/null +++ b/operations/helm/tests/pod_annotations/alloy/templates/rbac.yaml @@ -0,0 +1,117 @@ +--- +# Source: alloy/templates/rbac.yaml +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRole +metadata: + name: alloy + labels: + helm.sh/chart: alloy + app.kubernetes.io/name: alloy + app.kubernetes.io/instance: alloy + app.kubernetes.io/version: "vX.Y.Z" + app.kubernetes.io/managed-by: Helm +rules: + # Rules which allow discovery.kubernetes to function. + - apiGroups: + - "" + - "discovery.k8s.io" + - "networking.k8s.io" + resources: + - endpoints + - endpointslices + - ingresses + - nodes + - nodes/proxy + - nodes/metrics + - pods + - services + verbs: + - get + - list + - watch + # Rules which allow loki.source.kubernetes and loki.source.podlogs to work. + - apiGroups: + - "" + resources: + - pods + - pods/log + - namespaces + verbs: + - get + - list + - watch + - apiGroups: + - "monitoring.grafana.com" + resources: + - podlogs + verbs: + - get + - list + - watch + # Rules which allow mimir.rules.kubernetes to work. + - apiGroups: ["monitoring.coreos.com"] + resources: + - prometheusrules + verbs: + - get + - list + - watch + - nonResourceURLs: + - /metrics + verbs: + - get + # Rules for prometheus.kubernetes.* + - apiGroups: ["monitoring.coreos.com"] + resources: + - podmonitors + - servicemonitors + - probes + verbs: + - get + - list + - watch + # Rules which allow eventhandler to work. + - apiGroups: + - "" + resources: + - events + verbs: + - get + - list + - watch + # needed for remote.kubernetes.* + - apiGroups: [""] + resources: + - "configmaps" + - "secrets" + verbs: + - get + - list + - watch + # needed for otelcol.processor.k8sattributes + - apiGroups: ["apps"] + resources: ["replicasets"] + verbs: ["get", "list", "watch"] + - apiGroups: ["extensions"] + resources: ["replicasets"] + verbs: ["get", "list", "watch"] +--- +# Source: alloy/templates/rbac.yaml +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRoleBinding +metadata: + name: alloy + labels: + helm.sh/chart: alloy + app.kubernetes.io/name: alloy + app.kubernetes.io/instance: alloy + app.kubernetes.io/version: "vX.Y.Z" + app.kubernetes.io/managed-by: Helm +roleRef: + apiGroup: rbac.authorization.k8s.io + kind: ClusterRole + name: alloy +subjects: + - kind: ServiceAccount + name: alloy + namespace: default diff --git a/operations/helm/tests/pod_annotations/alloy/templates/service.yaml b/operations/helm/tests/pod_annotations/alloy/templates/service.yaml new file mode 100644 index 0000000000..34c0608fd3 --- /dev/null +++ b/operations/helm/tests/pod_annotations/alloy/templates/service.yaml @@ -0,0 +1,23 @@ +--- +# Source: alloy/templates/service.yaml +apiVersion: v1 +kind: Service +metadata: + name: alloy + labels: + helm.sh/chart: alloy + app.kubernetes.io/name: alloy + app.kubernetes.io/instance: alloy + app.kubernetes.io/version: "vX.Y.Z" + app.kubernetes.io/managed-by: Helm +spec: + type: ClusterIP + selector: + app.kubernetes.io/name: alloy + app.kubernetes.io/instance: alloy + internalTrafficPolicy: Cluster + ports: + - name: http-metrics + port: 80 + targetPort: 80 + protocol: "TCP" diff --git a/operations/helm/tests/pod_annotations/alloy/templates/serviceaccount.yaml 
b/operations/helm/tests/pod_annotations/alloy/templates/serviceaccount.yaml new file mode 100644 index 0000000000..46df991efb --- /dev/null +++ b/operations/helm/tests/pod_annotations/alloy/templates/serviceaccount.yaml @@ -0,0 +1,13 @@ +--- +# Source: alloy/templates/serviceaccount.yaml +apiVersion: v1 +kind: ServiceAccount +metadata: + name: alloy + namespace: default + labels: + helm.sh/chart: alloy + app.kubernetes.io/name: alloy + app.kubernetes.io/instance: alloy + app.kubernetes.io/version: "vX.Y.Z" + app.kubernetes.io/managed-by: Helm diff --git a/operations/helm/tests/pod_annotations/grafana-agent/templates/configmap.yaml b/operations/helm/tests/pod_annotations/grafana-agent/templates/configmap.yaml deleted file mode 100644 index 2fdc6f0117..0000000000 --- a/operations/helm/tests/pod_annotations/grafana-agent/templates/configmap.yaml +++ /dev/null @@ -1,42 +0,0 @@ ---- -# Source: grafana-agent/templates/configmap.yaml -apiVersion: v1 -kind: ConfigMap -metadata: - name: grafana-agent - labels: - helm.sh/chart: grafana-agent - app.kubernetes.io/name: grafana-agent - app.kubernetes.io/instance: grafana-agent - app.kubernetes.io/version: "vX.Y.Z" - app.kubernetes.io/managed-by: Helm -data: - config.river: |- - logging { - level = "info" - format = "logfmt" - } - - discovery.kubernetes "pods" { - role = "pod" - } - - discovery.kubernetes "nodes" { - role = "node" - } - - discovery.kubernetes "services" { - role = "service" - } - - discovery.kubernetes "endpoints" { - role = "endpoints" - } - - discovery.kubernetes "endpointslices" { - role = "endpointslice" - } - - discovery.kubernetes "ingresses" { - role = "ingress" - } diff --git a/operations/helm/tests/pod_annotations/grafana-agent/templates/rbac.yaml b/operations/helm/tests/pod_annotations/grafana-agent/templates/rbac.yaml deleted file mode 100644 index 3765583fb6..0000000000 --- a/operations/helm/tests/pod_annotations/grafana-agent/templates/rbac.yaml +++ /dev/null @@ -1,117 +0,0 @@ ---- -# Source: grafana-agent/templates/rbac.yaml -apiVersion: rbac.authorization.k8s.io/v1 -kind: ClusterRole -metadata: - name: grafana-agent - labels: - helm.sh/chart: grafana-agent - app.kubernetes.io/name: grafana-agent - app.kubernetes.io/instance: grafana-agent - app.kubernetes.io/version: "vX.Y.Z" - app.kubernetes.io/managed-by: Helm -rules: - # Rules which allow discovery.kubernetes to function. - - apiGroups: - - "" - - "discovery.k8s.io" - - "networking.k8s.io" - resources: - - endpoints - - endpointslices - - ingresses - - nodes - - nodes/proxy - - nodes/metrics - - pods - - services - verbs: - - get - - list - - watch - # Rules which allow loki.source.kubernetes and loki.source.podlogs to work. - - apiGroups: - - "" - resources: - - pods - - pods/log - - namespaces - verbs: - - get - - list - - watch - - apiGroups: - - "monitoring.grafana.com" - resources: - - podlogs - verbs: - - get - - list - - watch - # Rules which allow mimir.rules.kubernetes to work. - - apiGroups: ["monitoring.coreos.com"] - resources: - - prometheusrules - verbs: - - get - - list - - watch - - nonResourceURLs: - - /metrics - verbs: - - get - # Rules for prometheus.kubernetes.* - - apiGroups: ["monitoring.coreos.com"] - resources: - - podmonitors - - servicemonitors - - probes - verbs: - - get - - list - - watch - # Rules which allow eventhandler to work. 
- - apiGroups: - - "" - resources: - - events - verbs: - - get - - list - - watch - # needed for remote.kubernetes.* - - apiGroups: [""] - resources: - - "configmaps" - - "secrets" - verbs: - - get - - list - - watch - # needed for otelcol.processor.k8sattributes - - apiGroups: ["apps"] - resources: ["replicasets"] - verbs: ["get", "list", "watch"] - - apiGroups: ["extensions"] - resources: ["replicasets"] - verbs: ["get", "list", "watch"] ---- -# Source: grafana-agent/templates/rbac.yaml -apiVersion: rbac.authorization.k8s.io/v1 -kind: ClusterRoleBinding -metadata: - name: grafana-agent - labels: - helm.sh/chart: grafana-agent - app.kubernetes.io/name: grafana-agent - app.kubernetes.io/instance: grafana-agent - app.kubernetes.io/version: "vX.Y.Z" - app.kubernetes.io/managed-by: Helm -roleRef: - apiGroup: rbac.authorization.k8s.io - kind: ClusterRole - name: grafana-agent -subjects: - - kind: ServiceAccount - name: grafana-agent - namespace: default diff --git a/operations/helm/tests/pod_annotations/grafana-agent/templates/service.yaml b/operations/helm/tests/pod_annotations/grafana-agent/templates/service.yaml deleted file mode 100644 index c98f79428b..0000000000 --- a/operations/helm/tests/pod_annotations/grafana-agent/templates/service.yaml +++ /dev/null @@ -1,23 +0,0 @@ ---- -# Source: grafana-agent/templates/service.yaml -apiVersion: v1 -kind: Service -metadata: - name: grafana-agent - labels: - helm.sh/chart: grafana-agent - app.kubernetes.io/name: grafana-agent - app.kubernetes.io/instance: grafana-agent - app.kubernetes.io/version: "vX.Y.Z" - app.kubernetes.io/managed-by: Helm -spec: - type: ClusterIP - selector: - app.kubernetes.io/name: grafana-agent - app.kubernetes.io/instance: grafana-agent - internalTrafficPolicy: Cluster - ports: - - name: http-metrics - port: 80 - targetPort: 80 - protocol: "TCP" diff --git a/operations/helm/tests/pod_annotations/grafana-agent/templates/serviceaccount.yaml b/operations/helm/tests/pod_annotations/grafana-agent/templates/serviceaccount.yaml deleted file mode 100644 index 65d7e0df38..0000000000 --- a/operations/helm/tests/pod_annotations/grafana-agent/templates/serviceaccount.yaml +++ /dev/null @@ -1,13 +0,0 @@ ---- -# Source: grafana-agent/templates/serviceaccount.yaml -apiVersion: v1 -kind: ServiceAccount -metadata: - name: grafana-agent - namespace: default - labels: - helm.sh/chart: grafana-agent - app.kubernetes.io/name: grafana-agent - app.kubernetes.io/instance: grafana-agent - app.kubernetes.io/version: "vX.Y.Z" - app.kubernetes.io/managed-by: Helm diff --git a/operations/helm/tests/sidecars/alloy/templates/configmap.yaml b/operations/helm/tests/sidecars/alloy/templates/configmap.yaml new file mode 100644 index 0000000000..c9ae77aa1e --- /dev/null +++ b/operations/helm/tests/sidecars/alloy/templates/configmap.yaml @@ -0,0 +1,42 @@ +--- +# Source: alloy/templates/configmap.yaml +apiVersion: v1 +kind: ConfigMap +metadata: + name: alloy + labels: + helm.sh/chart: alloy + app.kubernetes.io/name: alloy + app.kubernetes.io/instance: alloy + app.kubernetes.io/version: "vX.Y.Z" + app.kubernetes.io/managed-by: Helm +data: + config.river: |- + logging { + level = "info" + format = "logfmt" + } + + discovery.kubernetes "pods" { + role = "pod" + } + + discovery.kubernetes "nodes" { + role = "node" + } + + discovery.kubernetes "services" { + role = "service" + } + + discovery.kubernetes "endpoints" { + role = "endpoints" + } + + discovery.kubernetes "endpointslices" { + role = "endpointslice" + } + + discovery.kubernetes "ingresses" { + role = 
"ingress" + } diff --git a/operations/helm/tests/sidecars/grafana-agent/templates/controllers/daemonset.yaml b/operations/helm/tests/sidecars/alloy/templates/controllers/daemonset.yaml similarity index 83% rename from operations/helm/tests/sidecars/grafana-agent/templates/controllers/daemonset.yaml rename to operations/helm/tests/sidecars/alloy/templates/controllers/daemonset.yaml index 410fa04519..6f21c3078b 100644 --- a/operations/helm/tests/sidecars/grafana-agent/templates/controllers/daemonset.yaml +++ b/operations/helm/tests/sidecars/alloy/templates/controllers/daemonset.yaml @@ -1,30 +1,30 @@ --- -# Source: grafana-agent/templates/controllers/daemonset.yaml +# Source: alloy/templates/controllers/daemonset.yaml apiVersion: apps/v1 kind: DaemonSet metadata: - name: grafana-agent + name: alloy labels: - helm.sh/chart: grafana-agent - app.kubernetes.io/name: grafana-agent - app.kubernetes.io/instance: grafana-agent + helm.sh/chart: alloy + app.kubernetes.io/name: alloy + app.kubernetes.io/instance: alloy app.kubernetes.io/version: "vX.Y.Z" app.kubernetes.io/managed-by: Helm spec: minReadySeconds: 10 selector: matchLabels: - app.kubernetes.io/name: grafana-agent - app.kubernetes.io/instance: grafana-agent + app.kubernetes.io/name: alloy + app.kubernetes.io/instance: alloy template: metadata: annotations: kubectl.kubernetes.io/default-container: alloy labels: - app.kubernetes.io/name: grafana-agent - app.kubernetes.io/instance: grafana-agent + app.kubernetes.io/name: alloy + app.kubernetes.io/instance: alloy spec: - serviceAccountName: grafana-agent + serviceAccountName: alloy containers: - name: alloy image: docker.io/grafana/alloy:v0.40.2 @@ -91,6 +91,6 @@ spec: volumes: - name: config configMap: - name: grafana-agent + name: alloy - mountPath: /etc/geoip name: geoip diff --git a/operations/helm/tests/sidecars/alloy/templates/rbac.yaml b/operations/helm/tests/sidecars/alloy/templates/rbac.yaml new file mode 100644 index 0000000000..53ca27544f --- /dev/null +++ b/operations/helm/tests/sidecars/alloy/templates/rbac.yaml @@ -0,0 +1,117 @@ +--- +# Source: alloy/templates/rbac.yaml +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRole +metadata: + name: alloy + labels: + helm.sh/chart: alloy + app.kubernetes.io/name: alloy + app.kubernetes.io/instance: alloy + app.kubernetes.io/version: "vX.Y.Z" + app.kubernetes.io/managed-by: Helm +rules: + # Rules which allow discovery.kubernetes to function. + - apiGroups: + - "" + - "discovery.k8s.io" + - "networking.k8s.io" + resources: + - endpoints + - endpointslices + - ingresses + - nodes + - nodes/proxy + - nodes/metrics + - pods + - services + verbs: + - get + - list + - watch + # Rules which allow loki.source.kubernetes and loki.source.podlogs to work. + - apiGroups: + - "" + resources: + - pods + - pods/log + - namespaces + verbs: + - get + - list + - watch + - apiGroups: + - "monitoring.grafana.com" + resources: + - podlogs + verbs: + - get + - list + - watch + # Rules which allow mimir.rules.kubernetes to work. + - apiGroups: ["monitoring.coreos.com"] + resources: + - prometheusrules + verbs: + - get + - list + - watch + - nonResourceURLs: + - /metrics + verbs: + - get + # Rules for prometheus.kubernetes.* + - apiGroups: ["monitoring.coreos.com"] + resources: + - podmonitors + - servicemonitors + - probes + verbs: + - get + - list + - watch + # Rules which allow eventhandler to work. 
+ - apiGroups: + - "" + resources: + - events + verbs: + - get + - list + - watch + # needed for remote.kubernetes.* + - apiGroups: [""] + resources: + - "configmaps" + - "secrets" + verbs: + - get + - list + - watch + # needed for otelcol.processor.k8sattributes + - apiGroups: ["apps"] + resources: ["replicasets"] + verbs: ["get", "list", "watch"] + - apiGroups: ["extensions"] + resources: ["replicasets"] + verbs: ["get", "list", "watch"] +--- +# Source: alloy/templates/rbac.yaml +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRoleBinding +metadata: + name: alloy + labels: + helm.sh/chart: alloy + app.kubernetes.io/name: alloy + app.kubernetes.io/instance: alloy + app.kubernetes.io/version: "vX.Y.Z" + app.kubernetes.io/managed-by: Helm +roleRef: + apiGroup: rbac.authorization.k8s.io + kind: ClusterRole + name: alloy +subjects: + - kind: ServiceAccount + name: alloy + namespace: default diff --git a/operations/helm/tests/sidecars/alloy/templates/service.yaml b/operations/helm/tests/sidecars/alloy/templates/service.yaml new file mode 100644 index 0000000000..34c0608fd3 --- /dev/null +++ b/operations/helm/tests/sidecars/alloy/templates/service.yaml @@ -0,0 +1,23 @@ +--- +# Source: alloy/templates/service.yaml +apiVersion: v1 +kind: Service +metadata: + name: alloy + labels: + helm.sh/chart: alloy + app.kubernetes.io/name: alloy + app.kubernetes.io/instance: alloy + app.kubernetes.io/version: "vX.Y.Z" + app.kubernetes.io/managed-by: Helm +spec: + type: ClusterIP + selector: + app.kubernetes.io/name: alloy + app.kubernetes.io/instance: alloy + internalTrafficPolicy: Cluster + ports: + - name: http-metrics + port: 80 + targetPort: 80 + protocol: "TCP" diff --git a/operations/helm/tests/sidecars/alloy/templates/serviceaccount.yaml b/operations/helm/tests/sidecars/alloy/templates/serviceaccount.yaml new file mode 100644 index 0000000000..46df991efb --- /dev/null +++ b/operations/helm/tests/sidecars/alloy/templates/serviceaccount.yaml @@ -0,0 +1,13 @@ +--- +# Source: alloy/templates/serviceaccount.yaml +apiVersion: v1 +kind: ServiceAccount +metadata: + name: alloy + namespace: default + labels: + helm.sh/chart: alloy + app.kubernetes.io/name: alloy + app.kubernetes.io/instance: alloy + app.kubernetes.io/version: "vX.Y.Z" + app.kubernetes.io/managed-by: Helm diff --git a/operations/helm/tests/sidecars/grafana-agent/templates/configmap.yaml b/operations/helm/tests/sidecars/grafana-agent/templates/configmap.yaml deleted file mode 100644 index 2fdc6f0117..0000000000 --- a/operations/helm/tests/sidecars/grafana-agent/templates/configmap.yaml +++ /dev/null @@ -1,42 +0,0 @@ ---- -# Source: grafana-agent/templates/configmap.yaml -apiVersion: v1 -kind: ConfigMap -metadata: - name: grafana-agent - labels: - helm.sh/chart: grafana-agent - app.kubernetes.io/name: grafana-agent - app.kubernetes.io/instance: grafana-agent - app.kubernetes.io/version: "vX.Y.Z" - app.kubernetes.io/managed-by: Helm -data: - config.river: |- - logging { - level = "info" - format = "logfmt" - } - - discovery.kubernetes "pods" { - role = "pod" - } - - discovery.kubernetes "nodes" { - role = "node" - } - - discovery.kubernetes "services" { - role = "service" - } - - discovery.kubernetes "endpoints" { - role = "endpoints" - } - - discovery.kubernetes "endpointslices" { - role = "endpointslice" - } - - discovery.kubernetes "ingresses" { - role = "ingress" - } diff --git a/operations/helm/tests/sidecars/grafana-agent/templates/rbac.yaml b/operations/helm/tests/sidecars/grafana-agent/templates/rbac.yaml deleted file mode 100644 
index 3765583fb6..0000000000 --- a/operations/helm/tests/sidecars/grafana-agent/templates/rbac.yaml +++ /dev/null @@ -1,117 +0,0 @@ ---- -# Source: grafana-agent/templates/rbac.yaml -apiVersion: rbac.authorization.k8s.io/v1 -kind: ClusterRole -metadata: - name: grafana-agent - labels: - helm.sh/chart: grafana-agent - app.kubernetes.io/name: grafana-agent - app.kubernetes.io/instance: grafana-agent - app.kubernetes.io/version: "vX.Y.Z" - app.kubernetes.io/managed-by: Helm -rules: - # Rules which allow discovery.kubernetes to function. - - apiGroups: - - "" - - "discovery.k8s.io" - - "networking.k8s.io" - resources: - - endpoints - - endpointslices - - ingresses - - nodes - - nodes/proxy - - nodes/metrics - - pods - - services - verbs: - - get - - list - - watch - # Rules which allow loki.source.kubernetes and loki.source.podlogs to work. - - apiGroups: - - "" - resources: - - pods - - pods/log - - namespaces - verbs: - - get - - list - - watch - - apiGroups: - - "monitoring.grafana.com" - resources: - - podlogs - verbs: - - get - - list - - watch - # Rules which allow mimir.rules.kubernetes to work. - - apiGroups: ["monitoring.coreos.com"] - resources: - - prometheusrules - verbs: - - get - - list - - watch - - nonResourceURLs: - - /metrics - verbs: - - get - # Rules for prometheus.kubernetes.* - - apiGroups: ["monitoring.coreos.com"] - resources: - - podmonitors - - servicemonitors - - probes - verbs: - - get - - list - - watch - # Rules which allow eventhandler to work. - - apiGroups: - - "" - resources: - - events - verbs: - - get - - list - - watch - # needed for remote.kubernetes.* - - apiGroups: [""] - resources: - - "configmaps" - - "secrets" - verbs: - - get - - list - - watch - # needed for otelcol.processor.k8sattributes - - apiGroups: ["apps"] - resources: ["replicasets"] - verbs: ["get", "list", "watch"] - - apiGroups: ["extensions"] - resources: ["replicasets"] - verbs: ["get", "list", "watch"] ---- -# Source: grafana-agent/templates/rbac.yaml -apiVersion: rbac.authorization.k8s.io/v1 -kind: ClusterRoleBinding -metadata: - name: grafana-agent - labels: - helm.sh/chart: grafana-agent - app.kubernetes.io/name: grafana-agent - app.kubernetes.io/instance: grafana-agent - app.kubernetes.io/version: "vX.Y.Z" - app.kubernetes.io/managed-by: Helm -roleRef: - apiGroup: rbac.authorization.k8s.io - kind: ClusterRole - name: grafana-agent -subjects: - - kind: ServiceAccount - name: grafana-agent - namespace: default diff --git a/operations/helm/tests/sidecars/grafana-agent/templates/service.yaml b/operations/helm/tests/sidecars/grafana-agent/templates/service.yaml deleted file mode 100644 index c98f79428b..0000000000 --- a/operations/helm/tests/sidecars/grafana-agent/templates/service.yaml +++ /dev/null @@ -1,23 +0,0 @@ ---- -# Source: grafana-agent/templates/service.yaml -apiVersion: v1 -kind: Service -metadata: - name: grafana-agent - labels: - helm.sh/chart: grafana-agent - app.kubernetes.io/name: grafana-agent - app.kubernetes.io/instance: grafana-agent - app.kubernetes.io/version: "vX.Y.Z" - app.kubernetes.io/managed-by: Helm -spec: - type: ClusterIP - selector: - app.kubernetes.io/name: grafana-agent - app.kubernetes.io/instance: grafana-agent - internalTrafficPolicy: Cluster - ports: - - name: http-metrics - port: 80 - targetPort: 80 - protocol: "TCP" diff --git a/operations/helm/tests/sidecars/grafana-agent/templates/serviceaccount.yaml b/operations/helm/tests/sidecars/grafana-agent/templates/serviceaccount.yaml deleted file mode 100644 index 65d7e0df38..0000000000 --- 
a/operations/helm/tests/sidecars/grafana-agent/templates/serviceaccount.yaml +++ /dev/null @@ -1,13 +0,0 @@ ---- -# Source: grafana-agent/templates/serviceaccount.yaml -apiVersion: v1 -kind: ServiceAccount -metadata: - name: grafana-agent - namespace: default - labels: - helm.sh/chart: grafana-agent - app.kubernetes.io/name: grafana-agent - app.kubernetes.io/instance: grafana-agent - app.kubernetes.io/version: "vX.Y.Z" - app.kubernetes.io/managed-by: Helm diff --git a/operations/helm/tests/topologyspreadconstraints/alloy/templates/configmap.yaml b/operations/helm/tests/topologyspreadconstraints/alloy/templates/configmap.yaml new file mode 100644 index 0000000000..c9ae77aa1e --- /dev/null +++ b/operations/helm/tests/topologyspreadconstraints/alloy/templates/configmap.yaml @@ -0,0 +1,42 @@ +--- +# Source: alloy/templates/configmap.yaml +apiVersion: v1 +kind: ConfigMap +metadata: + name: alloy + labels: + helm.sh/chart: alloy + app.kubernetes.io/name: alloy + app.kubernetes.io/instance: alloy + app.kubernetes.io/version: "vX.Y.Z" + app.kubernetes.io/managed-by: Helm +data: + config.river: |- + logging { + level = "info" + format = "logfmt" + } + + discovery.kubernetes "pods" { + role = "pod" + } + + discovery.kubernetes "nodes" { + role = "node" + } + + discovery.kubernetes "services" { + role = "service" + } + + discovery.kubernetes "endpoints" { + role = "endpoints" + } + + discovery.kubernetes "endpointslices" { + role = "endpointslice" + } + + discovery.kubernetes "ingresses" { + role = "ingress" + } diff --git a/operations/helm/tests/topologyspreadconstraints/grafana-agent/templates/controllers/deployment.yaml b/operations/helm/tests/topologyspreadconstraints/alloy/templates/controllers/deployment.yaml similarity index 81% rename from operations/helm/tests/topologyspreadconstraints/grafana-agent/templates/controllers/deployment.yaml rename to operations/helm/tests/topologyspreadconstraints/alloy/templates/controllers/deployment.yaml index 7faf74aa1c..2b0ff9dfc6 100644 --- a/operations/helm/tests/topologyspreadconstraints/grafana-agent/templates/controllers/deployment.yaml +++ b/operations/helm/tests/topologyspreadconstraints/alloy/templates/controllers/deployment.yaml @@ -1,13 +1,13 @@ --- -# Source: grafana-agent/templates/controllers/deployment.yaml +# Source: alloy/templates/controllers/deployment.yaml apiVersion: apps/v1 kind: Deployment metadata: - name: grafana-agent + name: alloy labels: - helm.sh/chart: grafana-agent - app.kubernetes.io/name: grafana-agent - app.kubernetes.io/instance: grafana-agent + helm.sh/chart: alloy + app.kubernetes.io/name: alloy + app.kubernetes.io/instance: alloy app.kubernetes.io/version: "vX.Y.Z" app.kubernetes.io/managed-by: Helm spec: @@ -15,17 +15,17 @@ spec: minReadySeconds: 10 selector: matchLabels: - app.kubernetes.io/name: grafana-agent - app.kubernetes.io/instance: grafana-agent + app.kubernetes.io/name: alloy + app.kubernetes.io/instance: alloy template: metadata: annotations: kubectl.kubernetes.io/default-container: alloy labels: - app.kubernetes.io/name: grafana-agent - app.kubernetes.io/instance: grafana-agent + app.kubernetes.io/name: alloy + app.kubernetes.io/instance: alloy spec: - serviceAccountName: grafana-agent + serviceAccountName: alloy containers: - name: alloy image: docker.io/grafana/alloy:v0.40.2 @@ -80,4 +80,4 @@ spec: volumes: - name: config configMap: - name: grafana-agent + name: alloy diff --git a/operations/helm/tests/topologyspreadconstraints/alloy/templates/rbac.yaml 
b/operations/helm/tests/topologyspreadconstraints/alloy/templates/rbac.yaml new file mode 100644 index 0000000000..53ca27544f --- /dev/null +++ b/operations/helm/tests/topologyspreadconstraints/alloy/templates/rbac.yaml @@ -0,0 +1,117 @@ +--- +# Source: alloy/templates/rbac.yaml +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRole +metadata: + name: alloy + labels: + helm.sh/chart: alloy + app.kubernetes.io/name: alloy + app.kubernetes.io/instance: alloy + app.kubernetes.io/version: "vX.Y.Z" + app.kubernetes.io/managed-by: Helm +rules: + # Rules which allow discovery.kubernetes to function. + - apiGroups: + - "" + - "discovery.k8s.io" + - "networking.k8s.io" + resources: + - endpoints + - endpointslices + - ingresses + - nodes + - nodes/proxy + - nodes/metrics + - pods + - services + verbs: + - get + - list + - watch + # Rules which allow loki.source.kubernetes and loki.source.podlogs to work. + - apiGroups: + - "" + resources: + - pods + - pods/log + - namespaces + verbs: + - get + - list + - watch + - apiGroups: + - "monitoring.grafana.com" + resources: + - podlogs + verbs: + - get + - list + - watch + # Rules which allow mimir.rules.kubernetes to work. + - apiGroups: ["monitoring.coreos.com"] + resources: + - prometheusrules + verbs: + - get + - list + - watch + - nonResourceURLs: + - /metrics + verbs: + - get + # Rules for prometheus.kubernetes.* + - apiGroups: ["monitoring.coreos.com"] + resources: + - podmonitors + - servicemonitors + - probes + verbs: + - get + - list + - watch + # Rules which allow eventhandler to work. + - apiGroups: + - "" + resources: + - events + verbs: + - get + - list + - watch + # needed for remote.kubernetes.* + - apiGroups: [""] + resources: + - "configmaps" + - "secrets" + verbs: + - get + - list + - watch + # needed for otelcol.processor.k8sattributes + - apiGroups: ["apps"] + resources: ["replicasets"] + verbs: ["get", "list", "watch"] + - apiGroups: ["extensions"] + resources: ["replicasets"] + verbs: ["get", "list", "watch"] +--- +# Source: alloy/templates/rbac.yaml +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRoleBinding +metadata: + name: alloy + labels: + helm.sh/chart: alloy + app.kubernetes.io/name: alloy + app.kubernetes.io/instance: alloy + app.kubernetes.io/version: "vX.Y.Z" + app.kubernetes.io/managed-by: Helm +roleRef: + apiGroup: rbac.authorization.k8s.io + kind: ClusterRole + name: alloy +subjects: + - kind: ServiceAccount + name: alloy + namespace: default diff --git a/operations/helm/tests/topologyspreadconstraints/alloy/templates/service.yaml b/operations/helm/tests/topologyspreadconstraints/alloy/templates/service.yaml new file mode 100644 index 0000000000..34c0608fd3 --- /dev/null +++ b/operations/helm/tests/topologyspreadconstraints/alloy/templates/service.yaml @@ -0,0 +1,23 @@ +--- +# Source: alloy/templates/service.yaml +apiVersion: v1 +kind: Service +metadata: + name: alloy + labels: + helm.sh/chart: alloy + app.kubernetes.io/name: alloy + app.kubernetes.io/instance: alloy + app.kubernetes.io/version: "vX.Y.Z" + app.kubernetes.io/managed-by: Helm +spec: + type: ClusterIP + selector: + app.kubernetes.io/name: alloy + app.kubernetes.io/instance: alloy + internalTrafficPolicy: Cluster + ports: + - name: http-metrics + port: 80 + targetPort: 80 + protocol: "TCP" diff --git a/operations/helm/tests/topologyspreadconstraints/alloy/templates/serviceaccount.yaml b/operations/helm/tests/topologyspreadconstraints/alloy/templates/serviceaccount.yaml new file mode 100644 index 0000000000..46df991efb --- /dev/null +++ 
b/operations/helm/tests/topologyspreadconstraints/alloy/templates/serviceaccount.yaml @@ -0,0 +1,13 @@ +--- +# Source: alloy/templates/serviceaccount.yaml +apiVersion: v1 +kind: ServiceAccount +metadata: + name: alloy + namespace: default + labels: + helm.sh/chart: alloy + app.kubernetes.io/name: alloy + app.kubernetes.io/instance: alloy + app.kubernetes.io/version: "vX.Y.Z" + app.kubernetes.io/managed-by: Helm diff --git a/operations/helm/tests/topologyspreadconstraints/grafana-agent/templates/configmap.yaml b/operations/helm/tests/topologyspreadconstraints/grafana-agent/templates/configmap.yaml deleted file mode 100644 index 2fdc6f0117..0000000000 --- a/operations/helm/tests/topologyspreadconstraints/grafana-agent/templates/configmap.yaml +++ /dev/null @@ -1,42 +0,0 @@ ---- -# Source: grafana-agent/templates/configmap.yaml -apiVersion: v1 -kind: ConfigMap -metadata: - name: grafana-agent - labels: - helm.sh/chart: grafana-agent - app.kubernetes.io/name: grafana-agent - app.kubernetes.io/instance: grafana-agent - app.kubernetes.io/version: "vX.Y.Z" - app.kubernetes.io/managed-by: Helm -data: - config.river: |- - logging { - level = "info" - format = "logfmt" - } - - discovery.kubernetes "pods" { - role = "pod" - } - - discovery.kubernetes "nodes" { - role = "node" - } - - discovery.kubernetes "services" { - role = "service" - } - - discovery.kubernetes "endpoints" { - role = "endpoints" - } - - discovery.kubernetes "endpointslices" { - role = "endpointslice" - } - - discovery.kubernetes "ingresses" { - role = "ingress" - } diff --git a/operations/helm/tests/topologyspreadconstraints/grafana-agent/templates/rbac.yaml b/operations/helm/tests/topologyspreadconstraints/grafana-agent/templates/rbac.yaml deleted file mode 100644 index 3765583fb6..0000000000 --- a/operations/helm/tests/topologyspreadconstraints/grafana-agent/templates/rbac.yaml +++ /dev/null @@ -1,117 +0,0 @@ ---- -# Source: grafana-agent/templates/rbac.yaml -apiVersion: rbac.authorization.k8s.io/v1 -kind: ClusterRole -metadata: - name: grafana-agent - labels: - helm.sh/chart: grafana-agent - app.kubernetes.io/name: grafana-agent - app.kubernetes.io/instance: grafana-agent - app.kubernetes.io/version: "vX.Y.Z" - app.kubernetes.io/managed-by: Helm -rules: - # Rules which allow discovery.kubernetes to function. - - apiGroups: - - "" - - "discovery.k8s.io" - - "networking.k8s.io" - resources: - - endpoints - - endpointslices - - ingresses - - nodes - - nodes/proxy - - nodes/metrics - - pods - - services - verbs: - - get - - list - - watch - # Rules which allow loki.source.kubernetes and loki.source.podlogs to work. - - apiGroups: - - "" - resources: - - pods - - pods/log - - namespaces - verbs: - - get - - list - - watch - - apiGroups: - - "monitoring.grafana.com" - resources: - - podlogs - verbs: - - get - - list - - watch - # Rules which allow mimir.rules.kubernetes to work. - - apiGroups: ["monitoring.coreos.com"] - resources: - - prometheusrules - verbs: - - get - - list - - watch - - nonResourceURLs: - - /metrics - verbs: - - get - # Rules for prometheus.kubernetes.* - - apiGroups: ["monitoring.coreos.com"] - resources: - - podmonitors - - servicemonitors - - probes - verbs: - - get - - list - - watch - # Rules which allow eventhandler to work. 
- - apiGroups: - - "" - resources: - - events - verbs: - - get - - list - - watch - # needed for remote.kubernetes.* - - apiGroups: [""] - resources: - - "configmaps" - - "secrets" - verbs: - - get - - list - - watch - # needed for otelcol.processor.k8sattributes - - apiGroups: ["apps"] - resources: ["replicasets"] - verbs: ["get", "list", "watch"] - - apiGroups: ["extensions"] - resources: ["replicasets"] - verbs: ["get", "list", "watch"] ---- -# Source: grafana-agent/templates/rbac.yaml -apiVersion: rbac.authorization.k8s.io/v1 -kind: ClusterRoleBinding -metadata: - name: grafana-agent - labels: - helm.sh/chart: grafana-agent - app.kubernetes.io/name: grafana-agent - app.kubernetes.io/instance: grafana-agent - app.kubernetes.io/version: "vX.Y.Z" - app.kubernetes.io/managed-by: Helm -roleRef: - apiGroup: rbac.authorization.k8s.io - kind: ClusterRole - name: grafana-agent -subjects: - - kind: ServiceAccount - name: grafana-agent - namespace: default diff --git a/operations/helm/tests/topologyspreadconstraints/grafana-agent/templates/service.yaml b/operations/helm/tests/topologyspreadconstraints/grafana-agent/templates/service.yaml deleted file mode 100644 index c98f79428b..0000000000 --- a/operations/helm/tests/topologyspreadconstraints/grafana-agent/templates/service.yaml +++ /dev/null @@ -1,23 +0,0 @@ ---- -# Source: grafana-agent/templates/service.yaml -apiVersion: v1 -kind: Service -metadata: - name: grafana-agent - labels: - helm.sh/chart: grafana-agent - app.kubernetes.io/name: grafana-agent - app.kubernetes.io/instance: grafana-agent - app.kubernetes.io/version: "vX.Y.Z" - app.kubernetes.io/managed-by: Helm -spec: - type: ClusterIP - selector: - app.kubernetes.io/name: grafana-agent - app.kubernetes.io/instance: grafana-agent - internalTrafficPolicy: Cluster - ports: - - name: http-metrics - port: 80 - targetPort: 80 - protocol: "TCP" diff --git a/operations/helm/tests/topologyspreadconstraints/grafana-agent/templates/serviceaccount.yaml b/operations/helm/tests/topologyspreadconstraints/grafana-agent/templates/serviceaccount.yaml deleted file mode 100644 index 65d7e0df38..0000000000 --- a/operations/helm/tests/topologyspreadconstraints/grafana-agent/templates/serviceaccount.yaml +++ /dev/null @@ -1,13 +0,0 @@ ---- -# Source: grafana-agent/templates/serviceaccount.yaml -apiVersion: v1 -kind: ServiceAccount -metadata: - name: grafana-agent - namespace: default - labels: - helm.sh/chart: grafana-agent - app.kubernetes.io/name: grafana-agent - app.kubernetes.io/instance: grafana-agent - app.kubernetes.io/version: "vX.Y.Z" - app.kubernetes.io/managed-by: Helm diff --git a/operations/helm/tests/with-digests/alloy/templates/configmap.yaml b/operations/helm/tests/with-digests/alloy/templates/configmap.yaml new file mode 100644 index 0000000000..c9ae77aa1e --- /dev/null +++ b/operations/helm/tests/with-digests/alloy/templates/configmap.yaml @@ -0,0 +1,42 @@ +--- +# Source: alloy/templates/configmap.yaml +apiVersion: v1 +kind: ConfigMap +metadata: + name: alloy + labels: + helm.sh/chart: alloy + app.kubernetes.io/name: alloy + app.kubernetes.io/instance: alloy + app.kubernetes.io/version: "vX.Y.Z" + app.kubernetes.io/managed-by: Helm +data: + config.river: |- + logging { + level = "info" + format = "logfmt" + } + + discovery.kubernetes "pods" { + role = "pod" + } + + discovery.kubernetes "nodes" { + role = "node" + } + + discovery.kubernetes "services" { + role = "service" + } + + discovery.kubernetes "endpoints" { + role = "endpoints" + } + + discovery.kubernetes "endpointslices" { + role = 
"endpointslice" + } + + discovery.kubernetes "ingresses" { + role = "ingress" + } diff --git a/operations/helm/tests/with-digests/grafana-agent/templates/controllers/daemonset.yaml b/operations/helm/tests/with-digests/alloy/templates/controllers/daemonset.yaml similarity index 79% rename from operations/helm/tests/with-digests/grafana-agent/templates/controllers/daemonset.yaml rename to operations/helm/tests/with-digests/alloy/templates/controllers/daemonset.yaml index 57eb2cb87a..4418f7f17a 100644 --- a/operations/helm/tests/with-digests/grafana-agent/templates/controllers/daemonset.yaml +++ b/operations/helm/tests/with-digests/alloy/templates/controllers/daemonset.yaml @@ -1,30 +1,30 @@ --- -# Source: grafana-agent/templates/controllers/daemonset.yaml +# Source: alloy/templates/controllers/daemonset.yaml apiVersion: apps/v1 kind: DaemonSet metadata: - name: grafana-agent + name: alloy labels: - helm.sh/chart: grafana-agent - app.kubernetes.io/name: grafana-agent - app.kubernetes.io/instance: grafana-agent + helm.sh/chart: alloy + app.kubernetes.io/name: alloy + app.kubernetes.io/instance: alloy app.kubernetes.io/version: "vX.Y.Z" app.kubernetes.io/managed-by: Helm spec: minReadySeconds: 10 selector: matchLabels: - app.kubernetes.io/name: grafana-agent - app.kubernetes.io/instance: grafana-agent + app.kubernetes.io/name: alloy + app.kubernetes.io/instance: alloy template: metadata: annotations: kubectl.kubernetes.io/default-container: alloy labels: - app.kubernetes.io/name: grafana-agent - app.kubernetes.io/instance: grafana-agent + app.kubernetes.io/name: alloy + app.kubernetes.io/instance: alloy spec: - serviceAccountName: grafana-agent + serviceAccountName: alloy containers: - name: alloy image: docker.io/grafana/agent@sha256:82575a7be3e4770e53f620298e58bcc4cdb0fd0338e01c4b206cae9e3ca46ebf @@ -71,4 +71,4 @@ spec: volumes: - name: config configMap: - name: grafana-agent + name: alloy diff --git a/operations/helm/tests/with-digests/alloy/templates/rbac.yaml b/operations/helm/tests/with-digests/alloy/templates/rbac.yaml new file mode 100644 index 0000000000..53ca27544f --- /dev/null +++ b/operations/helm/tests/with-digests/alloy/templates/rbac.yaml @@ -0,0 +1,117 @@ +--- +# Source: alloy/templates/rbac.yaml +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRole +metadata: + name: alloy + labels: + helm.sh/chart: alloy + app.kubernetes.io/name: alloy + app.kubernetes.io/instance: alloy + app.kubernetes.io/version: "vX.Y.Z" + app.kubernetes.io/managed-by: Helm +rules: + # Rules which allow discovery.kubernetes to function. + - apiGroups: + - "" + - "discovery.k8s.io" + - "networking.k8s.io" + resources: + - endpoints + - endpointslices + - ingresses + - nodes + - nodes/proxy + - nodes/metrics + - pods + - services + verbs: + - get + - list + - watch + # Rules which allow loki.source.kubernetes and loki.source.podlogs to work. + - apiGroups: + - "" + resources: + - pods + - pods/log + - namespaces + verbs: + - get + - list + - watch + - apiGroups: + - "monitoring.grafana.com" + resources: + - podlogs + verbs: + - get + - list + - watch + # Rules which allow mimir.rules.kubernetes to work. + - apiGroups: ["monitoring.coreos.com"] + resources: + - prometheusrules + verbs: + - get + - list + - watch + - nonResourceURLs: + - /metrics + verbs: + - get + # Rules for prometheus.kubernetes.* + - apiGroups: ["monitoring.coreos.com"] + resources: + - podmonitors + - servicemonitors + - probes + verbs: + - get + - list + - watch + # Rules which allow eventhandler to work. 
+ - apiGroups: + - "" + resources: + - events + verbs: + - get + - list + - watch + # needed for remote.kubernetes.* + - apiGroups: [""] + resources: + - "configmaps" + - "secrets" + verbs: + - get + - list + - watch + # needed for otelcol.processor.k8sattributes + - apiGroups: ["apps"] + resources: ["replicasets"] + verbs: ["get", "list", "watch"] + - apiGroups: ["extensions"] + resources: ["replicasets"] + verbs: ["get", "list", "watch"] +--- +# Source: alloy/templates/rbac.yaml +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRoleBinding +metadata: + name: alloy + labels: + helm.sh/chart: alloy + app.kubernetes.io/name: alloy + app.kubernetes.io/instance: alloy + app.kubernetes.io/version: "vX.Y.Z" + app.kubernetes.io/managed-by: Helm +roleRef: + apiGroup: rbac.authorization.k8s.io + kind: ClusterRole + name: alloy +subjects: + - kind: ServiceAccount + name: alloy + namespace: default diff --git a/operations/helm/tests/with-digests/alloy/templates/service.yaml b/operations/helm/tests/with-digests/alloy/templates/service.yaml new file mode 100644 index 0000000000..34c0608fd3 --- /dev/null +++ b/operations/helm/tests/with-digests/alloy/templates/service.yaml @@ -0,0 +1,23 @@ +--- +# Source: alloy/templates/service.yaml +apiVersion: v1 +kind: Service +metadata: + name: alloy + labels: + helm.sh/chart: alloy + app.kubernetes.io/name: alloy + app.kubernetes.io/instance: alloy + app.kubernetes.io/version: "vX.Y.Z" + app.kubernetes.io/managed-by: Helm +spec: + type: ClusterIP + selector: + app.kubernetes.io/name: alloy + app.kubernetes.io/instance: alloy + internalTrafficPolicy: Cluster + ports: + - name: http-metrics + port: 80 + targetPort: 80 + protocol: "TCP" diff --git a/operations/helm/tests/with-digests/alloy/templates/serviceaccount.yaml b/operations/helm/tests/with-digests/alloy/templates/serviceaccount.yaml new file mode 100644 index 0000000000..46df991efb --- /dev/null +++ b/operations/helm/tests/with-digests/alloy/templates/serviceaccount.yaml @@ -0,0 +1,13 @@ +--- +# Source: alloy/templates/serviceaccount.yaml +apiVersion: v1 +kind: ServiceAccount +metadata: + name: alloy + namespace: default + labels: + helm.sh/chart: alloy + app.kubernetes.io/name: alloy + app.kubernetes.io/instance: alloy + app.kubernetes.io/version: "vX.Y.Z" + app.kubernetes.io/managed-by: Helm diff --git a/operations/helm/tests/with-digests/grafana-agent/templates/configmap.yaml b/operations/helm/tests/with-digests/grafana-agent/templates/configmap.yaml deleted file mode 100644 index 2fdc6f0117..0000000000 --- a/operations/helm/tests/with-digests/grafana-agent/templates/configmap.yaml +++ /dev/null @@ -1,42 +0,0 @@ ---- -# Source: grafana-agent/templates/configmap.yaml -apiVersion: v1 -kind: ConfigMap -metadata: - name: grafana-agent - labels: - helm.sh/chart: grafana-agent - app.kubernetes.io/name: grafana-agent - app.kubernetes.io/instance: grafana-agent - app.kubernetes.io/version: "vX.Y.Z" - app.kubernetes.io/managed-by: Helm -data: - config.river: |- - logging { - level = "info" - format = "logfmt" - } - - discovery.kubernetes "pods" { - role = "pod" - } - - discovery.kubernetes "nodes" { - role = "node" - } - - discovery.kubernetes "services" { - role = "service" - } - - discovery.kubernetes "endpoints" { - role = "endpoints" - } - - discovery.kubernetes "endpointslices" { - role = "endpointslice" - } - - discovery.kubernetes "ingresses" { - role = "ingress" - } diff --git a/operations/helm/tests/with-digests/grafana-agent/templates/rbac.yaml 
b/operations/helm/tests/with-digests/grafana-agent/templates/rbac.yaml deleted file mode 100644 index 3765583fb6..0000000000 --- a/operations/helm/tests/with-digests/grafana-agent/templates/rbac.yaml +++ /dev/null @@ -1,117 +0,0 @@ ---- -# Source: grafana-agent/templates/rbac.yaml -apiVersion: rbac.authorization.k8s.io/v1 -kind: ClusterRole -metadata: - name: grafana-agent - labels: - helm.sh/chart: grafana-agent - app.kubernetes.io/name: grafana-agent - app.kubernetes.io/instance: grafana-agent - app.kubernetes.io/version: "vX.Y.Z" - app.kubernetes.io/managed-by: Helm -rules: - # Rules which allow discovery.kubernetes to function. - - apiGroups: - - "" - - "discovery.k8s.io" - - "networking.k8s.io" - resources: - - endpoints - - endpointslices - - ingresses - - nodes - - nodes/proxy - - nodes/metrics - - pods - - services - verbs: - - get - - list - - watch - # Rules which allow loki.source.kubernetes and loki.source.podlogs to work. - - apiGroups: - - "" - resources: - - pods - - pods/log - - namespaces - verbs: - - get - - list - - watch - - apiGroups: - - "monitoring.grafana.com" - resources: - - podlogs - verbs: - - get - - list - - watch - # Rules which allow mimir.rules.kubernetes to work. - - apiGroups: ["monitoring.coreos.com"] - resources: - - prometheusrules - verbs: - - get - - list - - watch - - nonResourceURLs: - - /metrics - verbs: - - get - # Rules for prometheus.kubernetes.* - - apiGroups: ["monitoring.coreos.com"] - resources: - - podmonitors - - servicemonitors - - probes - verbs: - - get - - list - - watch - # Rules which allow eventhandler to work. - - apiGroups: - - "" - resources: - - events - verbs: - - get - - list - - watch - # needed for remote.kubernetes.* - - apiGroups: [""] - resources: - - "configmaps" - - "secrets" - verbs: - - get - - list - - watch - # needed for otelcol.processor.k8sattributes - - apiGroups: ["apps"] - resources: ["replicasets"] - verbs: ["get", "list", "watch"] - - apiGroups: ["extensions"] - resources: ["replicasets"] - verbs: ["get", "list", "watch"] ---- -# Source: grafana-agent/templates/rbac.yaml -apiVersion: rbac.authorization.k8s.io/v1 -kind: ClusterRoleBinding -metadata: - name: grafana-agent - labels: - helm.sh/chart: grafana-agent - app.kubernetes.io/name: grafana-agent - app.kubernetes.io/instance: grafana-agent - app.kubernetes.io/version: "vX.Y.Z" - app.kubernetes.io/managed-by: Helm -roleRef: - apiGroup: rbac.authorization.k8s.io - kind: ClusterRole - name: grafana-agent -subjects: - - kind: ServiceAccount - name: grafana-agent - namespace: default diff --git a/operations/helm/tests/with-digests/grafana-agent/templates/service.yaml b/operations/helm/tests/with-digests/grafana-agent/templates/service.yaml deleted file mode 100644 index c98f79428b..0000000000 --- a/operations/helm/tests/with-digests/grafana-agent/templates/service.yaml +++ /dev/null @@ -1,23 +0,0 @@ ---- -# Source: grafana-agent/templates/service.yaml -apiVersion: v1 -kind: Service -metadata: - name: grafana-agent - labels: - helm.sh/chart: grafana-agent - app.kubernetes.io/name: grafana-agent - app.kubernetes.io/instance: grafana-agent - app.kubernetes.io/version: "vX.Y.Z" - app.kubernetes.io/managed-by: Helm -spec: - type: ClusterIP - selector: - app.kubernetes.io/name: grafana-agent - app.kubernetes.io/instance: grafana-agent - internalTrafficPolicy: Cluster - ports: - - name: http-metrics - port: 80 - targetPort: 80 - protocol: "TCP" diff --git a/operations/helm/tests/with-digests/grafana-agent/templates/serviceaccount.yaml 
b/operations/helm/tests/with-digests/grafana-agent/templates/serviceaccount.yaml deleted file mode 100644 index 65d7e0df38..0000000000 --- a/operations/helm/tests/with-digests/grafana-agent/templates/serviceaccount.yaml +++ /dev/null @@ -1,13 +0,0 @@ ---- -# Source: grafana-agent/templates/serviceaccount.yaml -apiVersion: v1 -kind: ServiceAccount -metadata: - name: grafana-agent - namespace: default - labels: - helm.sh/chart: grafana-agent - app.kubernetes.io/name: grafana-agent - app.kubernetes.io/instance: grafana-agent - app.kubernetes.io/version: "vX.Y.Z" - app.kubernetes.io/managed-by: Helm From 756d28531eea61be27cf9ca63ee63370cb8ee599 Mon Sep 17 00:00:00 2001 From: Robert Fratto Date: Tue, 12 Mar 2024 08:47:32 -0400 Subject: [PATCH 014/136] helm: reset alloy Chart version and update appVersion to 1.0 --- operations/helm/charts/alloy/Chart.yaml | 4 ++-- operations/helm/charts/alloy/README.md | 2 +- .../alloy/templates/controllers/daemonset.yaml | 2 +- .../clustering/alloy/templates/controllers/statefulset.yaml | 2 +- .../alloy/templates/controllers/daemonset.yaml | 2 +- .../alloy/templates/controllers/daemonset.yaml | 2 +- .../alloy/templates/controllers/daemonset.yaml | 2 +- .../alloy/templates/controllers/deployment.yaml | 2 +- .../alloy/templates/controllers/deployment.yaml | 2 +- .../alloy/templates/controllers/statefulset.yaml | 2 +- .../alloy/templates/controllers/statefulset.yaml | 2 +- .../custom-config/alloy/templates/controllers/daemonset.yaml | 2 +- .../default-values/alloy/templates/controllers/daemonset.yaml | 2 +- .../alloy/templates/controllers/daemonset.yaml | 2 +- .../alloy/templates/controllers/daemonset.yaml | 2 +- .../tests/envFrom/alloy/templates/controllers/daemonset.yaml | 2 +- .../alloy/templates/controllers/daemonset.yaml | 2 +- .../extra-env/alloy/templates/controllers/daemonset.yaml | 2 +- .../extra-ports/alloy/templates/controllers/daemonset.yaml | 2 +- .../faro-ingress/alloy/templates/controllers/daemonset.yaml | 2 +- .../alloy/templates/controllers/daemonset.yaml | 2 +- .../alloy/templates/controllers/daemonset.yaml | 2 +- .../initcontainers/alloy/templates/controllers/daemonset.yaml | 2 +- .../alloy/templates/controllers/daemonset.yaml | 2 +- .../alloy/templates/controllers/daemonset.yaml | 2 +- .../alloy/templates/controllers/daemonset.yaml | 2 +- .../alloy/templates/controllers/daemonset.yaml | 2 +- .../tests/sidecars/alloy/templates/controllers/daemonset.yaml | 2 +- .../alloy/templates/controllers/deployment.yaml | 2 +- 29 files changed, 30 insertions(+), 30 deletions(-) diff --git a/operations/helm/charts/alloy/Chart.yaml b/operations/helm/charts/alloy/Chart.yaml index 9de00f3661..d24a9248c1 100644 --- a/operations/helm/charts/alloy/Chart.yaml +++ b/operations/helm/charts/alloy/Chart.yaml @@ -2,8 +2,8 @@ apiVersion: v2 name: alloy description: 'Grafana Alloy' type: application -version: 0.36.0 -appVersion: 'v0.40.2' +version: 0.1.0 +appVersion: 'v1.0.0' dependencies: - name: crds diff --git a/operations/helm/charts/alloy/README.md b/operations/helm/charts/alloy/README.md index 210249a348..640219358d 100644 --- a/operations/helm/charts/alloy/README.md +++ b/operations/helm/charts/alloy/README.md @@ -1,6 +1,6 @@ # Grafana Alloy Helm chart -![Type: application](https://img.shields.io/badge/Type-application-informational?style=flat-square) ![Version: 0.36.0](https://img.shields.io/badge/Version-0.36.0-informational?style=flat-square) ![AppVersion: v0.40.2](https://img.shields.io/badge/AppVersion-v0.40.2-informational?style=flat-square) +![Type: 
application](https://img.shields.io/badge/Type-application-informational?style=flat-square) ![Version: 0.1.0](https://img.shields.io/badge/Version-0.1.0-informational?style=flat-square) ![AppVersion: v1.0.0](https://img.shields.io/badge/AppVersion-v1.0.0-informational?style=flat-square) Helm chart for deploying [Grafana Alloy][] to Kubernetes. diff --git a/operations/helm/tests/additional-serviceaccount-label/alloy/templates/controllers/daemonset.yaml b/operations/helm/tests/additional-serviceaccount-label/alloy/templates/controllers/daemonset.yaml index fdd2df0969..ca587b7b9c 100644 --- a/operations/helm/tests/additional-serviceaccount-label/alloy/templates/controllers/daemonset.yaml +++ b/operations/helm/tests/additional-serviceaccount-label/alloy/templates/controllers/daemonset.yaml @@ -27,7 +27,7 @@ spec: serviceAccountName: alloy containers: - name: alloy - image: docker.io/grafana/alloy:v0.40.2 + image: docker.io/grafana/alloy:v1.0.0 imagePullPolicy: IfNotPresent args: - run diff --git a/operations/helm/tests/clustering/alloy/templates/controllers/statefulset.yaml b/operations/helm/tests/clustering/alloy/templates/controllers/statefulset.yaml index 1de3c634c7..b0c6c4b829 100644 --- a/operations/helm/tests/clustering/alloy/templates/controllers/statefulset.yaml +++ b/operations/helm/tests/clustering/alloy/templates/controllers/statefulset.yaml @@ -30,7 +30,7 @@ spec: serviceAccountName: alloy containers: - name: alloy - image: docker.io/grafana/alloy:v0.40.2 + image: docker.io/grafana/alloy:v1.0.0 imagePullPolicy: IfNotPresent args: - run diff --git a/operations/helm/tests/controller-volumes-extra/alloy/templates/controllers/daemonset.yaml b/operations/helm/tests/controller-volumes-extra/alloy/templates/controllers/daemonset.yaml index 67b85b9397..0e09b6a17c 100644 --- a/operations/helm/tests/controller-volumes-extra/alloy/templates/controllers/daemonset.yaml +++ b/operations/helm/tests/controller-volumes-extra/alloy/templates/controllers/daemonset.yaml @@ -27,7 +27,7 @@ spec: serviceAccountName: alloy containers: - name: alloy - image: docker.io/grafana/alloy:v0.40.2 + image: docker.io/grafana/alloy:v1.0.0 imagePullPolicy: IfNotPresent args: - run diff --git a/operations/helm/tests/create-daemonset-hostnetwork/alloy/templates/controllers/daemonset.yaml b/operations/helm/tests/create-daemonset-hostnetwork/alloy/templates/controllers/daemonset.yaml index 520e373a6c..278ac6f3c8 100644 --- a/operations/helm/tests/create-daemonset-hostnetwork/alloy/templates/controllers/daemonset.yaml +++ b/operations/helm/tests/create-daemonset-hostnetwork/alloy/templates/controllers/daemonset.yaml @@ -27,7 +27,7 @@ spec: serviceAccountName: alloy containers: - name: alloy - image: docker.io/grafana/alloy:v0.40.2 + image: docker.io/grafana/alloy:v1.0.0 imagePullPolicy: IfNotPresent args: - run diff --git a/operations/helm/tests/create-daemonset/alloy/templates/controllers/daemonset.yaml b/operations/helm/tests/create-daemonset/alloy/templates/controllers/daemonset.yaml index fdd2df0969..ca587b7b9c 100644 --- a/operations/helm/tests/create-daemonset/alloy/templates/controllers/daemonset.yaml +++ b/operations/helm/tests/create-daemonset/alloy/templates/controllers/daemonset.yaml @@ -27,7 +27,7 @@ spec: serviceAccountName: alloy containers: - name: alloy - image: docker.io/grafana/alloy:v0.40.2 + image: docker.io/grafana/alloy:v1.0.0 imagePullPolicy: IfNotPresent args: - run diff --git a/operations/helm/tests/create-deployment-autoscaling/alloy/templates/controllers/deployment.yaml 
b/operations/helm/tests/create-deployment-autoscaling/alloy/templates/controllers/deployment.yaml index 9541ab33c3..3c4deb3a92 100644 --- a/operations/helm/tests/create-deployment-autoscaling/alloy/templates/controllers/deployment.yaml +++ b/operations/helm/tests/create-deployment-autoscaling/alloy/templates/controllers/deployment.yaml @@ -27,7 +27,7 @@ spec: serviceAccountName: alloy containers: - name: alloy - image: docker.io/grafana/alloy:v0.40.2 + image: docker.io/grafana/alloy:v1.0.0 imagePullPolicy: IfNotPresent args: - run diff --git a/operations/helm/tests/create-deployment/alloy/templates/controllers/deployment.yaml b/operations/helm/tests/create-deployment/alloy/templates/controllers/deployment.yaml index 2c7d54aed8..918ea5f642 100644 --- a/operations/helm/tests/create-deployment/alloy/templates/controllers/deployment.yaml +++ b/operations/helm/tests/create-deployment/alloy/templates/controllers/deployment.yaml @@ -28,7 +28,7 @@ spec: serviceAccountName: alloy containers: - name: alloy - image: docker.io/grafana/alloy:v0.40.2 + image: docker.io/grafana/alloy:v1.0.0 imagePullPolicy: IfNotPresent args: - run diff --git a/operations/helm/tests/create-statefulset-autoscaling/alloy/templates/controllers/statefulset.yaml b/operations/helm/tests/create-statefulset-autoscaling/alloy/templates/controllers/statefulset.yaml index 4306d0d521..978d9a7f80 100644 --- a/operations/helm/tests/create-statefulset-autoscaling/alloy/templates/controllers/statefulset.yaml +++ b/operations/helm/tests/create-statefulset-autoscaling/alloy/templates/controllers/statefulset.yaml @@ -29,7 +29,7 @@ spec: serviceAccountName: alloy containers: - name: alloy - image: docker.io/grafana/alloy:v0.40.2 + image: docker.io/grafana/alloy:v1.0.0 imagePullPolicy: IfNotPresent args: - run diff --git a/operations/helm/tests/create-statefulset/alloy/templates/controllers/statefulset.yaml b/operations/helm/tests/create-statefulset/alloy/templates/controllers/statefulset.yaml index 1ee176d9d9..9598e99438 100644 --- a/operations/helm/tests/create-statefulset/alloy/templates/controllers/statefulset.yaml +++ b/operations/helm/tests/create-statefulset/alloy/templates/controllers/statefulset.yaml @@ -30,7 +30,7 @@ spec: serviceAccountName: alloy containers: - name: alloy - image: docker.io/grafana/alloy:v0.40.2 + image: docker.io/grafana/alloy:v1.0.0 imagePullPolicy: IfNotPresent args: - run diff --git a/operations/helm/tests/custom-config/alloy/templates/controllers/daemonset.yaml b/operations/helm/tests/custom-config/alloy/templates/controllers/daemonset.yaml index fdd2df0969..ca587b7b9c 100644 --- a/operations/helm/tests/custom-config/alloy/templates/controllers/daemonset.yaml +++ b/operations/helm/tests/custom-config/alloy/templates/controllers/daemonset.yaml @@ -27,7 +27,7 @@ spec: serviceAccountName: alloy containers: - name: alloy - image: docker.io/grafana/alloy:v0.40.2 + image: docker.io/grafana/alloy:v1.0.0 imagePullPolicy: IfNotPresent args: - run diff --git a/operations/helm/tests/default-values/alloy/templates/controllers/daemonset.yaml b/operations/helm/tests/default-values/alloy/templates/controllers/daemonset.yaml index fdd2df0969..ca587b7b9c 100644 --- a/operations/helm/tests/default-values/alloy/templates/controllers/daemonset.yaml +++ b/operations/helm/tests/default-values/alloy/templates/controllers/daemonset.yaml @@ -27,7 +27,7 @@ spec: serviceAccountName: alloy containers: - name: alloy - image: docker.io/grafana/alloy:v0.40.2 + image: docker.io/grafana/alloy:v1.0.0 imagePullPolicy: IfNotPresent args: - run 
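Every `image:` bump in these rendered fixtures traces back to the single `appVersion` change in `Chart.yaml` above, because Helm charts conventionally default the image tag to `.Chart.AppVersion` when no explicit tag is set. A minimal sketch of the values involved, assuming this chart follows that convention (the value names are assumptions, not taken from the diff):

```yaml
# Hypothetical values.yaml fragment; names assume the common Helm convention.
image:
  registry: docker.io       # the registry test fixtures swap this for quay.io
  repository: grafana/alloy
  tag: null                 # unset, so templates fall back to .Chart.AppVersion (v1.0.0)
```

Pinning `tag` in a values override would keep deployments on a fixed release even as the chart's `appVersion` moves, which is why only the fixtures that leave it unset pick up the bump below.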
diff --git a/operations/helm/tests/enable-servicemonitor-tls/alloy/templates/controllers/daemonset.yaml b/operations/helm/tests/enable-servicemonitor-tls/alloy/templates/controllers/daemonset.yaml index 72fc14e915..8d60204d1d 100644 --- a/operations/helm/tests/enable-servicemonitor-tls/alloy/templates/controllers/daemonset.yaml +++ b/operations/helm/tests/enable-servicemonitor-tls/alloy/templates/controllers/daemonset.yaml @@ -27,7 +27,7 @@ spec: serviceAccountName: alloy containers: - name: alloy - image: docker.io/grafana/alloy:v0.40.2 + image: docker.io/grafana/alloy:v1.0.0 imagePullPolicy: IfNotPresent args: - run diff --git a/operations/helm/tests/enable-servicemonitor/alloy/templates/controllers/daemonset.yaml b/operations/helm/tests/enable-servicemonitor/alloy/templates/controllers/daemonset.yaml index fdd2df0969..ca587b7b9c 100644 --- a/operations/helm/tests/enable-servicemonitor/alloy/templates/controllers/daemonset.yaml +++ b/operations/helm/tests/enable-servicemonitor/alloy/templates/controllers/daemonset.yaml @@ -27,7 +27,7 @@ spec: serviceAccountName: alloy containers: - name: alloy - image: docker.io/grafana/alloy:v0.40.2 + image: docker.io/grafana/alloy:v1.0.0 imagePullPolicy: IfNotPresent args: - run diff --git a/operations/helm/tests/envFrom/alloy/templates/controllers/daemonset.yaml b/operations/helm/tests/envFrom/alloy/templates/controllers/daemonset.yaml index b210d6a202..605f24ed6d 100644 --- a/operations/helm/tests/envFrom/alloy/templates/controllers/daemonset.yaml +++ b/operations/helm/tests/envFrom/alloy/templates/controllers/daemonset.yaml @@ -27,7 +27,7 @@ spec: serviceAccountName: alloy containers: - name: alloy - image: docker.io/grafana/alloy:v0.40.2 + image: docker.io/grafana/alloy:v1.0.0 imagePullPolicy: IfNotPresent args: - run diff --git a/operations/helm/tests/existing-config/alloy/templates/controllers/daemonset.yaml b/operations/helm/tests/existing-config/alloy/templates/controllers/daemonset.yaml index d5da2f01bb..596d8a71c1 100644 --- a/operations/helm/tests/existing-config/alloy/templates/controllers/daemonset.yaml +++ b/operations/helm/tests/existing-config/alloy/templates/controllers/daemonset.yaml @@ -27,7 +27,7 @@ spec: serviceAccountName: alloy containers: - name: alloy - image: docker.io/grafana/alloy:v0.40.2 + image: docker.io/grafana/alloy:v1.0.0 imagePullPolicy: IfNotPresent args: - run diff --git a/operations/helm/tests/extra-env/alloy/templates/controllers/daemonset.yaml b/operations/helm/tests/extra-env/alloy/templates/controllers/daemonset.yaml index ba538213a4..5198946515 100644 --- a/operations/helm/tests/extra-env/alloy/templates/controllers/daemonset.yaml +++ b/operations/helm/tests/extra-env/alloy/templates/controllers/daemonset.yaml @@ -27,7 +27,7 @@ spec: serviceAccountName: alloy containers: - name: alloy - image: docker.io/grafana/alloy:v0.40.2 + image: docker.io/grafana/alloy:v1.0.0 imagePullPolicy: IfNotPresent args: - run diff --git a/operations/helm/tests/extra-ports/alloy/templates/controllers/daemonset.yaml b/operations/helm/tests/extra-ports/alloy/templates/controllers/daemonset.yaml index b68ed7e8fd..5cf9738e87 100644 --- a/operations/helm/tests/extra-ports/alloy/templates/controllers/daemonset.yaml +++ b/operations/helm/tests/extra-ports/alloy/templates/controllers/daemonset.yaml @@ -27,7 +27,7 @@ spec: serviceAccountName: alloy containers: - name: alloy - image: docker.io/grafana/alloy:v0.40.2 + image: docker.io/grafana/alloy:v1.0.0 imagePullPolicy: IfNotPresent args: - run diff --git 
a/operations/helm/tests/faro-ingress/alloy/templates/controllers/daemonset.yaml b/operations/helm/tests/faro-ingress/alloy/templates/controllers/daemonset.yaml index 232baf4cd0..1e55d3b51a 100644 --- a/operations/helm/tests/faro-ingress/alloy/templates/controllers/daemonset.yaml +++ b/operations/helm/tests/faro-ingress/alloy/templates/controllers/daemonset.yaml @@ -27,7 +27,7 @@ spec: serviceAccountName: alloy containers: - name: alloy - image: docker.io/grafana/alloy:v0.40.2 + image: docker.io/grafana/alloy:v1.0.0 imagePullPolicy: IfNotPresent args: - run diff --git a/operations/helm/tests/global-image-pullsecrets/alloy/templates/controllers/daemonset.yaml b/operations/helm/tests/global-image-pullsecrets/alloy/templates/controllers/daemonset.yaml index 2dbc3aa049..7764c5c163 100644 --- a/operations/helm/tests/global-image-pullsecrets/alloy/templates/controllers/daemonset.yaml +++ b/operations/helm/tests/global-image-pullsecrets/alloy/templates/controllers/daemonset.yaml @@ -32,7 +32,7 @@ spec: - name: global-cred containers: - name: alloy - image: docker.io/grafana/alloy:v0.40.2 + image: docker.io/grafana/alloy:v1.0.0 imagePullPolicy: IfNotPresent args: - run diff --git a/operations/helm/tests/global-image-registry/alloy/templates/controllers/daemonset.yaml b/operations/helm/tests/global-image-registry/alloy/templates/controllers/daemonset.yaml index c066fd7be5..a6b6c4a8b7 100644 --- a/operations/helm/tests/global-image-registry/alloy/templates/controllers/daemonset.yaml +++ b/operations/helm/tests/global-image-registry/alloy/templates/controllers/daemonset.yaml @@ -27,7 +27,7 @@ spec: serviceAccountName: alloy containers: - name: alloy - image: quay.io/grafana/alloy:v0.40.2 + image: quay.io/grafana/alloy:v1.0.0 imagePullPolicy: IfNotPresent args: - run diff --git a/operations/helm/tests/initcontainers/alloy/templates/controllers/daemonset.yaml b/operations/helm/tests/initcontainers/alloy/templates/controllers/daemonset.yaml index 69ac09525d..7ec77a461f 100644 --- a/operations/helm/tests/initcontainers/alloy/templates/controllers/daemonset.yaml +++ b/operations/helm/tests/initcontainers/alloy/templates/controllers/daemonset.yaml @@ -45,7 +45,7 @@ spec: name: geoip containers: - name: alloy - image: docker.io/grafana/alloy:v0.40.2 + image: docker.io/grafana/alloy:v1.0.0 imagePullPolicy: IfNotPresent args: - run diff --git a/operations/helm/tests/local-image-pullsecrets/alloy/templates/controllers/daemonset.yaml b/operations/helm/tests/local-image-pullsecrets/alloy/templates/controllers/daemonset.yaml index 83f29f0ce3..a33405935e 100644 --- a/operations/helm/tests/local-image-pullsecrets/alloy/templates/controllers/daemonset.yaml +++ b/operations/helm/tests/local-image-pullsecrets/alloy/templates/controllers/daemonset.yaml @@ -29,7 +29,7 @@ spec: - name: local-cred containers: - name: alloy - image: docker.io/grafana/alloy:v0.40.2 + image: docker.io/grafana/alloy:v1.0.0 imagePullPolicy: IfNotPresent args: - run diff --git a/operations/helm/tests/local-image-registry/alloy/templates/controllers/daemonset.yaml b/operations/helm/tests/local-image-registry/alloy/templates/controllers/daemonset.yaml index c066fd7be5..a6b6c4a8b7 100644 --- a/operations/helm/tests/local-image-registry/alloy/templates/controllers/daemonset.yaml +++ b/operations/helm/tests/local-image-registry/alloy/templates/controllers/daemonset.yaml @@ -27,7 +27,7 @@ spec: serviceAccountName: alloy containers: - name: alloy - image: quay.io/grafana/alloy:v0.40.2 + image: quay.io/grafana/alloy:v1.0.0 imagePullPolicy: IfNotPresent 
args: - run diff --git a/operations/helm/tests/nodeselectors-and-tolerations/alloy/templates/controllers/daemonset.yaml b/operations/helm/tests/nodeselectors-and-tolerations/alloy/templates/controllers/daemonset.yaml index 32b03746c8..308fa65407 100644 --- a/operations/helm/tests/nodeselectors-and-tolerations/alloy/templates/controllers/daemonset.yaml +++ b/operations/helm/tests/nodeselectors-and-tolerations/alloy/templates/controllers/daemonset.yaml @@ -27,7 +27,7 @@ spec: serviceAccountName: alloy containers: - name: alloy - image: docker.io/grafana/alloy:v0.40.2 + image: docker.io/grafana/alloy:v1.0.0 imagePullPolicy: IfNotPresent args: - run diff --git a/operations/helm/tests/pod_annotations/alloy/templates/controllers/daemonset.yaml b/operations/helm/tests/pod_annotations/alloy/templates/controllers/daemonset.yaml index 557600ede9..c8e328b3d9 100644 --- a/operations/helm/tests/pod_annotations/alloy/templates/controllers/daemonset.yaml +++ b/operations/helm/tests/pod_annotations/alloy/templates/controllers/daemonset.yaml @@ -28,7 +28,7 @@ spec: serviceAccountName: alloy containers: - name: alloy - image: docker.io/grafana/alloy:v0.40.2 + image: docker.io/grafana/alloy:v1.0.0 imagePullPolicy: IfNotPresent args: - run diff --git a/operations/helm/tests/sidecars/alloy/templates/controllers/daemonset.yaml b/operations/helm/tests/sidecars/alloy/templates/controllers/daemonset.yaml index 6f21c3078b..20629a6661 100644 --- a/operations/helm/tests/sidecars/alloy/templates/controllers/daemonset.yaml +++ b/operations/helm/tests/sidecars/alloy/templates/controllers/daemonset.yaml @@ -27,7 +27,7 @@ spec: serviceAccountName: alloy containers: - name: alloy - image: docker.io/grafana/alloy:v0.40.2 + image: docker.io/grafana/alloy:v1.0.0 imagePullPolicy: IfNotPresent args: - run diff --git a/operations/helm/tests/topologyspreadconstraints/alloy/templates/controllers/deployment.yaml b/operations/helm/tests/topologyspreadconstraints/alloy/templates/controllers/deployment.yaml index 2b0ff9dfc6..7cd624885e 100644 --- a/operations/helm/tests/topologyspreadconstraints/alloy/templates/controllers/deployment.yaml +++ b/operations/helm/tests/topologyspreadconstraints/alloy/templates/controllers/deployment.yaml @@ -28,7 +28,7 @@ spec: serviceAccountName: alloy containers: - name: alloy - image: docker.io/grafana/alloy:v0.40.2 + image: docker.io/grafana/alloy:v1.0.0 imagePullPolicy: IfNotPresent args: - run From bcf88b9428bd4af3f91c38300a1d3cb4051fbc5a Mon Sep 17 00:00:00 2001 From: Robert Fratto Date: Tue, 12 Mar 2024 09:29:29 -0400 Subject: [PATCH 015/136] flowmode: change minimum stability level to "stable" (#26) --- internal/flowmode/cmd_run.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/internal/flowmode/cmd_run.go b/internal/flowmode/cmd_run.go index 0d7ae56aa9..aebc751dc2 100644 --- a/internal/flowmode/cmd_run.go +++ b/internal/flowmode/cmd_run.go @@ -52,7 +52,7 @@ func runCommand() *cobra.Command { inMemoryAddr: "agent.internal:12345", httpListenAddr: "127.0.0.1:12345", storagePath: "data-agent/", - minStability: featuregate.StabilityExperimental, + minStability: featuregate.StabilityStable, uiPrefix: "/", disableReporting: false, enablePprof: true, From 15ce0e8064e15f6bbd3c15ce207581f7282ec1e6 Mon Sep 17 00:00:00 2001 From: Robert Fratto Date: Wed, 13 Mar 2024 10:17:44 -0400 Subject: [PATCH 016/136] usagestats: switch to alloy endpoint (#29) --- internal/usagestats/stats.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/internal/usagestats/stats.go 
b/internal/usagestats/stats.go index 004bc6e6d0..33fffd157e 100644 --- a/internal/usagestats/stats.go +++ b/internal/usagestats/stats.go @@ -17,7 +17,7 @@ import ( var ( httpClient = http.Client{Timeout: 5 * time.Second} - usageStatsURL = "https://stats.grafana.org/agent-usage-report" + usageStatsURL = "https://stats.grafana.org/alloy-usage-report" ) // Report is the payload to be sent to stats.grafana.org

From 6b660b8278a40ed035219f66168eb2e6f216dfb1 Mon Sep 17 00:00:00 2001
From: Robert Fratto
Date: Wed, 13 Mar 2024 11:21:16 -0400
Subject: [PATCH 017/136] component: graduate subset of components to stable (#30)

We identified that a subset of components is ready to be marked stable. This commit moves all of those components to stable, and in one case, moves an experimental component to beta.
---
 internal/component/discovery/kubelet/kubelet.go | 2 +-
 internal/component/loki/echo/echo.go | 2 +-
 internal/component/loki/rules/kubernetes/rules.go | 2 +-
 internal/component/loki/source/kubernetes/kubernetes.go | 2 +-
 internal/component/loki/source/podlogs/podlogs.go | 2 +-
 internal/component/mimir/rules/kubernetes/rules.go | 2 +-
 .../component/otelcol/exporter/loadbalancing/loadbalancing.go | 2 +-
 .../extension/jaeger_remote_sampling/jaeger_remote_sampling.go | 2 +-
 .../processor/probabilistic_sampler/probabilistic_sampler.go | 2 +-
 .../otelcol/processor/resourcedetection/resourcedetection.go | 2 +-
 internal/component/otelcol/processor/span/span.go | 2 +-
 .../component/otelcol/processor/tail_sampling/tail_sampling.go | 2 +-
 internal/component/otelcol/receiver/loki/loki.go | 2 +-
 internal/component/otelcol/receiver/prometheus/prometheus.go | 2 +-
 internal/component/prometheus/operator/podmonitors/operator.go | 2 +-
 internal/component/prometheus/operator/probes/probes.go | 2 +-
 .../prometheus/operator/servicemonitors/servicemonitors.go | 2 +-
 internal/component/pyroscope/ebpf/ebpf_placeholder.go | 2 +-
 18 files changed, 18 insertions(+), 18 deletions(-)

diff --git a/internal/component/discovery/kubelet/kubelet.go b/internal/component/discovery/kubelet/kubelet.go index a753c1e734..0e4717801e 100644 --- a/internal/component/discovery/kubelet/kubelet.go +++ b/internal/component/discovery/kubelet/kubelet.go @@ -61,7 +61,7 @@ var ( func init() { component.Register(component.Registration{ Name: "discovery.kubelet", - Stability: featuregate.StabilityBeta, + Stability: featuregate.StabilityStable, Args: Arguments{}, Exports: discovery.Exports{}, Build: func(opts component.Options, args component.Arguments) (component.Component, error) { diff --git a/internal/component/loki/echo/echo.go b/internal/component/loki/echo/echo.go index 8ceb29d4a4..63e2fc373a 100644 --- a/internal/component/loki/echo/echo.go +++ b/internal/component/loki/echo/echo.go @@ -13,7 +13,7 @@ import ( func init() { component.Register(component.Registration{ Name: "loki.echo", - Stability: featuregate.StabilityBeta, + Stability: featuregate.StabilityStable, Args: Arguments{}, Exports: Exports{}, diff --git a/internal/component/loki/rules/kubernetes/rules.go b/internal/component/loki/rules/kubernetes/rules.go index d2cd5ca87d..9f3d2c486d 100644 --- a/internal/component/loki/rules/kubernetes/rules.go +++ b/internal/component/loki/rules/kubernetes/rules.go @@ -32,7 +32,7 @@ import ( func init() { component.Register(component.Registration{ Name: "loki.rules.kubernetes", - Stability: featuregate.StabilityExperimental, + Stability: featuregate.StabilityStable, Args: Arguments{}, Exports: nil, Build: func(o component.Options, c
component.Arguments) (component.Component, error) { diff --git a/internal/component/loki/source/kubernetes/kubernetes.go b/internal/component/loki/source/kubernetes/kubernetes.go index f9c4d3fa5b..577f362cb5 100644 --- a/internal/component/loki/source/kubernetes/kubernetes.go +++ b/internal/component/loki/source/kubernetes/kubernetes.go @@ -26,7 +26,7 @@ import ( func init() { component.Register(component.Registration{ Name: "loki.source.kubernetes", - Stability: featuregate.StabilityExperimental, + Stability: featuregate.StabilityStable, Args: Arguments{}, Build: func(opts component.Options, args component.Arguments) (component.Component, error) { diff --git a/internal/component/loki/source/podlogs/podlogs.go b/internal/component/loki/source/podlogs/podlogs.go index c7f28202d1..2577f8c18a 100644 --- a/internal/component/loki/source/podlogs/podlogs.go +++ b/internal/component/loki/source/podlogs/podlogs.go @@ -28,7 +28,7 @@ import ( func init() { component.Register(component.Registration{ Name: "loki.source.podlogs", - Stability: featuregate.StabilityExperimental, + Stability: featuregate.StabilityStable, Args: Arguments{}, Build: func(opts component.Options, args component.Arguments) (component.Component, error) { diff --git a/internal/component/mimir/rules/kubernetes/rules.go b/internal/component/mimir/rules/kubernetes/rules.go index db75be3dee..692e176266 100644 --- a/internal/component/mimir/rules/kubernetes/rules.go +++ b/internal/component/mimir/rules/kubernetes/rules.go @@ -32,7 +32,7 @@ import ( func init() { component.Register(component.Registration{ Name: "mimir.rules.kubernetes", - Stability: featuregate.StabilityBeta, + Stability: featuregate.StabilityStable, Args: Arguments{}, Exports: nil, Build: func(o component.Options, c component.Arguments) (component.Component, error) { diff --git a/internal/component/otelcol/exporter/loadbalancing/loadbalancing.go b/internal/component/otelcol/exporter/loadbalancing/loadbalancing.go index 5bf2ac3337..ad0bac1cd1 100644 --- a/internal/component/otelcol/exporter/loadbalancing/loadbalancing.go +++ b/internal/component/otelcol/exporter/loadbalancing/loadbalancing.go @@ -25,7 +25,7 @@ import ( func init() { component.Register(component.Registration{ Name: "otelcol.exporter.loadbalancing", - Stability: featuregate.StabilityBeta, + Stability: featuregate.StabilityStable, Args: Arguments{}, Exports: otelcol.ConsumerExports{}, diff --git a/internal/component/otelcol/extension/jaeger_remote_sampling/jaeger_remote_sampling.go b/internal/component/otelcol/extension/jaeger_remote_sampling/jaeger_remote_sampling.go index c23477fc38..2f5d3e1257 100644 --- a/internal/component/otelcol/extension/jaeger_remote_sampling/jaeger_remote_sampling.go +++ b/internal/component/otelcol/extension/jaeger_remote_sampling/jaeger_remote_sampling.go @@ -16,7 +16,7 @@ import ( func init() { component.Register(component.Registration{ Name: "otelcol.extension.jaeger_remote_sampling", - Stability: featuregate.StabilityExperimental, + Stability: featuregate.StabilityStable, Args: Arguments{}, Build: func(opts component.Options, args component.Arguments) (component.Component, error) { diff --git a/internal/component/otelcol/processor/probabilistic_sampler/probabilistic_sampler.go b/internal/component/otelcol/processor/probabilistic_sampler/probabilistic_sampler.go index 2854e9be15..0ec2860750 100644 --- a/internal/component/otelcol/processor/probabilistic_sampler/probabilistic_sampler.go +++ b/internal/component/otelcol/processor/probabilistic_sampler/probabilistic_sampler.go 
@@ -15,7 +15,7 @@ import ( func init() { component.Register(component.Registration{ Name: "otelcol.processor.probabilistic_sampler", - Stability: featuregate.StabilityExperimental, + Stability: featuregate.StabilityStable, Args: Arguments{}, Exports: otelcol.ConsumerExports{}, diff --git a/internal/component/otelcol/processor/resourcedetection/resourcedetection.go b/internal/component/otelcol/processor/resourcedetection/resourcedetection.go index de32c1c6ae..20ab52d0b5 100644 --- a/internal/component/otelcol/processor/resourcedetection/resourcedetection.go +++ b/internal/component/otelcol/processor/resourcedetection/resourcedetection.go @@ -33,7 +33,7 @@ import ( func init() { component.Register(component.Registration{ Name: "otelcol.processor.resourcedetection", - Stability: featuregate.StabilityBeta, + Stability: featuregate.StabilityStable, Args: Arguments{}, Exports: otelcol.ConsumerExports{}, diff --git a/internal/component/otelcol/processor/span/span.go b/internal/component/otelcol/processor/span/span.go index 70fa5979f4..9c1fb5ef30 100644 --- a/internal/component/otelcol/processor/span/span.go +++ b/internal/component/otelcol/processor/span/span.go @@ -18,7 +18,7 @@ import ( func init() { component.Register(component.Registration{ Name: "otelcol.processor.span", - Stability: featuregate.StabilityExperimental, + Stability: featuregate.StabilityStable, Args: Arguments{}, Exports: otelcol.ConsumerExports{}, diff --git a/internal/component/otelcol/processor/tail_sampling/tail_sampling.go b/internal/component/otelcol/processor/tail_sampling/tail_sampling.go index 81e1cc929a..c06fd3a0a8 100644 --- a/internal/component/otelcol/processor/tail_sampling/tail_sampling.go +++ b/internal/component/otelcol/processor/tail_sampling/tail_sampling.go @@ -17,7 +17,7 @@ import ( func init() { component.Register(component.Registration{ Name: "otelcol.processor.tail_sampling", - Stability: featuregate.StabilityBeta, + Stability: featuregate.StabilityStable, Args: Arguments{}, Exports: otelcol.ConsumerExports{}, diff --git a/internal/component/otelcol/receiver/loki/loki.go b/internal/component/otelcol/receiver/loki/loki.go index f45d428c66..6904409399 100644 --- a/internal/component/otelcol/receiver/loki/loki.go +++ b/internal/component/otelcol/receiver/loki/loki.go @@ -22,7 +22,7 @@ import ( func init() { component.Register(component.Registration{ Name: "otelcol.receiver.loki", - Stability: featuregate.StabilityBeta, + Stability: featuregate.StabilityStable, Args: Arguments{}, Exports: Exports{}, diff --git a/internal/component/otelcol/receiver/prometheus/prometheus.go b/internal/component/otelcol/receiver/prometheus/prometheus.go index ba03333788..e27488296b 100644 --- a/internal/component/otelcol/receiver/prometheus/prometheus.go +++ b/internal/component/otelcol/receiver/prometheus/prometheus.go @@ -27,7 +27,7 @@ import ( func init() { component.Register(component.Registration{ Name: "otelcol.receiver.prometheus", - Stability: featuregate.StabilityBeta, + Stability: featuregate.StabilityStable, Args: Arguments{}, Exports: Exports{}, diff --git a/internal/component/prometheus/operator/podmonitors/operator.go b/internal/component/prometheus/operator/podmonitors/operator.go index 5da23d3b1a..b420b404d4 100644 --- a/internal/component/prometheus/operator/podmonitors/operator.go +++ b/internal/component/prometheus/operator/podmonitors/operator.go @@ -10,7 +10,7 @@ import ( func init() { component.Register(component.Registration{ Name: "prometheus.operator.podmonitors", - Stability: 
featuregate.StabilityBeta, + Stability: featuregate.StabilityStable, Args: operator.Arguments{}, Build: func(opts component.Options, args component.Arguments) (component.Component, error) { diff --git a/internal/component/prometheus/operator/probes/probes.go b/internal/component/prometheus/operator/probes/probes.go index fba2b3e301..42fb6ace17 100644 --- a/internal/component/prometheus/operator/probes/probes.go +++ b/internal/component/prometheus/operator/probes/probes.go @@ -10,7 +10,7 @@ import ( func init() { component.Register(component.Registration{ Name: "prometheus.operator.probes", - Stability: featuregate.StabilityBeta, + Stability: featuregate.StabilityStable, Args: operator.Arguments{}, Build: func(opts component.Options, args component.Arguments) (component.Component, error) { diff --git a/internal/component/prometheus/operator/servicemonitors/servicemonitors.go b/internal/component/prometheus/operator/servicemonitors/servicemonitors.go index c6379f65c5..8c86c8d07d 100644 --- a/internal/component/prometheus/operator/servicemonitors/servicemonitors.go +++ b/internal/component/prometheus/operator/servicemonitors/servicemonitors.go @@ -10,7 +10,7 @@ import ( func init() { component.Register(component.Registration{ Name: "prometheus.operator.servicemonitors", - Stability: featuregate.StabilityBeta, + Stability: featuregate.StabilityStable, Args: operator.Arguments{}, Build: func(opts component.Options, args component.Arguments) (component.Component, error) { diff --git a/internal/component/pyroscope/ebpf/ebpf_placeholder.go b/internal/component/pyroscope/ebpf/ebpf_placeholder.go index ad46fa2cc5..18e6aabe5c 100644 --- a/internal/component/pyroscope/ebpf/ebpf_placeholder.go +++ b/internal/component/pyroscope/ebpf/ebpf_placeholder.go @@ -13,7 +13,7 @@ import ( func init() { component.Register(component.Registration{ Name: "pyroscope.ebpf", - Stability: featuregate.StabilityBeta, + Stability: featuregate.StabilityStable, Args: Arguments{}, Build: func(opts component.Options, args component.Arguments) (component.Component, error) { From 3337557eab402cc4de85c93f3adfe9134aa86a98 Mon Sep 17 00:00:00 2001 From: Robert Fratto Date: Fri, 15 Mar 2024 15:03:13 -0400 Subject: [PATCH 018/136] Remove prometheus.exporter.vsphere (#35) Co-authored-by: Erik Baranowski <39704712+erikbaranowski@users.noreply.github.com> --- .../sources/reference/compatibility/_index.md | 1 - .../components/prometheus.exporter.vsphere.md | 102 ------------------ docs/sources/release-notes.md | 2 +- internal/component/all/all.go | 1 - .../prometheus/exporter/vsphere/vsphere.go | 65 ----------- .../exporter/vsphere/vsphere_test.go | 59 ---------- .../staticconvert/internal/build/builder.go | 4 - .../internal/build/vmware_exporter.go | 25 ----- .../testdata-v2/integrations_v2.river | 26 ----- .../testdata-v2/integrations_v2.yaml | 14 +-- .../testdata-v2/unsupported.diags | 3 +- .../testdata-v2/unsupported.yaml | 12 ++- .../internal/staticconvert/validate.go | 5 + 13 files changed, 21 insertions(+), 298 deletions(-) delete mode 100644 docs/sources/reference/components/prometheus.exporter.vsphere.md delete mode 100644 internal/component/prometheus/exporter/vsphere/vsphere.go delete mode 100644 internal/component/prometheus/exporter/vsphere/vsphere_test.go delete mode 100644 internal/converter/internal/staticconvert/internal/build/vmware_exporter.go diff --git a/docs/sources/reference/compatibility/_index.md b/docs/sources/reference/compatibility/_index.md index d3d3154f49..c34d9281bf 100644 --- 
a/docs/sources/reference/compatibility/_index.md +++ b/docs/sources/reference/compatibility/_index.md @@ -105,7 +105,6 @@ The following components, grouped by namespace, _export_ Targets. - [prometheus.exporter.squid](../components/prometheus.exporter.squid) - [prometheus.exporter.statsd](../components/prometheus.exporter.statsd) - [prometheus.exporter.unix](../components/prometheus.exporter.unix) -- [prometheus.exporter.vsphere](../components/prometheus.exporter.vsphere) - [prometheus.exporter.windows](../components/prometheus.exporter.windows) {{< /collapse >}} diff --git a/docs/sources/reference/components/prometheus.exporter.vsphere.md b/docs/sources/reference/components/prometheus.exporter.vsphere.md deleted file mode 100644 index 869cc61a75..0000000000 --- a/docs/sources/reference/components/prometheus.exporter.vsphere.md +++ /dev/null @@ -1,102 +0,0 @@ ---- -canonical: https://grafana.com/docs/alloy/latest/reference/components/prometheus.exporter.vsphere/ -title: prometheus.exporter.vsphere -description: Learn about prometheus.exporter.vsphere ---- - -# prometheus.exporter.vsphere - -The `prometheus.exporter.vsphere` component embeds [`vmware_exporter`](https://github.com/grafana/vmware_exporter) to collect vSphere metrics - -{{< admonition type="note" >}} -We recommend to use [otelcol.receiver.vcenter][] instead. - -[otelcol.receiver.vcenter]: ./otelcol.receiver.vcenter/ -{{< /admonition >}} - -## Usage - -```river -prometheus.exporter.vsphere "LABEL" { -} -``` - -## Arguments - -You can use the following arguments to configure the exporter's behavior. -Omitted fields take their default values. - -| Name | Type | Description | Default | Required | -| ---------------------------- | --------- | --------------------------------------------------------------------------------------------------------------------------------------- | ------- | -------- | -| `vsphere_url` | `string` | The url of the vCenter endpoint SDK | | no | -| `vsphere_user` | `string` | vCenter username. | | no | -| `vsphere_password` | `secret` | vCenter password. | | no | -| `request_chunk_size` | `int` | Number of managed objects to include in each request to vsphere when fetching performance counters. | `256` | no | -| `collect_concurrency` | `int` | Number of concurrent requests to vSphere when fetching performance counters. | `8` | no | -| `discovery_interval` | `duration` | Interval on which to run vSphere managed object discovery. | `0` | no | -| `enable_exporter_metrics` | `boolean` | Enable the exporter metrics. | `true` | no | - -- Setting `discovery_interval` to a non-zero value will result in object discovery running in the background. Each scrape will use object data gathered during the last discovery. When this value is 0, object discovery occurs per scrape. - - -## Exported fields - -{{< docs/shared lookup="reference/components/exporter-component-exports.md" source="alloy" version="" >}} - -## Component health - -`prometheus.exporter.vsphere` is only reported as unhealthy if given -an invalid configuration. In those cases, exported fields retain their last -healthy values. - -## Debug information - -`prometheus.exporter.vsphere` does not expose any component-specific -debug information. - -## Debug metrics - -`prometheus.exporter.vsphere` does not expose any component-specific -debug metrics. 
- -## Example - -This example uses a [`prometheus.scrape` component][scrape] to collect metrics -from `prometheus.exporter.vsphere`: - -```river -prometheus.exporter.vsphere "example" { - vsphere_url = "https://127.0.0.1:8989/sdk" - vsphere_user = "user" - vsphere_password = "pass" -} - -// Configure a prometheus.scrape component to collect vsphere metrics. -prometheus.scrape "demo" { - targets = prometheus.exporter.vsphere.example.targets - forward_to = [ prometheus.remote_write.default.receiver ] -} - -prometheus.remote_write "default" { - endpoint { - url = "REMOTE_WRITE_URL" - } -} -``` - -[scrape]: ../prometheus.scrape/ - - - -## Compatible components - -`prometheus.exporter.vsphere` has exports that can be consumed by the following components: - -- Components that consume [Targets](../../compatibility/#targets-consumers) - -{{< admonition type="note" >}} -Connecting some components may not be sensible or components may require further configuration to make the connection work correctly. -Refer to the linked documentation for more details. -{{< /admonition >}} - - diff --git a/docs/sources/release-notes.md b/docs/sources/release-notes.md index a665a5010c..2d282a0235 100644 --- a/docs/sources/release-notes.md +++ b/docs/sources/release-notes.md @@ -12,4 +12,4 @@ The release notes provide information about deprecations and breaking changes in For a complete list of changes to {{< param "PRODUCT_ROOT_NAME" >}}, with links to pull requests and related issues when available, refer to the [Changelog][]. -[Changelog]: https://github.com/grafana/alloy/blob/main/CHANGELOG.md \ No newline at end of file +[Changelog]: https://github.com/grafana/alloy/blob/main/CHANGELOG.md diff --git a/internal/component/all/all.go b/internal/component/all/all.go index 7cc147ea80..3369357d50 100644 --- a/internal/component/all/all.go +++ b/internal/component/all/all.go @@ -117,7 +117,6 @@ import ( _ "github.com/grafana/agent/internal/component/prometheus/exporter/squid" // Import prometheus.exporter.squid _ "github.com/grafana/agent/internal/component/prometheus/exporter/statsd" // Import prometheus.exporter.statsd _ "github.com/grafana/agent/internal/component/prometheus/exporter/unix" // Import prometheus.exporter.unix - _ "github.com/grafana/agent/internal/component/prometheus/exporter/vsphere" // Import prometheus.exporter.vsphere _ "github.com/grafana/agent/internal/component/prometheus/exporter/windows" // Import prometheus.exporter.windows _ "github.com/grafana/agent/internal/component/prometheus/operator/podmonitors" // Import prometheus.operator.podmonitors _ "github.com/grafana/agent/internal/component/prometheus/operator/probes" // Import prometheus.operator.probes diff --git a/internal/component/prometheus/exporter/vsphere/vsphere.go b/internal/component/prometheus/exporter/vsphere/vsphere.go deleted file mode 100644 index aeb35d1dd8..0000000000 --- a/internal/component/prometheus/exporter/vsphere/vsphere.go +++ /dev/null @@ -1,65 +0,0 @@ -package vsphere - -import ( - "time" - - "github.com/grafana/agent/internal/component" - "github.com/grafana/agent/internal/component/prometheus/exporter" - "github.com/grafana/agent/internal/featuregate" - "github.com/grafana/agent/internal/static/integrations" - "github.com/grafana/agent/internal/static/integrations/vmware_exporter" - "github.com/grafana/river/rivertypes" - config_util "github.com/prometheus/common/config" -) - -func init() { - component.Register(component.Registration{ - Name: "prometheus.exporter.vsphere", - Stability: 
featuregate.StabilityStable, - Args: Arguments{}, - Exports: exporter.Exports{}, - - Build: exporter.New(createExporter, "vsphere"), - }) -} - -func createExporter(opts component.Options, args component.Arguments, defaultInstanceKey string) (integrations.Integration, string, error) { - a := args.(Arguments) - return integrations.NewIntegrationWithInstanceKey(opts.Logger, a.Convert(), defaultInstanceKey) -} - -// DefaultArguments holds the default settings for the vsphere exporter -var DefaultArguments = Arguments{ - ChunkSize: 256, - CollectConcurrency: 8, - ObjectDiscoveryInterval: 0, - EnableExporterMetrics: true, -} - -// Arguments controls the vsphere exporter. -type Arguments struct { - ChunkSize int `river:"request_chunk_size,attr,optional"` - CollectConcurrency int `river:"collect_concurrency,attr,optional"` - VSphereURL string `river:"vsphere_url,attr,optional"` - VSphereUser string `river:"vsphere_user,attr,optional"` - VSpherePass rivertypes.Secret `river:"vsphere_password,attr,optional"` - ObjectDiscoveryInterval time.Duration `river:"discovery_interval,attr,optional"` - EnableExporterMetrics bool `river:"enable_exporter_metrics,attr,optional"` -} - -// SetToDefault implements river.Defaulter. -func (a *Arguments) SetToDefault() { - *a = DefaultArguments -} - -func (a *Arguments) Convert() *vmware_exporter.Config { - return &vmware_exporter.Config{ - ChunkSize: a.ChunkSize, - CollectConcurrency: a.CollectConcurrency, - VSphereURL: a.VSphereURL, - VSphereUser: a.VSphereUser, - VSpherePass: config_util.Secret(a.VSpherePass), - ObjectDiscoveryInterval: a.ObjectDiscoveryInterval, - EnableExporterMetrics: a.EnableExporterMetrics, - } -} diff --git a/internal/component/prometheus/exporter/vsphere/vsphere_test.go b/internal/component/prometheus/exporter/vsphere/vsphere_test.go deleted file mode 100644 index edf5ebfa89..0000000000 --- a/internal/component/prometheus/exporter/vsphere/vsphere_test.go +++ /dev/null @@ -1,59 +0,0 @@ -package vsphere - -import ( - "testing" - - "github.com/grafana/agent/internal/static/integrations/vmware_exporter" - "github.com/grafana/river" - "github.com/stretchr/testify/require" -) - -func TestRiverUnmarshal(t *testing.T) { - riverConfig := ` - request_chunk_size = 256 - collect_concurrency = 8 - vsphere_url = "https://localhost:443/sdk" - vsphere_user = "user" - vsphere_password = "pass" - discovery_interval = 0 - enable_exporter_metrics = true - ` - var args Arguments - err := river.Unmarshal([]byte(riverConfig), &args) - - require.NoError(t, err) - expected := Arguments{ - ChunkSize: 256, - CollectConcurrency: 8, - VSphereURL: "https://localhost:443/sdk", - VSphereUser: "user", - VSpherePass: "pass", - ObjectDiscoveryInterval: 0, - EnableExporterMetrics: true, - } - require.Equal(t, expected, args) -} - -func TestRiverConvert(t *testing.T) { - orig := Arguments{ - ChunkSize: 256, - CollectConcurrency: 8, - VSphereURL: "https://localhost:443/sdk", - VSphereUser: "user", - VSpherePass: "pass", - ObjectDiscoveryInterval: 0, - EnableExporterMetrics: true, - } - converted := orig.Convert() - expected := vmware_exporter.Config{ - ChunkSize: 256, - CollectConcurrency: 8, - VSphereURL: "https://localhost:443/sdk", - VSphereUser: "user", - VSpherePass: "pass", - ObjectDiscoveryInterval: 0, - EnableExporterMetrics: true, - } - - require.Equal(t, expected, *converted) -} diff --git a/internal/converter/internal/staticconvert/internal/build/builder.go b/internal/converter/internal/staticconvert/internal/build/builder.go index 68e35d2880..1f8c695031 100644 --- 
a/internal/converter/internal/staticconvert/internal/build/builder.go +++ b/internal/converter/internal/staticconvert/internal/build/builder.go @@ -45,7 +45,6 @@ import ( eventhandler_v2 "github.com/grafana/agent/internal/static/integrations/v2/eventhandler" metricsutils_v2 "github.com/grafana/agent/internal/static/integrations/v2/metricsutils" snmp_exporter_v2 "github.com/grafana/agent/internal/static/integrations/v2/snmp_exporter" - vmware_exporter_v2 "github.com/grafana/agent/internal/static/integrations/v2/vmware_exporter" "github.com/grafana/agent/internal/static/integrations/windows_exporter" "github.com/grafana/river/scanner" "github.com/grafana/river/token/builder" @@ -239,9 +238,6 @@ func (b *IntegrationsConfigBuilder) appendV2Integrations() { case *snmp_exporter_v2.Config: exports = b.appendSnmpExporterV2(itg) commonConfig = itg.Common - case *vmware_exporter_v2.Config: - exports = b.appendVmwareExporterV2(itg) - commonConfig = itg.Common case *metricsutils_v2.ConfigShim: commonConfig = itg.Common switch v1_itg := itg.Orig.(type) { diff --git a/internal/converter/internal/staticconvert/internal/build/vmware_exporter.go b/internal/converter/internal/staticconvert/internal/build/vmware_exporter.go deleted file mode 100644 index 656395b668..0000000000 --- a/internal/converter/internal/staticconvert/internal/build/vmware_exporter.go +++ /dev/null @@ -1,25 +0,0 @@ -package build - -import ( - "github.com/grafana/agent/internal/component/discovery" - "github.com/grafana/agent/internal/component/prometheus/exporter/vsphere" - vmware_exporter_v2 "github.com/grafana/agent/internal/static/integrations/v2/vmware_exporter" - "github.com/grafana/river/rivertypes" -) - -func (b *IntegrationsConfigBuilder) appendVmwareExporterV2(config *vmware_exporter_v2.Config) discovery.Exports { - args := toVmwareExporter(config) - return b.appendExporterBlock(args, config.Name(), nil, "vsphere") -} - -func toVmwareExporter(config *vmware_exporter_v2.Config) *vsphere.Arguments { - return &vsphere.Arguments{ - ChunkSize: config.ChunkSize, - CollectConcurrency: config.CollectConcurrency, - VSphereURL: config.VSphereURL, - VSphereUser: config.VSphereUser, - VSpherePass: rivertypes.Secret(config.VSpherePass), - ObjectDiscoveryInterval: config.ObjectDiscoveryInterval, - EnableExporterMetrics: config.EnableExporterMetrics, - } -} diff --git a/internal/converter/internal/staticconvert/testdata-v2/integrations_v2.river b/internal/converter/internal/staticconvert/testdata-v2/integrations_v2.river index c609330be6..f7c22ade64 100644 --- a/internal/converter/internal/staticconvert/testdata-v2/integrations_v2.river +++ b/internal/converter/internal/staticconvert/testdata-v2/integrations_v2.river @@ -762,29 +762,3 @@ prometheus.scrape "integrations_snmp" { forward_to = [prometheus.remote_write.metrics_default.receiver] job_name = "integrations/snmp" } - -prometheus.exporter.vsphere "integrations_vsphere" { - vsphere_url = "https://127.0.0.1:8989/sdk" - vsphere_user = "user" - vsphere_password = "pass" -} - -discovery.relabel "integrations_vsphere" { - targets = prometheus.exporter.vsphere.integrations_vsphere.targets - - rule { - target_label = "instance" - replacement = "vsphere" - } - - rule { - target_label = "job" - replacement = "integrations/vsphere" - } -} - -prometheus.scrape "integrations_vsphere" { - targets = discovery.relabel.integrations_vsphere.output - forward_to = [prometheus.remote_write.metrics_default.receiver] - job_name = "integrations/vsphere" -} diff --git 
a/internal/converter/internal/staticconvert/testdata-v2/integrations_v2.yaml b/internal/converter/internal/staticconvert/testdata-v2/integrations_v2.yaml index cd0c497d15..7a5a4ee730 100644 --- a/internal/converter/internal/staticconvert/testdata-v2/integrations_v2.yaml +++ b/internal/converter/internal/staticconvert/testdata-v2/integrations_v2.yaml @@ -1,7 +1,7 @@ server: log_level: ${SOME_ENVIRONMENT_VARIABLE:='debug'} log_format: json - + metrics: global: remote_write: @@ -182,7 +182,7 @@ integrations: oracledb_configs: - connection_string: oracle://user:password@localhost:1521/orcl.localnet postgres_configs: - - data_source_names: + - data_source_names: - postgres://postgres:password@localhost:5432/postgres?sslmode=disable autoscrape: relabel_configs: @@ -228,13 +228,3 @@ integrations: statsd: autoscrape: metrics_instance: "default" - vsphere_configs: - - vsphere_url: https://127.0.0.1:8989/sdk - vsphere_user: user - vsphere_password: pass - request_chunk_size: 256 - collect_concurrency: 8 - instance: vsphere - autoscrape: - enable: true - metrics_instance: default \ No newline at end of file diff --git a/internal/converter/internal/staticconvert/testdata-v2/unsupported.diags b/internal/converter/internal/staticconvert/testdata-v2/unsupported.diags index 48eae120a2..85fb306a8b 100644 --- a/internal/converter/internal/staticconvert/testdata-v2/unsupported.diags +++ b/internal/converter/internal/staticconvert/testdata-v2/unsupported.diags @@ -3,4 +3,5 @@ (Error) The converter does not support converting the provided eventhandler flush_interval config: this field is not configurable in flow mode (Warning) The eventhandler cache_path is unnecessary in flow mode because the storage path is governed by the --storage.path cmd argument and is always local to the component. (Warning) Please review your agent command line flags and ensure they are set in your Flow mode config file where necessary. -(Error) The converter does not support converting the provided app_agent_receiver traces_instance config. \ No newline at end of file +(Error) The converter does not support converting the provided app_agent_receiver traces_instance config. +(Error) Support for the vsphere integration has been removed in Grafana Alloy v1.0, and conversion will not be performed.\nTo achieve similar functionality, consider creating an otelcol.receiver.vcenter component and converting generated metrics to a Prometheus pipeline using otelcol.exporter.prometheus. 
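The detail text added to `unsupported.diags` names a replacement pipeline but doesn't show one. A minimal River sketch of that pipeline, reusing the placeholder endpoint and credentials from the removed example (the receiver's `endpoint`, `username`, and `password` arguments are based on its documented settings, and `REMOTE_WRITE_URL` is a stand-in):

```river
otelcol.receiver.vcenter "default" {
  endpoint = "https://127.0.0.1:8989/sdk"
  username = "user"
  password = "pass"

  output {
    // Send collected vCenter metrics to the Prometheus converter below.
    metrics = [otelcol.exporter.prometheus.default.input]
  }
}

// Convert OpenTelemetry metrics into Prometheus samples.
otelcol.exporter.prometheus "default" {
  forward_to = [prometheus.remote_write.default.receiver]
}

prometheus.remote_write "default" {
  endpoint {
    url = "REMOTE_WRITE_URL"
  }
}
```

This mirrors what the deleted `prometheus.exporter.vsphere` example wired up, with the OpenTelemetry receiver standing in for the embedded exporter.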
diff --git a/internal/converter/internal/staticconvert/testdata-v2/unsupported.yaml b/internal/converter/internal/staticconvert/testdata-v2/unsupported.yaml index dfce6ed22e..25a438ee01 100644 --- a/internal/converter/internal/staticconvert/testdata-v2/unsupported.yaml +++ b/internal/converter/internal/staticconvert/testdata-v2/unsupported.yaml @@ -25,4 +25,14 @@ integrations: logs_instance: "log_config" send_timeout: 30 informer_resync: 30 - flush_interval: 30 \ No newline at end of file + flush_interval: 30 + vsphere_configs: + - vsphere_url: https://127.0.0.1:8989/sdk + vsphere_user: user + vsphere_password: pass + request_chunk_size: 256 + collect_concurrency: 8 + instance: vsphere + autoscrape: + enable: true + metrics_instance: default diff --git a/internal/converter/internal/staticconvert/validate.go b/internal/converter/internal/staticconvert/validate.go index 024b57b182..a703dd5a43 100644 --- a/internal/converter/internal/staticconvert/validate.go +++ b/internal/converter/internal/staticconvert/validate.go @@ -175,6 +175,11 @@ func validateIntegrationsV2(integrationsConfig *v2.SubsystemOptions) diag.Diagno case *eventhandler_v2.Config: case *snmp_exporter_v2.Config: case *vmware_exporter_v2.Config: + diags.AddWithDetail( + diag.SeverityLevelError, + "Support for the vsphere integration has been removed in Grafana Alloy v1.0, and conversion will not be performed.", + "To achieve similar functionality, consider creating an otelcol.receiver.vcenter component and converting generated metrics to a Prometheus pipeline using otelcol.exporter.prometheus.", + ) case *metricsutils_v2.ConfigShim: switch v1_itg := itg.Orig.(type) { case *azure_exporter.Config: From 69d4e55d9cd37aad14febb2f4ac0c30c5f6a7059 Mon Sep 17 00:00:00 2001
From: Clayton Cornell <131809008+clayton-cornell@users.noreply.github.com>
Date: Mon, 18 Mar 2024 01:11:02 -0700
Subject: [PATCH 019/136] Update topics to replace instances of Agent with Alloy (#34)

* Replace agent with alloy in cli topics
* Replace agent with alloy in config blocks
* Replace agent with alloy in stdlib topics
* Replace agent with alloy in cli and tasks
* Replace agent with alloy in get started and run topics
* Replace agent with alloy in loadbalancing topic
* Remove agent from convert command
* Update Mimir rules topic
* Clean up tutorial and fix broken link
* Update agent to alloy in migrate tasks
* Fix another grafana-agent and rename to grafana-alloy

---
 docs/sources/get-started/install/_index.md | 4 +-
 docs/sources/get-started/install/ansible.md | 29 ++-
 docs/sources/get-started/install/binary.md | 6 +-
 docs/sources/get-started/install/chef.md | 14 +-
 docs/sources/get-started/install/docker.md | 16 +-
 .../sources/get-started/install/kubernetes.md | 10 +-
 docs/sources/get-started/install/linux.md | 14 +-
 docs/sources/get-started/install/macos.md | 8 +-
 docs/sources/get-started/install/puppet.md | 22 +-
 docs/sources/get-started/install/windows.md | 14 +-
 docs/sources/get-started/run/binary.md | 27 ++-
 docs/sources/get-started/run/linux.md | 16 +-
 docs/sources/get-started/run/macos.md | 10 +-
 docs/sources/reference/cli/_index.md | 4 +-
 docs/sources/reference/cli/convert.md | 7 +-
 docs/sources/reference/cli/fmt.md | 3 +-
 docs/sources/reference/cli/run.md | 9 +-
 docs/sources/reference/cli/tools.md | 9 +-
 .../reference/components/loki.process.md | 20 +-
 .../components/loki.rules.kubernetes.md | 18 +-
 .../components/mimir.rules.kubernetes.md | 30 +--
 .../otelcol.exporter.loadbalancing.md | 204 +++++++++---------
.../reference/components/pyroscope.java.md | 8 +- .../reference/config-blocks/remotecfg.md | 4 +- .../reference/config-blocks/tracing.md | 2 +- docs/sources/reference/stdlib/env.md | 2 +- .../tasks/collect-prometheus-metrics.md | 4 +- ...ering.md => configure-alloy-clustering.md} | 6 +- docs/sources/tasks/configure/_index.md | 6 +- .../tasks/configure/configure-linux.md | 18 +- .../tasks/configure/configure-macos.md | 12 +- .../tasks/configure/configure-windows.md | 8 +- docs/sources/tasks/debug.md | 4 +- .../distribute-prometheus-scrape-load.md | 2 +- docs/sources/tasks/migrate/from-operator.md | 10 +- docs/sources/tasks/migrate/from-prometheus.md | 24 ++- docs/sources/tasks/migrate/from-promtail.md | 16 ++ docs/sources/tasks/migrate/from-static.md | 21 ++ .../tasks/monitor/component_metrics.md | 4 +- .../tasks/monitor/controller_metrics.md | 14 +- .../tasks/opentelemetry-to-lgtm-stack.md | 8 +- .../collecting-prometheus-metrics.md | 3 +- .../first-components-and-stdlib/index.md | 8 +- .../tutorials/flow-by-example/get-started.md | 2 +- .../flow-by-example/processing-logs/index.md | 2 +- 45 files changed, 357 insertions(+), 325 deletions(-) rename docs/sources/tasks/{configure-agent-clustering.md => configure-alloy-clustering.md} (96%) diff --git a/docs/sources/get-started/install/_index.md b/docs/sources/get-started/install/_index.md index 4ccae7825e..01df69dc9c 100644 --- a/docs/sources/get-started/install/_index.md +++ b/docs/sources/get-started/install/_index.md @@ -1,8 +1,8 @@ --- canonical: https://grafana.com/docs/alloy/latest/get-started/install/ -description: Learn how to install Grafana Agent Flow +description: Learn how to install Grafana Alloy menuTitle: Install -title: Install Grafana Agent Flow +title: Install Grafana Alloy weight: 50 --- diff --git a/docs/sources/get-started/install/ansible.md b/docs/sources/get-started/install/ansible.md index bbd8209f89..1b21165e90 100644 --- a/docs/sources/get-started/install/ansible.md +++ b/docs/sources/get-started/install/ansible.md @@ -19,23 +19,22 @@ You can use Ansible to install and manage {{< param "PRODUCT_NAME" >}} on Linux To add {{% param "PRODUCT_NAME" %}} to a host: -1. Create a file named `grafana-agent.yml` and add the following: +1. Create a file named `grafana-alloy.yml` and add the following: ```yaml - - name: Install Grafana Agent Flow + - name: Install Grafana Alloy hosts: all become: true tasks: - - name: Install Grafana Agent Flow + - name: Install Grafana Alloy ansible.builtin.include_role: - name: grafana.grafana.grafana_agent + name: grafana.grafana.grafana_alloy vars: - grafana_agent_mode: flow # Destination file name - grafana_agent_config_filename: config.river + grafana_alloy_config_filename: config.river # Local file to copy - grafana_agent_provisioned_config_file: "" - grafana_agent_flags_extra: + grafana_alloy_provisioned_config_file: "" + grafana_alloy_flags_extra: server.http.listen-addr: '0.0.0.0:12345' ``` @@ -45,7 +44,7 @@ To add {{% param "PRODUCT_NAME" %}} to a host: 1. Run the Ansible playbook. Open a terminal window and run the following command from the Ansible playbook directory. 
```shell - ansible-playbook grafana-agent.yml + ansible-playbook grafana-alloy.yml ``` ## Validate @@ -53,21 +52,21 @@ To add {{% param "PRODUCT_NAME" %}} to a host: To verify that the {{< param "PRODUCT_NAME" >}} service on the target machine is `active` and `running`, open a terminal window and run the following command: ```shell -$ sudo systemctl status grafana-agent.service +$ sudo systemctl status grafana-alloy.service ``` If the service is `active` and `running`, the output should look similar to this: ``` -grafana-agent.service - Grafana Agent - Loaded: loaded (/etc/systemd/system/grafana-agent.service; enabled; vendor preset: enabled) +grafana-alloy.service - Grafana Alloy + Loaded: loaded (/etc/systemd/system/grafana-alloy.service; enabled; vendor preset: enabled) Active: active (running) since Wed 2022-07-20 09:56:15 UTC; 36s ago -Main PID: 3176 (agent-linux-amd) +Main PID: 3176 (alloy-linux-amd) Tasks: 8 (limit: 515) Memory: 92.5M CPU: 380ms - CGroup: /system.slice/grafana-agent.service - └─3176 /usr/local/bin/agent-linux-amd64 --config.file=/etc/grafana-cloud/agent-config.yaml + CGroup: /system.slice/grafana-alloy.service + └─3176 /usr/local/bin/alloy-linux-amd64 --config.file=/etc/grafana-cloud/alloy-config.yaml ``` ## Next steps diff --git a/docs/sources/get-started/install/binary.md b/docs/sources/get-started/install/binary.md index d58d142742..9f418dad78 100644 --- a/docs/sources/get-started/install/binary.md +++ b/docs/sources/get-started/install/binary.md @@ -1,8 +1,8 @@ --- canonical: https://grafana.com/docs/alloy/latest/get-started/install/binary/ -description: Learn how to install Grafana Agent Flow as a standalone binary +description: Learn how to install Grafana Alloy as a standalone binary menuTitle: Standalone -title: Install Grafana Agent Flow as a standalone binary +title: Install Grafana Alloy as a standalone binary weight: 600 --- @@ -23,7 +23,7 @@ To download {{< param "PRODUCT_NAME" >}} as a standalone binary, perform the fol 1. Scroll down to the **Assets** section. -1. Download the `grafana-agent` zip file that matches your operating system and machine's architecture. +1. Download the `grafana-alloy` zip file that matches your operating system and machine's architecture. 1. Extract the package contents into a directory. diff --git a/docs/sources/get-started/install/chef.md b/docs/sources/get-started/install/chef.md index 1f17d1c569..cabbbebfaf 100644 --- a/docs/sources/get-started/install/chef.md +++ b/docs/sources/get-started/install/chef.md @@ -66,26 +66,26 @@ To add {{< param "PRODUCT_NAME" >}} to a host: end ``` -1. Add the following resources to install and enable the `grafana-agent-flow` service: +1. Add the following resources to install and enable the `grafana-alloy` service: ```ruby - package 'grafana-agent-flow' do + package 'grafana-alloy' do action :install flush_cache [ :before ] if platform_family?('amazon', 'rhel', 'fedora') - notifies :restart, 'service[grafana-agent-flow]', :delayed + notifies :restart, 'service[grafana-alloy]', :delayed end - service 'grafana-agent-flow' do - service_name 'grafana-agent-flow' + service 'grafana-alloy' do + service_name 'grafana-alloy' action [:enable, :start] end ``` ## Configuration -The `grafana-agent-flow` package installs a default configuration file that doesn't send telemetry anywhere. +The `grafana-alloy` package installs a default configuration file that doesn't send telemetry anywhere. -The default configuration file location is `/etc/grafana-agent-flow.river`. 
You can replace this file with your own configuration or create a new configuration file for the service to use. +The default configuration file location is `/etc/grafana-alloy.river`. You can replace this file with your own configuration or create a new configuration file for the service to use. ## Next steps diff --git a/docs/sources/get-started/install/docker.md b/docs/sources/get-started/install/docker.md index 8db56e706b..53e8ad270e 100644 --- a/docs/sources/get-started/install/docker.md +++ b/docs/sources/get-started/install/docker.md @@ -31,11 +31,10 @@ To run {{< param "PRODUCT_NAME" >}} as a Linux Docker container, run the followi ```shell docker run \ - -e AGENT_MODE=flow \ - -v <CONFIG_FILE_PATH>:/etc/agent/config.river \ + -v <CONFIG_FILE_PATH>:/etc/alloy/config.river \ -p 12345:12345 \ - grafana/agent:latest \ - run --server.http.listen-addr=0.0.0.0:12345 /etc/agent/config.river + grafana/alloy:latest \ + run --server.http.listen-addr=0.0.0.0:12345 /etc/alloy/config.river ``` Replace the following: @@ -56,11 +55,10 @@ To run {{< param "PRODUCT_NAME" >}} as a Windows Docker container, run the follo ```shell docker run \ - -e AGENT_MODE=flow \ - -v <CONFIG_FILE_PATH>:C:\etc\grafana-agent\config.river \ + -v <CONFIG_FILE_PATH>:C:\etc\grafana-alloy\config.river \ -p 12345:12345 \ - grafana/agent:latest-windows \ + grafana/alloy:latest-windows \ - run --server.http.listen-addr=0.0.0.0:12345 C:\etc\grafana-agent\config.river + run --server.http.listen-addr=0.0.0.0:12345 C:\etc\grafana-alloy\config.river ``` Replace the following: @@ -83,4 +81,4 @@ To verify that {{< param "PRODUCT_NAME" >}} is running successfully, navigate to [Windows containers]: #run-a-windows-docker-container [Docker]: https://docker.io [run]: ../../../reference/cli/run/ -[UI]: ../../../tasks/debug/#grafana-agent-flow-ui +[UI]: ../../../tasks/debug/#grafana-alloy-ui diff --git a/docs/sources/get-started/install/kubernetes.md b/docs/sources/get-started/install/kubernetes.md index 68f93fb150..25f38b1fab 100644 --- a/docs/sources/get-started/install/kubernetes.md +++ b/docs/sources/get-started/install/kubernetes.md @@ -1,8 +1,8 @@ --- canonical: https://grafana.com/docs/alloy/latest/get-started/install/kubernetes/ -description: Learn how to deploy Grafana Agent Flow on Kubernetes +description: Learn how to deploy Grafana Alloy on Kubernetes menuTitle: Kubernetes -title: Deploy Grafana Agent Flow on Kubernetes +title: Deploy Grafana Alloy on Kubernetes weight: 200 --- @@ -35,12 +35,12 @@ To deploy {{< param "PRODUCT_ROOT_NAME" >}} on Kubernetes using Helm, run the fo 1. Install {{< param "PRODUCT_ROOT_NAME" >}}: ```shell - helm install <RELEASE_NAME> grafana/grafana-agent + helm install <RELEASE_NAME> grafana/grafana-alloy ``` Replace the following: - - _`<RELEASE_NAME>`_: The name to use for your {{< param "PRODUCT_ROOT_NAME" >}} installation, such as `grafana-agent-flow`. + - _`<RELEASE_NAME>`_: The name to use for your {{< param "PRODUCT_ROOT_NAME" >}} installation, such as `grafana-alloy`. For more information on the {{< param "PRODUCT_ROOT_NAME" >}} Helm chart, refer to the Helm chart documentation on [Artifact Hub][].
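As a concrete instance of the Helm command above, assuming the chart is published under the `grafana/grafana-alloy` name used in this diff, and with an illustrative release name and namespace:

```shell
# Add the Grafana Helm repository, then install the chart.
# "grafana-alloy" is an arbitrary release name; "monitoring" is an example namespace.
helm repo add grafana https://grafana.github.io/helm-charts
helm repo update
helm install grafana-alloy grafana/grafana-alloy --namespace monitoring --create-namespace
```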
@@ -49,5 +49,5 @@ For more information on the {{< param "PRODUCT_ROOT_NAME" >}} Helm chart, refer - [Configure {{< param "PRODUCT_NAME" >}}][Configure] [Helm]: https://helm.sh -[Artifact Hub]: https://artifacthub.io/packages/helm/grafana/grafana-agent +[Artifact Hub]: https://artifacthub.io/packages/helm/grafana/grafana-alloy [Configure]: ../../../tasks/configure/configure-kubernetes/ diff --git a/docs/sources/get-started/install/linux.md b/docs/sources/get-started/install/linux.md index a2ab220a67..20c99a10c6 100644 --- a/docs/sources/get-started/install/linux.md +++ b/docs/sources/get-started/install/linux.md @@ -57,15 +57,15 @@ sslcacert=/etc/pki/tls/certs/ca-bundle.crt' | sudo tee /etc/yum.repos.d/grafana. {{< code >}} ```debian-ubuntu - sudo apt-get install grafana-agent-flow + sudo apt-get install grafana-alloy ``` ```rhel-fedora - sudo dnf install grafana-agent-flow + sudo dnf install grafana-alloy ``` ```suse-opensuse - sudo zypper install grafana-agent-flow + sudo zypper install grafana-alloy ``` {{< /code >}} @@ -76,22 +76,22 @@ To uninstall {{< param "PRODUCT_NAME" >}} on Linux, run the following commands i 1. Stop the systemd service for {{< param "PRODUCT_NAME" >}}. ```All-distros - sudo systemctl stop grafana-agent-flow + sudo systemctl stop grafana-alloy ``` 1. Uninstall {{< param "PRODUCT_NAME" >}}. {{< code >}} ```debian-ubuntu - sudo apt-get remove grafana-agent-flow + sudo apt-get remove grafana-alloy ``` ```rhel-fedora - sudo dnf remove grafana-agent-flow + sudo dnf remove grafana-alloy ``` ```suse-opensuse - sudo zypper remove grafana-agent-flow + sudo zypper remove grafana-alloy ``` {{< /code >}} diff --git a/docs/sources/get-started/install/macos.md b/docs/sources/get-started/install/macos.md index f151f0fd6a..56720b0574 100644 --- a/docs/sources/get-started/install/macos.md +++ b/docs/sources/get-started/install/macos.md @@ -33,7 +33,7 @@ To install {{< param "PRODUCT_NAME" >}} on macOS, run the following commands in 1. Install {{< param "PRODUCT_NAME" >}}: ```shell - brew install grafana-agent-flow + brew install grafana-alloy ``` ## Upgrade @@ -43,13 +43,13 @@ To upgrade {{< param "PRODUCT_NAME" >}} on macOS, run the following commands in 1. Upgrade {{< param "PRODUCT_NAME" >}}: ```shell - brew upgrade grafana-agent-flow + brew upgrade grafana-alloy ``` 1. 
Restart {{< param "PRODUCT_NAME" >}}: ```shell - brew services restart grafana-agent-flow + brew services restart grafana-alloy ``` ## Uninstall @@ -57,7 +57,7 @@ To upgrade {{< param "PRODUCT_NAME" >}} on macOS, run the following commands in To uninstall {{< param "PRODUCT_NAME" >}} on macOS, run the following command in a terminal window: ```shell -brew uninstall grafana-agent-flow +brew uninstall grafana-alloy ``` ## Next steps diff --git a/docs/sources/get-started/install/puppet.md b/docs/sources/get-started/install/puppet.md index 021221ab45..f4daa7de76 100644 --- a/docs/sources/get-started/install/puppet.md +++ b/docs/sources/get-started/install/puppet.md @@ -1,8 +1,8 @@ --- canonical: https://grafana.com/docs/alloy/latest/get-started/install/puppet/ -description: Learn how to install Grafana Agent Flow with Puppet +description: Learn how to install Grafana Alloy with Puppet menuTitle: Puppet -title: Install Grafana Agent Flow with Puppet +title: Install Grafana Alloy with Puppet weight: 560 --- @@ -49,13 +49,13 @@ To add {{< param "PRODUCT_NAME" >}} to a host: id => 'B53AE77BADB630A683046005963FA27710458545', source => 'https://apt.grafana.com/gpg.key', }, - } -> package { 'grafana-agent-flow': + } -> package { 'grafana-alloy': require => Exec['apt_update'], - } -> service { 'grafana-agent-flow': + } -> service { 'grafana-alloy': ensure => running, - name => 'grafana-agent-flow', + name => 'grafana-alloy', enable => true, - subscribe => Package['grafana-agent-flow'], + subscribe => Package['grafana-alloy'], } } 'redhat': { @@ -68,12 +68,12 @@ To add {{< param "PRODUCT_NAME" >}} to a host: enabled => '1', gpgcheck => '1', target => '/etc/yum.repo.d/grafana.repo', - } -> package { 'grafana-agent-flow': - } -> service { 'grafana-agent-flow': + } -> package { 'grafana-alloy': + } -> service { 'grafana-alloy': ensure => running, - name => 'grafana-agent-flow', + name => 'grafana-alloy', enable => true, - subscribe => Package['grafana-agent-flow'], + subscribe => Package['grafana-alloy'], } } default: { @@ -86,7 +86,7 @@ To add {{< param "PRODUCT_NAME" >}} to a host: 1. To use this class in a module, add the following line to the module's `init.pp` file: ```ruby - include grafana_agent::grafana_agent_flow + include grafana_alloy::grafana_alloy ``` ## Configuration diff --git a/docs/sources/get-started/install/windows.md b/docs/sources/get-started/install/windows.md index ba827e3c46..a1df210fee 100644 --- a/docs/sources/get-started/install/windows.md +++ b/docs/sources/get-started/install/windows.md @@ -18,13 +18,13 @@ To do a standard graphical install of {{< param "PRODUCT_NAME" >}} on Windows, p 1. Scroll down to the **Assets** section. -1. Download the file called `grafana-agent-flow-installer.exe.zip`. +1. Download the file called `grafana-alloy-installer.exe.zip`. 1. Unzip the downloaded file. -1. Double-click on `grafana-agent-installer.exe` to install {{< param "PRODUCT_NAME" >}}. +1. Double-click on `grafana-alloy-installer.exe` to install {{< param "PRODUCT_NAME" >}}. -{{< param "PRODUCT_NAME" >}} is installed into the default directory `C:\Program Files\Grafana Agent Flow`. +{{< param "PRODUCT_NAME" >}} is installed into the default directory `C:\Program Files\Grafana Alloy`. ## Silent install @@ -34,7 +34,7 @@ To do a silent install of {{< param "PRODUCT_NAME" >}} on Windows, perform the f 1. Scroll down to the **Assets** section. -1. Download the file called `grafana-agent-flow-installer.exe.zip`. +1. Download the file called `grafana-alloy-installer.exe.zip`. 1. 
Unzip the downloaded file. @@ -57,10 +57,10 @@ To do a silent install of {{< param "PRODUCT_NAME" >}} on Windows, perform the f ## Service Configuration -{{< param "PRODUCT_NAME" >}} uses the Windows Registry `HKLM\Software\Grafana\Grafana Agent Flow` for service configuration. +{{< param "PRODUCT_NAME" >}} uses the Windows Registry `HKLM\Software\Grafana\Grafana Alloy` for service configuration. -* `Arguments` (Type `REG_MULTI_SZ`) Each value represents a binary argument for grafana-agent-flow binary. -* `Environment` (Type `REG_MULTI_SZ`) Each value represents a environment value `KEY=VALUE` for grafana-agent-flow binary. +* `Arguments` (Type `REG_MULTI_SZ`) Each value represents a binary argument for the grafana-alloy binary. +* `Environment` (Type `REG_MULTI_SZ`) Each value represents an environment value `KEY=VALUE` for the grafana-alloy binary. ## Uninstall diff --git a/docs/sources/get-started/run/binary.md b/docs/sources/get-started/run/binary.md index 8000ec6786..dfbfe9483b 100644 --- a/docs/sources/get-started/run/binary.md +++ b/docs/sources/get-started/run/binary.md @@ -15,7 +15,7 @@ If you [downloaded][InstallBinary] the standalone binary, you must run {{< param To start {{< param "PRODUCT_NAME" >}} on Linux, macOS, or FreeBSD, run the following command in a terminal window: ```shell -AGENT_MODE=flow <BINARY_PATH> run <CONFIG_PATH> +<BINARY_PATH> run <CONFIG_PATH> ``` Replace the following: @@ -28,7 +28,6 @@ Replace the following: To start {{< param "PRODUCT_NAME" >}} on Windows, run the following commands in a command prompt: ```cmd -set AGENT_MODE=flow <BINARY_PATH> run <CONFIG_PATH> ``` @@ -45,26 +44,26 @@ You can set up and manage the standalone binary for {{< param "PRODUCT_NAME" >}} These steps assume you have a default systemd and {{< param "PRODUCT_NAME" >}} configuration. {{< /admonition >}} -1. To create a new user called `grafana-agent-flow` run the following command in a terminal window: +1. To create a new user called `grafana-alloy` run the following command in a terminal window: ```shell - sudo useradd --no-create-home --shell /bin/false grafana-agent-flow + sudo useradd --no-create-home --shell /bin/false grafana-alloy ``` -1. Create a service file in `/etc/systemd/system` called `grafana-agent-flow.service` with the following contents: +1. Create a service file in `/etc/systemd/system` called `grafana-alloy.service` with the following contents: ```systemd [Unit] Description=Vendor-neutral programmable observability pipelines. - Documentation=https://grafana.com/docs/agent/latest/flow/ + Documentation=https://grafana.com/docs/alloy/ Wants=network-online.target After=network-online.target [Service] Restart=always - User=grafana-agent-flow + User=grafana-alloy Environment=HOSTNAME=%H - EnvironmentFile=/etc/default/grafana-agent-flow + EnvironmentFile=/etc/default/grafana-alloy WorkingDirectory=<WORKING_DIRECTORY> ExecStart=<BINARY_PATH> run $CUSTOM_ARGS --storage.path=<WORKING_DIRECTORY> $CONFIG_FILE ExecReload=/usr/bin/env kill -HUP $MAINPID @@ -78,20 +77,20 @@ These steps assume you have a default systemd and {{< param "PRODUCT_NAME" >}} c Replace the following: * _`<BINARY_PATH>`_: The path to the {{< param "PRODUCT_NAME" >}} binary file. - * _`<WORKING_DIRECTORY>`_: The path to a working directory, for example `/var/lib/grafana-agent-flow`. + * _`<WORKING_DIRECTORY>`_: The path to a working directory, for example `/var/lib/grafana-alloy`. -1. Create an environment file in `/etc/default/` called `grafana-agent-flow` with the following contents: +1. 
Create an environment file in `/etc/default/` called `grafana-alloy` with the following contents: ```shell ## Path: - ## Description: Grafana Agent Flow settings + ## Description: Grafana Alloy settings ## Type: string ## Default: "" - ## ServiceRestart: grafana-agent-flow + ## ServiceRestart: grafana-alloy # - # Command line options for grafana-agent + # Command line options for grafana-alloy # - # The configuration file holding the Grafana Agent Flow configuration. + # The configuration file holding the Grafana Alloy configuration. CONFIG_FILE="" # User-defined arguments to pass to the run command. diff --git a/docs/sources/get-started/run/linux.md b/docs/sources/get-started/run/linux.md index 0fb7873d69..7c31298539 100644 --- a/docs/sources/get-started/run/linux.md +++ b/docs/sources/get-started/run/linux.md @@ -1,8 +1,8 @@ --- canonical: https://grafana.com/docs/alloy/latest/get-started/run/linux/ -description: Learn how to run Grafana Agent Flow on Linux +description: Learn how to run Grafana Alloy on Linux menuTitle: Linux -title: Run Grafana Agent Flow on Linux +title: Run Grafana Alloy on Linux weight: 300 --- @@ -15,13 +15,13 @@ weight: 300 To start {{< param "PRODUCT_NAME" >}}, run the following command in a terminal window: ```shell -sudo systemctl start grafana-agent-flow +sudo systemctl start grafana-alloy ``` (Optional) To verify that the service is running, run the following command in a terminal window: ```shell -sudo systemctl status grafana-agent-flow +sudo systemctl status grafana-alloy ``` ## Configure {{% param "PRODUCT_NAME" %}} to start at boot @@ -29,7 +29,7 @@ sudo systemctl status grafana-agent-flow To automatically run {{< param "PRODUCT_NAME" >}} when the system starts, run the following command in a terminal window: ```shell -sudo systemctl enable grafana-agent-flow.service +sudo systemctl enable grafana-alloy.service ``` ## Restart {{% param "PRODUCT_NAME" %}} @@ -37,7 +37,7 @@ sudo systemctl enable grafana-agent-flow.service To restart {{< param "PRODUCT_NAME" >}}, run the following command in a terminal window: ```shell -sudo systemctl restart grafana-agent-flow +sudo systemctl restart grafana-alloy ``` ## Stop {{% param "PRODUCT_NAME" %}} @@ -45,7 +45,7 @@ sudo systemctl restart grafana-agent-flow To stop {{< param "PRODUCT_NAME" >}}, run the following command in a terminal window: ```shell -sudo systemctl stop grafana-agent-flow +sudo systemctl stop grafana-alloy ``` ## View {{% param "PRODUCT_NAME" %}} logs on Linux @@ -53,7 +53,7 @@ sudo systemctl stop grafana-agent-flow To view {{< param "PRODUCT_NAME" >}} log files, run the following command in a terminal window: ```shell -sudo journalctl -u grafana-agent-flow +sudo journalctl -u grafana-alloy ``` ## Next steps diff --git a/docs/sources/get-started/run/macos.md b/docs/sources/get-started/run/macos.md index df3ef5537c..e45993d41b 100644 --- a/docs/sources/get-started/run/macos.md +++ b/docs/sources/get-started/run/macos.md @@ -15,7 +15,7 @@ weight: 400 To start {{< param "PRODUCT_NAME" >}}, run the following command in a terminal window: ```shell -brew services start grafana-agent-flow +brew services start grafana-alloy ``` {{< param "PRODUCT_NAME" >}} automatically runs when the system starts. 
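Tying the systemd pieces above together, a filled-in `/etc/default/grafana-alloy` environment file might look like the following sketch; both values are illustrative and should match your own paths and flags:

```shell
# The configuration file holding the Grafana Alloy configuration.
CONFIG_FILE="/etc/grafana-alloy/config.river"

# User-defined arguments to pass to the run command.
CUSTOM_ARGS="--server.http.listen-addr=0.0.0.0:12345"
```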
@@ -23,7 +23,7 @@ brew services start grafana-agent-flow (Optional) To verify that the service is running, run the following command in a terminal window: ```shell -brew services info grafana-agent-flow +brew services info grafana-alloy ``` ## Restart {{% param "PRODUCT_NAME" %}} @@ -31,7 +31,7 @@ brew services info grafana-agent-flow To restart {{< param "PRODUCT_NAME" >}}, run the following command in a terminal window: ```shell -brew services restart grafana-agent-flow +brew services restart grafana-alloy ``` ## Stop {{% param "PRODUCT_NAME" %}} @@ -39,12 +39,12 @@ brew services restart grafana-agent-flow To stop {{< param "PRODUCT_NAME" >}}, run the following command in a terminal window: ```shell -brew services stop grafana-agent-flow +brew services stop grafana-alloy ``` ## View {{% param "PRODUCT_NAME" %}} logs on macOS -By default, logs are written to `$(brew --prefix)/var/log/grafana-agent-flow.log` and `$(brew --prefix)/var/log/grafana-agent-flow.err.log`. +By default, logs are written to `$(brew --prefix)/var/log/grafana-alloy.log` and `$(brew --prefix)/var/log/grafana-alloy.err.log`. If you followed [Configure the {{< param "PRODUCT_NAME" >}} service][ConfigureService] and changed the path where logs are written, refer to your current copy of the {{< param "PRODUCT_NAME" >}} formula to locate your log files. diff --git a/docs/sources/reference/cli/_index.md b/docs/sources/reference/cli/_index.md index 66e9c82b1d..607b178395 100644 --- a/docs/sources/reference/cli/_index.md +++ b/docs/sources/reference/cli/_index.md @@ -4,7 +4,7 @@ aliases: canonical: https://grafana.com/docs/alloy/latest/reference/cli/ description: Learn about the Grafana Alloy command line interface menuTitle: Command-line interface -title: The Grafana Agent command-line interface +title: The Grafana Alloy command-line interface weight: 100 --- @@ -20,7 +20,7 @@ Available commands: * [`fmt`][fmt]: Format a {{< param "PRODUCT_NAME" >}} configuration file. * [`run`][run]: Start {{< param "PRODUCT_NAME" >}}, given a configuration file. * [`tools`][tools]: Read the WAL and provide statistical information. -* `completion`: Generate shell completion for the `grafana-agent-flow` CLI. +* `completion`: Generate shell completion for the `grafana-alloy` CLI. * `help`: Print help for supported commands. [run]: ./run/ diff --git a/docs/sources/reference/cli/convert.md b/docs/sources/reference/cli/convert.md index d736fb7186..fc59afb97e 100644 --- a/docs/sources/reference/cli/convert.md +++ b/docs/sources/reference/cli/convert.md @@ -18,8 +18,7 @@ The `convert` command converts a supported configuration format to {{< param "PR Usage: -* `AGENT_MODE=flow grafana-agent convert [<FLAG> ...] <FILE_NAME>` -* `grafana-agent-flow convert [<FLAG> ...] <FILE_NAME>` +* `grafana-alloy convert [<FLAG> ...] <FILE_NAME>` Replace the following: @@ -80,12 +79,12 @@ Refer to [Migrate from Promtail to {{< param "PRODUCT_NAME" >}}][migrate promtai Using the `--source-format=static` will convert the source configuration from a [Grafana Agent Static][] configuration to a {{< param "PRODUCT_NAME" >}} configuration. Include `--extra-args` for passing additional command line flags from the original format. -For example, `--extra-args="-enable-features=integrations-next"` will convert a Grafana Agent Static [integrations-next][] configuration to a {{< param "PRODUCT_NAME" >}} configuration. +For example, `--extra-args="-enable-features=integrations-next"` converts a Grafana Agent Static [integrations-next][] configuration to a {{< param "PRODUCT_NAME" >}} configuration. 
You can also expand environment variables with `--extra-args="-config.expand-env"`. You can combine multiple command line flags with a space between each flag, for example `--extra-args="-enable-features=integrations-next -config.expand-env"`. If you have unsupported features in a Grafana Agent Static mode source configuration, you will receive [errors][] when you convert to a {{< param "PRODUCT_NAME" >}} configuration. -The converter will also raise warnings for configuration options that may require your attention. +The converter also raises warnings for configuration options that may require your attention. Refer to [Migrate from Grafana Agent Static to {{< param "PRODUCT_NAME" >}}][migrate static] for a detailed migration guide. diff --git a/docs/sources/reference/cli/fmt.md b/docs/sources/reference/cli/fmt.md index 2163deb38c..03ff8170ea 100644 --- a/docs/sources/reference/cli/fmt.md +++ b/docs/sources/reference/cli/fmt.md @@ -16,8 +16,7 @@ The `fmt` command formats a given {{< param "PRODUCT_NAME" >}} configuration fil Usage: -* `AGENT_MODE=flow grafana-agent fmt [FLAG ...] FILE_NAME` -* `grafana-agent-flow fmt [FLAG ...] FILE_NAME` +* `grafana-alloy fmt [FLAG ...] FILE_NAME` Replace the following: diff --git a/docs/sources/reference/cli/run.md b/docs/sources/reference/cli/run.md index 834ca4f47c..642fe6b9bf 100644 --- a/docs/sources/reference/cli/run.md +++ b/docs/sources/reference/cli/run.md @@ -16,8 +16,7 @@ The `run` command runs {{< param "PRODUCT_NAME" >}} in the foreground until an i Usage: -* `AGENT_MODE=flow grafana-agent run [FLAG ...] PATH_NAME` -* `grafana-agent-flow run [FLAG ...] PATH_NAME` +* `grafana-alloy run [FLAG ...] PATH_NAME` Replace the following: @@ -38,10 +37,10 @@ The HTTP server is also exposes a UI at `/` for debugging running components. The following flags are supported: * `--server.http.enable-pprof`: Enable /debug/pprof profiling endpoints. (default `true`). -* `--server.http.memory-addr`: Address to listen for [in-memory HTTP traffic][] on (default `agent.internal:12345`). +* `--server.http.memory-addr`: Address to listen for [in-memory HTTP traffic][] on (default `alloy.internal:12345`). * `--server.http.listen-addr`: Address to listen for HTTP traffic on (default `127.0.0.1:12345`). * `--server.http.ui-path-prefix`: Base path where the UI is exposed (default `/`). -* `--storage.path`: Base directory where components can store data (default `data-agent/`). +* `--storage.path`: Base directory where components can store data (default `data-alloy/`). * `--disable-reporting`: Disable [data collection][] (default `false`). * `--cluster.enabled`: Start {{< param "PRODUCT_NAME" >}} in clustered mode (default `false`). * `--cluster.node-name`: The name to use for this node (defaults to the environment's hostname). @@ -138,7 +137,7 @@ with caution because the resulting conversion may not be equivalent to the original configuration. Include `--config.extra-args` to pass additional command line flags from the original format to the converter. -Refer to [grafana-agent-flow convert][] for more details on how `extra-args` work. +Refer to [grafana-alloy convert][] for more details on how `extra-args` work. 
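Putting those conversion flags together, a single invocation that runs directly from a static mode file could look like this sketch. It assumes the `--config.format` flag carries over unchanged from the Agent Flow `run` command, and the file paths are placeholders:

```shell
# Sketch: run from a static mode configuration, converting it on the fly.
grafana-alloy run \
  --config.format=static \
  --config.extra-args="-enable-features=integrations-next" \
  /etc/grafana-agent/agent.yaml
```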
[grafana-alloy convert]: ../convert/ [clustering]: ../../../concepts/clustering/ diff --git a/docs/sources/reference/cli/tools.md b/docs/sources/reference/cli/tools.md index 2eb29895bb..f20244960a 100644 --- a/docs/sources/reference/cli/tools.md +++ b/docs/sources/reference/cli/tools.md @@ -22,8 +22,7 @@ Utilities in this command have no backward compatibility guarantees and may chan Usage: -* `AGENT_MODE=flow grafana-agent tools prometheus.remote_write sample-stats [FLAG ...] WAL_DIRECTORY` -* `grafana-agent-flow tools prometheus.remote_write sample-stats [FLAG ...] WAL_DIRECTORY` +* `grafana-alloy tools prometheus.remote_write sample-stats [FLAG ...] WAL_DIRECTORY` The `sample-stats` command reads the Write-Ahead Log (WAL) specified by `WAL_DIRECTORY` and collects information on metric samples within it. @@ -44,8 +43,7 @@ The following flag is supported: Usage: -* `AGENT_MODE=flow grafana-agent tools prometheus.remote_write target-stats --job JOB --instance INSTANCE WAL_DIRECTORY` -* `grafana-agent-flow tools prometheus.remote_write target-stats --job JOB --instance INSTANCE WAL_DIRECTORY` +* `grafana-alloy tools prometheus.remote_write target-stats --job JOB --instance INSTANCE WAL_DIRECTORY` The `target-stats` command reads the Write-Ahead Log (WAL) specified by `WAL_DIRECTORY` and collects metric cardinality information for a specific target. @@ -62,8 +60,7 @@ The `--job` and `--instance` labels are required. Usage: -* `AGENT_MODE=flow grafana-agent tools prometheus.remote_write wal-stats WAL_DIRECTORY` -* `grafana-agent-flow tools prometheus.remote_write wal-stats WAL_DIRECTORY` +* `grafana-alloy tools prometheus.remote_write wal-stats WAL_DIRECTORY` The `wal-stats` command reads the Write-Ahead Log (WAL) specified by `WAL_DIRECTORY` and collects general information about it. diff --git a/docs/sources/reference/components/loki.process.md b/docs/sources/reference/components/loki.process.md index d432afd185..a330f063ff 100644 --- a/docs/sources/reference/components/loki.process.md +++ b/docs/sources/reference/components/loki.process.md @@ -272,17 +272,17 @@ stage.eventlogmessage { Given the following log line: ``` -{"event_id": 1, "Overwritten": "old", "message": "Message type:\r\nOverwritten: new\r\nImage: C:\\Users\\User\\agent.exe"} +{"event_id": 1, "Overwritten": "old", "message": "Message type:\r\nOverwritten: new\r\nImage: C:\\Users\\User\\alloy.exe"} ``` The first stage would create the following key-value pairs in the set of extracted data: -- `message`: `Message type:\r\nOverwritten: new\r\nImage: C:\Users\User\agent.exe` +- `message`: `Message type:\r\nOverwritten: new\r\nImage: C:\Users\User\alloy.exe` - `Overwritten`: `old` The second stage will parse the value of `message` from the extracted data and append/overwrite the following key-value pairs to the set of extracted data: -- `Image`: `C:\\Users\\User\\agent.exe` +- `Image`: `C:\\Users\\User\\alloy.exe` - `Message_type`: (empty string) - `Overwritten`: `new` @@ -309,7 +309,7 @@ The map key defines the name with which the data is extracted, while the map val Here's a given log line and two JSON stages to run. ```river -{"log":"log message\n","extra":"{\"user\":\"agent\"}"} +{"log":"log message\n","extra":"{\"user\":\"alloy\"}"} loki.process "username" { stage.json { @@ -327,12 +327,12 @@ In this example, the first stage uses the log line as the source and populates t An empty expression means using the same value as the key (so `extra="extra"`). 
``` output: log message\n -extra: {"user": "agent"} +extra: {"user": "alloy"} ``` The second stage uses the value in `extra` as the input and appends the following key-value pair to the set of extracted data. ``` -username: agent +username: alloy ``` {{< admonition type="note" >}} @@ -342,8 +342,8 @@ If you don't use quotes to wrap a string that contains a hyphen, you will get er You can use one of two options to circumvent this issue: 1. An escaped double quote. For example: `http_user_agent = "\"request_User-Agent\""` 1. A backtick quote. For example: ``http_user_agent = `"request_User-Agent"` `` {{< /admonition >}} ### stage.label_drop block @@ -584,7 +584,7 @@ The final output stage changes the contents of the log line to be the value of ` ### stage.metrics block The `stage.metrics` inner block configures stage that allows to define and update metrics based on values from the shared extracted map. -The created metrics are available at the Agent's root /metrics endpoint. +The created metrics are available at {{< param "PRODUCT_NAME" >}}'s root /metrics endpoint. The `stage.metrics` block does not support any arguments and is only configured via a number of nested inner `metric.*` blocks, one for each metric that should be generated. @@ -1055,7 +1055,7 @@ The `replace` field can use a set of templating functions, by utilizing Go's [te Let's see how this works with named capture groups with a sample log line and stage. ``` -11.11.11.11 - agent [01/Jan/2023:00:00:01 +0200] +11.11.11.11 - alloy [01/Jan/2023:00:00:01 +0200] stage.replace { expression = "^(?P<ip>\\S+) (?P<identd>\\S+) (?P<user>\\S+) \\[(?P<timestamp>[\\w:/]+\\s[+\\-]\\d{4})\\]" diff --git a/docs/sources/reference/components/loki.rules.kubernetes.md b/docs/sources/reference/components/loki.rules.kubernetes.md index 3b3f6a28b8..ad3898f973 100644 --- a/docs/sources/reference/components/loki.rules.kubernetes.md +++ b/docs/sources/reference/components/loki.rules.kubernetes.md @@ -47,7 +47,7 @@ Name | Type | Description `tenant_id` | `string` | Loki tenant ID. | | no `use_legacy_routes` | `bool` | Whether to use deprecated ruler API endpoints. | false | no `sync_interval` | `duration` | Amount of time between reconciliations with Loki. | "30s" | no -`loki_namespace_prefix` | `string` | Prefix used to differentiate multiple {{< param "PRODUCT_ROOT_NAME" >}} deployments. | "agent" | no +`loki_namespace_prefix` | `string` | Prefix used to differentiate multiple {{< param "PRODUCT_ROOT_NAME" >}} deployments. | "alloy" | no `bearer_token` | `secret` | Bearer token to authenticate with. | | no `bearer_token_file` | `string` | File containing a bearer token to authenticate with. | | no `proxy_url` | `string` | HTTP proxy to proxy requests through. | | no @@ -188,7 +188,7 @@ Metric Name | Type | Description This example creates a `loki.rules.kubernetes` component that loads discovered rules to a local Loki instance under the `team-a` tenant. Only namespaces and -rules with the `agent` label set to `yes` are included. +rules with the `alloy` label set to `yes` are included. ```river loki.rules.kubernetes "local" { @@ -197,13 +197,13 @@ loki.rules.kubernetes "local" { rule_namespace_selector { match_labels = { - agent = "yes", + alloy = "yes", } } rule_selector { match_labels = { - agent = "yes", + alloy = "yes", } } } @@ -230,13 +230,13 @@ The following example is an RBAC configuration for Kubernetes. It authorizes {{<
It authorizes {{< apiVersion: v1 kind: ServiceAccount metadata: - name: grafana-agent + name: grafana-alloy namespace: default --- apiVersion: rbac.authorization.k8s.io/v1 kind: ClusterRole metadata: - name: grafana-agent + name: grafana-alloy rules: - apiGroups: [""] resources: ["namespaces"] @@ -248,13 +248,13 @@ rules: apiVersion: rbac.authorization.k8s.io/v1 kind: ClusterRoleBinding metadata: - name: grafana-agent + name: grafana-alloy subjects: - kind: ServiceAccount - name: grafana-agent + name: grafana-alloy namespace: default roleRef: kind: ClusterRole - name: grafana-agent + name: grafana-alloy apiGroup: rbac.authorization.k8s.io ``` diff --git a/docs/sources/reference/components/mimir.rules.kubernetes.md b/docs/sources/reference/components/mimir.rules.kubernetes.md index 7451a5f8ae..7ea9c96fc3 100644 --- a/docs/sources/reference/components/mimir.rules.kubernetes.md +++ b/docs/sources/reference/components/mimir.rules.kubernetes.md @@ -21,14 +21,16 @@ loads them into a Mimir instance. * Compatible with the `PrometheusRule` CRD from the [prometheus-operator][]. * This component accesses the Kubernetes REST API from [within a Pod][]. -> **NOTE**: This component requires [Role-based access control (RBAC)][] to be setup -> in Kubernetes in order for the Agent to access it via the Kubernetes REST API. -> For an example RBAC configuration please click [here](#example). +{{< admonition type="note" >}} +This component requires [Role-based access control (RBAC)][] to be set up +in Kubernetes in order for {{< param "PRODUCT_NAME" >}} to access it via the Kubernetes REST API. + +[Role-based access control (RBAC)]: https://kubernetes.io/docs/reference/access-authn-authz/rbac/ +{{< /admonition >}} [Kubernetes label selectors]: https://kubernetes.io/docs/concepts/overview/working-with-objects/labels/#label-selectors [prometheus-operator]: https://prometheus-operator.dev/ [within a Pod]: https://kubernetes.io/docs/tasks/run-application/access-api-from-pod/ -[Role-based access control (RBAC)]: https://kubernetes.io/docs/reference/access-authn-authz/rbac/ ## Usage @@ -49,7 +51,7 @@ Name | Type | Description `use_legacy_routes` | `bool` | Whether to use [deprecated][gem-2_2] ruler API endpoints. | false | no `prometheus_http_prefix` | `string` | Path prefix for [Mimir's Prometheus endpoint][gem-path-prefix]. | `/prometheus` | no `sync_interval` | `duration` | Amount of time between reconciliations with Mimir. | "30s" | no -`mimir_namespace_prefix` | `string` | Prefix used to differentiate multiple {{< param "PRODUCT_NAME" >}} deployments. | "agent" | no +`mimir_namespace_prefix` | `string` | Prefix used to differentiate multiple {{< param "PRODUCT_NAME" >}} deployments. | "alloy" | no `bearer_token_file` | `string` | File containing a bearer token to authenticate with. | | no `bearer_token` | `secret` | Bearer token to authenticate with. | | no `enable_http2` | `bool` | Whether HTTP2 is supported for requests. | `true` | no @@ -205,7 +207,7 @@ Metric Name | Type | Description This example creates a `mimir.rules.kubernetes` component that loads discovered rules to a local Mimir instance under the `team-a` tenant. Only namespaces and -rules with the `agent` label set to `yes` are included. +rules with the `alloy` label set to `yes` are included. 
```river mimir.rules.kubernetes "local" { @@ -214,13 +216,13 @@ mimir.rules.kubernetes "local" { rule_namespace_selector { match_labels = { - agent = "yes", + alloy = "yes", } } rule_selector { match_labels = { - agent = "yes", + alloy = "yes", } } } @@ -241,19 +243,19 @@ mimir.rules.kubernetes "default" { } ``` -The following example is an RBAC configuration for Kubernetes. It authorizes the Agent to query the Kubernetes REST API: +The following example is an RBAC configuration for Kubernetes. It authorizes {{< param "PRODUCT_NAME" >}} to query the Kubernetes REST API: ```yaml apiVersion: v1 kind: ServiceAccount metadata: - name: grafana-agent + name: grafana-alloy namespace: default --- apiVersion: rbac.authorization.k8s.io/v1 kind: ClusterRole metadata: - name: grafana-agent + name: grafana-alloy rules: - apiGroups: [""] resources: ["namespaces"] @@ -265,13 +267,13 @@ rules: apiVersion: rbac.authorization.k8s.io/v1 kind: ClusterRoleBinding metadata: - name: grafana-agent + name: grafana-alloy subjects: - kind: ServiceAccount - name: grafana-agent + name: grafana-alloy namespace: default roleRef: kind: ClusterRole - name: grafana-agent + name: grafana-alloy apiGroup: rbac.authorization.k8s.io ``` diff --git a/docs/sources/reference/components/otelcol.exporter.loadbalancing.md b/docs/sources/reference/components/otelcol.exporter.loadbalancing.md index 58595b58e1..90dc244549 100644 --- a/docs/sources/reference/components/otelcol.exporter.loadbalancing.md +++ b/docs/sources/reference/components/otelcol.exporter.loadbalancing.md @@ -65,8 +65,8 @@ Name | Type | Description | Default | The `routing_key` attribute determines how to route signals across endpoints. Its value could be one of the following: * `"service"`: spans with the same `service.name` will be exported to the same backend. -This is useful when using processors like the span metrics, so all spans for each service are sent to consistent Agent instances -for metric collection. Otherwise, metrics for the same services would be sent to different Agents, making aggregations inaccurate. +This is useful when using processors like the span metrics, so all spans for each service are sent to consistent {{< param "PRODUCT_NAME" >}} instances +for metric collection. Otherwise, metrics for the same services would be sent to different instances, making aggregations inaccurate. * `"traceID"`: spans belonging to the same traceID will be exported to the same backend. ## Blocks @@ -124,8 +124,8 @@ Name | Type | Description | Default | Requi ### dns block -The `dns` block periodically resolves an IP address via the DNS `hostname` attribute. This IP address -and the port specified via the `port` attribute will then be used by the gRPC exporter +The `dns` block periodically resolves an IP address via the DNS `hostname` attribute. This IP address +and the port specified via the `port` attribute will then be used by the gRPC exporter as the endpoint to which to export data to. The following arguments are supported: @@ -139,8 +139,8 @@ Name | Type | Description ### kubernetes block -You can use the `kubernetes` block to load balance across the pods of a Kubernetes service. -The Kubernetes API notifies {{< param "PRODUCT_NAME" >}} whenever a new pod is added or removed from the service. +You can use the `kubernetes` block to load balance across the pods of a Kubernetes service. +The Kubernetes API notifies {{< param "PRODUCT_NAME" >}} whenever a new pod is added or removed from the service. 
The `kubernetes` resolver has a much faster response time than the `dns` resolver because it doesn't require polling. The following arguments are supported: @@ -150,10 +150,10 @@ Name | Type | Description `service` | `string` | Kubernetes service to resolve. | | yes `ports` | `list(number)` | Ports to use with the IP addresses resolved from `service`. | `[4317]` | no -If no namespace is specified inside `service`, an attempt will be made to infer the namespace for this Agent. +If no namespace is specified inside `service`, an attempt will be made to infer the namespace for this {{< param "PRODUCT_NAME" >}}. If this fails, the `default` namespace will be used. -Each of the ports listed in `ports` will be used with each of the IPs resolved from `service`. +Each of the ports listed in `ports` will be used with each of the IPs resolved from `service`. The "get", "list", and "watch" [roles](https://kubernetes.io/docs/reference/access-authn-authz/rbac/#role-example) must be granted in Kubernetes for the resolver to work. @@ -169,7 +169,7 @@ The `otlp` block configures OTLP-related settings for exporting. ### client block -The `client` block configures the gRPC client used by the component. +The `client` block configures the gRPC client used by the component. The endpoints used by the client block are the ones from the `resolver` block The following arguments are supported: @@ -333,10 +333,10 @@ Unfortunately, this can also lead to side effects. For example, if `otelcol.connector.spanmetrics` is configured to generate exemplars, the tail sampling {{< param "PRODUCT_ROOT_NAME" >}}s might drop the trace that the exemplar points to. There is no coordination between the tail sampling {{< param "PRODUCT_ROOT_NAME" >}}s and the span metrics {{< param "PRODUCT_ROOT_NAME" >}}s to make sure trace IDs for exemplars are kept. - @@ -382,14 +382,14 @@ otelcol.exporter.loadbalancing "default" { ### DNS resolver -When configured with a `dns` resolver, `otelcol.exporter.loadbalancing` will do a DNS lookup +When configured with a `dns` resolver, `otelcol.exporter.loadbalancing` will do a DNS lookup on regular intervals. Spans are exported to the addresses the DNS lookup returned. 
```river otelcol.exporter.loadbalancing "default" { resolver { dns { - hostname = "grafana-agent-traces-sampling.grafana-cloud-monitoring.svc.cluster.local" + hostname = "grafana-alloy-traces-sampling.grafana-cloud-monitoring.svc.cluster.local" port = "34621" interval = "5s" timeout = "1s" @@ -441,7 +441,7 @@ spec: containers: - env: - name: ENDPOINT - value: agent-traces-lb.grafana-cloud-monitoring.svc.cluster.local:9411 + value: alloy-traces-lb.grafana-cloud-monitoring.svc.cluster.local:9411 image: ghcr.io/grafana/xk6-client-tracing:v0.0.2 imagePullPolicy: IfNotPresent name: k6-trace-generator @@ -449,23 +449,23 @@ spec: apiVersion: v1 kind: Service metadata: - name: agent-traces-lb + name: alloy-traces-lb namespace: grafana-cloud-monitoring spec: clusterIP: None ports: - - name: agent-traces-otlp-grpc + - name: alloy-traces-otlp-grpc port: 9411 protocol: TCP targetPort: 9411 selector: - name: agent-traces-lb + name: alloy-traces-lb type: ClusterIP --- apiVersion: apps/v1 kind: Deployment metadata: - name: agent-traces-lb + name: alloy-traces-lb namespace: grafana-cloud-monitoring spec: minReadySeconds: 10 @@ -473,59 +473,56 @@ spec: revisionHistoryLimit: 1 selector: matchLabels: - name: agent-traces-lb + name: alloy-traces-lb template: metadata: labels: - name: agent-traces-lb + name: alloy-traces-lb spec: containers: - args: - run - - /etc/agent/agent_lb.river + - /etc/alloy/alloy_lb.river command: - - /bin/grafana-agent - env: - - name: AGENT_MODE - value: flow - image: grafana/agent:v0.38.0 + - /bin/grafana-alloy + image: grafana/alloy:v1.0 imagePullPolicy: IfNotPresent - name: agent-traces + name: alloy-traces ports: - containerPort: 9411 name: otlp-grpc protocol: TCP - containerPort: 34621 - name: agent-lb + name: alloy-lb protocol: TCP volumeMounts: - - mountPath: /etc/agent - name: agent-traces + - mountPath: /etc/alloy + name: alloy-traces volumes: - configMap: - name: agent-traces - name: agent-traces + name: alloy-traces + name: alloy-traces --- apiVersion: v1 kind: Service metadata: - name: agent-traces-sampling + name: alloy-traces-sampling namespace: grafana-cloud-monitoring spec: clusterIP: None ports: - - name: agent-lb + - name: alloy-lb port: 34621 protocol: TCP - targetPort: agent-lb + targetPort: alloy-lb selector: - name: agent-traces-sampling + name: alloy-traces-sampling type: ClusterIP --- apiVersion: apps/v1 kind: Deployment metadata: - name: agent-traces-sampling + name: alloy-traces-sampling namespace: grafana-cloud-monitoring spec: minReadySeconds: 10 @@ -533,46 +530,43 @@ spec: revisionHistoryLimit: 1 selector: matchLabels: - name: agent-traces-sampling + name: alloy-traces-sampling template: metadata: labels: - name: agent-traces-sampling + name: alloy-traces-sampling spec: containers: - args: - run - - /etc/agent/agent_sampling.river + - /etc/alloy/alloy_sampling.river command: - - /bin/grafana-agent - env: - - name: AGENT_MODE - value: flow - image: grafana/agent:v0.38.0 + - /bin/grafana-alloy + image: grafana/alloy:v1.0 imagePullPolicy: IfNotPresent - name: agent-traces + name: alloy-traces ports: - containerPort: 9411 name: otlp-grpc protocol: TCP - containerPort: 34621 - name: agent-lb + name: alloy-lb protocol: TCP volumeMounts: - - mountPath: /etc/agent - name: agent-traces + - mountPath: /etc/alloy + name: alloy-traces volumes: - configMap: - name: agent-traces - name: agent-traces + name: alloy-traces + name: alloy-traces --- apiVersion: v1 kind: ConfigMap metadata: - name: agent-traces + name: alloy-traces namespace: grafana-cloud-monitoring data: 
- agent_lb.river: | + alloy_lb.river: | otelcol.receiver.otlp "default" { grpc { endpoint = "0.0.0.0:9411" @@ -589,7 +583,7 @@ data: otelcol.exporter.loadbalancing "default" { resolver { dns { - hostname = "agent-traces-sampling.grafana-cloud-monitoring.svc.cluster.local" + hostname = "alloy-traces-sampling.grafana-cloud-monitoring.svc.cluster.local" port = "34621" } } @@ -604,7 +598,7 @@ data: } } - agent_sampling.river: | + alloy_sampling.river: | otelcol.receiver.otlp "default" { grpc { endpoint = "0.0.0.0:34621" @@ -637,28 +631,28 @@ You can use [k3d][] to start the example: ```bash -k3d cluster create grafana-agent-lb-test +k3d cluster create grafana-alloy-lb-test kubectl apply -f kubernetes_config.yaml ``` To delete the cluster, run: ```bash -k3d cluster delete grafana-agent-lb-test +k3d cluster delete grafana-alloy-lb-test ``` [k3d]: https://k3d.io/v5.6.0/ ### Kubernetes resolver -When you configure `otelcol.exporter.loadbalancing` with a `kubernetes` resolver, the Kubernetes API notifies {{< param "PRODUCT_NAME" >}} whenever a new pod is added or removed from the service. +When you configure `otelcol.exporter.loadbalancing` with a `kubernetes` resolver, the Kubernetes API notifies {{< param "PRODUCT_NAME" >}} whenever a new pod is added or removed from the service. Spans are exported to the addresses from the Kubernetes API, combined with all the possible `ports`. ```river otelcol.exporter.loadbalancing "default" { resolver { kubernetes { - service = "grafana-agent-traces-headless" + service = "grafana-alloy-traces-headless" ports = [ 34621 ] } } @@ -694,13 +688,13 @@ metadata: apiVersion: v1 kind: ServiceAccount metadata: - name: agent-traces + name: alloy-traces namespace: grafana-cloud-monitoring --- apiVersion: rbac.authorization.k8s.io/v1 kind: Role metadata: - name: agent-traces-role + name: alloy-traces-role namespace: grafana-cloud-monitoring rules: - apiGroups: @@ -715,15 +709,15 @@ rules: apiVersion: rbac.authorization.k8s.io/v1 kind: RoleBinding metadata: - name: agent-traces-rolebinding + name: alloy-traces-rolebinding namespace: grafana-cloud-monitoring roleRef: apiGroup: rbac.authorization.k8s.io kind: Role - name: agent-traces-role + name: alloy-traces-role subjects: - kind: ServiceAccount - name: agent-traces + name: alloy-traces namespace: grafana-cloud-monitoring --- apiVersion: apps/v1 @@ -746,7 +740,7 @@ spec: containers: - env: - name: ENDPOINT - value: agent-traces-lb.grafana-cloud-monitoring.svc.cluster.local:9411 + value: alloy-traces-lb.grafana-cloud-monitoring.svc.cluster.local:9411 image: ghcr.io/grafana/xk6-client-tracing:v0.0.2 imagePullPolicy: IfNotPresent name: k6-trace-generator @@ -754,23 +748,23 @@ spec: apiVersion: v1 kind: Service metadata: - name: agent-traces-lb + name: alloy-traces-lb namespace: grafana-cloud-monitoring spec: clusterIP: None ports: - - name: agent-traces-otlp-grpc + - name: alloy-traces-otlp-grpc port: 9411 protocol: TCP targetPort: 9411 selector: - name: agent-traces-lb + name: alloy-traces-lb type: ClusterIP --- apiVersion: apps/v1 kind: Deployment metadata: - name: agent-traces-lb + name: alloy-traces-lb namespace: grafana-cloud-monitoring spec: minReadySeconds: 10 @@ -778,56 +772,53 @@ spec: revisionHistoryLimit: 1 selector: matchLabels: - name: agent-traces-lb + name: alloy-traces-lb template: metadata: labels: - name: agent-traces-lb + name: alloy-traces-lb spec: containers: - args: - run - - /etc/agent/agent_lb.river + - /etc/alloy/alloy_lb.river command: - - /bin/grafana-agent - env: - - name: AGENT_MODE - value: flow 
- image: grafana/agent:v0.38.0 + - /bin/grafana-alloy + image: grafana/alloy:v1.0 imagePullPolicy: IfNotPresent - name: agent-traces + name: alloy-traces ports: - containerPort: 9411 name: otlp-grpc protocol: TCP volumeMounts: - - mountPath: /etc/agent - name: agent-traces - serviceAccount: agent-traces + - mountPath: /etc/alloy + name: alloy-traces + serviceAccount: alloy-traces volumes: - configMap: - name: agent-traces - name: agent-traces + name: alloy-traces + name: alloy-traces --- apiVersion: v1 kind: Service metadata: - name: agent-traces-sampling + name: alloy-traces-sampling namespace: grafana-cloud-monitoring spec: ports: - - name: agent-lb + - name: alloy-lb port: 34621 protocol: TCP - targetPort: agent-lb + targetPort: alloy-lb selector: - name: agent-traces-sampling + name: alloy-traces-sampling type: ClusterIP --- apiVersion: apps/v1 kind: Deployment metadata: - name: agent-traces-sampling + name: alloy-traces-sampling namespace: grafana-cloud-monitoring spec: minReadySeconds: 10 @@ -835,43 +826,40 @@ spec: revisionHistoryLimit: 1 selector: matchLabels: - name: agent-traces-sampling + name: alloy-traces-sampling template: metadata: labels: - name: agent-traces-sampling + name: alloy-traces-sampling spec: containers: - args: - run - - /etc/agent/agent_sampling.river + - /etc/alloy/alloy_sampling.river command: - - /bin/grafana-agent - env: - - name: AGENT_MODE - value: flow - image: grafana/agent:v0.38.0 + - /bin/grafana-alloy + image: grafana/alloy:v1.0 imagePullPolicy: IfNotPresent - name: agent-traces + name: alloy-traces ports: - containerPort: 34621 - name: agent-lb + name: alloy-lb protocol: TCP volumeMounts: - - mountPath: /etc/agent - name: agent-traces + - mountPath: /etc/alloy + name: alloy-traces volumes: - configMap: - name: agent-traces - name: agent-traces + name: alloy-traces + name: alloy-traces --- apiVersion: v1 kind: ConfigMap metadata: - name: agent-traces + name: alloy-traces namespace: grafana-cloud-monitoring data: - agent_lb.river: | + alloy_lb.river: | otelcol.receiver.otlp "default" { grpc { endpoint = "0.0.0.0:9411" @@ -888,7 +876,7 @@ data: otelcol.exporter.loadbalancing "default" { resolver { kubernetes { - service = "agent-traces-sampling" + service = "alloy-traces-sampling" ports = ["34621"] } } @@ -903,7 +891,7 @@ data: } } - agent_sampling.river: | + alloy_sampling.river: | otelcol.receiver.otlp "default" { grpc { endpoint = "0.0.0.0:34621" @@ -936,14 +924,14 @@ You must fill in the correct OTLP credentials prior to running the example. You can use [k3d][] to start the example: ```bash -k3d cluster create grafana-agent-lb-test +k3d cluster create grafana-alloy-lb-test kubectl apply -f kubernetes_config.yaml ``` To delete the cluster, run: ```bash -k3d cluster delete grafana-agent-lb-test +k3d cluster delete grafana-alloy-lb-test ``` diff --git a/docs/sources/reference/components/pyroscope.java.md b/docs/sources/reference/components/pyroscope.java.md index 38eade4c77..e74d86100a 100644 --- a/docs/sources/reference/components/pyroscope.java.md +++ b/docs/sources/reference/components/pyroscope.java.md @@ -43,10 +43,10 @@ After component startup, `pyroscope.java` creates a temporary directory under `t async-profiler binaries for both glibc and musl into the directory with the following layout. 
``` -/tmp/grafana-agent-asprof-glibc-{SHA1}/bin/asprof -/tmp/grafana-agent-asprof-glibc-{SHA1}/lib/libasyncProfiler.so -/tmp/grafana-agent-asprof-musl-{SHA1}/bin/asprof -/tmp/grafana-agent-asprof-musl-{SHA1}/lib/libasyncProfiler.so +/tmp/grafana-alloy-asprof-glibc-{SHA1}/bin/asprof +/tmp/grafana-alloy-asprof-glibc-{SHA1}/lib/libasyncProfiler.so +/tmp/grafana-alloy-asprof-musl-{SHA1}/bin/asprof +/tmp/grafana-alloy-asprof-musl-{SHA1}/lib/libasyncProfiler.so ``` After process profiling startup, the component detects libc type and copies according `libAsyncProfiler.so` into the diff --git a/docs/sources/reference/config-blocks/remotecfg.md b/docs/sources/reference/config-blocks/remotecfg.md index 233b350903..8b5ce6a1e5 100644 --- a/docs/sources/reference/config-blocks/remotecfg.md +++ b/docs/sources/reference/config-blocks/remotecfg.md @@ -46,7 +46,7 @@ Name | Type | Description If the `url` is not set, then the service block is a no-op. -If not set, the self-reported `id` that {{< param "PRODUCT_NAME" >}} uses is a randomly generated, anonymous unique ID (UUID) that is stored as an `agent_seed.json` file in {{< param "PRODUCT_NAME" >}}'s storage path so that it can persist across restarts. +If not set, the self-reported `id` that {{< param "PRODUCT_NAME" >}} uses is a randomly generated, anonymous unique ID (UUID) that is stored as an `alloy_seed.json` file in {{< param "PRODUCT_NAME" >}}'s storage path so that it can persist across restarts. The `id` and `metadata` fields are used in the periodic request sent to the remote endpoint so that the API can decide what configuration to serve. @@ -82,7 +82,7 @@ For example, `oauth2 > tls_config` refers to a `tls_config` block defined inside {{< docs/shared lookup="reference/components/tls-config-block.md" source="alloy" version="" >}} -[API definition]: https://github.com/grafana/agent-remote-config +[API definition]: https://github.com/grafana/alloy-remote-config [beta]: ../../../stability/#beta [basic_auth]: #basic_auth-block [authorization]: #authorization-block diff --git a/docs/sources/reference/config-blocks/tracing.md b/docs/sources/reference/config-blocks/tracing.md index 97cedb47f9..d8ba481e8e 100644 --- a/docs/sources/reference/config-blocks/tracing.md +++ b/docs/sources/reference/config-blocks/tracing.md @@ -92,7 +92,7 @@ Name | Type | Description `type` | `string` | Type of DNS record to query. Must be one of SRV, A, AAAA, or MX. | `"SRV"` | no Requests to the remote sampling strategies server are made through an HTTP `GET` request to the configured `url` argument. -A `service=grafana-agent` query parameter is always added to the URL to allow the server to respond with service-specific strategies. +A `service=grafana-alloy` query parameter is always added to the URL to allow the server to respond with service-specific strategies. The HTTP response body is read as JSON matching the schema specified by Jaeger's [`strategies.json` file][Jaeger sampling strategies]. The `max_operations` limits the amount of custom span names that can have custom sampling rules. diff --git a/docs/sources/reference/stdlib/env.md b/docs/sources/reference/stdlib/env.md index 84a68ed3b9..1e64f23c50 100644 --- a/docs/sources/reference/stdlib/env.md +++ b/docs/sources/reference/stdlib/env.md @@ -15,7 +15,7 @@ If the environment variable does not exist, `env` returns an empty string. 
```
> env("HOME")
-"/home/grafana-agent"
+"/home/grafana-alloy"

> env("DOES_NOT_EXIST")
""

diff --git a/docs/sources/tasks/collect-prometheus-metrics.md b/docs/sources/tasks/collect-prometheus-metrics.md
index a317f57780..897856b7f2 100644
--- a/docs/sources/tasks/collect-prometheus-metrics.md
+++ b/docs/sources/tasks/collect-prometheus-metrics.md
@@ -403,8 +403,8 @@ prometheus.scrape "custom_targets" {
       __metrics_path__ = "/custom-metrics-path",
     },
     {
-      __address__ = "grafana-agent:12345",
-      application = "grafana-agent",
+      __address__ = "grafana-alloy:12345",
+      application = "grafana-alloy",
       environment = "production",
     },
   ]
diff --git a/docs/sources/tasks/configure-agent-clustering.md b/docs/sources/tasks/configure-alloy-clustering.md
similarity index 96%
rename from docs/sources/tasks/configure-agent-clustering.md
rename to docs/sources/tasks/configure-alloy-clustering.md
index 48814033f9..1c2e72bf12 100644
--- a/docs/sources/tasks/configure-agent-clustering.md
+++ b/docs/sources/tasks/configure-alloy-clustering.md
@@ -1,5 +1,5 @@
 ---
-canonical: https://grafana.com/docs/alloy/latest/tasks/configure-agent-clustering/
+canonical: https://grafana.com/docs/alloy/latest/tasks/configure-alloy-clustering/
 description: Learn how to configure Grafana Alloy clustering in an existing installation
 menuTitle: Configure clustering
 title: Configure Grafana Alloy clustering in an existing installation
@@ -24,10 +24,10 @@ This section guides you through enabling clustering when {{< param "PRODUCT_NAME

 To configure clustering:

-1. Amend your existing `values.yaml` file to add `clustering.enabled=true` inside the `agent` block.
+1. Amend your existing `values.yaml` file to add `clustering.enabled=true` inside the `alloy` block.

    ```yaml
-   agent:
+   alloy:
      clustering:
        enabled: true
    ```
diff --git a/docs/sources/tasks/configure/_index.md b/docs/sources/tasks/configure/_index.md
index b8bff7751a..b0d3138242 100644
--- a/docs/sources/tasks/configure/_index.md
+++ b/docs/sources/tasks/configure/_index.md
@@ -11,9 +11,9 @@ weight: 90
 You can configure {{< param "PRODUCT_NAME" >}} after it is [installed][Install].
 The default River configuration file for {{< param "PRODUCT_NAME" >}} is located at:

-* Linux: `/etc/grafana-agent-flow.river`
-* macOS: `$(brew --prefix)/etc/grafana-agent-flow/config.river`
-* Windows: `C:\Program Files\Grafana Agent Flow\config.river`
+* Linux: `/etc/grafana-alloy.river`
+* macOS: `$(brew --prefix)/etc/grafana-alloy/config.river`
+* Windows: `C:\Program Files\Grafana Alloy\config.river`

 This section includes information that helps you configure {{< param "PRODUCT_NAME" >}}.
diff --git a/docs/sources/tasks/configure/configure-linux.md b/docs/sources/tasks/configure/configure-linux.md
index b0757e9338..090889dd3c 100644
--- a/docs/sources/tasks/configure/configure-linux.md
+++ b/docs/sources/tasks/configure/configure-linux.md
@@ -10,41 +10,41 @@ weight: 300

 To configure {{< param "PRODUCT_NAME" >}} on Linux, perform the following steps:

-1. Edit the default configuration file at `/etc/grafana-agent-flow.river`.
+1. Edit the default configuration file at `/etc/grafana-alloy.river`.

 1. Run the following command in a terminal to reload the configuration file:

    ```shell
-   sudo systemctl reload grafana-agent-flow
+   sudo systemctl reload grafana-alloy
   ```

 To change the configuration file used by the service, perform the following steps:
1. Edit the environment file for the service:

-   * Debian or Ubuntu: edit `/etc/default/grafana-agent-flow`
-   * RHEL/Fedora or SUSE/openSUSE: edit `/etc/sysconfig/grafana-agent-flow`
+   * Debian or Ubuntu: edit `/etc/default/grafana-alloy`
+   * RHEL/Fedora or SUSE/openSUSE: edit `/etc/sysconfig/grafana-alloy`

 1. Change the contents of the `CONFIG_FILE` environment variable to point at the new configuration file to use.

 1. Restart the {{< param "PRODUCT_NAME" >}} service:

    ```shell
-   sudo systemctl restart grafana-agent-flow
+   sudo systemctl restart grafana-alloy
    ```

 ## Pass additional command-line flags

 By default, the {{< param "PRODUCT_NAME" >}} service launches with the [run][]
 command, passing the following flags:

-* `--storage.path=/var/lib/grafana-agent-flow`
+* `--storage.path=/var/lib/grafana-alloy`

 To pass additional command-line flags to the {{< param "PRODUCT_NAME" >}} binary, perform the following steps:

 1. Edit the environment file for the service:

-   * Debian-based systems: edit `/etc/default/grafana-agent-flow`
-   * RedHat or SUSE-based systems: edit `/etc/sysconfig/grafana-agent-flow`
+   * Debian-based systems: edit `/etc/default/grafana-alloy`
+   * RedHat or SUSE-based systems: edit `/etc/sysconfig/grafana-alloy`

 1. Change the contents of the `CUSTOM_ARGS` environment variable to specify command-line flags to pass.

@@ -52,7 +52,7 @@ To pass additional command-line flags to the {{< param "PRODUCT_NAME" >}} binary

 1. Restart the {{< param "PRODUCT_NAME" >}} service:

    ```shell
-   sudo systemctl restart grafana-agent-flow
+   sudo systemctl restart grafana-alloy
    ```

 To see the list of valid command-line flags that can be passed to the service, refer to the documentation for the [run][] command.
diff --git a/docs/sources/tasks/configure/configure-macos.md b/docs/sources/tasks/configure/configure-macos.md
index d57885617e..a117dff57d 100644
--- a/docs/sources/tasks/configure/configure-macos.md
+++ b/docs/sources/tasks/configure/configure-macos.md
@@ -10,12 +10,12 @@ weight: 400

 To configure {{< param "PRODUCT_NAME" >}} on macOS, perform the following steps:

-1. Edit the default configuration file at `$(brew --prefix)/etc/grafana-agent-flow/config.river`.
+1. Edit the default configuration file at `$(brew --prefix)/etc/grafana-alloy/config.river`.

 1. Run the following command in a terminal to restart the {{< param "PRODUCT_NAME" >}} service:

    ```shell
-   brew services restart grafana-agent-flow
+   brew services restart grafana-alloy
    ```

 ## Configure the {{% param "PRODUCT_NAME" %}} service

To customize the {{< param "PRODUCT_NAME" >}} service on macOS, perform the foll

 1. Run the following command in a terminal:

    ```shell
-   brew edit grafana-agent-flow
+   brew edit grafana-alloy
    ```

    This will open the {{< param "PRODUCT_NAME" >}} Homebrew Formula in an editor.

@@ -45,13 +45,13 @@ To customize the {{< param "PRODUCT_NAME" >}} service on macOS, perform the foll

 1. Reinstall the {{< param "PRODUCT_NAME" >}} Formula by running the following command in a terminal:

    ```shell
-   brew reinstall grafana-agent-flow
+   brew reinstall grafana-alloy
    ```

 1. Restart the {{< param "PRODUCT_NAME" >}} service by running the command in a terminal:

    ```shell
-   brew services restart grafana-agent-flow
+   brew services restart grafana-alloy
    ```

 ## Expose the UI to other machines

@@ -61,7 +61,7 @@ This prevents other machines on the network from being able to access the [UI fo

 To expose the UI to other machines, complete the following steps:
-1. Follow [Configure the {{< param "PRODUCT_NAME" >}} service](#configure-the-grafana-agent-flow-service)
+1. Follow [Configure the {{< param "PRODUCT_NAME" >}} service](#configure-the-grafana-alloy-service)
   to edit command line flags passed to {{< param "PRODUCT_NAME" >}}, including the following
   customizations:

diff --git a/docs/sources/tasks/configure/configure-windows.md b/docs/sources/tasks/configure/configure-windows.md
index 93fc1a4f1a..68e5f61f4a 100644
--- a/docs/sources/tasks/configure/configure-windows.md
+++ b/docs/sources/tasks/configure/configure-windows.md
@@ -10,7 +10,7 @@ weight: 500

 To configure {{< param "PRODUCT_NAME" >}} on Windows, perform the following steps:

-1. Edit the default configuration file at `C:\Program Files\Grafana Agent Flow\config.river`.
+1. Edit the default configuration file at `C:\Program Files\Grafana Alloy\config.river`.

 1. Restart the {{< param "PRODUCT_NAME" >}} service:

@@ -30,8 +30,8 @@ By default, the {{< param "PRODUCT_NAME" >}} service will launch and pass the
 following arguments to the {{< param "PRODUCT_NAME" >}} binary:

 * `run`
-* `C:\Program Files\Grafana Agent Flow\config.river`
-* `--storage.path=C:\ProgramData\Grafana Agent Flow\data`
+* `C:\Program Files\Grafana Alloy\config.river`
+* `--storage.path=C:\ProgramData\Grafana Alloy\data`

 To change the set of command-line arguments passed to the {{< param "PRODUCT_ROOT_NAME" >}}
 binary, perform the following steps:

@@ -42,7 +42,7 @@ binary, perform the following steps:

 1. Type `regedit` and click **OK**.

-1. Navigate to the key at the path `HKEY_LOCAL_MACHINE\SOFTWARE\Grafana\Grafana Agent Flow`.
+1. Navigate to the key at the path `HKEY_LOCAL_MACHINE\SOFTWARE\Grafana\Grafana Alloy`.

 1. Double-click on the value called **Arguments**.

diff --git a/docs/sources/tasks/debug.md b/docs/sources/tasks/debug.md
index 4f2615dc5c..5b2a25146e 100644
--- a/docs/sources/tasks/debug.md
+++ b/docs/sources/tasks/debug.md
@@ -23,10 +23,10 @@ This default prevents other machines on the network from being able to view the

 To expose the UI to other machines on the network on non-containerized platforms, refer to the documentation for how you [installed][install] {{< param "PRODUCT_NAME" >}}.

-If you are running a custom installation of {{< param "PRODUCT_NAME" >}}, refer to the documentation for the [`grafana-agent run` command][grafana-agent run] to learn how to change the HTTP listen address, and pass the appropriate flag when running {{< param "PRODUCT_NAME" >}}.
+If you are running a custom installation of {{< param "PRODUCT_NAME" >}}, refer to the documentation for the [`grafana-alloy run` command][grafana-alloy run] to learn how to change the HTTP listen address, and pass the appropriate flag when running {{< param "PRODUCT_NAME" >}}.
[install]: ../../get-started/install/
-[grafana-agent run]: ../../reference/cli/run/
+[grafana-alloy run]: ../../reference/cli/run/
 {{< /admonition >}}

 ### Home page
diff --git a/docs/sources/tasks/distribute-prometheus-scrape-load.md b/docs/sources/tasks/distribute-prometheus-scrape-load.md
index 8b74ad60ed..b440c20ef7 100644
--- a/docs/sources/tasks/distribute-prometheus-scrape-load.md
+++ b/docs/sources/tasks/distribute-prometheus-scrape-load.md
@@ -41,5 +41,5 @@ To distribute Prometheus metrics scrape load with clustering:

 [Clustering]: ../../concepts/clustering/
 [configure]: ../configure/
 [Configure Prometheus metrics collection]: ../collect-prometheus-metrics/
-[Configure clustering]: ../configure-agent-clustering/
+[Configure clustering]: ../configure-alloy-clustering/
 [UI]: ../debug/#component-detail-page
diff --git a/docs/sources/tasks/migrate/from-operator.md b/docs/sources/tasks/migrate/from-operator.md
index c01edd7c03..e3aaacf14d 100644
--- a/docs/sources/tasks/migrate/from-operator.md
+++ b/docs/sources/tasks/migrate/from-operator.md
@@ -42,7 +42,7 @@ This guide provides some steps to get started with {{< param "PRODUCT_NAME" >}}

    This is one of many possible deployment modes. For example, you may want to use a `DaemonSet` to collect host-level logs or metrics.
    See the {{< param "PRODUCT_NAME" >}} [deployment guide][] for more details about different topologies.

-1. Create a {{< param "PRODUCT_ROOT_NAME" >}} configuration file, `agent.river`.
+1. Create a {{< param "PRODUCT_ROOT_NAME" >}} configuration file, `alloy.river`.

    In the next step, you add to this configuration as you convert `MetricsInstances`.
    You can add any additional configuration to this file as you need.

@@ -53,10 +53,10 @@ This guide provides some steps to get started with {{< param "PRODUCT_NAME" >}}
    helm repo update
    ```

-1. Create a Helm release. You can name the release anything you like. The following command installs a release called `grafana-agent-metrics` in the `monitoring` namespace.
+1. Create a Helm release. You can name the release anything you like. The following command installs a release called `grafana-alloy-metrics` in the `monitoring` namespace.

    ```shell
-   helm upgrade grafana-agent-metrics grafana/grafana-agent -i -n monitoring -f values.yaml --set-file agent.configMap.content=agent.river
+   helm upgrade grafana-alloy-metrics grafana/grafana-alloy -i -n monitoring -f values.yaml --set-file alloy.configMap.content=alloy.river
    ```

    This command uses the `--set-file` flag to pass the configuration file as a Helm value so that you can continue to edit it as a regular River file.
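For readers unfamiliar with Helm's `--set-file`, the command above is in effect shorthand for inlining the River file into the chart values. The following is a minimal sketch, assuming the chart exposes the `alloy.configMap.content` value used above; `values-inline.yaml` is a hypothetical file name:

```bash
# Sketch only: manually inlining alloy.river into a values file is
# equivalent to passing it with --set-file alloy.configMap.content=alloy.river.
cat > values-inline.yaml <<'EOF'
alloy:
  configMap:
    content: |
      // The contents of alloy.river would be pasted here.
EOF

helm upgrade grafana-alloy-metrics grafana/grafana-alloy -i -n monitoring -f values-inline.yaml
```

Keeping the configuration in a standalone River file and using `--set-file` avoids this manual inlining, so editors and linters can keep treating it as River source.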
@@ -149,13 +149,13 @@ agent:
     varlog: true
 ```

-This command will install a release named `grafana-agent-logs` in the `monitoring` namespace:
+This command installs a release named `grafana-agent-logs` in the `monitoring` namespace:

 ```
 helm upgrade grafana-agent-logs grafana/grafana-agent -i -n monitoring -f values-logs.yaml --set-file agent.configMap.content=agent-logs.river
 ```

-This simple configuration will scrape logs for every pod on each node:
+This simple configuration scrapes logs for every Pod on each node:

 ```river
 // read the credentials secret for remote_write authorization
diff --git a/docs/sources/tasks/migrate/from-prometheus.md b/docs/sources/tasks/migrate/from-prometheus.md
index d5bdc6bd82..179976cac2 100644
--- a/docs/sources/tasks/migrate/from-prometheus.md
+++ b/docs/sources/tasks/migrate/from-prometheus.md
@@ -38,14 +38,18 @@ This conversion will enable you to take full advantage of the many additional fe

    {{< code >}}

-   ```static-binary
+   ```agent-static-binary
    AGENT_MODE=flow grafana-agent convert --source-format=prometheus --output=
    ```

-   ```flow-binary
+   ```agent-flow-binary
    grafana-agent-flow convert --source-format=prometheus --output=
    ```

+   ```alloy-binary
+   grafana-alloy convert --source-format=prometheus --output=
+   ```
+
   {{< /code >}}

   Replace the following:

@@ -75,6 +79,10 @@ This conversion will enable you to take full advantage of the many additional fe
    grafana-agent-flow convert --source-format=prometheus --bypass-errors --output=
    ```

+   ```alloy-binary
+   grafana-alloy convert --source-format=prometheus --bypass-errors --output=
+   ```
+
   {{< /code >}}

   Replace the following:

@@ -94,6 +102,10 @@ This conversion will enable you to take full advantage of the many additional fe
    grafana-agent-flow convert --source-format=prometheus --report= --output=
    ```

+   ```alloy-binary
+   grafana-alloy convert --source-format=prometheus --report= --output=
+   ```
+
   {{< /code >}}

   Replace the following:

@@ -130,11 +142,11 @@ Your configuration file must be a valid Prometheus configuration file rather tha

 1. Refer to [Debug {{< param "PRODUCT_NAME" >}}][DebuggingUI] for more information about a running {{< param "PRODUCT_NAME" >}}.

 1. If your Prometheus configuration can't be converted and loaded directly into {{< param "PRODUCT_NAME" >}}, diagnostic information is sent to `stderr`.
-   You can bypass any non-critical issues and start the Agent by including the `--config.bypass-conversion-errors` flag in addition to `--config.format=prometheus`.
+   You can bypass any non-critical issues and start {{< param "PRODUCT_NAME" >}} by including the `--config.bypass-conversion-errors` flag in addition to `--config.format=prometheus`.

    {{< admonition type="caution" >}}
    If you bypass the errors, the behavior of the converted configuration may not match the original Prometheus configuration.
-   Do not use this flag in a production environment.
+   Don't use this flag in a production environment.
{{< /admonition >}} ## Example @@ -172,6 +184,10 @@ AGENT_MODE=flow grafana-agent convert --source-format=prometheus --output= ``` +```alloy-binary +grafana-alloy convert --source-format=prometheus --output= +``` + {{< /code >}} Replace the following: diff --git a/docs/sources/tasks/migrate/from-promtail.md b/docs/sources/tasks/migrate/from-promtail.md index 6699ed03f2..7516fc34b9 100644 --- a/docs/sources/tasks/migrate/from-promtail.md +++ b/docs/sources/tasks/migrate/from-promtail.md @@ -46,6 +46,10 @@ This conversion will enable you to take full advantage of the many additional fe grafana-agent-flow convert --source-format=promtail --output= ``` + ```alloy-binary + grafana-alloy convert --source-format=promtail --output= + ``` + {{< /code >}} @@ -73,6 +77,10 @@ This conversion will enable you to take full advantage of the many additional fe ```flow-binary grafana-agent-flow convert --source-format=promtail --bypass-errors --output= + ``` + + ```alloy-binary + grafana-alloy convert --source-format=promtail --bypass-errors --output= ``` {{< /code >}} @@ -91,6 +99,10 @@ This conversion will enable you to take full advantage of the many additional fe ```flow-binary grafana-agent-flow convert --source-format=promtail --report= --output= + ``` + + ```alloy-binary + grafana-alloy convert --source-format=promtail --report= --output= ``` {{< /code >}} @@ -163,6 +175,10 @@ AGENT_MODE=flow grafana-agent convert --source-format=promtail --output= ``` +```alloy-binary +grafana-alloy convert --source-format=promtail --output= +``` + {{< /code >}} Replace the following: diff --git a/docs/sources/tasks/migrate/from-static.md b/docs/sources/tasks/migrate/from-static.md index 0e82ff92ac..faa9e18baf 100644 --- a/docs/sources/tasks/migrate/from-static.md +++ b/docs/sources/tasks/migrate/from-static.md @@ -49,6 +49,10 @@ This conversion will enable you to take full advantage of the many additional fe grafana-agent-flow convert --source-format=static --output= ``` + ```alloy-binary + grafana-alloy convert --source-format=static --output= + ``` + {{< /code >}} Replace the following: @@ -78,6 +82,10 @@ This conversion will enable you to take full advantage of the many additional fe grafana-agent-flow convert --source-format=static --bypass-errors --output= ``` + ```alloy-binary + grafana-alloy convert --source-format=static --bypass-errors --output= + ``` + {{< /code >}} Replace the following: @@ -97,6 +105,10 @@ This conversion will enable you to take full advantage of the many additional fe grafana-agent-flow convert --source-format=static --report= --output= ``` + ```alloy-binary + grafana-alloy convert --source-format=static --report= --output= + ``` + {{< /code >}} Replace the following: @@ -208,6 +220,11 @@ AGENT_MODE=flow grafana-agent convert --source-format=static --output= ``` +```alloy-binary +grafana-alloy convert --source-format=static --output= +``` + + {{< /code >}} Replace the following: @@ -306,6 +323,10 @@ AGENT_MODE=flow grafana-agent convert --source-format=static --extra-args="-enab grafana-agent-flow convert --source-format=static --extra-args="-enable-features=integrations-next" --output= ``` +```alloy-binary +grafana-alloy convert --source-format=static --extra-args="-enable-features=integrations-next" --output= +``` + {{< /code >}} Replace the following: diff --git a/docs/sources/tasks/monitor/component_metrics.md b/docs/sources/tasks/monitor/component_metrics.md index 65cdf81261..a684fa5e45 100644 --- a/docs/sources/tasks/monitor/component_metrics.md +++ 
b/docs/sources/tasks/monitor/component_metrics.md @@ -15,7 +15,7 @@ These component-specific metrics are only generated when an instance of that com Component-specific metrics are exposed at the `/metrics` HTTP endpoint of the {{< param "PRODUCT_NAME" >}} HTTP server, which defaults to listening on `http://localhost:12345`. -> The documentation for the [`grafana-agent run`][grafana-agent run] command describes how to modify the address {{< param "PRODUCT_NAME" >}} listens on for HTTP traffic. +> The documentation for the [`grafana-alloy run`][grafana-alloy run] command describes how to modify the address {{< param "PRODUCT_NAME" >}} listens on for HTTP traffic. Component-specific metrics have a `component_id` label matching the component ID generating those metrics. For example, component-specific metrics for a `prometheus.remote_write` component labeled `production` will have a `component_id` label with the value `prometheus.remote_write.production`. @@ -24,5 +24,5 @@ The [reference documentation][] for each component described the list of compone Not all components expose metrics. [components]: ../../../concepts/components/ -[grafana-agent run]: ../../../reference/cli/run/ +[grafana-alloy run]: ../../../reference/cli/run/ [reference documentation]: ../../../reference/components/ diff --git a/docs/sources/tasks/monitor/controller_metrics.md b/docs/sources/tasks/monitor/controller_metrics.md index 6ce2bf5010..7eb76deee4 100644 --- a/docs/sources/tasks/monitor/controller_metrics.md +++ b/docs/sources/tasks/monitor/controller_metrics.md @@ -11,17 +11,17 @@ The {{< param "PRODUCT_NAME" >}} [component controller][] exposes Prometheus met Metrics for the controller are exposed at the `/metrics` HTTP endpoint of the {{< param "PRODUCT_NAME" >}} HTTP server, which defaults to listening on `http://localhost:12345`. -> The documentation for the [`grafana-agent run`][grafana-agent run] command describes how to modify the address {{< param "PRODUCT_NAME" >}} listens on for HTTP traffic. +> The documentation for the [`grafana-alloy run`][grafana-alloy run] command describes how to modify the address {{< param "PRODUCT_NAME" >}} listens on for HTTP traffic. The controller exposes the following metrics: -* `agent_component_controller_evaluating` (Gauge): Set to `1` whenever the component controller is currently evaluating components. +* `alloy_component_controller_evaluating` (Gauge): Set to `1` whenever the component controller is currently evaluating components. This value may be misrepresented depending on how fast evaluations complete or how often evaluations occur. -* `agent_component_controller_running_components` (Gauge): The current number of running components by health. +* `alloy_component_controller_running_components` (Gauge): The current number of running components by health. The health is represented in the `health_type` label. -* `agent_component_evaluation_seconds` (Histogram): The time it takes to evaluate components after one of their dependencies is updated. -* `agent_component_dependencies_wait_seconds` (Histogram): Time spent by components waiting to be evaluated after one of their dependencies is updated. -* `agent_component_evaluation_queue_size` (Gauge): The current number of component evaluations waiting to be performed. +* `alloy_component_evaluation_seconds` (Histogram): The time it takes to evaluate components after one of their dependencies is updated. 
+* `alloy_component_dependencies_wait_seconds` (Histogram): Time spent by components waiting to be evaluated after one of their dependencies is updated. +* `alloy_component_evaluation_queue_size` (Gauge): The current number of component evaluations waiting to be performed. [component controller]: ../../../concepts/component_controller/ -[grafana-agent run]: ../../../reference/cli/run/ +[grafana-alloy run]: ../../../reference/cli/run/ diff --git a/docs/sources/tasks/opentelemetry-to-lgtm-stack.md b/docs/sources/tasks/opentelemetry-to-lgtm-stack.md index 7d78626a36..3eb3f14df3 100644 --- a/docs/sources/tasks/opentelemetry-to-lgtm-stack.md +++ b/docs/sources/tasks/opentelemetry-to-lgtm-stack.md @@ -270,14 +270,14 @@ loki.write "grafana_cloud_loki" { Running {{< param "PRODUCT_NAME" >}} now will give you the following: ``` -AGENT_MODE=flow ./grafana-agent run agent-config.river -AGENT_MODE=flow ./grafana-agent run agent-config.river +./grafana-alloy run alloy-config.river +./grafana-alloy run alloy-config.river ts=2023-05-09T09:37:15.300959Z level=info msg="running usage stats reporter" ts=2023-05-09T09:37:15.300958Z level=info msg="now listening for http traffic" addr=127.0.0.1:12345 ts=2023-05-09T09:37:15.301104Z level=info trace_id=6466516c9e1a556422df7a84c0ade6b0 msg="starting complete graph evaluation" ts=2023-05-09T09:37:15.301307Z level=info trace_id=6466516c9e1a556422df7a84c0ade6b0 msg="finished node evaluation" node_id=loki.write.grafana_cloud_loki duration=188.209µs ts=2023-05-09T09:37:15.301334Z level=info trace_id=6466516c9e1a556422df7a84c0ade6b0 msg="finished node evaluation" node_id=otelcol.exporter.loki.grafana_cloud_loki duration=18.791µs -ts=2023-05-09T09:37:15.303138Z component=prometheus.remote_write.grafana_cloud_prometheus level=info subcomponent=wal msg="replaying WAL, this may take a while" dir=data-agent/prometheus.remote_write.grafana_cloud_prometheus/wal +ts=2023-05-09T09:37:15.303138Z component=prometheus.remote_write.grafana_cloud_prometheus level=info subcomponent=wal msg="replaying WAL, this may take a while" dir=data-alloy/prometheus.remote_write.grafana_cloud_prometheus/wal ts=2023-05-09T09:37:15.303257Z component=prometheus.remote_write.grafana_cloud_prometheus level=info subcomponent=wal msg="WAL segment loaded" segment=0 maxSegment=1 ts=2023-05-09T09:37:15.303302Z component=prometheus.remote_write.grafana_cloud_prometheus level=info subcomponent=wal msg="WAL segment loaded" segment=1 maxSegment=1 ts=2023-05-09T09:37:15.303507Z component=prometheus.remote_write.grafana_cloud_prometheus subcomponent=rw level=info remote_name=7f623a url=https://prometheus-us-central1.grafana.net/api/prom/push msg="Starting WAL watcher" queue=7f623a @@ -299,7 +299,7 @@ ts=2023-05-09T09:37:15.304109Z component=otelcol.receiver.otlp.default level=inf ts=2023-05-09T09:37:15.304234Z component=otelcol.receiver.otlp.default level=info msg="Starting HTTP server" endpoint=0.0.0.0:4318 ``` -You can now check the pipeline graphically by visiting [http://localhost:12345/graph][] +You can check the pipeline graphically by visiting [http://localhost:12345/graph][] ![](../../../assets/tasks/otlp-lgtm-graph.png) diff --git a/docs/sources/tutorials/collecting-prometheus-metrics.md b/docs/sources/tutorials/collecting-prometheus-metrics.md index d5600a4010..324a1be083 100644 --- a/docs/sources/tutorials/collecting-prometheus-metrics.md +++ b/docs/sources/tutorials/collecting-prometheus-metrics.md @@ -88,8 +88,7 @@ prometheus.remote_write "prom" { To try out {{< param "PRODUCT_ROOT_NAME" >}} 
without using Docker: 1. Download {{< param "PRODUCT_ROOT_NAME" >}}. -1. Set the environment variable `AGENT_MODE=flow`. -1. Run the {{< param "PRODUCT_ROOT_NAME" >}} with `grafana-agent run `. +1. Run the {{< param "PRODUCT_ROOT_NAME" >}} with `grafana-alloy run `. [Docker]: https://www.docker.com/products/docker-desktop diff --git a/docs/sources/tutorials/flow-by-example/first-components-and-stdlib/index.md b/docs/sources/tutorials/flow-by-example/first-components-and-stdlib/index.md index 363a4e8294..acf30a1002 100644 --- a/docs/sources/tutorials/flow-by-example/first-components-and-stdlib/index.md +++ b/docs/sources/tutorials/flow-by-example/first-components-and-stdlib/index.md @@ -51,7 +51,7 @@ A River file is comprised of three things: The default log level is `info` and the default log format is `logfmt`. {{< /admonition >}} - Try pasting this into `config.river` and running `/path/to/agent run config.river` to see what happens. + Try pasting this into `config.river` and running `/path/to/alloy run config.river` to see what happens. Congratulations, you've just written your first River file! You've also just written your first {{< param "PRODUCT_NAME" >}} configuration file. This configuration won't do anything, so let's add some components to it. @@ -150,7 +150,7 @@ prometheus.remote_write "local_prom" { Run {{< param "PRODUCT_NAME" >}} with: ```bash -/path/to/agent run config.river +/path/to/alloy run config.river ``` Navigate to [http://localhost:3000/explore][] in your browser. @@ -212,7 +212,7 @@ You may find the [concat][] standard library function useful. You can run {{< param "PRODUCT_NAME" >}} with the new configuration file by running: ```bash -/path/to/agent run config.river +/path/to/alloy run config.river ``` Navigate to [http://localhost:3000/explore][] in your browser. @@ -272,7 +272,7 @@ You might have noticed that running {{< param "PRODUCT_NAME" >}} with the config This directory is where components can store data, such as the `prometheus.exporter.unix` component storing its WAL (Write Ahead Log). If you look in the directory, do you notice anything interesting? The directory for each component is the fully qualified name. -If you'd like to store the data elsewhere, you can specify a different directory by supplying the `--storage.path` flag to {{< param "PRODUCT_ROOT_NAME" >}}'s run command, for example, `/path/to/agent run config.river --storage.path /etc/grafana-agent`. +If you'd like to store the data elsewhere, you can specify a different directory by supplying the `--storage.path` flag to {{< param "PRODUCT_ROOT_NAME" >}}'s run command, for example, `/path/to/alloy run config.river --storage.path /etc/grafana-alloy`. Generally, you can use a persistent directory for this, as some components may use the data stored in this directory to perform their function. In the next tutorial, you will look at how to configure {{< param "PRODUCT_NAME" >}} to collect logs from a file and send them to Loki. 
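To make the `--storage.path` behavior described in the tutorial above concrete, here is an illustrative sketch. The listed directory names are hypothetical; the actual names are the fully qualified component IDs from your own configuration:

```bash
# Run with a custom storage path (sketch; adjust paths to your setup).
/path/to/alloy run config.river --storage.path /etc/grafana-alloy

# Each component stores its data under a directory named after its fully
# qualified ID, for example the WAL kept by a prometheus.remote_write component:
ls /etc/grafana-alloy
# prometheus.exporter.unix.default  prometheus.remote_write.local_prom
```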
diff --git a/docs/sources/tutorials/flow-by-example/get-started.md b/docs/sources/tutorials/flow-by-example/get-started.md
index 15bfbea870..ebb8915270 100644
--- a/docs/sources/tutorials/flow-by-example/get-started.md
+++ b/docs/sources/tutorials/flow-by-example/get-started.md
@@ -83,4 +83,4 @@ The Recommended Reading sections in each tutorial provide a list of documentatio

 [alloy]: https://grafana.com/docs/alloy/latest/
 [River]: https://github.com/grafana/river
-[install]: ../../../setup/install/binary/#install-grafana-agent-in-flow-mode-as-a-standalone-binary
+[install]: ../../../get-started/install/binary/#install-grafana-alloy-as-a-standalone-binary
diff --git a/docs/sources/tutorials/flow-by-example/processing-logs/index.md b/docs/sources/tutorials/flow-by-example/processing-logs/index.md
index 22e52dc001..5e4aa5fdd4 100644
--- a/docs/sources/tutorials/flow-by-example/processing-logs/index.md
+++ b/docs/sources/tutorials/flow-by-example/processing-logs/index.md
@@ -325,7 +325,7 @@ Now that you have all of the pieces, let's run {{< param "PRODUCT_ROOT_NAME" >}}
 Modify `config.river` with the config from the previous example and start {{< param "PRODUCT_ROOT_NAME" >}} with:

 ```bash
-/path/to/agent run config.river
+/path/to/alloy run config.river
 ```

 To get the current time in `RFC3339` format, you can run:

From 2c7d56afed105384521c68789b4ce2247eec12ec Mon Sep 17 00:00:00 2001
From: Robert Fratto
Date: Mon, 18 Mar 2024 12:10:35 -0400
Subject: [PATCH 020/136] discovery.process: restore public preview status
 (#37)

discovery.process was mistakenly bumped to GA, and the maintainers have asked for it to remain at beta/public preview.
---
 docs/sources/reference/components/discovery.process.md | 4 +++-
 internal/component/discovery/process/process.go        | 2 +-
 2 files changed, 4 insertions(+), 2 deletions(-)

diff --git a/docs/sources/reference/components/discovery.process.md b/docs/sources/reference/components/discovery.process.md
index 12df00f41f..042d410f92 100644
--- a/docs/sources/reference/components/discovery.process.md
+++ b/docs/sources/reference/components/discovery.process.md
@@ -2,6 +2,8 @@
 canonical: https://grafana.com/docs/alloy/latest/reference/components/discovery.process/
 description: Learn about discovery.process
 title: discovery.process
+labels:
+  stage: beta
 ---

 # discovery.process
@@ -205,4 +207,4 @@ Connecting some components may not be sensible or components may require further
 Refer to the linked documentation for more details.
 {{< /admonition >}}

- 
\ No newline at end of file
+ 
diff --git a/internal/component/discovery/process/process.go b/internal/component/discovery/process/process.go
index 54e1d08f02..a5b2ec6258 100644
--- a/internal/component/discovery/process/process.go
+++ b/internal/component/discovery/process/process.go
@@ -15,7 +15,7 @@ import (
 func init() {
 	component.Register(component.Registration{
 		Name:      "discovery.process",
-		Stability: featuregate.StabilityStable,
+		Stability: featuregate.StabilityBeta,
 		Args:      Arguments{},
 		Exports:   discovery.Exports{},

From d1db6f8b295815c7d0d651ed627dca5f5774aea0 Mon Sep 17 00:00:00 2001
From: Robert Fratto
Date: Tue, 19 Mar 2024 11:33:14 -0400
Subject: [PATCH 021/136] misc: create new README.md for Alloy (#40)

This does not yet include the new logo or updated screenshots; that will come in a follow-up PR.
--- README.md | 178 +++++++++++++++++++++++++++++------------------------- 1 file changed, 97 insertions(+), 81 deletions(-) diff --git a/README.md b/README.md index 87ba44ba8d..9010561f48 100644 --- a/README.md +++ b/README.md @@ -1,97 +1,104 @@
 [image: Grafana Alloy logo]
-[Grafana Alloy][] is an OpenTelemetry Collector distribution with configuration
-inspired by [Terraform][]. It is designed to be flexible, performant, and
-compatible with multiple ecosystems such as Prometheus and OpenTelemetry.
+[badges: Latest Release, Documentation link]
-Grafana Alloy is based around **components**. Components are wired together to
-form programmable observability **pipelines** for telemetry collection,
-processing, and delivery.
+Grafana Alloy is a vendor-agnostic OpenTelemetry Collector distribution with
+extra capabilities for writing, running, and debugging powerful pipelines.
-* The [Prometheus][] ecosystem -* The [OpenTelemetry][] ecosystem -* The Grafana open source ecosystem ([Loki][], [Grafana][], [Tempo][], [Mimir][], [Pyroscope][]) +## What can Alloy do? -[Terraform]: https://terraform.io -[Grafana Alloy]: https://grafana.com/docs/alloy/latest/ -[Prometheus]: https://prometheus.io -[OpenTelemetry]: https://opentelemetry.io -[Loki]: https://github.com/grafana/loki -[Grafana]: https://github.com/grafana/grafana -[Tempo]: https://github.com/grafana/tempo -[Mimir]: https://github.com/grafana/mimir -[Pyroscope]: https://github.com/grafana/pyroscope - -## Why use Grafana Alloy? - -* **Vendor-neutral**: Fully compatible with the Prometheus, OpenTelemetry, and - Grafana open source ecosystems. -* **Every signal**: Collect telemetry data for metrics, logs, traces, and - continuous profiles. -* **Scalable**: Deploy on any number of machines to collect millions of active - series and terabytes of logs. -* **Battle-tested**: Grafana Alloy extends the existing battle-tested code from - the Prometheus and OpenTelemetry Collector projects. -* **Powerful**: Write programmable pipelines with ease, and debug them using a - [built-in UI][UI]. -* **Batteries included**: Integrate with systems like MySQL, Kubernetes, and - Apache to get telemetry that's immediately useful. - -[UI]: https://grafana.com/docs/alloy/latest/tasks/debug/#grafana-alloy-ui +* **Programmable pipelines**: Use a rich [expression-based syntax][syntax] for + configuring powerful observability pipelines. -## Getting started +* **OpenTelemetry Collector Distribution**: Alloy is a [distribution][] of + OpenTelemetry Collector and supports dozens of its components, alongside new + components that make use of Alloy's programmable pipelines. -Check out our [documentation][] to see: +* **Vendor-agnostic**: Alloy embraces Grafana's "big tent" philosophy, and has + components to perfectly integrate with multiple telemetry ecosystems: + + * [OpenTelemetry Collector][] + * [Prometheus][] + * [Grafana Loki][] + * [Grafana Pyroscope][] + +* **Kubernetes-native**: Use components to interact with native and custom + Kubernetes resources; no need to learn how to use a separate Kubernetes + operator. + +* **Shareable pipelines**: Use [modules][] to share your pipelines with the + world. -* [Installation instructions][] for Grafana Alloy -* Details about [Grafana Alloy][documentation] -* Steps for [Getting started][] with Grafana Alloy -* The list of Grafana Alloy [Components][] +* **Automatic workload distribution**: Configure Alloy instances to form a + [cluster][] for automatic workload distribution. -[documentation]: https://grafana.com/docs/alloy/ -[Installation instructions]: https://grafana.com/docs/alloy/latest/setup/install/ -[Getting started]: https://grafana.com/docs/alloy/latest/getting_started/ -[Components]: https://grafana.com/docs/alloy/latest/reference/components/ +* **Centralized configuration support**: Alloy supports retrieving its + configuration from a [server][remotecfg] for centralized configuration + management. + +* **Debugging utilities**: Use the [built-in UI][ui] for visualizing and + debugging pipelines. 
+ +[syntax]: https://grafana.com/docs/alloy/latest/concepts/config-syntax/ +[distribution]: https://opentelemetry.io/docs/collector/distributions/ +[OpenTelemetry Collector]: https://opentelemetry.io +[Prometheus]: https://prometheus.io +[Grafana Loki]: https://github.com/grafana/loki +[Grafana Pyroscope]: https://github.com/grafana/pyroscope +[modules]: https://grafana.com/docs/alloy/latest/concepts/modules/ +[cluster]: https://grafana.com/docs/alloy/latest/concepts/clustering/ +[remotecfg]: https://grafana.com/docs/alloy/latest/reference/config-blocks/remotecfg/ +[ui]: https://grafana.com/docs/alloy/latest/tasks/debug/ ## Example -```river -// Discover Kubernetes pods to collect metrics from. -discovery.kubernetes "pods" { - role = "pod" -} +```alloy +otelcol.receiver.otlp "example" { + grpc { + endpoint = "127.0.0.1:4317" + } -// Collect metrics from Kubernetes pods. -prometheus.scrape "default" { - targets = discovery.kubernetes.pods.targets - forward_to = [prometheus.remote_write.default.receiver] + output { + metrics = [otelcol.processor.batch.example.input] + logs = [otelcol.processor.batch.example.input] + traces = [otelcol.processor.batch.example.input] + } } -// Get an API key from disk. -local.file "apikey" { - filename = "/var/data/my-api-key.txt" - is_secret = true +otelcol.processor.batch "example" { + output { + metrics = [otelcol.exporter.otlp.default.input] + logs = [otelcol.exporter.otlp.default.input] + traces = [otelcol.exporter.otlp.default.input] + } } -// Send metrics to a Prometheus remote_write endpoint. -prometheus.remote_write "default" { - endpoint { - url = "http://localhost:9009/api/prom/push" - - basic_auth { - username = "MY_USERNAME" - password = local.file.apikey.content - } +otelcol.exporter.otlp "default" { + client { + endpoint = "my-otlp-grpc-server:4317" } } ``` -We maintain an example [Docker Compose environment][] that can be used to -launch dependencies to play with Grafana Alloy locally. +## Getting started + +Check out our [documentation][] to see: + +* [Installation instructions][install] for Alloy +* Steps for [Getting started][get-started] with Alloy +* The list of Alloy [components][] -[Docker Compose environment]: ./example/docker-compose/ +[documentation]: https://grafana.com/docs/alloy/latest +[install]: https://grafana.com/docs/alloy/latest/setup/install/ +[get-started]: https://grafana.com/docs/alloy/latest/getting_started/ +[components]: https://grafana.com/docs/alloy/latest/reference/components/ ## Release cadence @@ -106,25 +113,34 @@ OpenTelemetry Collector code if new versions are available. Minor releases published outside of the release cadence may not include these dependency updates. -Patch and security releases may be created at any time. +Patch and security releases may be published at any time. ## Community -To engage with the Grafana Alloy community: +To engage with the Alloy community: * Chat with us on our community Slack channel. To invite yourself to the Grafana Slack, visit and join the `#alloy` channel. -* Ask questions on the [Discussions page][]. -* [File an issue][] for bugs, issues, and feature suggestions. -* Attend the monthly [community call][]. -[Discussions page]: https://github.com/grafana/alloy/discussions -[File an issue]: https://github.com/grafana/alloy/issues/new -[community call]: https://docs.google.com/document/d/1TqaZD1JPfNadZ4V81OCBPCG_TksDYGlNlGdMnTWUSpo +* Ask questions on the [Grafana community website][community]. 
-## Contribute +* [File an issue][issue] for bugs, issues, and feature suggestions. + +* Attend the monthly [community call][community-call]. + +[community]: https://community.grafana.com +[issue]: https://github.com/grafana/alloy/issues/new +[community-call]: https://docs.google.com/document/d/1TqaZD1JPfNadZ4V81OCBPCG_TksDYGlNlGdMnTWUSpo + +## Contributing Refer to our [contributors guide][] to learn how to contribute. -[contributors guide]: ./docs/developer/contributing.md +Thanks to all the people who have already contributed! + + + + + +[contributors guide]: https://github.com/grafana/alloy/blob/main/docs/developer/contributing.md From 415e658d37ac276a5ede35333433fd26f465d2ca Mon Sep 17 00:00:00 2001 From: Robert Fratto Date: Tue, 19 Mar 2024 11:40:15 -0400 Subject: [PATCH 022/136] docs: remove step to create discussion page (#41) At least for now, the idea is to focus discussion towards , so discussions have been disabled in the Alloy repo. --- docs/developer/release/6-publish-release.md | 7 +++---- 1 file changed, 3 insertions(+), 4 deletions(-) diff --git a/docs/developer/release/6-publish-release.md b/docs/developer/release/6-publish-release.md index 48a492aeb4..d36b828d8c 100644 --- a/docs/developer/release/6-publish-release.md +++ b/docs/developer/release/6-publish-release.md @@ -15,15 +15,14 @@ This is how to publish the release in GitHub. 3. Add a footer to the `Notable Changes` section: `For a full list of changes, please refer to the [CHANGELOG](https://github.com/grafana/agent/blob/RELEASE_VERSION/CHANGELOG.md)!` - - Do not substitute the value for `CHANGELOG`. + + Do not substitute the value for `CHANGELOG`. 4. At the bottom of the release page, perform the following: - - Tick the check box to "add a discussion" under the category for "announcements". - For a Release Candidate, tick the checkbox to "pre-release". - For a Stable Release or Patch Release, tick the checkbox to "set as the latest release". 5. Optionally, have other team members review the release draft if you wish to feel more comfortable with it. -6. Publish the release! \ No newline at end of file +6. Publish the release! From bc338240fae0b887ba7f8c5c5b9c1adafc455b1b Mon Sep 17 00:00:00 2001 From: Robert Fratto Date: Tue, 19 Mar 2024 12:03:02 -0400 Subject: [PATCH 023/136] Revert "docs: remove step to create discussion page" (#42) * Revert "docs: remove step to create discussion page (#41)" This reverts commit 415e658d37ac276a5ede35333433fd26f465d2ca. * add discussions link to README.md --- README.md | 4 ++-- docs/developer/release/6-publish-release.md | 7 ++++--- 2 files changed, 6 insertions(+), 5 deletions(-) diff --git a/README.md b/README.md index 9010561f48..0c7b9a1c05 100644 --- a/README.md +++ b/README.md @@ -123,13 +123,13 @@ To engage with the Alloy community: Grafana Slack, visit and join the `#alloy` channel. -* Ask questions on the [Grafana community website][community]. +* Ask questions on the [Discussions page][discussions]. * [File an issue][issue] for bugs, issues, and feature suggestions. * Attend the monthly [community call][community-call]. 
-[community]: https://community.grafana.com +[discussions]: https://github.com/grafana/agent/discussions [issue]: https://github.com/grafana/alloy/issues/new [community-call]: https://docs.google.com/document/d/1TqaZD1JPfNadZ4V81OCBPCG_TksDYGlNlGdMnTWUSpo diff --git a/docs/developer/release/6-publish-release.md b/docs/developer/release/6-publish-release.md index d36b828d8c..48a492aeb4 100644 --- a/docs/developer/release/6-publish-release.md +++ b/docs/developer/release/6-publish-release.md @@ -15,14 +15,15 @@ This is how to publish the release in GitHub. 3. Add a footer to the `Notable Changes` section: `For a full list of changes, please refer to the [CHANGELOG](https://github.com/grafana/agent/blob/RELEASE_VERSION/CHANGELOG.md)!` - - Do not substitute the value for `CHANGELOG`. + + Do not substitute the value for `CHANGELOG`. 4. At the bottom of the release page, perform the following: + - Tick the check box to "add a discussion" under the category for "announcements". - For a Release Candidate, tick the checkbox to "pre-release". - For a Stable Release or Patch Release, tick the checkbox to "set as the latest release". 5. Optionally, have other team members review the release draft if you wish to feel more comfortable with it. -6. Publish the release! +6. Publish the release! \ No newline at end of file From 077fb6db33984c1f4725606dc8571619c38b12df Mon Sep 17 00:00:00 2001 From: Robert Fratto Date: Tue, 19 Mar 2024 12:33:27 -0400 Subject: [PATCH 024/136] misc: setup Grafana Alloy governance (#43) --- GOVERNANCE.md | 44 +++++++++++++++++++++----------------------- 1 file changed, 21 insertions(+), 23 deletions(-) diff --git a/GOVERNANCE.md b/GOVERNANCE.md index eeb79d9e21..3a426b7d62 100644 --- a/GOVERNANCE.md +++ b/GOVERNANCE.md @@ -3,20 +3,20 @@ title: Governance --- # Governance -This document describes the rules and governance of the project. It is meant to be followed by all the developers of the project and the Grafana Agent community. Common terminology used in this governance document are listed below: +This document describes the rules and governance of the project. It is meant to be followed by all the developers of the project and the Grafana Alloy community. Common terminology used in this governance document are listed below: - **Team members**: Any members of the private [team mailing list][team]. - **Maintainers**: Maintainers lead an individual project or parts thereof ([`MAINTAINERS.md`][maintainers]). - **Projects**: A single repository in the Grafana GitHub organization and listed below is referred to as a project: - - Grafana Agent + - Grafana Alloy -- **The Grafana Agent project**: The sum of all activities performed under this governance, concerning one or more repositories or the community. +- **The Grafana Alloy project**: The sum of all activities performed under this governance, concerning one or more repositories or the community. ## Values -The Grafana Agent developers and community are expected to follow the values defined in the [Code of Conduct][coc]. Furthermore, the Grafana Agent community strives for kindness, giving feedback effectively, and building a welcoming environment. The Grafana Agent developers generally decide by consensus and only resort to conflict resolution by a majority vote if consensus cannot be reached. +The Grafana Alloy developers and community are expected to follow the values defined in the [Code of Conduct][coc]. 
Furthermore, the Grafana Alloy community strives for kindness, giving feedback effectively, and building a welcoming environment. The Grafana Alloy developers generally decide by consensus and only resort to conflict resolution by a majority vote if consensus cannot be reached. ## Projects @@ -26,7 +26,7 @@ Each project must have a [`MAINTAINERS.md`][maintainers] file with at least one ### Team members -Team member status may be given to those who have made ongoing contributions to the Grafana Agent project for at least 3 months. This is usually in the form of code improvements and/or notable work on documentation, but organizing events or user support could also be taken into account. +Team member status may be given to those who have made ongoing contributions to the Grafana Alloy project for at least 3 months. This is usually in the form of code improvements and/or notable work on documentation, but organizing events or user support could also be taken into account. New members may be proposed by any existing member by email to the [team mailing list][team]. It is highly desirable to reach consensus about acceptance of a new member. However, the proposal is ultimately voted on by a formal [supermajority vote](#supermajority-vote). @@ -46,20 +46,19 @@ In case a member leaves, the [offboarding](#offboarding) procedure is applied. The current team members are: -- Jorge Creixell - [jcreixell](https://github.com/jcreixell) ([Grafana Labs](https://grafana.com)) +- Erik Baranowski - [erikbaranowski](https://github.com/erikbaranowski) ([Grafana Labs](https://grafana.com)) +- William Dumont - [wildum](https://github.com/wildum) ([Grafana Labs](https://grafana.com)) - Matt Durham - [mattdurham](https://github.com/mattdurham) ([Grafana Labs](https://grafana.com)) -- Joe Elliott - [joe-elliott](https://github.com/joe-elliott) ([Grafana Labs](https://grafana.com)) - Robert Fratto - [rfratto](https://github.com/rfratto) ([Grafana Labs](https://grafana.com)) -- Richard Hartmann - [RichiH](https://github.com/RichiH) ([Grafana Labs](https://grafana.com)) -- Robert Lankford - [rlankfo](https://github.com/rlankfo) ([Grafana Labs](https://grafana.com)) -- Mario Rodriguez - [mapno](https://github.com/mapno) ([Grafana Labs](https://grafana.com)) +- Piotr Gwizdala - [thampiotr](https://github.com/thampiotr) ([Grafana Labs](https://grafana.com)) +- Paulin Todev - [ptodev](https://github.com/ptodev) ([Grafana Labs](https://grafana.com)) - Paschalis Tsilias - [tpaschalis](https://github.com/tpaschalis) ([Grafana Labs](https://grafana.com)) ### Maintainers Maintainers lead one or more project(s) or parts thereof and serve as a point of conflict resolution amongst the contributors to this project. Ideally, maintainers are also team members, but exceptions are possible for suitable maintainers that, for whatever reason, are not yet team members. -Changes in maintainership have to be announced on the [developers mailing list][devs]. They are decided by [rough consensus](#consensus) and formalized by changing the [`MAINTAINERS.md`][maintainers] file of the respective repository. +Changes in maintainership have to be announced on the [GitHub Discussions][discussions] page. They are decided by [rough consensus](#consensus) and formalized by changing the [`MAINTAINERS.md`][maintainers] file of the respective repository. Maintainers are granted commit rights to all projects covered by this governance. 
@@ -69,7 +68,7 @@ A project may have multiple maintainers, as long as the responsibilities are cle ### Technical decisions -Technical decisions that only affect a single project are made informally by the maintainer of this project, and [rough consensus](#consensus) is assumed. Technical decisions that span multiple parts of the project should be discussed and made on the [developer mailing list][devs]. +Technical decisions that only affect a single project are made informally by the maintainer of this project, and [rough consensus](#consensus) is assumed. Technical decisions that span multiple parts of the project should be discussed and made on the [GitHub discussions page][discussions]. Decisions are usually made by [rough consensus](#consensus). If no consensus can be reached, the matter may be resolved by [majority vote](#majority-vote). @@ -79,11 +78,11 @@ Changes to this document are made by Grafana Labs. ### Other matters -Any matter that needs a decision may be called to a vote by any member if they deem it necessary. For private or personnel matters, discussion and voting takes place on the [team mailing list][team], otherwise on the [developer mailing list][devs]. +Any matter that needs a decision may be called to a vote by any member if they deem it necessary. For private or personnel matters, discussion and voting takes place on the [team mailing list][team], otherwise on the [GitHub discussions page][discussions]. ## Voting -The Grafana Agent project usually runs by informal consensus, however sometimes a formal decision must be made. +The Grafana Alloy project usually runs by informal consensus, however sometimes a formal decision must be made. Depending on the subject matter, as laid out [above](#decision-making), different methods of voting are used. @@ -91,15 +90,15 @@ For all votes, voting must be open for at least one week. The end date should be In all cases, all and only [team members](#team-members) are eligible to vote, with the sole exception of the forced removal of a team member, in which said member is not eligible to vote. -Discussion and votes on personnel matters (including but not limited to team membership and maintainership) are held in private on the [team mailing list][team]. All other discussion and votes are held in public on the [developer mailing list][devs]. +Discussion and votes on personnel matters (including but not limited to team membership and maintainership) are held in private on the [team mailing list][team]. All other discussion and votes are held in public on the [GitHub discussions page][discussions]. For public discussions, anyone interested is encouraged to participate. Formal power to object or vote is limited to [team members](#team-members). ### Consensus -The default decision making mechanism for the Grafana Agent project is [rough][rough] consensus. This means that any decision on technical issues is considered supported by the [team][team] as long as nobody objects or the objection has been considered but not necessarily accommodated. +The default decision making mechanism for the Grafana Alloy project is [rough][rough] consensus. This means that any decision on technical issues is considered supported by the [team][team] as long as nobody objects or the objection has been considered but not necessarily accommodated. -Silence on any consensus decision is implicit agreement and equivalent to explicit agreement. Explicit agreement may be stated at will. 
Decisions may, but do not need to be called out and put up for decision on the [developers mailing list][devs] at any time and by anyone.
+Silence on any consensus decision is implicit agreement and equivalent to explicit agreement. Explicit agreement may be stated at will. Decisions may, but do not need to be called out and put up for decision on the [GitHub discussions page][discussions] at any time and by anyone.

 Consensus decisions can never override or go against the spirit of an earlier explicit vote.

@@ -134,7 +133,7 @@ If there are multiple alternatives, members may vote for one or more alternative

 The new member is

 - added to the list of [team members](#team-members). Ideally by sending a PR of their own, at least approving said PR.
-- announced on the [developers mailing list][devs] by an existing team member. Ideally, the new member replies in this thread, acknowledging team membership.
+- announced on the [GitHub discussions page][discussions] by an existing team member. Ideally, the new member replies in this thread, acknowledging team membership.
 - added to the projects with commit rights.
 - added to the [team mailing list][team].

@@ -151,9 +150,8 @@ The ex-member is

 If needed, we reserve the right to publicly announce removal.

-[coc]: https://github.com/grafana/agent/blob/main/CODE_OF_CONDUCT.md
-[devs]: https://groups.google.com/forum/#!forum/grafana-agent-developers
-[maintainers]: https://github.com/grafana/agent/blob/main/MAINTAINERS.md
+[coc]: https://github.com/grafana/alloy/blob/main/CODE_OF_CONDUCT.md
+[maintainers]: https://github.com/grafana/alloy/blob/main/MAINTAINERS.md
 [rough]: https://tools.ietf.org/html/rfc7282
-[team]: https://groups.google.com/forum/#!forum/grafana-agent-team
-[discussions]: https://github.com/grafana/agent/discussions
+[team]: https://groups.google.com/forum/#!forum/grafana-alloy-team
+[discussions]: https://github.com/grafana/alloy/discussions

From 550fec426152ef0a7f3d495d6388ae8fb5b2445b Mon Sep 17 00:00:00 2001
From: Robert Fratto
Date: Tue, 19 Mar 2024 13:42:21 -0400
Subject: [PATCH 025/136] Implement standardized versioning (#44)

* build: normalize version strings

Normalize version strings so they are consistently reported as vVERSION, where VERSION is a valid semantic version string.

Custom versions that are not valid semvers are returned as is, and empty strings are returned as v0.0.0.

* tools/gen-versioned-files: move version file to root as VERSION

* docs: update developer docs for managing VERSION file

* tools/image-tag: report consistent build information

This commit updates tools/image-tag to always report consistent build information, with escape hatches to force the reported version:

1. If RELEASE_TAG is set (which happens via Drone pipelines), then the script emits the value of the RELEASE_TAG environment variable.

2. If a build is being performed against a Git tag, then the script emits the value of that Git tag.

3. Finally, if neither of the above are true, the version from the VERSION file is used, followed by the prerelease being set to `devel` and the short SHA added as build metadata.

Provided healthy workflows where RELEASE_TAG and Git tags are always semantic versions, this guarantees that versions reported by builds are consistent and can be parsed properly.
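As a rough sketch, the precedence described in this commit message corresponds to shell logic along the following lines. This is illustrative only; the authoritative implementation is the `tools/image-tag` diff below:

```bash
#!/usr/bin/env bash
# Illustrative sketch of the version precedence, not the exact script.
if [ -n "${RELEASE_TAG}" ]; then
  # 1. Version forced through the RELEASE_TAG environment variable
  #    (set by Drone pipelines).
  echo "${RELEASE_TAG}"
elif TAG=$(git describe --match 'v*' --exact-match 2>/dev/null); then
  # 2. Building against an exact Git tag: use the tag itself.
  echo "${TAG}"
else
  # 3. Fall back to the VERSION file, marked as a devel prerelease with the
  #    short commit SHA as build metadata, e.g. v1.0.0-devel+abc1234.
  VERSION=$(sed -e '/^#/d' -e '/^$/d' VERSION | tr -d '\n')
  echo "${VERSION}-devel+$(git rev-parse --short HEAD)"
fi
```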
---
 VERSION                                     | 23 +++++++++++++
 .../release/1-create-release-branch.md      |  4 +++
 .../release/3-update-version-in-code.md     | 26 ++++++++-------
 docs/sources/_index.md                      |  2 +-
 internal/build/build.go                     | 21 ++++++++++++
 internal/build/build_test.go                | 33 +++++++++++++++++++
 tools/gen-versioned-files/agent-version.txt |  1 -
 .../gen-versioned-files.sh                  | 14 ++++----
 tools/image-tag                             | 33 +++++++++++++------
 9 files changed, 126 insertions(+), 31 deletions(-)
 create mode 100644 VERSION
 create mode 100644 internal/build/build_test.go
 delete mode 100644 tools/gen-versioned-files/agent-version.txt

diff --git a/VERSION b/VERSION
new file mode 100644
index 0000000000..18bdb6741c
--- /dev/null
+++ b/VERSION
@@ -0,0 +1,23 @@
+# This file is used to determine the semantic version for the build of Grafana
+# Alloy being constructed.
+#
+# For builds produced against release branches, the major and minor version
+# indicated in this file match the release branch. For example, for the branch
+# release/v1.0, this file will report v1.0.0. The patch version indicated by this
+# file syncs with the patch release being planned. For example, if a v1.0.1
+# patch release is planned, this file will report v1.0.1 before that release is
+# produced.
+#
+# For builds produced against the main branch, the major and minor version
+# reported by this file matches the next minor or major version to be released.
+# For example, if v1.0.0 was just released, this file (in the main branch) will
+# report v1.1.0, the next release planned.
+#
+# The string in this file MUST be a valid semantic version prefixed with "v",
+# without any pre-release or build information.
+#
+# This file is ignored when building binaries from a Git tag.
+#
+# Lines starting with "#" and blank lines are ignored.
+
+v1.0.0
diff --git a/docs/developer/release/1-create-release-branch.md b/docs/developer/release/1-create-release-branch.md
index 5e7a3c6508..b34e8806ef 100644
--- a/docs/developer/release/1-create-release-branch.md
+++ b/docs/developer/release/1-create-release-branch.md
@@ -24,3 +24,7 @@ Patch Releases for that major or minor version of the agent.

 > **NOTE**: Don't create any other branches that are prefixed with `release` when creating PRs or those branches will collide with our automated release build publish rules.
+
+3. Open a PR against `main` to update the VERSION file at the root of the
+   repository to the next minor release planned. For example, if you have just
+   created `release/v1.0`, then VERSION should be updated to `v1.1.0`.
diff --git a/docs/developer/release/3-update-version-in-code.md b/docs/developer/release/3-update-version-in-code.md
index 6384973cc4..7a3c9cdc3b 100644
--- a/docs/developer/release/3-update-version-in-code.md
+++ b/docs/developer/release/3-update-version-in-code.md
@@ -22,18 +22,6 @@ The project must be updated to reference the upcoming release tag whenever a new

 2. Move the unreleased changes we want to add to the release branch from `Main (unreleased)` to `VERSION (YYYY-MM-DD)`.

-3. Update appropriate places in the codebase that have the previous version with the new version determined above.
-
-   First update `tools/gen-versioned-files/agent-version.txt` with the new `VERSION` and run:
-
-   ```
-   make generate-versioned-files
-   ```
-
-   Next, commit the changes (including those to `tools/gen-versioned-files/agent-version.txt`, as a workflow will use this version to ensure that the templates and generated files are in sync).
-
-   * Do **not** update the `operations/helm` directory.
It is updated independently from Agent releases. - 3. Create a PR to merge to main (must be merged before continuing). - Release Candidate example PR [here](https://github.com/grafana/agent/pull/3065) @@ -50,6 +38,20 @@ The project must be updated to reference the upcoming release tag whenever a new Delete the `Main (unreleased)` header and anything underneath it as part of the cherry-pick. Alternatively, do it after the cherry-pick is completed. +6. **If you are creating a patch release,** ensure that the file called `VERSION` in your branch matches the version being published, without any release candidate or build information: + + > **NOTE**: Only perform this step for patch releases, and make sure that + > the change is not pushed to the main branch. + + After updating `VERSION`, run: + + ```bash + make generate-versioned-files + ``` + + Next, commit the changes (including those to `VERSION`, as a workflow will use this version to ensure that the templates and generated files are in sync). + + 6. Create a PR to merge to `release/VERSION_PREFIX` (must be merged before continuing). - Release Candidate example PR [here](https://github.com/grafana/agent/pull/3066) diff --git a/docs/sources/_index.md b/docs/sources/_index.md index a751d5bbed..e13b013f8f 100644 --- a/docs/sources/_index.md +++ b/docs/sources/_index.md @@ -4,7 +4,7 @@ title: Grafana Alloy description: Grafana Alloy is a flexible, performant, vendor-neutral, telemetry collector weight: 350 cascade: - ALLOY_RELEASE: $ALLOY_VERSION + ALLOY_RELEASE: v1.0.0 OTEL_VERSION: v0.87.0 PRODUCT_NAME: Grafana Alloy PRODUCT_ROOT_NAME: Alloy diff --git a/internal/build/build.go b/internal/build/build.go index 3e3b1bce12..43201285c4 100644 --- a/internal/build/build.go +++ b/internal/build/build.go @@ -1,6 +1,9 @@ package build import ( + "strings" + + "github.com/blang/semver/v4" "github.com/prometheus/client_golang/prometheus" "github.com/prometheus/common/version" ) @@ -18,9 +21,27 @@ var ( ) func init() { + Version = normalizeVersion(Version) injectVersion() } +// normalizeVersion normalizes the version string to always contain a "v" +// prefix. If version cannot be parsed as a semantic version, version is returned unmodified. +// +// If version is empty, normalizeVersion returns "v0.0.0".
+func normalizeVersion(version string) string { + version = strings.TrimSpace(version) + if version == "" { + return "v0.0.0" + } + + parsed, err := semver.ParseTolerant(version) + if err != nil { + return version + } + return "v" + parsed.String() +} + func injectVersion() { version.Version = Version version.Revision = Revision diff --git a/internal/build/build_test.go b/internal/build/build_test.go new file mode 100644 index 0000000000..9ba75abacc --- /dev/null +++ b/internal/build/build_test.go @@ -0,0 +1,33 @@ +package build + +import ( + "testing" + + "github.com/stretchr/testify/assert" +) + +func Test_normalizeVersion(t *testing.T) { + tt := []struct { + input string + expect string + }{ + {"", "v0.0.0"}, + {"v1.2.3", "v1.2.3"}, + {"1.2.3", "v1.2.3"}, + {"1.2.3+SHA", "v1.2.3+SHA"}, + {"v1.2.3+SHA", "v1.2.3+SHA"}, + {"1.2.3-rc.1", "v1.2.3-rc.1"}, + {"v1.2.3-rc.1", "v1.2.3-rc.1"}, + {"1.2.3-rc.1+SHA", "v1.2.3-rc.1+SHA"}, + {"v1.2.3-rc.1+SHA", "v1.2.3-rc.1+SHA"}, + {"not_semver", "not_semver"}, + } + + for _, tc := range tt { + actual := normalizeVersion(tc.input) + assert.Equal(t, tc.expect, actual, + "Expected %q to normalize to %q, got %q", + tc.input, tc.expect, actual, + ) + } +} diff --git a/tools/gen-versioned-files/agent-version.txt b/tools/gen-versioned-files/agent-version.txt deleted file mode 100644 index 6b97562039..0000000000 --- a/tools/gen-versioned-files/agent-version.txt +++ /dev/null @@ -1 +0,0 @@ -v0.40.3 diff --git a/tools/gen-versioned-files/gen-versioned-files.sh b/tools/gen-versioned-files/gen-versioned-files.sh index fff14df68d..140fa6fe12 100755 --- a/tools/gen-versioned-files/gen-versioned-files.sh +++ b/tools/gen-versioned-files/gen-versioned-files.sh @@ -1,20 +1,20 @@ #!/bin/sh -AGENT_VERSION=$(cat ./tools/gen-versioned-files/agent-version.txt | tr -d '\n') +ALLOY_VERSION=$(sed -e '/^#/d' -e '/^$/d' VERSION | tr -d '\n') -if [ -z "$AGENT_VERSION" ]; then - echo "AGENT_VERSION can't be found. Are you running this from the repo root?" +if [ -z "$ALLOY_VERSION" ]; then + echo "ALLOY_VERSION can't be found. Are you running this from the repo root?" exit 1 fi -versionMatcher='^v[0-9]+\.[0-9]+\.[0-9]+(-rc\.[0-9]+)?$' +versionMatcher='^v[0-9]+\.[0-9]+\.[0-9]+$' -if ! echo "$AGENT_VERSION" | grep -Eq "$versionMatcher"; then - echo "AGENT_VERSION env var is not in the correct format. It should be in the format of vX.Y.Z or vX.Y.Z-rc.N" +if ! echo "$ALLOY_VERSION" | grep -Eq "$versionMatcher"; then + echo "ALLOY_VERSION is not in the correct format. It should be in the format of vX.Y.Z" exit 1 fi templates=$(find . -type f -name "*.t" -not -path "./.git/*") for template in $templates; do echo "Generating ${template%.t}" - sed -e "s/\$AGENT_VERSION/$AGENT_VERSION/g" < "$template" > "${template%.t}" + sed -e "s/\$ALLOY_VERSION/$ALLOY_VERSION/g" < "$template" > "${template%.t}" done diff --git a/tools/image-tag b/tools/image-tag index 389a1bd7e3..e85affbf59 100755 --- a/tools/image-tag +++ b/tools/image-tag @@ -1,21 +1,34 @@ #!/usr/bin/env bash +# +# image-tag determines which version to embed into a built image. +# +# It prefers the following in precedence order: +# +# 1. RELEASE_TAG environment variable +# 2. The Git tag of the current commit (if any) +# 3. The version in the VERSION file, suffixed with -devel plus build +# information. set -o errexit set -o pipefail +VERSION=$(sed -e '/^#/d' -e '/^$/d' VERSION | tr -d '\n') +DETECTED_TAG=$(git describe --match 'v*' --exact-match 2>/dev/null || echo -n "") + if [ ! 
-z "${RELEASE_TAG}" ]; then echo ${RELEASE_TAG} exit 0 +elif [ ! -z "${DETECTED_TAG}" ]; then + echo ${DETECTED_TAG} + exit 0 fi set -o nounset -WIP=$(git diff --quiet || echo '-WIP') -BRANCH=$(git rev-parse --abbrev-ref HEAD | sed 's#/#-#g') - -# When 7 chars are not enough to be unique, git automatically uses more. -# We are forcing to 7 here, as we are doing for grafana/grafana as well. -SHA=$(git rev-parse --short=7 HEAD | head -c7) - -# If this is a tag, use it, otherwise use - -TAG=$(git describe --exact-match 2> /dev/null || echo "${BRANCH}-${SHA}") -echo ${TAG}${WIP} +if [[ -z $(git status -s) ]]; then + # There are no changes; report version as VERSION-devel+SHA. + SHA=$(git rev-parse --short HEAD) + echo ${VERSION}-devel+${SHA} +else + # Git is dirty; tag as VERSION-devel+wip. + echo ${VERSION}-devel+wip +fi From 8098d4dbb0dc103941f886f38254b7951ab8635c Mon Sep 17 00:00:00 2001 From: Robert Fratto Date: Tue, 19 Mar 2024 14:09:11 -0400 Subject: [PATCH 026/136] ci: build and publish development images on commit to main (#45) --- .drone/drone.yml | 45 +++++++++++++++++++++++- .drone/pipelines/publish.jsonnet | 60 +++++++++++++++++++++++++++++++- tools/ci/docker-containers | 24 +++++++++---- 3 files changed, 120 insertions(+), 9 deletions(-) diff --git a/.drone/drone.yml b/.drone/drone.yml index fae58d3202..59fc5df03c 100644 --- a/.drone/drone.yml +++ b/.drone/drone.yml @@ -303,6 +303,49 @@ trigger: type: docker --- kind: pipeline +name: Publish development Linux agent container +platform: + arch: amd64 + os: linux +steps: +- commands: + - docker run --rm --privileged multiarch/qemu-user-static --reset -p yes + failure: ignore + image: grafana/agent-build-image:0.40.2 + name: Configure QEMU + volumes: + - name: docker + path: /var/run/docker.sock +- commands: + - mkdir -p $HOME/.docker + - printenv GCR_CREDS > $HOME/.docker/config.json + - docker login -u $DOCKER_LOGIN -p $DOCKER_PASSWORD + - docker buildx create --name multiarch-agent-agent-${DRONE_COMMIT_SHA} --driver + docker-container --use + - DEVELOPMENT=1 ./tools/ci/docker-containers agent + - docker buildx rm multiarch-agent-agent-${DRONE_COMMIT_SHA} + environment: + DOCKER_LOGIN: + from_secret: docker_login + DOCKER_PASSWORD: + from_secret: docker_password + GCR_CREDS: + from_secret: gcr_admin + image: grafana/agent-build-image:0.40.2 + name: Publish container + volumes: + - name: docker + path: /var/run/docker.sock +trigger: + ref: + - refs/heads/main +type: docker +volumes: +- host: + path: /var/run/docker.sock + name: docker +--- +kind: pipeline name: Test Linux system packages platform: arch: amd64 @@ -407,6 +450,6 @@ kind: secret name: updater_private_key --- kind: signature -hmac: 59c741cd4e3cd3f555cbf0165da386b269a7f54987fe5a2aba621edc6ebb09a5 +hmac: eb4c87d4abc880513c7c2977c46910fa96041461aa2edea16a7970f5c145dd01 ... 
diff --git a/.drone/pipelines/publish.jsonnet b/.drone/pipelines/publish.jsonnet index e3117042a3..7501017ac3 100644 --- a/.drone/pipelines/publish.jsonnet +++ b/.drone/pipelines/publish.jsonnet @@ -7,6 +7,61 @@ local ghTokenFilename = '/drone/src/gh-token.txt'; local job_names = function(jobs) std.map(function(job) job.name, jobs); local linux_containers = ['agent', 'agent-boringcrypto']; +local dev_linux_containers = ['agent']; // TODO(rfratto): add boringcrypto after figuring out what to do with it + +local linux_containers_dev_jobs = std.map(function(container) ( + pipelines.linux('Publish development Linux %s container' % container) { + trigger: { + ref: [ + 'refs/heads/main', + ], + }, + steps: [{ + // We only need to run this once per machine, so it's OK if it fails. It + // is also likely to fail when run in parallel on the same machine. + name: 'Configure QEMU', + image: build_image.linux, + failure: 'ignore', + volumes: [{ + name: 'docker', + path: '/var/run/docker.sock', + }], + commands: [ + 'docker run --rm --privileged multiarch/qemu-user-static --reset -p yes', + ], + }, { + name: 'Publish container', + image: build_image.linux, + volumes: [{ + name: 'docker', + path: '/var/run/docker.sock', + }], + environment: { + DOCKER_LOGIN: secrets.docker_login.fromSecret, + DOCKER_PASSWORD: secrets.docker_password.fromSecret, + GCR_CREDS: secrets.gcr_admin.fromSecret, + }, + commands: [ + 'mkdir -p $HOME/.docker', + 'printenv GCR_CREDS > $HOME/.docker/config.json', + 'docker login -u $DOCKER_LOGIN -p $DOCKER_PASSWORD', + + // Create a buildx worker for our cross platform builds. + 'docker buildx create --name multiarch-agent-%s-${DRONE_COMMIT_SHA} --driver docker-container --use' % container, + + 'DEVELOPMENT=1 ./tools/ci/docker-containers %s' % container, + + 'docker buildx rm multiarch-agent-%s-${DRONE_COMMIT_SHA}' % container, + ], + }], + volumes: [{ + name: 'docker', + host: { path: '/var/run/docker.sock' }, + }], + } +), dev_linux_containers); + + local linux_containers_jobs = std.map(function(container) ( pipelines.linux('Publish Linux %s container' % container) { trigger: { @@ -94,7 +149,10 @@ local windows_containers_jobs = std.map(function(container) ( // TODO(rfratto): The following are TEMPORARILY disabled as grafana/alloy gets // set up. Remove the line below in favor of the comment block to reenable the // publish jobs. -[] +// +// This file must be refactored in the future after development has fully +// shifted. +linux_containers_dev_jobs /* linux_containers_jobs + windows_containers_jobs + [ diff --git a/tools/ci/docker-containers b/tools/ci/docker-containers index e81de10834..76f5c1d19f 100755 --- a/tools/ci/docker-containers +++ b/tools/ci/docker-containers @@ -7,6 +7,14 @@ # from a Drone trigger. set -euxo pipefail +RELEASE_AGENT_IMAGE=grafana/agent +RELEASE_AGENTBORINGCRYPTO_IMAGE=grafana/agent-boringcrypto +DEVELOPMENT_AGENT_IMAGE=us-docker.pkg.dev/grafanalabs-dev/docker-alloy-dev +DEVELOPMENT_AGENTBORINGCRYPTO_IMAGE=us-docker.pkg.dev/grafanalabs-dev/docker-alloy-boringcrypto-dev + +DEFAULT_AGENT_IMAGE=${RELEASE_AGENT_IMAGE} +DEFAULT_AGENTBORINGCRYPTO_IMAGE=${RELEASE_AGENTBORINGCRYPTO_IMAGE} + # Environment variables used throughout this script. These must be set # otherwise bash will fail with an "unbound variable" error because of the `set # -u` flag on the above line. @@ -15,10 +23,15 @@ set -euxo pipefail # empty string. 
export TARGET_CONTAINER=${1:-} export DRONE_TAG=${DRONE_TAG:-} -export DRONE_BRANCH=${DRONE_BRANCH:-} +export DEVELOPMENT=${DEVELOPMENT:-} + +if [ -n "$DEVELOPMENT" ]; then + DEFAULT_AGENT_IMAGE=${DEVELOPMENT_AGENT_IMAGE} + DEFAULT_AGENTBORINGCRYPTO_IMAGE=${DEVELOPMENT_AGENTBORINGCRYPTO_IMAGE} +fi -export AGENT_IMAGE=grafana/agent -export AGENT_BORINGCRYPTO_IMAGE=grafana/agent-boringcrypto +export AGENT_IMAGE=${DEFAULT_AGENT_IMAGE} +export AGENT_BORINGCRYPTO_IMAGE=${DEFAULT_AGENTBORINGCRYPTO_IMAGE} # We need to determine what version to assign to built binaries. If containers # are being built from a Drone tag trigger, we force the version to come from the @@ -33,16 +46,13 @@ fi # We also need to know which "branch tag" to update. Branch tags are used as a # secondary tag for Docker containers. The branch tag is "latest" when being -# tagged from a stable release (i.e., not a release candidate) or the Drone -# branch when coming from a Drone job. +# tagged from a stable release (i.e., not a release candidate). # # If we're not running from drone, we'll set the branch tag to match the # version. This effectively acts as a no-op because it will tag the same Docker # image twice. if [ -n "$DRONE_TAG" ] && [[ "$DRONE_TAG" != *"-rc."* ]]; then BRANCH_TAG=latest -elif [ -n "$DRONE_BRANCH" ]; then - BRANCH_TAG=$DRONE_BRANCH else BRANCH_TAG=$VERSION fi From ff0072fbec8056c4ca69879d114b2702ba387bda Mon Sep 17 00:00:00 2001 From: Robert Fratto Date: Tue, 19 Mar 2024 14:30:18 -0400 Subject: [PATCH 027/136] tools/ci: sanitize Docker tags (#46) Sanitize Docker tags to remove characters used in semver that are not valid in Docker tags (primarily `+`) --- tools/ci/docker-containers | 25 +++++++++++++++---------- 1 file changed, 15 insertions(+), 10 deletions(-) diff --git a/tools/ci/docker-containers b/tools/ci/docker-containers index 76f5c1d19f..9a421eb8cb 100755 --- a/tools/ci/docker-containers +++ b/tools/ci/docker-containers @@ -44,6 +44,11 @@ else VERSION=$(./tools/image-tag) fi +# The TAG_VERSION is the version to use for the Docker tag. It is sanitized to +# force it to be a valid tag name; ./tools/image-tag can emit characters that +# are valid for semver but invalid for Docker tags, such as +. +TAG_VERSION=${VERSION//+/-} + # We also need to know which "branch tag" to update. Branch tags are used as a # secondary tag for Docker containers. The branch tag is "latest" when being # tagged from a stable release (i.e., not a release candidate). @@ -54,7 +59,7 @@ fi if [ -n "$DRONE_TAG" ] && [[ "$DRONE_TAG" != *"-rc."* ]]; then BRANCH_TAG=latest else - BRANCH_TAG=$VERSION + BRANCH_TAG=$TAG_VERSION fi # Build all of our images. @@ -68,21 +73,21 @@ case "$TARGET_CONTAINER" in --platform $BUILD_PLATFORMS \ --build-arg RELEASE_BUILD=1 \ --build-arg VERSION="$VERSION" \ - -t "$AGENT_IMAGE:$VERSION" \ + -t "$AGENT_IMAGE:$TAG_VERSION" \ -t "$AGENT_IMAGE:$BRANCH_TAG" \ -f cmd/grafana-agent/Dockerfile \ . 
;; agent-boringcrypto) - docker buildx build --push \ - --platform $BUILD_PLATFORMS_BORINGCRYPTO \ - --build-arg RELEASE_BUILD=1 \ - --build-arg VERSION="$VERSION" \ - --build-arg GOEXPERIMENT=boringcrypto \ - -t "$AGENT_BORINGCRYPTO_IMAGE:$VERSION" \ - -t "$AGENT_BORINGCRYPTO_IMAGE:$BRANCH_TAG" \ - -f cmd/grafana-agent/Dockerfile \ + docker buildx build --push \ + --platform $BUILD_PLATFORMS_BORINGCRYPTO \ + --build-arg RELEASE_BUILD=1 \ + --build-arg VERSION="$VERSION" \ + --build-arg GOEXPERIMENT=boringcrypto \ + -t "$AGENT_BORINGCRYPTO_IMAGE:$TAG_VERSION" \ + -t "$AGENT_BORINGCRYPTO_IMAGE:$BRANCH_TAG" \ + -f cmd/grafana-agent/Dockerfile \ . ;; From 492529d0702081dab263d4743fa66f1f973004f6 Mon Sep 17 00:00:00 2001 From: Robert Fratto Date: Tue, 19 Mar 2024 14:48:29 -0400 Subject: [PATCH 028/136] ci: fix name for GAR repo for development builds (#47) --- tools/ci/docker-containers | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/tools/ci/docker-containers b/tools/ci/docker-containers index 9a421eb8cb..44c14cb4c6 100755 --- a/tools/ci/docker-containers +++ b/tools/ci/docker-containers @@ -9,8 +9,8 @@ set -euxo pipefail RELEASE_AGENT_IMAGE=grafana/agent RELEASE_AGENTBORINGCRYPTO_IMAGE=grafana/agent-boringcrypto -DEVELOPMENT_AGENT_IMAGE=us-docker.pkg.dev/grafanalabs-dev/docker-alloy-dev -DEVELOPMENT_AGENTBORINGCRYPTO_IMAGE=us-docker.pkg.dev/grafanalabs-dev/docker-alloy-boringcrypto-dev +DEVELOPMENT_AGENT_IMAGE=us-docker.pkg.dev/grafanalabs-dev/docker-alloy-dev/alloy +DEVELOPMENT_AGENTBORINGCRYPTO_IMAGE=us-docker.pkg.dev/grafanalabs-dev/docker-alloy-dev/alloy-boringcrypto DEFAULT_AGENT_IMAGE=${RELEASE_AGENT_IMAGE} DEFAULT_AGENTBORINGCRYPTO_IMAGE=${RELEASE_AGENTBORINGCRYPTO_IMAGE} From 3f26b07574212603306da46634e7074085847938 Mon Sep 17 00:00:00 2001 From: Robert Fratto Date: Tue, 19 Mar 2024 16:37:48 -0400 Subject: [PATCH 029/136] ci: reenable some CI jobs (#48) Not all CI jobs need to be disabled at this point; this reenables as much as possible. 
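As an aside, the disable/reenable mechanism used throughout these changes is plain renaming: GitHub Actions only picks up workflow files ending in `.yml` or `.yaml` under `.github/workflows`. A hypothetical sketch, using one of the workflows touched by this series:

```bash
# Disable a workflow; Actions ignores files without a .yml/.yaml suffix.
git mv .github/workflows/helm-release.yml .github/workflows/helm-release.yml.disabled

# Reenable it later by restoring the original name.
git mv .github/workflows/helm-release.yml.disabled .github/workflows/helm-release.yml
```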
--- .../workflows/bump-formula-pr.yml.disabled | 29 +------------------ ...l.disabled => check-linux-build-image.yml} | 6 ++-- ...disabled => check-windows-build-image.yml} | 4 +-- ...-release.yml.disabled => helm-release.yml} | 6 ++-- 4 files changed, 10 insertions(+), 35 deletions(-) rename .github/workflows/{check-linux-build-image.yml.disabled => check-linux-build-image.yml} (88%) rename .github/workflows/{check-windows-build-image.yml.disabled => check-windows-build-image.yml} (81%) rename .github/workflows/{helm-release.yml.disabled => helm-release.yml} (98%) diff --git a/.github/workflows/bump-formula-pr.yml.disabled b/.github/workflows/bump-formula-pr.yml.disabled index 54d995b5df..c5e4606387 100644 --- a/.github/workflows/bump-formula-pr.yml.disabled +++ b/.github/workflows/bump-formula-pr.yml.disabled @@ -4,33 +4,6 @@ on: types: [released] jobs: - homebrew-core: - name: homebrew-core - runs-on: ubuntu-latest - steps: - - name: Get latest release - uses: rez0n/actions-github-release@main - id: latest_release - with: - token: ${{ secrets.GITHUB_TOKEN }} - repository: "${{ github.repository }}" - type: "stable" - - - name: Update Homebrew formula - if: 'steps.latest_release.outputs.release_id == github.event.release.id' - uses: dawidd6/action-homebrew-bump-formula@v3 - with: - # Required, custom GitHub access token with the 'public_repo' and 'workflow' scopes - token: ${{secrets.HOMEBREW_FORMULA_GH_TOKEN}} - # Formula name, required - formula: grafana-agent - # Optional, will be determined automatically - tag: ${{github.ref}} - # Optional, will be determined automatically - revision: ${{github.sha}} - # Optional, if don't want to check for already open PRs - force: false # true - homebrew-grafana: name: homebrew-grafana runs-on: ubuntu-latest @@ -52,7 +25,7 @@ jobs: # Optional, defaults to homebrew/core tap: grafana/grafana # Formula name, required - formula: grafana-agent-flow + formula: alloy # Optional, will be determined automatically tag: ${{github.ref}} # Optional, will be determined automatically diff --git a/.github/workflows/check-linux-build-image.yml.disabled b/.github/workflows/check-linux-build-image.yml similarity index 88% rename from .github/workflows/check-linux-build-image.yml.disabled rename to .github/workflows/check-linux-build-image.yml index 300d203d36..a064bffe75 100644 --- a/.github/workflows/check-linux-build-image.yml.disabled +++ b/.github/workflows/check-linux-build-image.yml @@ -23,7 +23,7 @@ jobs: with: context: ./build-image push: false - tags: grafana/agent-build-image:latest + tags: grafana/alloy-build-image:latest build-args: | GO_RUNTIME=golang:1.22.1-bullseye @@ -32,6 +32,6 @@ jobs: with: context: ./build-image push: false - tags: grafana/agent-build-image:latest + tags: grafana/alloy-build-image:latest build-args: | - GO_RUNTIME=mcr.microsoft.com/oss/go/microsoft/golang:1.22.1-bullseye \ No newline at end of file + GO_RUNTIME=mcr.microsoft.com/oss/go/microsoft/golang:1.22.1-bullseye diff --git a/.github/workflows/check-windows-build-image.yml.disabled b/.github/workflows/check-windows-build-image.yml similarity index 81% rename from .github/workflows/check-windows-build-image.yml.disabled rename to .github/workflows/check-windows-build-image.yml index 33de47eb98..67cefbca79 100644 --- a/.github/workflows/check-windows-build-image.yml.disabled +++ b/.github/workflows/check-windows-build-image.yml @@ -15,8 +15,8 @@ jobs: - name: Create test Windows build image uses: mr-smithers-excellent/docker-build-push@v6 with: - image: 
grafana/agent-build-image + image: grafana/alloy-build-image tags: latest registry: docker.io pushImage: false - dockerfile: ./build-image/windows/Dockerfile \ No newline at end of file + dockerfile: ./build-image/windows/Dockerfile diff --git a/.github/workflows/helm-release.yml.disabled b/.github/workflows/helm-release.yml similarity index 98% rename from .github/workflows/helm-release.yml.disabled rename to .github/workflows/helm-release.yml index 18e3744506..03ba440b33 100644 --- a/.github/workflows/helm-release.yml.disabled +++ b/.github/workflows/helm-release.yml @@ -1,7 +1,9 @@ name: Release Helm chart on: - push: - branches: [main] + # TODO(rfratto): uncomment these after launch + # push: + # branches: [main] + workflow_dispatch: {} env: CR_CONFIGFILE: "${{ github.workspace }}/source/operations/helm/cr.yaml" CT_CONFIGFILE: "${{ github.workspace }}/source/operations/helm/ct.yaml" From 1d609ea6e8ba603518c93e7fb58d8f3dc44ae37c Mon Sep 17 00:00:00 2001 From: Robert Fratto Date: Tue, 19 Mar 2024 16:43:21 -0400 Subject: [PATCH 030/136] .github: reenable dependabot config (#49) This re-enables the dependabot config in favor of enabling/disabling it via GitHub settings. --- .github/{dependabot.yml.disabled => dependabot.yml} | 0 1 file changed, 0 insertions(+), 0 deletions(-) rename .github/{dependabot.yml.disabled => dependabot.yml} (100%) diff --git a/.github/dependabot.yml.disabled b/.github/dependabot.yml similarity index 100% rename from .github/dependabot.yml.disabled rename to .github/dependabot.yml From 95f66e03eebbe5a43c32af6823d60709b0e585d7 Mon Sep 17 00:00:00 2001 From: Robert Fratto Date: Wed, 20 Mar 2024 12:50:44 -0400 Subject: [PATCH 031/136] docs: Update editor support (#54) * docs: link to new repos for editor support This also temporarily removes emacs support as it needs to be updated to support raw strings before we take it over. * docs: remove other tooling not currently ported for Alloy Other tooling, such as the treesitter grammar, is not officially maintained by the Alloy developers and so it's being temporarily removed from the documentation until an Alloy equivalent exists that we can link to. --- docs/sources/concepts/config-language/_index.md | 14 +++----------- 1 file changed, 3 insertions(+), 11 deletions(-) diff --git a/docs/sources/concepts/config-language/_index.md b/docs/sources/concepts/config-language/_index.md index 799b4586fc..2a6fca6ebd 100644 --- a/docs/sources/concepts/config-language/_index.md +++ b/docs/sources/concepts/config-language/_index.md @@ -109,18 +109,10 @@ The preceding example has two blocks: You can use one or all of the following tools to help you write configuration files in River. -* Experimental editor support for - * [vim](https://github.com/rfratto/vim-river) - * [VSCode](https://github.com/rfratto/vscode-river) - * [river-mode](https://github.com/jdbaldry/river-mode) for Emacs +* Editor support for: + * [VSCode](https://github.com/grafana/vscode-alloy) + * [Vim/Neovim](https://github.com/grafana/vim-alloy) * Code formatting using the [`agent fmt` command][fmt] -You can also start developing your own tooling using the {{< param "PRODUCT_ROOT_NAME" >}} repository as a go package or use the -[tree-sitter grammar][] with other programming languages. 
[RFC]: https://github.com/grafana/agent/blob/97a55d0d908b26dbb1126cc08b6dcc18f6e30087/docs/rfcs/0005-river.md -[vim]: https://github.com/rfratto/vim-river -[VSCode]: https://github.com/rfratto/vscode-river -[river-mode]: https://github.com/jdbaldry/river-mode -[tree-sitter grammar]: https://github.com/grafana/tree-sitter-river [fmt]: ../../reference/cli/fmt/ From 4272894283fe8c1b1a27582ea53444957804d50b Mon Sep 17 00:00:00 2001 From: Robert Fratto Date: Thu, 21 Mar 2024 11:14:32 -0400 Subject: [PATCH 032/136] all: remove dead code from static mode (#55) While grafana/alloy#15 removed static mode and operator from being reachable from the binary, all static mode code remained in the codebase to be removed later. This commit removes all unreachable code; what remains is constrained to config structure which is still used for config conversion. This commit does accidentally remove code that changed the order of static mode conversion components. This isn't a big deal, and the converted configs are still correct, so that code is being left out rather than being hunted down. (It is likely code that implements an interface but wasn't called directly.) --- Makefile | 12 +- .../testdata-v2/integrations_v2.river | 94 +-- .../testdata-v2/unsupported.river | 8 +- .../staticconvert/testdata/integrations.river | 54 +- .../staticconvert/testdata/integrations.yaml | 2 +- internal/static/agentctl/sync.go | 136 ---- internal/static/agentctl/sync_test.go | 137 ---- .../static/agentctl/testdata/agent-1.yaml | 12 - .../static/agentctl/testdata/agent-2.yaml | 12 - .../static/agentctl/testdata/agent-3.yaml | 12 - internal/static/agentproto/agent.pb.go | 416 ----- internal/static/agentproto/agent.proto | 20 - internal/static/agentproto/func.go | 21 - internal/static/agentproto/gen.go | 3 - internal/static/client/client.go | 179 ----- .../agent_management_remote_config_test.go | 364 ---------- internal/static/config/agentmanagement.go | 308 -------- .../config/agentmanagement_remote_config.go | 179 ----- .../static/config/agentmanagement_test.go | 460 ------------ internal/static/config/config.go | 130 ---- internal/static/config/config_test.go | 71 +- internal/static/config/integrations.go | 65 -- internal/static/config/integrations_test.go | 69 -- internal/static/config/remote_config.go | 145 ---- internal/static/config/remote_config_test.go | 155 ---- .../integrations/cadvisor/cadvisor_stub.go | 25 +- internal/static/integrations/manager.go | 418 ----------- internal/static/integrations/manager_test.go | 433 ------------ .../static/integrations/stub_integration.go | 27 - .../app_agent_receiver/app_agent_receiver.go | 180 +---- .../app_agent_receiver_test.go | 169 ----- .../v2/app_agent_receiver/handler.go | 126 ---- .../v2/app_agent_receiver/handler_test.go | 356 ---------- .../v2/app_agent_receiver/logs_exporter.go | 140 ---- .../app_agent_receiver/logs_exporter_test.go | 120 ---- .../v2/app_agent_receiver/payload.go | 420 ----------- .../v2/app_agent_receiver/payload_test.go | 142 ---- .../receiver_metrics_exporter.go | 61 -- .../receiver_metrics_test.go | 141 ---- .../v2/app_agent_receiver/sourcemaps.go | 357 ---------- .../v2/app_agent_receiver/sourcemaps_test.go | 495 ------------- .../v2/app_agent_receiver/testdata/foo.js | 39 -- .../v2/app_agent_receiver/testdata/foo.js.map | 1 - .../app_agent_receiver/testdata/payload.json | 330 --------- .../testdata/payload_2.json | 393 ----------- .../v2/app_agent_receiver/traces_exporter.go | 41 -- .../v2/app_agent_receiver/traces_test.go | 53 -- 
.../v2/app_agent_receiver/utils.go | 84 --- .../v2/app_agent_receiver/utils_test.go | 36 - .../integrations/v2/autoscrape/appender.go | 42 -- .../integrations/v2/autoscrape/autoscrape.go | 266 ------- .../v2/autoscrape/autoscrape_test.go | 118 ---- internal/static/integrations/v2/controller.go | 444 ------------ .../v2/controller_httpintegration_test.go | 259 ------- .../v2/controller_metricsintegration_test.go | 184 ----- .../static/integrations/v2/controller_test.go | 286 -------- .../v2/controller_updateintegration_test.go | 79 --- .../v2/eventhandler/eventhandler.go | 472 ------------- .../v2/eventhandler/eventhandler_test.go | 54 -- .../v2/eventhandler/integration.go | 17 +- .../eventhandler/testdata/eventhandler.cache | 1 - .../static/integrations/v2/integrations.go | 11 - internal/static/integrations/v2/subsystem.go | 180 ----- .../static/integrations/v2/targetgroup.go | 28 - internal/static/integrations/v2/workers.go | 122 ---- internal/static/logs/http.go | 84 --- internal/static/logs/http_test.go | 177 ----- internal/static/logs/logs.go | 222 ------ internal/static/logs/logs_test.go | 206 ------ internal/static/metrics/agent.go | 268 +------ internal/static/metrics/agent_test.go | 226 ------ internal/static/metrics/cleaner.go | 271 ------- internal/static/metrics/cleaner_test.go | 146 ---- .../static/metrics/cluster/client/client.go | 50 -- internal/static/metrics/cluster/cluster.go | 179 ----- .../static/metrics/cluster/config_watcher.go | 340 --------- .../metrics/cluster/config_watcher_test.go | 267 ------- .../static/metrics/cluster/configapi/types.go | 73 -- internal/static/metrics/cluster/node.go | 381 ---------- internal/static/metrics/cluster/node_test.go | 223 ------ internal/static/metrics/cluster/validation.go | 150 ---- .../static/metrics/cluster/validation_test.go | 118 ---- internal/static/metrics/http.go | 166 ----- internal/static/metrics/http_test.go | 142 ---- .../metrics/instance/configstore/api.go | 268 ------- .../metrics/instance/configstore/api_test.go | 408 ----------- .../metrics/instance/configstore/codec.go | 65 -- .../instance/configstore/codec_test.go | 41 -- .../metrics/instance/configstore/errors.go | 27 - .../metrics/instance/configstore/mock.go | 74 -- .../metrics/instance/configstore/remote.go | 471 ------------- .../instance/configstore/remote_test.go | 271 ------- .../metrics/instance/configstore/store.go | 49 -- .../metrics/instance/configstore/unique.go | 35 - internal/static/metrics/instance/errors.go | 44 -- .../static/metrics/instance/group_manager.go | 358 ---------- .../metrics/instance/group_manager_test.go | 446 ------------ .../static/metrics/instance/host_filter.go | 238 ------- .../metrics/instance/host_filter_test.go | 201 ------ internal/static/metrics/instance/instance.go | 661 ------------------ .../instance/instance_integration_test.go | 281 -------- .../static/metrics/instance/instance_test.go | 250 ------- internal/static/metrics/instance/manager.go | 379 ---------- .../static/metrics/instance/manager_test.go | 158 ----- .../static/metrics/instance/modal_manager.go | 178 ----- internal/static/metrics/instance/noop.go | 49 -- internal/static/server/logger.go | 118 ---- internal/static/server/logger_test.go | 58 -- internal/static/server/logger_windows.go | 110 --- internal/static/server/server.go | 429 ------------ internal/static/server/server_test.go | 193 ----- internal/static/server/signal_context.go | 41 -- internal/static/server/tls.go | 152 ---- internal/static/server/tls_certstore_stub.go | 6 - 
.../static/server/tls_certstore_windows.go | 60 -- internal/static/server/tls_test.go | 68 -- .../static/supportbundle/supportbundle.go | 235 ------- .../automaticloggingprocessor.go | 209 +----- .../automaticloggingprocessor_test.go | 238 ------- internal/static/traces/instance.go | 194 ----- .../traces/remotewriteexporter/exporter.go | 296 +------- .../remotewriteexporter/exporter_test.go | 183 ----- internal/static/traces/traces.go | 111 --- internal/static/traces/traces_test.go | 193 ----- internal/util/log/log.go | 114 --- internal/util/otel_feature_gate.go | 55 +- internal/util/otel_feature_gate_test.go | 4 - internal/util/sanitize.go | 10 - internal/util/structwalk/structwalk.go | 77 -- internal/util/structwalk/structwalk_test.go | 63 -- internal/util/subset/subset.go | 120 ---- internal/util/subset/subset_test.go | 92 --- internal/util/unregisterer.go | 63 -- 133 files changed, 160 insertions(+), 22187 deletions(-) delete mode 100644 internal/static/agentctl/sync.go delete mode 100644 internal/static/agentctl/sync_test.go delete mode 100644 internal/static/agentctl/testdata/agent-1.yaml delete mode 100644 internal/static/agentctl/testdata/agent-2.yaml delete mode 100644 internal/static/agentctl/testdata/agent-3.yaml delete mode 100644 internal/static/agentproto/agent.pb.go delete mode 100644 internal/static/agentproto/agent.proto delete mode 100644 internal/static/agentproto/func.go delete mode 100644 internal/static/agentproto/gen.go delete mode 100644 internal/static/client/client.go delete mode 100644 internal/static/config/agent_management_remote_config_test.go delete mode 100644 internal/static/config/agentmanagement_remote_config.go delete mode 100644 internal/static/config/remote_config.go delete mode 100644 internal/static/config/remote_config_test.go delete mode 100644 internal/static/integrations/manager_test.go delete mode 100644 internal/static/integrations/stub_integration.go delete mode 100644 internal/static/integrations/v2/app_agent_receiver/app_agent_receiver_test.go delete mode 100644 internal/static/integrations/v2/app_agent_receiver/handler.go delete mode 100644 internal/static/integrations/v2/app_agent_receiver/handler_test.go delete mode 100644 internal/static/integrations/v2/app_agent_receiver/logs_exporter.go delete mode 100644 internal/static/integrations/v2/app_agent_receiver/logs_exporter_test.go delete mode 100644 internal/static/integrations/v2/app_agent_receiver/payload.go delete mode 100644 internal/static/integrations/v2/app_agent_receiver/payload_test.go delete mode 100644 internal/static/integrations/v2/app_agent_receiver/receiver_metrics_exporter.go delete mode 100644 internal/static/integrations/v2/app_agent_receiver/receiver_metrics_test.go delete mode 100644 internal/static/integrations/v2/app_agent_receiver/sourcemaps.go delete mode 100644 internal/static/integrations/v2/app_agent_receiver/sourcemaps_test.go delete mode 100644 internal/static/integrations/v2/app_agent_receiver/testdata/foo.js delete mode 100644 internal/static/integrations/v2/app_agent_receiver/testdata/foo.js.map delete mode 100644 internal/static/integrations/v2/app_agent_receiver/testdata/payload.json delete mode 100644 internal/static/integrations/v2/app_agent_receiver/testdata/payload_2.json delete mode 100644 internal/static/integrations/v2/app_agent_receiver/traces_exporter.go delete mode 100644 internal/static/integrations/v2/app_agent_receiver/traces_test.go delete mode 100644 internal/static/integrations/v2/app_agent_receiver/utils.go delete mode 100644 
internal/static/integrations/v2/app_agent_receiver/utils_test.go delete mode 100644 internal/static/integrations/v2/autoscrape/appender.go delete mode 100644 internal/static/integrations/v2/autoscrape/autoscrape_test.go delete mode 100644 internal/static/integrations/v2/controller.go delete mode 100644 internal/static/integrations/v2/controller_httpintegration_test.go delete mode 100644 internal/static/integrations/v2/controller_metricsintegration_test.go delete mode 100644 internal/static/integrations/v2/controller_test.go delete mode 100644 internal/static/integrations/v2/controller_updateintegration_test.go delete mode 100644 internal/static/integrations/v2/eventhandler/eventhandler.go delete mode 100644 internal/static/integrations/v2/eventhandler/eventhandler_test.go delete mode 100644 internal/static/integrations/v2/eventhandler/testdata/eventhandler.cache delete mode 100644 internal/static/integrations/v2/targetgroup.go delete mode 100644 internal/static/integrations/v2/workers.go delete mode 100644 internal/static/logs/http.go delete mode 100644 internal/static/logs/http_test.go delete mode 100644 internal/static/logs/logs_test.go delete mode 100644 internal/static/metrics/cleaner.go delete mode 100644 internal/static/metrics/cleaner_test.go delete mode 100644 internal/static/metrics/cluster/cluster.go delete mode 100644 internal/static/metrics/cluster/config_watcher.go delete mode 100644 internal/static/metrics/cluster/config_watcher_test.go delete mode 100644 internal/static/metrics/cluster/configapi/types.go delete mode 100644 internal/static/metrics/cluster/node.go delete mode 100644 internal/static/metrics/cluster/node_test.go delete mode 100644 internal/static/metrics/cluster/validation.go delete mode 100644 internal/static/metrics/cluster/validation_test.go delete mode 100644 internal/static/metrics/http.go delete mode 100644 internal/static/metrics/http_test.go delete mode 100644 internal/static/metrics/instance/configstore/api.go delete mode 100644 internal/static/metrics/instance/configstore/api_test.go delete mode 100644 internal/static/metrics/instance/configstore/codec.go delete mode 100644 internal/static/metrics/instance/configstore/codec_test.go delete mode 100644 internal/static/metrics/instance/configstore/errors.go delete mode 100644 internal/static/metrics/instance/configstore/mock.go delete mode 100644 internal/static/metrics/instance/configstore/remote.go delete mode 100644 internal/static/metrics/instance/configstore/remote_test.go delete mode 100644 internal/static/metrics/instance/configstore/store.go delete mode 100644 internal/static/metrics/instance/configstore/unique.go delete mode 100644 internal/static/metrics/instance/errors.go delete mode 100644 internal/static/metrics/instance/group_manager.go delete mode 100644 internal/static/metrics/instance/group_manager_test.go delete mode 100644 internal/static/metrics/instance/host_filter.go delete mode 100644 internal/static/metrics/instance/host_filter_test.go delete mode 100644 internal/static/metrics/instance/instance_integration_test.go delete mode 100644 internal/static/metrics/instance/manager.go delete mode 100644 internal/static/metrics/instance/manager_test.go delete mode 100644 internal/static/metrics/instance/noop.go delete mode 100644 internal/static/server/logger.go delete mode 100644 internal/static/server/logger_test.go delete mode 100644 internal/static/server/logger_windows.go delete mode 100644 internal/static/server/server_test.go delete mode 100644 
internal/static/server/signal_context.go delete mode 100644 internal/static/server/tls_test.go delete mode 100644 internal/static/supportbundle/supportbundle.go delete mode 100644 internal/static/traces/instance.go delete mode 100644 internal/static/traces/remotewriteexporter/exporter_test.go delete mode 100644 internal/static/traces/traces.go delete mode 100644 internal/static/traces/traces_test.go delete mode 100644 internal/util/sanitize.go delete mode 100644 internal/util/structwalk/structwalk.go delete mode 100644 internal/util/structwalk/structwalk_test.go delete mode 100644 internal/util/subset/subset.go delete mode 100644 internal/util/subset/subset_test.go delete mode 100644 internal/util/unregisterer.go diff --git a/Makefile b/Makefile index 47ce69da76..f844732d9a 100644 --- a/Makefile +++ b/Makefile @@ -46,7 +46,6 @@ ## generate-drone Generate the Drone YAML from Jsonnet. ## generate-helm-docs Generate Helm chart documentation. ## generate-helm-tests Generate Helm chart tests. -## generate-protos Generate protobuf files. ## generate-ui Generate the UI assets. ## generate-versioned-files Generate versioned files. ## @@ -219,8 +218,8 @@ agent-boringcrypto-image: # Targets for generating assets # -.PHONY: generate generate-drone generate-helm-docs generate-helm-tests generate-protos generate-ui generate-versioned-files -generate: generate-drone generate-helm-docs generate-helm-tests generate-protos generate-ui generate-versioned-files generate-docs +.PHONY: generate generate-drone generate-helm-docs generate-helm-tests generate-ui generate-versioned-files +generate: generate-drone generate-helm-docs generate-helm-tests generate-ui generate-versioned-files generate-docs generate-drone: drone jsonnet -V BUILD_IMAGE_VERSION=$(BUILD_IMAGE_VERSION) --stream --format --source .drone/drone.jsonnet --target .drone/drone.yml @@ -239,13 +238,6 @@ else bash ./operations/helm/scripts/rebuild-tests.sh endif -generate-protos: -ifeq ($(USE_CONTAINER),1) - $(RERUN_IN_CONTAINER) -else - go generate ./internal/static/agentproto/ -endif - generate-ui: ifeq ($(USE_CONTAINER),1) $(RERUN_IN_CONTAINER) diff --git a/internal/converter/internal/staticconvert/testdata-v2/integrations_v2.river b/internal/converter/internal/staticconvert/testdata-v2/integrations_v2.river index f7c22ade64..b8a1b67208 100644 --- a/internal/converter/internal/staticconvert/testdata-v2/integrations_v2.river +++ b/internal/converter/internal/staticconvert/testdata-v2/integrations_v2.river @@ -21,6 +21,32 @@ logging { format = "json" } +faro.receiver "integrations_app_agent_receiver" { + extra_log_labels = {} + + server { + listen_address = "localhost" + listen_port = 55678 + max_allowed_payload_size = "4MiB786KiB832B" + + rate_limiting { + enabled = true + rate = 100 + burst_size = 50 + } + } + + sourcemaps { + download_from_origins = ["*"] + download_timeout = "1s" + } + + output { + logs = [loki.write.logs_log_config.receiver] + traces = [] + } +} + loki.relabel "integrations_eventhandler" { forward_to = [loki.write.logs_log_config.receiver] @@ -237,27 +263,6 @@ prometheus.scrape "integrations_elasticsearch" { job_name = "integrations/elasticsearch" } -prometheus.exporter.gcp "integrations_gcp_exporter" { - project_ids = [""] - metrics_prefixes = ["loadbalancing.googleapis.com/https/request_bytes_count", "loadbalancing.googleapis.com/https/total_latencies"] - extra_filters = ["loadbalancing.googleapis.com:resource.labels.backend_target_name=\"sample-value\""] -} - -discovery.relabel "integrations_gcp" { - targets = 
prometheus.exporter.gcp.integrations_gcp_exporter.targets - - rule { - target_label = "job" - replacement = "integrations/gcp" - } -} - -prometheus.scrape "integrations_gcp" { - targets = discovery.relabel.integrations_gcp.output - forward_to = [prometheus.remote_write.metrics_default.receiver] - job_name = "integrations/gcp" -} - prometheus.exporter.github "integrations_github_exporter" { repositories = ["grafana/agent", "grafana/agent-modules"] api_token = "ABCDEFGH-1234-ABCD-1234-ABCDEFGHIJKL" @@ -680,32 +685,6 @@ prometheus.scrape "integrations_apache2" { job_name = "integrations/apache2" } -faro.receiver "integrations_app_agent_receiver" { - extra_log_labels = {} - - server { - listen_address = "localhost" - listen_port = 55678 - max_allowed_payload_size = "4MiB786KiB832B" - - rate_limiting { - enabled = true - rate = 100 - burst_size = 50 - } - } - - sourcemaps { - download_from_origins = ["*"] - download_timeout = "1s" - } - - output { - logs = [loki.write.logs_log_config.receiver] - traces = [] - } -} - prometheus.exporter.blackbox "integrations_blackbox" { config = "modules:\n http_2xx:\n prober: http\n timeout: 5s\n http:\n method: POST\n headers:\n Content-Type: application/json\n body: '{}'\n preferred_ip_protocol: ip4\n" @@ -762,3 +741,24 @@ prometheus.scrape "integrations_snmp" { forward_to = [prometheus.remote_write.metrics_default.receiver] job_name = "integrations/snmp" } + +prometheus.exporter.gcp "integrations_gcp_exporter" { + project_ids = [""] + metrics_prefixes = ["loadbalancing.googleapis.com/https/request_bytes_count", "loadbalancing.googleapis.com/https/total_latencies"] + extra_filters = ["loadbalancing.googleapis.com:resource.labels.backend_target_name=\"sample-value\""] +} + +discovery.relabel "integrations_gcp" { + targets = prometheus.exporter.gcp.integrations_gcp_exporter.targets + + rule { + target_label = "job" + replacement = "integrations/gcp" + } +} + +prometheus.scrape "integrations_gcp" { + targets = discovery.relabel.integrations_gcp.output + forward_to = [prometheus.remote_write.metrics_default.receiver] + job_name = "integrations/gcp" +} diff --git a/internal/converter/internal/staticconvert/testdata-v2/unsupported.river b/internal/converter/internal/staticconvert/testdata-v2/unsupported.river index c9585a88c5..c854f1bd9d 100644 --- a/internal/converter/internal/staticconvert/testdata-v2/unsupported.river +++ b/internal/converter/internal/staticconvert/testdata-v2/unsupported.river @@ -16,10 +16,6 @@ loki.write "logs_log_config" { external_labels = {} } -loki.source.kubernetes_events "integrations_eventhandler" { - forward_to = [loki.write.logs_log_config.receiver] -} - faro.receiver "integrations_app_agent_receiver" { extra_log_labels = {} @@ -45,3 +41,7 @@ faro.receiver "integrations_app_agent_receiver" { traces = [] } } + +loki.source.kubernetes_events "integrations_eventhandler" { + forward_to = [loki.write.logs_log_config.receiver] +} diff --git a/internal/converter/internal/staticconvert/testdata/integrations.river b/internal/converter/internal/staticconvert/testdata/integrations.river index 201f5e99e1..0c7bdaee61 100644 --- a/internal/converter/internal/staticconvert/testdata/integrations.river +++ b/internal/converter/internal/staticconvert/testdata/integrations.river @@ -324,33 +324,6 @@ prometheus.scrape "integrations_elasticsearch_exporter" { } } -prometheus.exporter.gcp "integrations_gcp_exporter" { - project_ids = [""] - metrics_prefixes = ["loadbalancing.googleapis.com/https/request_bytes_count", 
"loadbalancing.googleapis.com/https/total_latencies"] - extra_filters = ["loadbalancing.googleapis.com:resource.labels.backend_target_name=\"sample-value\""] -} - -discovery.relabel "integrations_gcp_exporter" { - targets = prometheus.exporter.gcp.integrations_gcp_exporter.targets - - rule { - target_label = "job" - replacement = "integrations/gcp_exporter" - } -} - -prometheus.scrape "integrations_gcp_exporter" { - targets = discovery.relabel.integrations_gcp_exporter.output - forward_to = [prometheus.remote_write.integrations.receiver] - job_name = "integrations/gcp_exporter" - - tls_config { - ca_file = "/something7.cert" - cert_file = "/something8.cert" - key_file = "/something9.cert" - } -} - prometheus.exporter.github "integrations_github_exporter" { repositories = ["grafana/agent", "grafana/agent-modules"] api_token = "ABCDEFGH-1234-ABCD-1234-ABCDEFGHIJKL" @@ -780,3 +753,30 @@ prometheus.scrape "integrations_statsd_exporter" { key_file = "/something9.cert" } } + +prometheus.exporter.gcp "integrations_gcp_exporter" { + project_ids = [""] + metrics_prefixes = ["loadbalancing.googleapis.com/https/request_bytes_count", "loadbalancing.googleapis.com/https/total_latencies"] + extra_filters = ["loadbalancing.googleapis.com:resource.labels.backend_target_name=\"sample-value\""] +} + +discovery.relabel "integrations_gcp_exporter" { + targets = prometheus.exporter.gcp.integrations_gcp_exporter.targets + + rule { + target_label = "job" + replacement = "integrations/gcp_exporter" + } +} + +prometheus.scrape "integrations_gcp_exporter" { + targets = discovery.relabel.integrations_gcp_exporter.output + forward_to = [prometheus.remote_write.integrations.receiver] + job_name = "integrations/gcp_exporter" + + tls_config { + ca_file = "/something7.cert" + cert_file = "/something8.cert" + key_file = "/something9.cert" + } +} diff --git a/internal/converter/internal/staticconvert/testdata/integrations.yaml b/internal/converter/internal/staticconvert/testdata/integrations.yaml index ced11cf6d9..337eaa01cf 100644 --- a/internal/converter/internal/staticconvert/testdata/integrations.yaml +++ b/internal/converter/internal/staticconvert/testdata/integrations.yaml @@ -148,7 +148,7 @@ integrations: scrape_integration: true postgres_exporter: enabled: true - data_source_names: + data_source_names: - postgres://postgres:password@localhost:5432/postgres?sslmode=disable relabel_configs: - source_labels: [__address__] diff --git a/internal/static/agentctl/sync.go b/internal/static/agentctl/sync.go deleted file mode 100644 index 3246405921..0000000000 --- a/internal/static/agentctl/sync.go +++ /dev/null @@ -1,136 +0,0 @@ -package agentctl - -import ( - "context" - "errors" - "fmt" - "os" - "path/filepath" - "strings" - - "github.com/go-kit/log" - "github.com/go-kit/log/level" - "github.com/grafana/agent/internal/static/client" - "github.com/grafana/agent/internal/static/metrics/instance" -) - -// ConfigSync loads YAML files from a directory and syncs them to the -// provided PrometheusClient API. All YAML files will be synced and -// must be valid. -// -// The base name of the YAML file (i.e., without the file extension) -// is used as the config name. -// -// ConfigSync will completely overwrite the set of active configs -// present in the provided PrometheusClient - configs present in the -// API but not in the directory will be deleted. 
-func ConfigSync(logger log.Logger, cli client.PrometheusClient, dir string, dryRun bool) error { - if logger == nil { - logger = log.NewNopLogger() - } - - ctx := context.Background() - cfgs, err := ConfigsFromDirectory(dir) - if err != nil { - return err - } - - if dryRun { - level.Info(logger).Log("msg", "config files validated successfully") - return nil - } - - uploaded := make(map[string]struct{}, len(cfgs)) - var hadErrors bool - - for _, cfg := range cfgs { - level.Info(logger).Log("msg", "uploading config", "name", cfg.Name) - err := cli.PutConfiguration(ctx, cfg.Name, cfg) - if err != nil { - level.Error(logger).Log("msg", "failed to upload config", "name", cfg.Name, "err", err) - hadErrors = true - } - uploaded[cfg.Name] = struct{}{} - } - - existing, err := cli.ListConfigs(ctx) - if err != nil { - return fmt.Errorf("could not list configs: %w", err) - } - - // Delete configs from the existing API list that we didn't upload. - for _, existing := range existing.Configs { - if _, existsLocally := uploaded[existing]; !existsLocally { - level.Info(logger).Log("msg", "deleting config", "name", existing) - err := cli.DeleteConfiguration(ctx, existing) - if err != nil { - level.Error(logger).Log("msg", "failed to delete outdated config", "name", existing, "err", err) - hadErrors = true - } - } - } - - if hadErrors { - return errors.New("one or more configurations failed to be modified; check the logs for more details") - } - - return nil -} - -// ConfigsFromDirectory parses all YAML files from a directory and -// loads each as an instance.Config. -func ConfigsFromDirectory(dir string) ([]*instance.Config, error) { - var files []string - err := filepath.Walk(dir, func(path string, info os.FileInfo, err error) error { - if err != nil { - return err - } - if info.IsDir() { - if dir == path { - return nil - } - return filepath.SkipDir - } - - if strings.HasSuffix(path, ".yaml") || strings.HasSuffix(path, ".yml") { - files = append(files, path) - } - return nil - }) - if err != nil { - return nil, err - } - - var configs []*instance.Config - for _, file := range files { - cfg, err := configFromFile(file) - if err != nil { - return nil, err - } - configs = append(configs, cfg) - } - - return configs, nil -} - -func configFromFile(path string) (*instance.Config, error) { - var ( - fileName = filepath.Base(path) - configName = strings.TrimSuffix(fileName, filepath.Ext(fileName)) - ) - - f, err := os.Open(path) - if f != nil { - defer f.Close() - } - if err != nil { - return nil, err - } - - cfg, err := instance.UnmarshalConfig(f) - if err != nil { - return nil, err - } - cfg.Name = configName - return cfg, nil -} diff --git a/internal/static/agentctl/sync_test.go b/internal/static/agentctl/sync_test.go deleted file mode 100644 index 8cd490256c..0000000000 --- a/internal/static/agentctl/sync_test.go +++ /dev/null @@ -1,137 +0,0 @@ -package agentctl - -import ( - "context" - "errors" - "testing" - - "github.com/grafana/agent/internal/static/metrics/cluster/configapi" - "github.com/grafana/agent/internal/static/metrics/instance" - "github.com/stretchr/testify/require" -) - -func TestConfigSync_EmptyStore(t *testing.T) { - cli := &mockFuncPromClient{} - cli.ListConfigsFunc = func(_ context.Context) (*configapi.ListConfigurationsResponse, error) { - return &configapi.ListConfigurationsResponse{}, nil - } - - var putConfigs []string - cli.PutConfigurationFunc = func(_ context.Context, name string, _ *instance.Config) error { - putConfigs = append(putConfigs, name) - return nil - } - - err := 
ConfigSync(nil, cli, "./testdata", false) - require.NoError(t, err) - - expect := []string{ - "agent-1", - "agent-2", - "agent-3", - } - require.Equal(t, expect, putConfigs) -} - -func TestConfigSync_PrepopulatedStore(t *testing.T) { - cli := &mockFuncPromClient{} - cli.ListConfigsFunc = func(_ context.Context) (*configapi.ListConfigurationsResponse, error) { - return &configapi.ListConfigurationsResponse{ - Configs: []string{"delete-a", "agent-1", "delete-b", "delete-c"}, - }, nil - } - - var putConfigs []string - cli.PutConfigurationFunc = func(_ context.Context, name string, _ *instance.Config) error { - putConfigs = append(putConfigs, name) - return nil - } - - var deletedConfigs []string - cli.DeleteConfigurationFunc = func(_ context.Context, name string) error { - deletedConfigs = append(deletedConfigs, name) - return nil - } - - err := ConfigSync(nil, cli, "./testdata", false) - require.NoError(t, err) - - expectUpdated := []string{ - "agent-1", - "agent-2", - "agent-3", - } - require.Equal(t, expectUpdated, putConfigs) - - expectDeleted := []string{ - "delete-a", - "delete-b", - "delete-c", - } - require.Equal(t, expectDeleted, deletedConfigs) -} - -func TestConfigSync_DryRun(t *testing.T) { - cli := &mockFuncPromClient{} - cli.ListConfigsFunc = func(_ context.Context) (*configapi.ListConfigurationsResponse, error) { - return &configapi.ListConfigurationsResponse{ - Configs: []string{"delete-a", "agent-1", "delete-b", "delete-c"}, - }, nil - } - - cli.PutConfigurationFunc = func(_ context.Context, name string, _ *instance.Config) error { - t.FailNow() - return nil - } - - cli.DeleteConfigurationFunc = func(_ context.Context, name string) error { - t.FailNow() - return nil - } - - err := ConfigSync(nil, cli, "./testdata", true) - require.NoError(t, err) -} - -type mockFuncPromClient struct { - InstancesFunc func(ctx context.Context) ([]string, error) - ListConfigsFunc func(ctx context.Context) (*configapi.ListConfigurationsResponse, error) - GetConfigurationFunc func(ctx context.Context, name string) (*instance.Config, error) - PutConfigurationFunc func(ctx context.Context, name string, cfg *instance.Config) error - DeleteConfigurationFunc func(ctx context.Context, name string) error -} - -func (m mockFuncPromClient) Instances(ctx context.Context) ([]string, error) { - if m.InstancesFunc != nil { - return m.InstancesFunc(ctx) - } - return nil, errors.New("not implemented") -} - -func (m mockFuncPromClient) ListConfigs(ctx context.Context) (*configapi.ListConfigurationsResponse, error) { - if m.ListConfigsFunc != nil { - return m.ListConfigsFunc(ctx) - } - return nil, errors.New("not implemented") -} - -func (m mockFuncPromClient) GetConfiguration(ctx context.Context, name string) (*instance.Config, error) { - if m.GetConfigurationFunc != nil { - return m.GetConfigurationFunc(ctx, name) - } - return nil, errors.New("not implemented") -} - -func (m mockFuncPromClient) PutConfiguration(ctx context.Context, name string, cfg *instance.Config) error { - if m.PutConfigurationFunc != nil { - return m.PutConfigurationFunc(ctx, name, cfg) - } - return errors.New("not implemented") -} - -func (m mockFuncPromClient) DeleteConfiguration(ctx context.Context, name string) error { - if m.DeleteConfigurationFunc != nil { - return m.DeleteConfigurationFunc(ctx, name) - } - return errors.New("not implemented") -} diff --git a/internal/static/agentctl/testdata/agent-1.yaml b/internal/static/agentctl/testdata/agent-1.yaml deleted file mode 100644 index d62ce80496..0000000000 --- 
a/internal/static/agentctl/testdata/agent-1.yaml +++ /dev/null @@ -1,12 +0,0 @@ -host_filter: false -write_stale_on_shutdown: true -scrape_configs: - - job_name: agent-1 - static_configs: - - targets: ['agent-1:12345'] - labels: - cluster: 'testdata' - origin: 'agent' - container: 'agent-1' -remote_write: - - url: http://cortex:9009/api/prom/push diff --git a/internal/static/agentctl/testdata/agent-2.yaml b/internal/static/agentctl/testdata/agent-2.yaml deleted file mode 100644 index d38252fb52..0000000000 --- a/internal/static/agentctl/testdata/agent-2.yaml +++ /dev/null @@ -1,12 +0,0 @@ -host_filter: false -write_stale_on_shutdown: true -scrape_configs: - - job_name: agent-2 - static_configs: - - targets: ['agent-2:12345'] - labels: - cluster: 'testdata' - origin: 'agent' - container: 'agent-2' -remote_write: - - url: http://cortex:9009/api/prom/push diff --git a/internal/static/agentctl/testdata/agent-3.yaml b/internal/static/agentctl/testdata/agent-3.yaml deleted file mode 100644 index 9312e87078..0000000000 --- a/internal/static/agentctl/testdata/agent-3.yaml +++ /dev/null @@ -1,12 +0,0 @@ -host_filter: false -write_stale_on_shutdown: true -scrape_configs: - - job_name: agent-3 - static_configs: - - targets: ['agent-3:12345'] - labels: - cluster: 'testdata' - origin: 'agent' - container: 'agent-3' -remote_write: - - url: http://cortex:9009/api/prom/push diff --git a/internal/static/agentproto/agent.pb.go b/internal/static/agentproto/agent.pb.go deleted file mode 100644 index 2f5c8bcfff..0000000000 --- a/internal/static/agentproto/agent.pb.go +++ /dev/null @@ -1,416 +0,0 @@ -// Code generated by protoc-gen-gogo. DO NOT EDIT. -// source: agent.proto - -package agentproto - -import ( - context "context" - fmt "fmt" - proto "github.com/gogo/protobuf/proto" - grpc "google.golang.org/grpc" - codes "google.golang.org/grpc/codes" - status "google.golang.org/grpc/status" - emptypb "google.golang.org/protobuf/types/known/emptypb" - io "io" - math "math" - math_bits "math/bits" - reflect "reflect" - strings "strings" -) - -// Reference imports to suppress errors if they are not otherwise used. -var _ = proto.Marshal -var _ = fmt.Errorf -var _ = math.Inf - -// This is a compile-time assertion to ensure that this generated file -// is compatible with the proto package it is being compiled against. -// A compilation error at this line likely means your copy of the -// proto package needs to be updated. 
-const _ = proto.GoGoProtoPackageIsVersion3 // please upgrade the proto package - -type ReshardRequest struct { -} - -func (m *ReshardRequest) Reset() { *m = ReshardRequest{} } -func (*ReshardRequest) ProtoMessage() {} -func (*ReshardRequest) Descriptor() ([]byte, []int) { - return fileDescriptor_56ede974c0020f77, []int{0} -} -func (m *ReshardRequest) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *ReshardRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - if deterministic { - return xxx_messageInfo_ReshardRequest.Marshal(b, m, deterministic) - } else { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil - } -} -func (m *ReshardRequest) XXX_Merge(src proto.Message) { - xxx_messageInfo_ReshardRequest.Merge(m, src) -} -func (m *ReshardRequest) XXX_Size() int { - return m.Size() -} -func (m *ReshardRequest) XXX_DiscardUnknown() { - xxx_messageInfo_ReshardRequest.DiscardUnknown(m) -} - -var xxx_messageInfo_ReshardRequest proto.InternalMessageInfo - -func init() { - proto.RegisterType((*ReshardRequest)(nil), "agentproto.ReshardRequest") -} - -func init() { proto.RegisterFile("agent.proto", fileDescriptor_56ede974c0020f77) } - -var fileDescriptor_56ede974c0020f77 = []byte{ - // 220 bytes of a gzipped FileDescriptorProto - 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xe2, 0xe2, 0x4e, 0x4c, 0x4f, 0xcd, - 0x2b, 0xd1, 0x2b, 0x28, 0xca, 0x2f, 0xc9, 0x17, 0xe2, 0x02, 0x73, 0xc0, 0x6c, 0x29, 0xe9, 0xf4, - 0xfc, 0xfc, 0xf4, 0x9c, 0x54, 0x7d, 0x30, 0x2f, 0xa9, 0x34, 0x4d, 0x3f, 0x35, 0xb7, 0xa0, 0xa4, - 0x12, 0xa2, 0x50, 0x49, 0x80, 0x8b, 0x2f, 0x28, 0xb5, 0x38, 0x23, 0xb1, 0x28, 0x25, 0x28, 0xb5, - 0xb0, 0x34, 0xb5, 0xb8, 0xc4, 0x28, 0x80, 0x8b, 0x3f, 0x38, 0xb9, 0x28, 0xb1, 0x20, 0x33, 0x2f, - 0x3d, 0x38, 0xb5, 0xa8, 0x2c, 0x33, 0x39, 0x55, 0xc8, 0x96, 0x8b, 0x1d, 0xaa, 0x48, 0x48, 0x4a, - 0x0f, 0x61, 0xb2, 0x1e, 0xaa, 0x4e, 0x29, 0x31, 0x3d, 0x88, 0x4d, 0x7a, 0x30, 0x9b, 0xf4, 0x5c, - 0x41, 0x36, 0x39, 0xc5, 0x5e, 0x78, 0x28, 0xc7, 0x70, 0xe3, 0xa1, 0x1c, 0xc3, 0x87, 0x87, 0x72, - 0x8c, 0x0d, 0x8f, 0xe4, 0x18, 0x57, 0x3c, 0x92, 0x63, 0x3c, 0xf1, 0x48, 0x8e, 0xf1, 0xc2, 0x23, - 0x39, 0xc6, 0x07, 0x8f, 0xe4, 0x18, 0x5f, 0x3c, 0x92, 0x63, 0xf8, 0xf0, 0x48, 0x8e, 0x71, 0xc2, - 0x63, 0x39, 0x86, 0x0b, 0x8f, 0xe5, 0x18, 0x6e, 0x3c, 0x96, 0x63, 0x88, 0x52, 0x4f, 0xcf, 0x2c, - 0xc9, 0x28, 0x4d, 0xd2, 0x4b, 0xce, 0xcf, 0xd5, 0x4f, 0x2f, 0x4a, 0x4c, 0x4b, 0xcc, 0x4b, 0xd4, - 0x07, 0xdb, 0xad, 0x5f, 0x90, 0x9d, 0xae, 0x8f, 0x70, 0x45, 0x12, 0x1b, 0x98, 0x32, 0x06, 0x04, - 0x00, 0x00, 0xff, 0xff, 0x15, 0xe9, 0x8a, 0xfc, 0x01, 0x01, 0x00, 0x00, -} - -func (this *ReshardRequest) Equal(that interface{}) bool { - if that == nil { - return this == nil - } - - that1, ok := that.(*ReshardRequest) - if !ok { - that2, ok := that.(ReshardRequest) - if ok { - that1 = &that2 - } else { - return false - } - } - if that1 == nil { - return this == nil - } else if this == nil { - return false - } - return true -} -func (this *ReshardRequest) GoString() string { - if this == nil { - return "nil" - } - s := make([]string, 0, 4) - s = append(s, "&agentproto.ReshardRequest{") - s = append(s, "}") - return strings.Join(s, "") -} -func valueToGoStringAgent(v interface{}, typ string) string { - rv := reflect.ValueOf(v) - if rv.IsNil() { - return "nil" - } - pv := reflect.Indirect(rv).Interface() - return fmt.Sprintf("func(v %v) *%v { return &v } ( %#v )", typ, typ, pv) -} - -// Reference imports to suppress errors if 
they are not otherwise used. -var _ context.Context -var _ grpc.ClientConn - -// This is a compile-time assertion to ensure that this generated file -// is compatible with the grpc package it is being compiled against. -const _ = grpc.SupportPackageIsVersion4 - -// ScrapingServiceClient is the client API for ScrapingService service. -// -// For semantics around ctx use and closing/ending streaming RPCs, please refer to https://godoc.org/google.golang.org/grpc#ClientConn.NewStream. -type ScrapingServiceClient interface { - // Reshard tells the implementing service to reshard all of its running - // configs. - Reshard(ctx context.Context, in *ReshardRequest, opts ...grpc.CallOption) (*emptypb.Empty, error) -} - -type scrapingServiceClient struct { - cc *grpc.ClientConn -} - -func NewScrapingServiceClient(cc *grpc.ClientConn) ScrapingServiceClient { - return &scrapingServiceClient{cc} -} - -func (c *scrapingServiceClient) Reshard(ctx context.Context, in *ReshardRequest, opts ...grpc.CallOption) (*emptypb.Empty, error) { - out := new(emptypb.Empty) - err := c.cc.Invoke(ctx, "/agentproto.ScrapingService/Reshard", in, out, opts...) - if err != nil { - return nil, err - } - return out, nil -} - -// ScrapingServiceServer is the server API for ScrapingService service. -type ScrapingServiceServer interface { - // Reshard tells the implementing service to reshard all of its running - // configs. - Reshard(context.Context, *ReshardRequest) (*emptypb.Empty, error) -} - -// UnimplementedScrapingServiceServer can be embedded to have forward compatible implementations. -type UnimplementedScrapingServiceServer struct { -} - -func (*UnimplementedScrapingServiceServer) Reshard(ctx context.Context, req *ReshardRequest) (*emptypb.Empty, error) { - return nil, status.Errorf(codes.Unimplemented, "method Reshard not implemented") -} - -func RegisterScrapingServiceServer(s *grpc.Server, srv ScrapingServiceServer) { - s.RegisterService(&_ScrapingService_serviceDesc, srv) -} - -func _ScrapingService_Reshard_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { - in := new(ReshardRequest) - if err := dec(in); err != nil { - return nil, err - } - if interceptor == nil { - return srv.(ScrapingServiceServer).Reshard(ctx, in) - } - info := &grpc.UnaryServerInfo{ - Server: srv, - FullMethod: "/agentproto.ScrapingService/Reshard", - } - handler := func(ctx context.Context, req interface{}) (interface{}, error) { - return srv.(ScrapingServiceServer).Reshard(ctx, req.(*ReshardRequest)) - } - return interceptor(ctx, in, info, handler) -} - -var _ScrapingService_serviceDesc = grpc.ServiceDesc{ - ServiceName: "agentproto.ScrapingService", - HandlerType: (*ScrapingServiceServer)(nil), - Methods: []grpc.MethodDesc{ - { - MethodName: "Reshard", - Handler: _ScrapingService_Reshard_Handler, - }, - }, - Streams: []grpc.StreamDesc{}, - Metadata: "agent.proto", -} - -func (m *ReshardRequest) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *ReshardRequest) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *ReshardRequest) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) - _ = i - var l int - _ = l - return len(dAtA) - i, nil -} - -func encodeVarintAgent(dAtA []byte, offset int, v uint64) int { - offset -= 
sovAgent(v) - base := offset - for v >= 1<<7 { - dAtA[offset] = uint8(v&0x7f | 0x80) - v >>= 7 - offset++ - } - dAtA[offset] = uint8(v) - return base -} -func (m *ReshardRequest) Size() (n int) { - if m == nil { - return 0 - } - var l int - _ = l - return n -} - -func sovAgent(x uint64) (n int) { - return (math_bits.Len64(x|1) + 6) / 7 -} -func sozAgent(x uint64) (n int) { - return sovAgent(uint64((x << 1) ^ uint64((int64(x) >> 63)))) -} -func (this *ReshardRequest) String() string { - if this == nil { - return "nil" - } - s := strings.Join([]string{`&ReshardRequest{`, - `}`, - }, "") - return s -} -func valueToStringAgent(v interface{}) string { - rv := reflect.ValueOf(v) - if rv.IsNil() { - return "nil" - } - pv := reflect.Indirect(rv).Interface() - return fmt.Sprintf("*%v", pv) -} -func (m *ReshardRequest) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowAgent - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: ReshardRequest: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: ReshardRequest: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - default: - iNdEx = preIndex - skippy, err := skipAgent(dAtA[iNdEx:]) - if err != nil { - return err - } - if skippy < 0 { - return ErrInvalidLengthAgent - } - if (iNdEx + skippy) < 0 { - return ErrInvalidLengthAgent - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func skipAgent(dAtA []byte) (n int, err error) { - l := len(dAtA) - iNdEx := 0 - depth := 0 - for iNdEx < l { - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return 0, ErrIntOverflowAgent - } - if iNdEx >= l { - return 0, io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - wireType := int(wire & 0x7) - switch wireType { - case 0: - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return 0, ErrIntOverflowAgent - } - if iNdEx >= l { - return 0, io.ErrUnexpectedEOF - } - iNdEx++ - if dAtA[iNdEx-1] < 0x80 { - break - } - } - case 1: - iNdEx += 8 - case 2: - var length int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return 0, ErrIntOverflowAgent - } - if iNdEx >= l { - return 0, io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - length |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - if length < 0 { - return 0, ErrInvalidLengthAgent - } - iNdEx += length - case 3: - depth++ - case 4: - if depth == 0 { - return 0, ErrUnexpectedEndOfGroupAgent - } - depth-- - case 5: - iNdEx += 4 - default: - return 0, fmt.Errorf("proto: illegal wireType %d", wireType) - } - if iNdEx < 0 { - return 0, ErrInvalidLengthAgent - } - if depth == 0 { - return iNdEx, nil - } - } - return 0, io.ErrUnexpectedEOF -} - -var ( - ErrInvalidLengthAgent = fmt.Errorf("proto: negative length found during unmarshaling") - ErrIntOverflowAgent = fmt.Errorf("proto: integer overflow") - ErrUnexpectedEndOfGroupAgent = fmt.Errorf("proto: unexpected end of group") -) diff --git a/internal/static/agentproto/agent.proto b/internal/static/agentproto/agent.proto deleted file mode 
100644 index 405f7779c4..0000000000 --- a/internal/static/agentproto/agent.proto +++ /dev/null @@ -1,20 +0,0 @@ -syntax = "proto3"; - -package agentproto; -option go_package = "github.com/grafana/agent/internal/static/agentproto"; - -import "google/protobuf/empty.proto"; - -// ScrapingService holds methods that can be called against a Prometheus -// Scraping Service instance. -// -// These methods are only available when the agent config file has enabled the -// scraping service mode. If the scraping service mode is not enabled, -// invoking any of the RPCs here will return a not found error. -service ScrapingService { - // Reshard tells the implementing service to reshard all of its running - // configs. - rpc Reshard(ReshardRequest) returns (google.protobuf.Empty); -} - -message ReshardRequest {} diff --git a/internal/static/agentproto/func.go b/internal/static/agentproto/func.go deleted file mode 100644 index 64bf9cc204..0000000000 --- a/internal/static/agentproto/func.go +++ /dev/null @@ -1,21 +0,0 @@ -package agentproto - -import ( - "context" - - empty "github.com/golang/protobuf/ptypes/empty" -) - -// FuncScrapingServiceServer is an implementation of ScrapingServiceServer that -// uses function fields to implement the interface. Useful for tests. -type FuncScrapingServiceServer struct { - ReshardFunc func(context.Context, *ReshardRequest) (*empty.Empty, error) -} - -// Reshard implements ScrapingServiceServer. -func (f *FuncScrapingServiceServer) Reshard(ctx context.Context, req *ReshardRequest) (*empty.Empty, error) { - if f.ReshardFunc != nil { - return f.ReshardFunc(ctx, req) - } - panic("ReshardFunc is nil") -} diff --git a/internal/static/agentproto/gen.go b/internal/static/agentproto/gen.go deleted file mode 100644 index 591102b899..0000000000 --- a/internal/static/agentproto/gen.go +++ /dev/null @@ -1,3 +0,0 @@ -package agentproto - -//go:generate protoc --gogoslick_out=Mgoogle/protobuf/timestamp.proto=github.com/gogo/protobuf/types,plugins=grpc,paths=source_relative:./ ./agent.proto diff --git a/internal/static/client/client.go b/internal/static/client/client.go deleted file mode 100644 index 68048cbce3..0000000000 --- a/internal/static/client/client.go +++ /dev/null @@ -1,179 +0,0 @@ -// Package client provides a client interface to the Agent HTTP -// API. -package client - -import ( - "bytes" - "context" - "encoding/json" - "fmt" - "io" - "net/http" - "strings" - - "github.com/grafana/agent/internal/static/metrics/cluster/configapi" - "github.com/grafana/agent/internal/static/metrics/instance" - "gopkg.in/yaml.v2" -) - -// Client is a collection of all subsystem clients. -type Client struct { - PrometheusClient -} - -// New creates a new Client. -func New(addr string) *Client { - return &Client{ - PrometheusClient: &prometheusClient{addr: addr}, - } -} - -// PrometheusClient is the client interface to the API exposed by the -// Prometheus subsystem of the Grafana Agent. -type PrometheusClient interface { - // Instances returns the list of currently running instances. - Instances(ctx context.Context) ([]string, error) - - // The following methods are for the scraping service mode - // only and will fail when not enabled on the Agent. - - // ListConfigs returns the list of instance configs stored in the config - // management KV store. - ListConfigs(ctx context.Context) (*configapi.ListConfigurationsResponse, error) - - // GetConfiguration returns a named configuration from the config - // management KV store. 
- GetConfiguration(ctx context.Context, name string) (*instance.Config, error) - - // PutConfiguration adds or updates a named configuration into the - // config management KV store. - PutConfiguration(ctx context.Context, name string, cfg *instance.Config) error - - // DeleteConfiguration removes a named configuration from the config - // management KV store. - DeleteConfiguration(ctx context.Context, name string) error -} - -type prometheusClient struct { - addr string -} - -func (c *prometheusClient) Instances(ctx context.Context) ([]string, error) { - url := fmt.Sprintf("%s/agent/api/v1/metrics/instances", c.addr) - - resp, err := c.doRequest(ctx, "GET", url, nil) - if err != nil { - return nil, err - } - - var data []string - err = unmarshalPrometheusAPIResponse(resp.Body, &data) - return data, err -} - -func (c *prometheusClient) ListConfigs(ctx context.Context) (*configapi.ListConfigurationsResponse, error) { - url := fmt.Sprintf("%s/agent/api/v1/configs", c.addr) - - resp, err := c.doRequest(ctx, "GET", url, nil) - if err != nil { - return nil, err - } - - var data configapi.ListConfigurationsResponse - err = unmarshalPrometheusAPIResponse(resp.Body, &data) - return &data, err -} - -func (c *prometheusClient) GetConfiguration(ctx context.Context, name string) (*instance.Config, error) { - url := fmt.Sprintf("%s/agent/api/v1/configs/%s", c.addr, name) - - resp, err := c.doRequest(ctx, "GET", url, nil) - if err != nil { - return nil, err - } - - var data configapi.GetConfigurationResponse - if err := unmarshalPrometheusAPIResponse(resp.Body, &data); err != nil { - return nil, err - } - - var config instance.Config - err = yaml.NewDecoder(strings.NewReader(data.Value)).Decode(&config) - return &config, err -} - -func (c *prometheusClient) PutConfiguration(ctx context.Context, name string, cfg *instance.Config) error { - url := fmt.Sprintf("%s/agent/api/v1/config/%s", c.addr, name) - - bb, err := instance.MarshalConfig(cfg, false) - if err != nil { - return err - } - - resp, err := c.doRequest(ctx, "POST", url, bytes.NewReader(bb)) - if err != nil { - return err - } - - return unmarshalPrometheusAPIResponse(resp.Body, nil) -} - -func (c *prometheusClient) DeleteConfiguration(ctx context.Context, name string) error { - url := fmt.Sprintf("%s/agent/api/v1/config/%s", c.addr, name) - - resp, err := c.doRequest(ctx, "DELETE", url, nil) - if err != nil { - return err - } - - return unmarshalPrometheusAPIResponse(resp.Body, nil) -} - -func (c *prometheusClient) doRequest(ctx context.Context, method string, url string, body io.Reader) (*http.Response, error) { - req, err := http.NewRequestWithContext(ctx, method, url, body) - if err != nil { - return nil, err - } - return http.DefaultClient.Do(req) -} - -// unmarshalPrometheusAPIResponse will unmarshal a response from the Prometheus -// subsystem API. -// -// r will be closed after this method is called. 
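// For illustration, the response envelope this helper expects has the
// shape below (payloads hypothetical, following the success/error handling
// in the function body):
//
//   {"status": "success", "data": ["instance-a", "instance-b"]}
//   {"status": "error", "data": {"error": "configuration not found"}}
//
// Any other "status" value is rejected as an unknown API response.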
-func unmarshalPrometheusAPIResponse(r io.ReadCloser, v interface{}) error { - defer func() { - _ = r.Close() - }() - - resp := struct { - Status string `json:"status"` - Data json.RawMessage `json:"data"` - }{} - - err := json.NewDecoder(r).Decode(&resp) - if err != nil { - return fmt.Errorf("could not read response: %w", err) - } - - if v != nil && resp.Status == "success" { - err := json.Unmarshal(resp.Data, v) - if err != nil { - return fmt.Errorf("unmarshaling response: %w", err) - } - } else if resp.Status == "error" { - var errResp configapi.ErrorResponse - err := json.Unmarshal(resp.Data, &errResp) - if err != nil { - return fmt.Errorf("unmarshaling error: %w", err) - } - - return fmt.Errorf("%s", errResp.Error) - } - - if resp.Status != "success" && resp.Status != "error" { - return fmt.Errorf("unknown API response status: %s", resp.Status) - } - - return nil -} diff --git a/internal/static/config/agent_management_remote_config_test.go b/internal/static/config/agent_management_remote_config_test.go deleted file mode 100644 index 820801cf70..0000000000 --- a/internal/static/config/agent_management_remote_config_test.go +++ /dev/null @@ -1,364 +0,0 @@ -package config - -import ( - "testing" - "time" - - process_exporter "github.com/grafana/agent/internal/static/integrations/process_exporter" - "github.com/grafana/agent/internal/static/metrics/instance" - "github.com/prometheus/common/model" - "github.com/prometheus/prometheus/model/labels" - "github.com/stretchr/testify/require" -) - -func TestBuildRemoteConfig(t *testing.T) { - baseConfig := ` -server: - log_level: debug -` - metricsSnippets := []Snippet{{ - Config: ` -metrics_scrape_configs: - - job_name: 'prometheus' - scrape_interval: 15s - static_configs: - - targets: ['localhost:9090'] -`, - }} - logsSnippets := []Snippet{{ - Config: ` -logs_scrape_configs: - - job_name: 'loki' - static_configs: - - targets: ['localhost:3100'] -`, - }} - integrationSnippets := []Snippet{{ - Config: ` -integration_configs: - agent: - enabled: true - relabel_configs: - - action: replace - source_labels: - - agent_hostname - target_label: instance -`, - }} - - allSnippets := []Snippet{} - allSnippets = append(allSnippets, metricsSnippets...) - allSnippets = append(allSnippets, logsSnippets...) - allSnippets = append(allSnippets, integrationSnippets...) 
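// allSnippets combines one snippet of each kind so that the subtests below
// can exercise the merging of metrics, logs, and integrations both in
// isolation and together.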
- - t.Run("only metrics snippets provided", func(t *testing.T) { - rc := RemoteConfig{ - BaseConfig: BaseConfigContent(baseConfig), - Snippets: metricsSnippets, - } - c, err := rc.BuildAgentConfig() - require.NoError(t, err) - require.Equal(t, len(c.Metrics.Configs), 1) - require.Empty(t, c.Logs) - require.Empty(t, c.Integrations.ConfigV1.Integrations) - }) - - t.Run("only log snippets provided", func(t *testing.T) { - rc := RemoteConfig{ - BaseConfig: BaseConfigContent(baseConfig), - Snippets: logsSnippets, - } - c, err := rc.BuildAgentConfig() - require.NoError(t, err) - require.Equal(t, len(c.Logs.Configs), 1) - require.Empty(t, c.Metrics.Configs) - require.Empty(t, c.Integrations.ConfigV1.Integrations) - }) - - t.Run("only integration snippets provided", func(t *testing.T) { - rc := RemoteConfig{ - BaseConfig: BaseConfigContent(baseConfig), - Snippets: integrationSnippets, - } - c, err := rc.BuildAgentConfig() - require.NoError(t, err) - require.Empty(t, c.Metrics.Configs) - require.Empty(t, c.Logs) - require.Equal(t, 1, len(c.Integrations.ConfigV1.Integrations)) - }) - - t.Run("base with already logs, metrics and integrations provided", func(t *testing.T) { - fullConfig := ` -metrics: - configs: - - name: default - scrape_configs: - - job_name: default-prom - static_configs: - - targets: - - localhost:9090 -logs: - positions_directory: /tmp/grafana-agent-positions - configs: - - name: default - scrape_configs: - - job_name: default-loki - static_configs: - - targets: - - localhost:3100 -integrations: - node_exporter: - enabled: true -` - rc := RemoteConfig{ - BaseConfig: BaseConfigContent(fullConfig), - Snippets: allSnippets, - } - c, err := rc.BuildAgentConfig() - require.NoError(t, err) - require.Equal(t, len(c.Logs.Configs), 2) - require.Equal(t, len(c.Metrics.Configs), 2) - require.Equal(t, 2, len(c.Integrations.ConfigV1.Integrations)) - }) - - t.Run("do not override integrations defined in base config with the ones defined in snippets", func(t *testing.T) { - baseConfig := ` -integrations: - node_exporter: - enabled: false -` - - snippets := []Snippet{{ - Config: ` -integration_configs: - node_exporter: - enabled: true`, - }} - - rc := RemoteConfig{ - BaseConfig: BaseConfigContent(baseConfig), - Snippets: snippets, - } - c, err := rc.BuildAgentConfig() - require.NoError(t, err) - require.Equal(t, 1, len(c.Integrations.ConfigV1.Integrations)) - require.False(t, c.Integrations.ConfigV1.Integrations[0].Common.Enabled) - }) - - t.Run("all snippets provided", func(t *testing.T) { - rc := RemoteConfig{ - BaseConfig: BaseConfigContent(baseConfig), - Snippets: allSnippets, - } - c, err := rc.BuildAgentConfig() - require.NoError(t, err) - require.Equal(t, 1, len(c.Logs.Configs)) - require.Equal(t, 1, len(c.Metrics.Configs)) - require.Equal(t, 1, len(c.Integrations.ConfigV1.Integrations)) - - // check some fields to make sure the config was parsed correctly - require.Equal(t, "prometheus", c.Metrics.Configs[0].ScrapeConfigs[0].JobName) - require.Equal(t, "loki", c.Logs.Configs[0].ScrapeConfig[0].JobName) - require.Equal(t, "agent", c.Integrations.ConfigV1.Integrations[0].Name()) - - // make sure defaults for metric snippets are applied - require.Equal(t, instance.DefaultConfig.WALTruncateFrequency, c.Metrics.Configs[0].WALTruncateFrequency) - require.Equal(t, instance.DefaultConfig.HostFilter, c.Metrics.Configs[0].HostFilter) - require.Equal(t, instance.DefaultConfig.MinWALTime, c.Metrics.Configs[0].MinWALTime) - require.Equal(t, instance.DefaultConfig.MaxWALTime, 
c.Metrics.Configs[0].MaxWALTime) - require.Equal(t, instance.DefaultConfig.RemoteFlushDeadline, c.Metrics.Configs[0].RemoteFlushDeadline) - require.Equal(t, instance.DefaultConfig.WriteStaleOnShutdown, c.Metrics.Configs[0].WriteStaleOnShutdown) - require.Equal(t, instance.DefaultGlobalConfig, c.Metrics.Global) - - // make sure defaults for log snippets are applied - require.Equal(t, 10*time.Second, c.Logs.Configs[0].PositionsConfig.SyncPeriod) - require.Equal(t, "", c.Logs.Configs[0].PositionsConfig.PositionsFile) - require.Equal(t, false, c.Logs.Configs[0].PositionsConfig.IgnoreInvalidYaml) - require.Equal(t, false, c.Logs.Configs[0].TargetConfig.Stdin) - - // make sure defaults for integration snippets are applied - require.Equal(t, true, c.Integrations.ConfigV1.ScrapeIntegrations) - require.Equal(t, true, c.Integrations.ConfigV1.UseHostnameLabel) - require.Equal(t, true, c.Integrations.ConfigV1.ReplaceInstanceLabel) - require.Equal(t, 5*time.Second, c.Integrations.ConfigV1.IntegrationRestartBackoff) - }) - - t.Run("template variables provided", func(t *testing.T) { - baseConfig := ` -server: - log_level: {{.log_level}} -` - templateInsideTemplate := "`{{ .template_inside_template }}`" - snippet := Snippet{ - Config: ` -integration_configs: - process_exporter: - enabled: true - process_names: - - name: "grafana-agent" - cmdline: - - 'grafana-agent' - - name: "{{.nonexistent.foo.bar.baz.bat}}" - cmdline: - - "{{ ` + templateInsideTemplate + ` }}" - # Custom process monitors - {{- range $key, $value := .process_exporter_processes }} - - name: "{{ $value.name }}" - cmdline: - - "{{ $value.cmdline }}" - {{if $value.exe}} - exe: - - "{{ $value.exe }}" - {{end}} - {{- end }} -`, - } - - rc := RemoteConfig{ - BaseConfig: BaseConfigContent(baseConfig), - Snippets: []Snippet{snippet}, - AgentMetadata: AgentMetadata{ - TemplateVariables: map[string]any{ - "log_level": "debug", - "process_exporter_processes": []map[string]string{ - { - "name": "java_processes", - "cmdline": ".*/java", - }, - { - "name": "{{.ExeFull}}:{{.Matches.Cfgfile}}", - "cmdline": `-config.path\\s+(?P\\S+)`, - "exe": "/usr/local/bin/process-exporter", - }, - }, - }, - }, - } - - c, err := rc.BuildAgentConfig() - require.NoError(t, err) - require.Equal(t, 1, len(c.Integrations.ConfigV1.Integrations)) - processExporterConfig := c.Integrations.ConfigV1.Integrations[0].Config.(*process_exporter.Config) - - require.Equal(t, 4, len(processExporterConfig.ProcessExporter)) - - require.Equal(t, "grafana-agent", processExporterConfig.ProcessExporter[0].Name) - require.Equal(t, "grafana-agent", processExporterConfig.ProcessExporter[0].CmdlineRules[0]) - require.Equal(t, 0, len(processExporterConfig.ProcessExporter[0].ExeRules)) - - require.Equal(t, "", processExporterConfig.ProcessExporter[1].Name) - require.Equal(t, "{{ .template_inside_template }}", processExporterConfig.ProcessExporter[1].CmdlineRules[0]) - require.Equal(t, 0, len(processExporterConfig.ProcessExporter[1].ExeRules)) - - require.Equal(t, "java_processes", processExporterConfig.ProcessExporter[2].Name) - require.Equal(t, ".*/java", processExporterConfig.ProcessExporter[2].CmdlineRules[0]) - require.Equal(t, 0, len(processExporterConfig.ProcessExporter[2].ExeRules)) - - require.Equal(t, "{{.ExeFull}}:{{.Matches.Cfgfile}}", processExporterConfig.ProcessExporter[3].Name) - require.Equal(t, `-config.path\s+(?P\S+)`, processExporterConfig.ProcessExporter[3].CmdlineRules[0]) - require.Equal(t, "/usr/local/bin/process-exporter", 
processExporterConfig.ProcessExporter[3].ExeRules[0]) - }) - - t.Run("no external labels provided", func(t *testing.T) { - rc := RemoteConfig{ - BaseConfig: BaseConfigContent(baseConfig), - Snippets: allSnippets, - } - c, err := rc.BuildAgentConfig() - require.NoError(t, err) - require.Equal(t, 1, len(c.Logs.Configs)) - require.Empty(t, c.Metrics.Global.Prometheus.ExternalLabels) - }) - - t.Run("no external labels provided in remote config", func(t *testing.T) { - baseConfig := ` -server: - log_level: debug -metrics: - global: - external_labels: - foo: bar -logs: - global: - clients: - - external_labels: - foo: bar -` - rc := RemoteConfig{ - BaseConfig: BaseConfigContent(baseConfig), - Snippets: allSnippets, - } - c, err := rc.BuildAgentConfig() - require.NoError(t, err) - require.Equal(t, 1, len(c.Logs.Configs)) - require.Equal(t, 1, len(c.Logs.Global.ClientConfigs)) - require.Equal(t, c.Logs.Global.ClientConfigs[0].ExternalLabels.LabelSet, model.LabelSet{"foo": "bar"}) - require.Equal(t, 1, len(c.Metrics.Global.Prometheus.ExternalLabels)) - require.Contains(t, c.Metrics.Global.Prometheus.ExternalLabels, labels.Label{Name: "foo", Value: "bar"}) - }) - - t.Run("external labels provided", func(t *testing.T) { - baseConfig := ` -server: - log_level: debug -metrics: - global: - remote_write: - - url: http://localhost:9090/api/prom/push -logs: - global: - clients: - - url: http://localhost:3100/loki/api/v1/push -` - - rc := RemoteConfig{ - BaseConfig: BaseConfigContent(baseConfig), - Snippets: allSnippets, - AgentMetadata: AgentMetadata{ - ExternalLabels: map[string]string{ - "foo": "bar", - }, - }, - } - c, err := rc.BuildAgentConfig() - require.NoError(t, err) - require.Equal(t, 1, len(c.Logs.Configs)) - require.Equal(t, 1, len(c.Metrics.Configs)) - require.Equal(t, 1, len(c.Logs.Global.ClientConfigs)) - require.Equal(t, c.Logs.Global.ClientConfigs[0].ExternalLabels.LabelSet, model.LabelSet{"foo": "bar"}) - require.Contains(t, c.Metrics.Global.Prometheus.ExternalLabels, labels.Label{Name: "foo", Value: "bar"}) - }) - - t.Run("external labels don't override base config", func(t *testing.T) { - baseConfig := ` -server: - log_level: debug -metrics: - global: - external_labels: - foo: bar -logs: - global: - clients: - - external_labels: - foo: bar -` - rc := RemoteConfig{ - BaseConfig: BaseConfigContent(baseConfig), - Snippets: allSnippets, - AgentMetadata: AgentMetadata{ - ExternalLabels: map[string]string{ - "foo": "baz", - }, - }, - } - c, err := rc.BuildAgentConfig() - require.NoError(t, err) - require.Equal(t, 1, len(c.Logs.Configs)) - require.Equal(t, 1, len(c.Metrics.Configs)) - require.Equal(t, 1, len(c.Logs.Global.ClientConfigs)) - require.Equal(t, c.Logs.Global.ClientConfigs[0].ExternalLabels.LabelSet, model.LabelSet{"foo": "bar"}) - require.Contains(t, c.Metrics.Global.Prometheus.ExternalLabels, labels.Label{Name: "foo", Value: "bar"}) - require.NotContains(t, c.Metrics.Global.Prometheus.ExternalLabels, labels.Label{Name: "foo", Value: "baz"}) - }) -} diff --git a/internal/static/config/agentmanagement.go b/internal/static/config/agentmanagement.go index 9f91ba21a7..7b9c686d8a 100644 --- a/internal/static/config/agentmanagement.go +++ b/internal/static/config/agentmanagement.go @@ -1,202 +1,19 @@ package config import ( - "crypto/sha256" - "encoding/hex" - "encoding/json" "errors" - "flag" "fmt" - "math/rand" - "net/url" - "os" - "path/filepath" "time" - "github.com/go-kit/log/level" - "github.com/grafana/agent/internal/static/config/instrumentation" - 
"github.com/grafana/agent/internal/static/server" "github.com/prometheus/common/config" - "gopkg.in/yaml.v2" -) - -const ( - cacheFilename = "remote-config-cache.yaml" - apiPath = "/agent-management/api/agent/v2" - labelManagementEnabledHeader = "X-LabelManagementEnabled" - agentIDHeader = "X-AgentID" - agentNamespaceVersionHeader = "X-AgentNamespaceVersion" - agentInfoVersionHeader = "X-AgentInfoVersion" - acceptNotModifiedHeader = "X-AcceptHTTPNotModified" ) var ( - agentInfoVersion string - agentNamespaceVersion string defaultRemoteConfiguration = RemoteConfiguration{ AcceptHTTPNotModified: true, } ) -type remoteConfigProvider interface { - GetCachedRemoteConfig() ([]byte, error) - CacheRemoteConfig(remoteConfigBytes []byte) error - FetchRemoteConfig() ([]byte, error) - GetPollingInterval() time.Duration -} - -type remoteConfigHTTPProvider struct { - InitialConfig *AgentManagementConfig -} - -func newRemoteConfigHTTPProvider(c *Config) (*remoteConfigHTTPProvider, error) { - err := c.AgentManagement.Validate() - if err != nil { - return nil, err - } - return &remoteConfigHTTPProvider{ - InitialConfig: &c.AgentManagement, - }, nil -} - -type remoteConfigCache struct { - InitialConfigHash string `json:"initial_config_hash"` - Config string `json:"config"` -} - -func hashInitialConfig(am AgentManagementConfig) (string, error) { - marshalled, err := yaml.Marshal(am) - if err != nil { - return "", fmt.Errorf("could not marshal initial config: %w", err) - } - hashed := sha256.Sum256(marshalled) - return hex.EncodeToString(hashed[:]), nil -} - -// initialConfigHashCheck checks if the hash of initialConfig matches what is stored in configCache.InitialConfigHash. -// If an error is encountered while hashing initialConfig or the hashes do not match, initialConfigHashCheck -// returns an error. Otherwise, it returns nil. 
-func initialConfigHashCheck(initialConfig AgentManagementConfig, configCache remoteConfigCache) error { - initialConfigHash, err := hashInitialConfig(initialConfig) - if err != nil { - return err - } - - if !(configCache.InitialConfigHash == initialConfigHash) { - return errors.New("invalid remote config cache: initial config hashes don't match") - } - return nil -} - -// GetCachedRemoteConfig retrieves the cached remote config from the location specified -// in r.AgentManagement.CacheLocation -func (r remoteConfigHTTPProvider) GetCachedRemoteConfig() ([]byte, error) { - cachePath := filepath.Join(r.InitialConfig.RemoteConfiguration.CacheLocation, cacheFilename) - - var configCache remoteConfigCache - buf, err := os.ReadFile(cachePath) - - if err != nil { - return nil, fmt.Errorf("error reading remote config cache: %w", err) - } - - if err := json.Unmarshal(buf, &configCache); err != nil { - return nil, fmt.Errorf("error trying to load cached remote config from file: %w", err) - } - - if err = initialConfigHashCheck(*r.InitialConfig, configCache); err != nil { - return nil, err - } - - return []byte(configCache.Config), nil -} - -// CacheRemoteConfig caches the remote config to the location specified in -// r.AgentManagement.CacheLocation -func (r remoteConfigHTTPProvider) CacheRemoteConfig(remoteConfigBytes []byte) error { - cachePath := filepath.Join(r.InitialConfig.RemoteConfiguration.CacheLocation, cacheFilename) - initialConfigHash, err := hashInitialConfig(*r.InitialConfig) - if err != nil { - return err - } - configCache := remoteConfigCache{ - InitialConfigHash: initialConfigHash, - Config: string(remoteConfigBytes), - } - marshalled, err := json.Marshal(configCache) - if err != nil { - return fmt.Errorf("could not marshal remote config cache: %w", err) - } - return os.WriteFile(cachePath, marshalled, 0666) -} - -// FetchRemoteConfig fetches the raw bytes of the config from a remote API using -// the values in r.AgentManagement. -func (r remoteConfigHTTPProvider) FetchRemoteConfig() ([]byte, error) { - httpClientConfig := &r.InitialConfig.HTTPClientConfig - - dir, err := os.Getwd() - if err != nil { - return nil, fmt.Errorf("failed to get current working directory: %w", err) - } - httpClientConfig.SetDirectory(dir) - - remoteOpts := &remoteOpts{ - HTTPClientConfig: httpClientConfig, - } - - if r.InitialConfig.RemoteConfiguration.LabelManagementEnabled && r.InitialConfig.RemoteConfiguration.AgentID != "" { - remoteOpts.headers = map[string]string{ - labelManagementEnabledHeader: "1", - agentIDHeader: r.InitialConfig.RemoteConfiguration.AgentID, - } - - if agentNamespaceVersion != "" { - remoteOpts.headers[agentNamespaceVersionHeader] = agentNamespaceVersion - } - if agentInfoVersion != "" { - remoteOpts.headers[agentInfoVersionHeader] = agentInfoVersion - } - if r.InitialConfig.RemoteConfiguration.AcceptHTTPNotModified { - remoteOpts.headers[acceptNotModifiedHeader] = "1" - } - } - - url, err := r.InitialConfig.fullUrl() - if err != nil { - return nil, fmt.Errorf("error trying to create full url: %w", err) - } - rc, err := newRemoteProvider(url, remoteOpts) - if err != nil { - return nil, fmt.Errorf("error reading remote config: %w", err) - } - - bb, headers, err := rc.retrieve() - - // If the server returns a 304, return it and the caller will handle it. 
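// Matching err against a zero value works on the assumption that
// notModifiedError carries no state, so errors.Is can compare the sentinel
// directly.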
- var nme notModifiedError - if errors.Is(err, nme) { - return nil, nme - } - - if err != nil { - return nil, fmt.Errorf("error retrieving remote config: %w", err) - } - - nsVersion := headers.Get(agentNamespaceVersionHeader) - infoVersion := headers.Get(agentInfoVersionHeader) - if nsVersion != "" && infoVersion != "" { - agentNamespaceVersion = nsVersion - agentInfoVersion = infoVersion - } - - return bb, nil -} - -func (r remoteConfigHTTPProvider) GetPollingInterval() time.Duration { - return r.InitialConfig.PollingInterval -} - type labelMap map[string]string type RemoteConfiguration struct { @@ -226,131 +43,6 @@ type AgentManagementConfig struct { RemoteConfiguration RemoteConfiguration `yaml:"remote_configuration"` } -// getRemoteConfig gets the remote config specified in the initial config, falling back to a local, cached copy -// of the remote config if the request to the remote fails. If both fail, an empty config and an -// error will be returned. -func getRemoteConfig(expandEnvVars bool, configProvider remoteConfigProvider, log *server.Logger, fs *flag.FlagSet, retry bool) (*Config, error) { - remoteConfigBytes, err := configProvider.FetchRemoteConfig() - if errors.Is(err, notModifiedError{}) { - level.Info(log).Log("msg", "remote config has not changed since last fetch, using cached copy") - remoteConfigBytes, err = configProvider.GetCachedRemoteConfig() - } - if err != nil { - var retryAfterErr retryAfterError - if errors.As(err, &retryAfterErr) && retry { - // In the case that the server is telling us to retry after a time greater than our polling interval, - // the agent should sleep for the duration of the retry-after header. - // - // If the duration of the retry-after is lower than the polling interval, the agent will simply - // fall back to the cache and continue polling at the polling interval, effectively skipping - // this poll. - if retryAfterErr.retryAfter > configProvider.GetPollingInterval() { - level.Info(log).Log("msg", "received retry-after from API, sleeping and falling back to cache", "retry-after", retryAfterErr.retryAfter) - time.Sleep(retryAfterErr.retryAfter) - } else { - level.Info(log).Log("msg", "received retry-after from API, falling back to cache", "retry-after", retryAfterErr.retryAfter) - } - // Return the cached config, as this is the last known good config and a config must be returned here. - return getCachedRemoteConfig(expandEnvVars, configProvider, fs, log) - } - level.Error(log).Log("msg", "could not fetch from API, falling back to cache", "err", err) - return getCachedRemoteConfig(expandEnvVars, configProvider, fs, log) - } - - config, err := loadRemoteConfig(remoteConfigBytes, expandEnvVars, fs) - if err != nil { - level.Error(log).Log("msg", "could not load remote config, falling back to cache", "err", err) - return getCachedRemoteConfig(expandEnvVars, configProvider, fs, log) - } - - level.Info(log).Log("msg", "fetched and loaded remote config from API") - - if err = configProvider.CacheRemoteConfig(remoteConfigBytes); err != nil { - level.Error(log).Log("err", fmt.Errorf("could not cache config locally: %w", err)) - } - return config, nil -} - -// getCachedRemoteConfig gets the cached remote config, falling back to the default config if the cache is invalid or not found. 
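// The resulting precedence is: freshly fetched remote config, then the
// local cache, then DefaultConfig() as a last resort.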
-func getCachedRemoteConfig(expandEnvVars bool, configProvider remoteConfigProvider, fs *flag.FlagSet, log *server.Logger) (*Config, error) { - rc, err := configProvider.GetCachedRemoteConfig() - if err != nil { - level.Error(log).Log("msg", "could not get cached remote config, falling back to default (empty) config", "err", err) - d := DefaultConfig() - instrumentation.InstrumentAgentManagementConfigFallback("empty_config") - return &d, nil - } - instrumentation.InstrumentAgentManagementConfigFallback("cache") - return loadRemoteConfig(rc, expandEnvVars, fs) -} - -// loadRemoteConfig parses and validates the remote config, both syntactically and semantically. -func loadRemoteConfig(remoteConfigBytes []byte, expandEnvVars bool, fs *flag.FlagSet) (*Config, error) { - expandedRemoteConfigBytes, err := performEnvVarExpansion(remoteConfigBytes, expandEnvVars) - if err != nil { - instrumentation.InstrumentInvalidRemoteConfig("env_var_expansion") - return nil, fmt.Errorf("could not expand env vars for remote config: %w", err) - } - - remoteConfig, err := NewRemoteConfig(expandedRemoteConfigBytes) - if err != nil { - instrumentation.InstrumentInvalidRemoteConfig("invalid_yaml") - return nil, fmt.Errorf("could not unmarshal remote config: %w", err) - } - - config, err := remoteConfig.BuildAgentConfig() - if err != nil { - instrumentation.InstrumentInvalidRemoteConfig("invalid_remote_config") - return nil, fmt.Errorf("could not build agent config: %w", err) - } - - if err = config.Validate(fs); err != nil { - instrumentation.InstrumentInvalidRemoteConfig("semantically_invalid_agent_config") - return nil, fmt.Errorf("semantically invalid config received from the API: %w", err) - } - return config, nil -} - -// newRemoteConfigProvider creates a remoteConfigProvider based on the protocol -// specified in c.AgentManagement -func newRemoteConfigProvider(c *Config) (*remoteConfigHTTPProvider, error) { - switch p := c.AgentManagement.Protocol; { - case p == "https" || p == "http": - return newRemoteConfigHTTPProvider(c) - default: - return nil, fmt.Errorf("unsupported protocol for agent management api: %s", p) - } -} - -// fullUrl creates and returns the URL that should be used when querying the Agent Management API, -// including the namespace, base config id, and any labels that have been specified. -func (am *AgentManagementConfig) fullUrl() (string, error) { - fullPath, err := url.JoinPath(am.Protocol+"://", am.Host, apiPath, "namespace", am.RemoteConfiguration.Namespace, "remote_config") - if err != nil { - return "", fmt.Errorf("error trying to join url: %w", err) - } - u, err := url.Parse(fullPath) - if err != nil { - return "", fmt.Errorf("error trying to parse url: %w", err) - } - q := u.Query() - for label, value := range am.RemoteConfiguration.Labels { - q.Add(label, value) - } - u.RawQuery = q.Encode() - return u.String(), nil -} - -// SleepTime returns the duration in between config fetches. -func (am *AgentManagementConfig) SleepTime() time.Duration { - return am.PollingInterval -} - -// JitterTime returns a random duration in the range [0, am.PollingInterval). -func (am *AgentManagementConfig) JitterTime() time.Duration { - return time.Duration(rand.Int63n(int64(am.PollingInterval))) -} - -// Validate checks that necessary portions of the config have been set. 
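// In particular, basic_auth must carry both a username and a password_file
// for the config to pass validation, as checked immediately below.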
func (am *AgentManagementConfig) Validate() error { if am.HTTPClientConfig.BasicAuth == nil || am.HTTPClientConfig.BasicAuth.Username == "" || am.HTTPClientConfig.BasicAuth.PasswordFile == "" { diff --git a/internal/static/config/agentmanagement_remote_config.go b/internal/static/config/agentmanagement_remote_config.go deleted file mode 100644 index 6e658e7053..0000000000 --- a/internal/static/config/agentmanagement_remote_config.go +++ /dev/null @@ -1,179 +0,0 @@ -package config - -import ( - "bytes" - "text/template" - - "github.com/grafana/agent/internal/static/integrations" - "github.com/grafana/agent/internal/static/logs" - "github.com/grafana/agent/internal/static/metrics/instance" - "github.com/grafana/loki/clients/pkg/promtail/scrapeconfig" - "github.com/prometheus/common/model" - pc "github.com/prometheus/prometheus/config" - "github.com/prometheus/prometheus/model/labels" - "gopkg.in/yaml.v2" -) - -type ( - RemoteConfig struct { - BaseConfig BaseConfigContent `json:"base_config" yaml:"base_config"` - Snippets []Snippet `json:"snippets" yaml:"snippets"` - AgentMetadata AgentMetadata `json:"agent_metadata,omitempty" yaml:"agent_metadata,omitempty"` - } - - // BaseConfigContent is the content of a base config - BaseConfigContent string - - // Snippet is a snippet of configuration returned by the config API. - Snippet struct { - // Config is the snippet of config to be included. - Config string `json:"config" yaml:"config"` - } - - AgentMetadata struct { - ExternalLabels map[string]string `json:"external_labels,omitempty" yaml:"external_labels,omitempty"` - TemplateVariables map[string]any `json:"template_variables,omitempty" yaml:"template_variables,omitempty"` - } - - // SnippetContent defines the internal structure of a snippet configuration. - SnippetContent struct { - // MetricsScrapeConfigs is a YAML containing list of metrics scrape configs. - MetricsScrapeConfigs []*pc.ScrapeConfig `yaml:"metrics_scrape_configs,omitempty"` - - // LogsScrapeConfigs is a YAML containing list of logs scrape configs. - LogsScrapeConfigs []scrapeconfig.Config `yaml:"logs_scrape_configs,omitempty"` - - // IntegrationConfigs is a YAML containing list of integrations. 
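// For example, a snippet carrying an integration can look like:
//
//   integration_configs:
//     agent:
//       enabled: true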
- IntegrationConfigs integrations.ManagerConfig `yaml:"integration_configs,omitempty"` - } -) - -func NewRemoteConfig(buf []byte) (*RemoteConfig, error) { - rc := &RemoteConfig{} - err := yaml.Unmarshal(buf, rc) - if err != nil { - return nil, err - } - return rc, nil -} - -// BuildAgentConfig builds an agent configuration from a base config and a list of snippets -func (rc *RemoteConfig) BuildAgentConfig() (*Config, error) { - baseConfig, err := evaluateTemplate(string(rc.BaseConfig), rc.AgentMetadata.TemplateVariables) - if err != nil { - return nil, err - } - - c := DefaultConfig() - err = yaml.Unmarshal([]byte(baseConfig), &c) - if err != nil { - return nil, err - } - - // For now Agent Management only supports integrations v1 - if err := c.Integrations.setVersion(IntegrationsVersion1); err != nil { - return nil, err - } - - err = appendSnippets(&c, rc.Snippets, rc.AgentMetadata.TemplateVariables) - if err != nil { - return nil, err - } - appendExternalLabels(&c, rc.AgentMetadata.ExternalLabels) - return &c, nil -} - -func appendSnippets(c *Config, snippets []Snippet, templateVars map[string]any) error { - metricsConfigs := instance.DefaultConfig - metricsConfigs.Name = "snippets" - logsConfigs := logs.InstanceConfig{ - Name: "snippets", - ScrapeConfig: []scrapeconfig.Config{}, - } - logsConfigs.Initialize() - integrationConfigs := integrations.DefaultManagerConfig() - - // Map used to identify if an integration is already configured and avoid overriding it - configuredIntegrations := map[string]bool{} - for _, itg := range c.Integrations.ConfigV1.Integrations { - configuredIntegrations[itg.Name()] = true - } - - for _, snippet := range snippets { - snippetConfig, err := evaluateTemplate(snippet.Config, templateVars) - if err != nil { - return err - } - - var snippetContent SnippetContent - err = yaml.Unmarshal([]byte(snippetConfig), &snippetContent) - if err != nil { - return err - } - metricsConfigs.ScrapeConfigs = append(metricsConfigs.ScrapeConfigs, snippetContent.MetricsScrapeConfigs...) - logsConfigs.ScrapeConfig = append(logsConfigs.ScrapeConfig, snippetContent.LogsScrapeConfigs...) - - for _, snip := range snippetContent.IntegrationConfigs.Integrations { - if _, ok := configuredIntegrations[snip.Name()]; !ok { - integrationConfigs.Integrations = append(integrationConfigs.Integrations, snip) - configuredIntegrations[snip.Name()] = true - } - } - } - - if len(metricsConfigs.ScrapeConfigs) > 0 { - c.Metrics.Configs = append(c.Metrics.Configs, metricsConfigs) - } - - if len(logsConfigs.ScrapeConfig) > 0 { - // rc.Config.Logs is initialized as nil, so we need to check if it's nil before appending - if c.Logs == nil { - c.Logs = &logs.Config{ - Configs: []*logs.InstanceConfig{}, - } - } - c.Logs.Configs = append(c.Logs.Configs, &logsConfigs) - } - - c.Integrations.ConfigV1.Integrations = append(c.Integrations.ConfigV1.Integrations, integrationConfigs.Integrations...) 
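// Integrations collected from snippets are appended after the base
// config's own; the configuredIntegrations map above ensures a snippet
// never overrides an integration already defined in the base config.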
- return nil -} - -func appendExternalLabels(c *Config, externalLabels map[string]string) { - // Avoid doing anything if there are no external labels - if len(externalLabels) == 0 { - return - } - // Start off with the existing external labels, which will only be added to (not replaced) - metricsExternalLabels := c.Metrics.Global.Prometheus.ExternalLabels.Map() - for k, v := range externalLabels { - if _, ok := metricsExternalLabels[k]; !ok { - metricsExternalLabels[k] = v - } - } - - logsExternalLabels := make(model.LabelSet) - for k, v := range externalLabels { - logsExternalLabels[model.LabelName(k)] = model.LabelValue(v) - } - - c.Metrics.Global.Prometheus.ExternalLabels = labels.FromMap(metricsExternalLabels) - for i, cc := range c.Logs.Global.ClientConfigs { - c.Logs.Global.ClientConfigs[i].ExternalLabels.LabelSet = logsExternalLabels.Merge(cc.ExternalLabels.LabelSet) - } -} - -func evaluateTemplate(config string, templateVariables map[string]any) (string, error) { - tpl, err := template.New("config").Parse(config) - if err != nil { - return "", err - } - - var buf bytes.Buffer - err = tpl.Execute(&buf, templateVariables) - if err != nil { - return "", err - } - - return buf.String(), nil -} diff --git a/internal/static/config/agentmanagement_test.go b/internal/static/config/agentmanagement_test.go index 2cae67cfa6..28da655d27 100644 --- a/internal/static/config/agentmanagement_test.go +++ b/internal/static/config/agentmanagement_test.go @@ -1,57 +1,14 @@ package config import ( - "crypto/sha256" - "encoding/hex" - "errors" - "flag" "testing" "time" - "github.com/grafana/agent/internal/static/config/features" - "github.com/grafana/agent/internal/static/server" - "github.com/grafana/agent/internal/util" "github.com/prometheus/common/config" "github.com/stretchr/testify/assert" - "github.com/stretchr/testify/require" "gopkg.in/yaml.v2" ) -// testRemoteConfigProvider is an implementation of remoteConfigProvider that can be -// used for testing. It allows setting the values to return for both fetching the -// remote config bytes & errors as well as the cached config & errors. 
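// A typical test wires the provider up in the style of the cases below:
//
//   testProvider := testRemoteConfigProvider{InitialConfig: &am}
//   testProvider.fetchedConfigErrorToReturn = errors.New("connection refused")
//   testProvider.cachedConfigToReturn = cachedConfig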
-type testRemoteConfigProvider struct { - InitialConfig *AgentManagementConfig - - fetchedConfigBytesToReturn []byte - fetchedConfigErrorToReturn error - fetchRemoteConfigCallCount int - - cachedConfigToReturn []byte - cachedConfigErrorToReturn error - getCachedConfigCallCount int - didCacheRemoteConfig bool -} - -func (t *testRemoteConfigProvider) GetCachedRemoteConfig() ([]byte, error) { - t.getCachedConfigCallCount += 1 - return t.cachedConfigToReturn, t.cachedConfigErrorToReturn -} - -func (t *testRemoteConfigProvider) FetchRemoteConfig() ([]byte, error) { - t.fetchRemoteConfigCallCount += 1 - return t.fetchedConfigBytesToReturn, t.fetchedConfigErrorToReturn -} - -func (t *testRemoteConfigProvider) CacheRemoteConfig(r []byte) error { - t.didCacheRemoteConfig = true - return nil -} - -func (t *testRemoteConfigProvider) GetPollingInterval() time.Duration { - return t.InitialConfig.PollingInterval -} - var validAgentManagementConfig = AgentManagementConfig{ Enabled: true, Host: "localhost:1234", @@ -71,8 +28,6 @@ var validAgentManagementConfig = AgentManagementConfig{ }, } -var cachedConfig = []byte(`{"base_config":"","snippets":[]}`) - func TestUnmarshalDefault(t *testing.T) { cfg := `host: "localhost:1234" protocol: "https" @@ -162,418 +117,3 @@ func TestValidateLabelManagement(t *testing.T) { cfg.RemoteConfiguration.AgentID = "test_agent_id" assert.NoError(t, cfg.Validate()) } - -func TestSleepTime(t *testing.T) { - cfg := ` -api_url: "http://localhost" -basic_auth: - username: "initial_user" -protocol: "http" -polling_interval: "1m" -remote_configuration: - namespace: "new_namespace" - cache_location: "/etc"` - - var am AgentManagementConfig - yaml.Unmarshal([]byte(cfg), &am) - assert.Equal(t, time.Minute, am.SleepTime()) -} - -func TestFuzzJitterTime(t *testing.T) { - am := validAgentManagementConfig - pollingInterval := 2 * time.Minute - am.PollingInterval = pollingInterval - - zero := time.Duration(0) - - for i := 0; i < 10_000; i++ { - j := am.JitterTime() - assert.GreaterOrEqual(t, j, zero) - assert.Less(t, j, pollingInterval) - } -} - -func TestFullUrl(t *testing.T) { - c := validAgentManagementConfig - actual, err := c.fullUrl() - assert.NoError(t, err) - assert.Equal(t, "https://localhost:1234/agent-management/api/agent/v2/namespace/test_namespace/remote_config?a=A&b=B", actual) -} - -func TestRemoteConfigHashCheck(t *testing.T) { - // not a truly valid Agent Management config, but used for testing against - // precomputed sha256 hash - ic := AgentManagementConfig{ - Protocol: "http", - } - marshalled, err := yaml.Marshal(ic) - require.NoError(t, err) - icHashBytes := sha256.Sum256(marshalled) - icHash := hex.EncodeToString(icHashBytes[:]) - - rcCache := remoteConfigCache{ - InitialConfigHash: icHash, - Config: "server:\\n log_level: debug", - } - - require.NoError(t, initialConfigHashCheck(ic, rcCache)) - rcCache.InitialConfigHash = "abc" - require.Error(t, initialConfigHashCheck(ic, rcCache)) - - differentIc := validAgentManagementConfig - require.Error(t, initialConfigHashCheck(differentIc, rcCache)) -} - -func TestNewRemoteConfigProvider_ValidInitialConfig(t *testing.T) { - invalidAgentManagementConfig := &AgentManagementConfig{ - Enabled: true, - Host: "localhost:1234", - HTTPClientConfig: config.HTTPClientConfig{ - BasicAuth: &config.BasicAuth{ - Username: "test", - PasswordFile: "/test/path", - }, - }, - Protocol: "https", - PollingInterval: time.Minute, - RemoteConfiguration: RemoteConfiguration{ - Labels: labelMap{"b": "B", "a": "A"}, - Namespace: "test_namespace", - 
CacheLocation: "/test/path/", - }, - } - - cfg := Config{ - AgentManagement: *invalidAgentManagementConfig, - } - _, err := newRemoteConfigProvider(&cfg) - assert.NoError(t, err) -} - -func TestNewRemoteConfigProvider_InvalidProtocol(t *testing.T) { - invalidAgentManagementConfig := &AgentManagementConfig{ - Enabled: true, - Host: "localhost:1234", - HTTPClientConfig: config.HTTPClientConfig{ - BasicAuth: &config.BasicAuth{ - Username: "test", - PasswordFile: "/test/path", - }, - }, - Protocol: "ws", - PollingInterval: time.Minute, - RemoteConfiguration: RemoteConfiguration{ - Labels: labelMap{"b": "B", "a": "A"}, - Namespace: "test_namespace", - CacheLocation: "/test/path/", - }, - } - - cfg := Config{ - AgentManagement: *invalidAgentManagementConfig, - } - _, err := newRemoteConfigProvider(&cfg) - assert.Error(t, err) -} - -func TestNewRemoteConfigHTTPProvider_InvalidInitialConfig(t *testing.T) { - // this is invalid because it is missing the password file - invalidAgentManagementConfig := &AgentManagementConfig{ - Enabled: true, - Host: "localhost:1234", - HTTPClientConfig: config.HTTPClientConfig{ - BasicAuth: &config.BasicAuth{ - Username: "test", - }, - }, - Protocol: "https", - PollingInterval: time.Minute, - RemoteConfiguration: RemoteConfiguration{ - Labels: labelMap{"b": "B", "a": "A"}, - Namespace: "test_namespace", - CacheLocation: "/test/path/", - }, - } - - cfg := Config{ - AgentManagement: *invalidAgentManagementConfig, - } - _, err := newRemoteConfigHTTPProvider(&cfg) - assert.Error(t, err) -} - -func TestGetRemoteConfig_UnmarshallableRemoteConfig(t *testing.T) { - defaultCfg := DefaultConfig() - brokenCfg := `completely invalid config (maybe it got corrupted, maybe it was somehow set this way)` - - invalidCfgBytes := []byte(brokenCfg) - - am := validAgentManagementConfig - logger := server.NewLogger(defaultCfg.Server) - testProvider := testRemoteConfigProvider{InitialConfig: &am} - testProvider.fetchedConfigBytesToReturn = invalidCfgBytes - testProvider.cachedConfigToReturn = cachedConfig - - // flagset is required because some default values are extracted from it. - // In addition, some flags are defined as dependencies for validation - fs := flag.NewFlagSet("test", flag.ExitOnError) - features.Register(fs, allFeatures) - defaultCfg.RegisterFlags(fs) - - cfg, err := getRemoteConfig(true, &testProvider, logger, fs, false) - assert.NoError(t, err) - assert.False(t, testProvider.didCacheRemoteConfig) - - // check that the returned config is the cached one - // Note: Validate is required for the comparison as it mutates the config - expected := defaultCfg - expected.Validate(fs) - assert.True(t, util.CompareYAML(*cfg, expected)) -} - -func TestGetRemoteConfig_RemoteFetchFails(t *testing.T) { - defaultCfg := DefaultConfig() - - am := validAgentManagementConfig - logger := server.NewLogger(defaultCfg.Server) - testProvider := testRemoteConfigProvider{InitialConfig: &am} - testProvider.fetchedConfigErrorToReturn = errors.New("connection refused") - testProvider.cachedConfigToReturn = cachedConfig - - // flagset is required because some default values are extracted from it. 
- // In addition, some flags are defined as dependencies for validation - fs := flag.NewFlagSet("test", flag.ExitOnError) - features.Register(fs, allFeatures) - defaultCfg.RegisterFlags(fs) - - cfg, err := getRemoteConfig(true, &testProvider, logger, fs, false) - assert.NoError(t, err) - assert.False(t, testProvider.didCacheRemoteConfig) - - // check that the returned config is the cached one - // Note: Validate is required for the comparison as it mutates the config - expected := defaultCfg - expected.Validate(fs) - assert.True(t, util.CompareYAML(*cfg, expected)) -} - -func TestGetRemoteConfig_SemanticallyInvalidBaseConfig(t *testing.T) { - defaultCfg := DefaultConfig() - - // this is semantically invalid because it has two scrape_configs with - // the same job_name - invalidConfig := ` -{ - "base_config": "metrics:\n configs:\n - name: Metrics Snippets\n scrape_configs:\n - job_name: 'prometheus'\n scrape_interval: 15s\n static_configs:\n - targets: ['localhost:12345']\n - job_name: 'prometheus'\n scrape_interval: 15s\n static_configs:\n - targets: ['localhost:12345']\n", - "snippets": [] -}` - invalidCfgBytes := []byte(invalidConfig) - - am := validAgentManagementConfig - logger := server.NewLogger(defaultCfg.Server) - testProvider := testRemoteConfigProvider{InitialConfig: &am} - testProvider.fetchedConfigBytesToReturn = invalidCfgBytes - testProvider.cachedConfigToReturn = cachedConfig - - // flagset is required because some default values are extracted from it. - // In addition, some flags are defined as dependencies for validation - fs := flag.NewFlagSet("test", flag.ExitOnError) - features.Register(fs, allFeatures) - defaultCfg.RegisterFlags(fs) - - cfg, err := getRemoteConfig(true, &testProvider, logger, fs, false) - assert.NoError(t, err) - assert.False(t, testProvider.didCacheRemoteConfig) - - // check that the returned config is the cached one - // Note: Validate is required for the comparison as it mutates the config - expected := defaultCfg - expected.Validate(fs) - assert.True(t, util.CompareYAML(*cfg, expected)) -} - -func TestGetRemoteConfig_InvalidSnippet(t *testing.T) { - defaultCfg := DefaultConfig() - - // this is semantically invalid because it has two scrape_configs with - // the same job_name - invalidConfig := ` -{ - "base_config": "server:\n log_level: info\n log_format: logfmt\n", - "snippets": [ - { - "config": "metrics_scrape_configs:\n- job_name: 'prometheus'\n- job_name: 'prometheus'\n" - } - ] -}` - invalidCfgBytes := []byte(invalidConfig) - - am := validAgentManagementConfig - logger := server.NewLogger(defaultCfg.Server) - testProvider := testRemoteConfigProvider{InitialConfig: &am} - testProvider.fetchedConfigBytesToReturn = invalidCfgBytes - testProvider.cachedConfigToReturn = cachedConfig - - // flagset is required because some default values are extracted from it. 
- // In addition, some flags are defined as dependencies for validation - fs := flag.NewFlagSet("test", flag.ExitOnError) - features.Register(fs, allFeatures) - defaultCfg.RegisterFlags(fs) - - cfg, err := getRemoteConfig(true, &testProvider, logger, fs, false) - assert.NoError(t, err) - assert.False(t, testProvider.didCacheRemoteConfig) - - // check that the returned config is the cached one - // Note: Validate is required for the comparison as it mutates the config - expected := defaultCfg - expected.Validate(fs) - assert.True(t, util.CompareYAML(*cfg, expected)) -} - -func TestGetRemoteConfig_EmptyBaseConfig(t *testing.T) { - defaultCfg := DefaultConfig() - - validConfig := ` -{ - "base_config": "", - "snippets": [] -}` - cfgBytes := []byte(validConfig) - am := validAgentManagementConfig - logger := server.NewLogger(defaultCfg.Server) - testProvider := testRemoteConfigProvider{InitialConfig: &am} - testProvider.fetchedConfigBytesToReturn = cfgBytes - testProvider.cachedConfigToReturn = cachedConfig - - fs := flag.NewFlagSet("test", flag.ExitOnError) - features.Register(fs, allFeatures) - defaultCfg.RegisterFlags(fs) - - cfg, err := getRemoteConfig(true, &testProvider, logger, fs, false) - assert.NoError(t, err) - assert.True(t, testProvider.didCacheRemoteConfig) - - // check that the returned config is not the cached one - assert.NotEqual(t, "debug", cfg.Server.LogLevel.String()) -} - -func TestGetRemoteConfig_ValidBaseConfig(t *testing.T) { - defaultCfg := DefaultConfig() - validConfig := ` -{ - "base_config": "server:\n log_level: debug\n log_format: logfmt\nlogs:\n positions_directory: /tmp\n global:\n clients:\n - basic_auth:\n password_file: key.txt\n username: 278220\n url: https://logs-prod-eu-west-0.grafana.net/loki/api/v1/push\nintegrations:\n agent:\n enabled: false\n", - "snippets": [ - { - "config": "metrics_scrape_configs:\n- job_name: 'prometheus'\n scrape_interval: 15s\n static_configs:\n - targets: ['localhost:12345']\nlogs_scrape_configs:\n- job_name: yologs\n static_configs:\n - targets: [localhost]\n labels:\n job: yologs\n __path__: /tmp/yo.log\n", - "selector": { - "hostname": "machine-1", - "team": "team-a" - } - } - ] -}` - cfgBytes := []byte(validConfig) - am := validAgentManagementConfig - logger := server.NewLogger(defaultCfg.Server) - testProvider := testRemoteConfigProvider{InitialConfig: &am} - testProvider.fetchedConfigBytesToReturn = cfgBytes - testProvider.cachedConfigToReturn = cachedConfig - - fs := flag.NewFlagSet("test", flag.ExitOnError) - features.Register(fs, allFeatures) - defaultCfg.RegisterFlags(fs) - - cfg, err := getRemoteConfig(true, &testProvider, logger, fs, false) - assert.NoError(t, err) - assert.True(t, testProvider.didCacheRemoteConfig) - - // check that the returned config is not the cached one - assert.False(t, util.CompareYAML(*cfg, defaultCfg)) - - // check some fields to make sure the config was parsed correctly - assert.Equal(t, "debug", cfg.Server.LogLevel.String()) - assert.Equal(t, "278220", cfg.Logs.Global.ClientConfigs[0].Client.BasicAuth.Username) - assert.Equal(t, "prometheus", cfg.Metrics.Configs[0].ScrapeConfigs[0].JobName) - assert.Equal(t, "yologs", cfg.Logs.Configs[0].ScrapeConfig[0].JobName) - assert.Equal(t, 1, len(cfg.Integrations.ConfigV1.Integrations)) -} - -func TestGetRemoteConfig_ExpandsEnvVars(t *testing.T) { - defaultCfg := DefaultConfig() - validConfig := ` -{ - "base_config": "server:\n log_level: info\n log_format: ${LOG_FORMAT}\nlogs:\n positions_directory: /tmp\n global:\n clients:\n - basic_auth:\n 
password_file: key.txt\n username: 278220\n url: https://logs-prod-eu-west-0.grafana.net/loki/api/v1/push\nintegrations:\n agent:\n enabled: false\n", - "snippets": [ - { - "config": "metrics_scrape_configs:\n- job_name: 'prometheus'\n scrape_interval: ${SCRAPE_INTERVAL}\n static_configs:\n - targets: ['localhost:12345']\n", - "selector": { - "hostname": "machine-1", - "team": "team-a" - } - } - ] -}` - t.Setenv("SCRAPE_INTERVAL", "15s") - t.Setenv("LOG_FORMAT", "json") - - cfgBytes := []byte(validConfig) - am := validAgentManagementConfig - logger := server.NewLogger(defaultCfg.Server) - testProvider := testRemoteConfigProvider{InitialConfig: &am} - testProvider.fetchedConfigBytesToReturn = cfgBytes - testProvider.cachedConfigToReturn = cachedConfig - - fs := flag.NewFlagSet("test", flag.ExitOnError) - var configExpandEnv bool - fs.BoolVar(&configExpandEnv, "config.expand-env", false, "") - features.Register(fs, allFeatures) - defaultCfg.RegisterFlags(fs) - - cfg, err := getRemoteConfig(true, &testProvider, logger, fs, false) - assert.NoError(t, err) - assert.Equal(t, "15s", cfg.Metrics.Configs[0].ScrapeConfigs[0].ScrapeInterval.String()) - assert.Equal(t, "json", cfg.Server.LogFormat) -} - -func TestGetCachedConfig_DefaultConfigFallback(t *testing.T) { - defaultCfg := DefaultConfig() - am := validAgentManagementConfig - logger := server.NewLogger(defaultCfg.Server) - testProvider := testRemoteConfigProvider{InitialConfig: &am} - testProvider.cachedConfigErrorToReturn = errors.New("no cached config") - - fs := flag.NewFlagSet("test", flag.ExitOnError) - features.Register(fs, allFeatures) - defaultCfg.RegisterFlags(fs) - - cfg, err := getCachedRemoteConfig(true, &testProvider, fs, logger) - assert.NoError(t, err) - - // check that the returned config is the default one - assert.True(t, util.CompareYAML(*cfg, defaultCfg)) -} - -func TestGetCachedConfig_RetryAfter(t *testing.T) { - defaultCfg := DefaultConfig() - am := validAgentManagementConfig - logger := server.NewLogger(defaultCfg.Server) - testProvider := testRemoteConfigProvider{InitialConfig: &am} - testProvider.fetchedConfigErrorToReturn = retryAfterError{retryAfter: time.Duration(0)} - testProvider.cachedConfigToReturn = cachedConfig - - fs := flag.NewFlagSet("test", flag.ExitOnError) - features.Register(fs, allFeatures) - defaultCfg.RegisterFlags(fs) - - _, err := getRemoteConfig(true, &testProvider, logger, fs, true) - assert.NoError(t, err) - assert.False(t, testProvider.didCacheRemoteConfig) - - // check that FetchRemoteConfig was called only once on the TestProvider - assert.Equal(t, 1, testProvider.fetchRemoteConfigCallCount) - - // the cached config should have been retrieved once, on the second - // attempt to fetch the remote config - assert.Equal(t, 1, testProvider.getCachedConfigCallCount) -} diff --git a/internal/static/config/config.go b/internal/static/config/config.go index d6f732c2af..2ec8f4f590 100644 --- a/internal/static/config/config.go +++ b/internal/static/config/config.go @@ -10,18 +10,14 @@ import ( "unicode" "github.com/drone/envsubst/v2" - "github.com/go-kit/log" - "github.com/go-kit/log/level" "github.com/grafana/agent/internal/build" "github.com/grafana/agent/internal/static/config/encoder" "github.com/grafana/agent/internal/static/config/features" - "github.com/grafana/agent/internal/static/config/instrumentation" "github.com/grafana/agent/internal/static/logs" "github.com/grafana/agent/internal/static/metrics" "github.com/grafana/agent/internal/static/server" 
"github.com/grafana/agent/internal/static/traces" "github.com/grafana/agent/internal/util" - "github.com/prometheus/common/config" "github.com/stretchr/testify/require" "gopkg.in/yaml.v2" ) @@ -166,14 +162,6 @@ func (c Config) MarshalYAML() (interface{}, error) { return m, nil } -// LogDeprecations will log use of any deprecated fields to l as warn-level -// messages. -func (c *Config) LogDeprecations(l log.Logger) { - for _, d := range c.Deprecations { - level.Warn(l).Log("msg", fmt.Sprintf("DEPRECATION NOTICE: %s", d)) - } -} - // Validate validates the config, flags, and sets default values. func (c *Config) Validate(fs *flag.FlagSet) error { if c.Server == nil { @@ -243,101 +231,6 @@ func (c *Config) RegisterFlags(f *flag.FlagSet) { f.BoolVar(&c.EnableConfigEndpoints, "config.enable-read-api", false, "Enables the /-/config and /agent/api/v1/configs/{name} APIs. Be aware that secrets could be exposed by enabling these endpoints!") } -// LoadFile reads a file and passes the contents to Load -func LoadFile(filename string, expandEnvVars bool, c *Config) error { - buf, err := os.ReadFile(filename) - if err != nil { - return fmt.Errorf("error reading config file %w", err) - } - instrumentation.InstrumentConfig(buf) - return LoadBytes(buf, expandEnvVars, c) -} - -// loadFromAgentManagementAPI loads and merges a config from an Agent Management API. -// 1. Read local initial config. -// 2. Get the remote config. -// a) Fetch from remote. If this fails or is invalid: -// b) Read the remote config from cache. If this fails, return an error. -// 4. Merge the initial and remote config into c. -func loadFromAgentManagementAPI(path string, expandEnvVars bool, c *Config, log *server.Logger, fs *flag.FlagSet) error { - // Load the initial config from disk without instrumenting the config hash - buf, err := os.ReadFile(path) - if err != nil { - return fmt.Errorf("error reading initial config file %w", err) - } - - err = LoadBytes(buf, expandEnvVars, c) - if err != nil { - return fmt.Errorf("failed to load initial config: %w", err) - } - - configProvider, err := newRemoteConfigProvider(c) - if err != nil { - return err - } - remoteConfig, err := getRemoteConfig(expandEnvVars, configProvider, log, fs, true) - if err != nil { - return err - } - mergeEffectiveConfig(c, remoteConfig) - - effectiveConfigBytes, err := yaml.Marshal(c) - if err != nil { - level.Warn(log).Log("msg", "error marshalling config for instrumenting config version", "err", err) - } else { - instrumentation.InstrumentConfig(effectiveConfigBytes) - } - - return nil -} - -// mergeEffectiveConfig overwrites any values in initialConfig with those in remoteConfig -func mergeEffectiveConfig(initialConfig *Config, remoteConfig *Config) { - initialConfig.Server = remoteConfig.Server - initialConfig.Metrics = remoteConfig.Metrics - initialConfig.Integrations = remoteConfig.Integrations - initialConfig.Traces = remoteConfig.Traces - initialConfig.Logs = remoteConfig.Logs -} - -// LoadRemote reads a config from url -func LoadRemote(url string, expandEnvVars bool, c *Config) error { - remoteOpts := &remoteOpts{} - if c.BasicAuthUser != "" && c.BasicAuthPassFile != "" { - remoteOpts.HTTPClientConfig = &config.HTTPClientConfig{ - BasicAuth: &config.BasicAuth{ - Username: c.BasicAuthUser, - PasswordFile: c.BasicAuthPassFile, - }, - } - } - - if remoteOpts.HTTPClientConfig != nil { - dir, err := os.Getwd() - if err != nil { - return fmt.Errorf("failed to get current working directory: %w", err) - } - remoteOpts.HTTPClientConfig.SetDirectory(dir) - } - 
- rc, err := newRemoteProvider(url, remoteOpts) - if err != nil { - return fmt.Errorf("error reading remote config: %w", err) - } - // fall back to file if no scheme is passed - if rc == nil { - return LoadFile(url, expandEnvVars, c) - } - bb, _, err := rc.retrieve() - if err != nil { - return fmt.Errorf("error retrieving remote config: %w", err) - } - - instrumentation.InstrumentConfig(bb) - - return LoadBytes(bb, expandEnvVars, c) -} - func performEnvVarExpansion(buf []byte, expandEnvVars bool) ([]byte, error) { utf8Buf, err := encoder.EnsureUTF8(buf, false) if err != nil { @@ -385,29 +278,6 @@ func getenv(name string) string { return os.Getenv(name) } -// Load loads a config file from a flagset. Flags will be registered -// to the flagset before parsing them with the values specified by -// args. -func Load(fs *flag.FlagSet, args []string, log *server.Logger) (*Config, error) { - cfg, error := LoadFromFunc(fs, args, func(path, fileType string, expandEnvVars bool, c *Config) error { - switch fileType { - case fileTypeYAML: - if features.Enabled(fs, featRemoteConfigs) { - return LoadRemote(path, expandEnvVars, c) - } - if features.Enabled(fs, featAgentManagement) { - return loadFromAgentManagementAPI(path, expandEnvVars, c, log, fs) - } - return LoadFile(path, expandEnvVars, c) - default: - return fmt.Errorf("unknown file type %q. accepted values: %s", fileType, strings.Join(fileTypes, ", ")) - } - }) - - instrumentation.InstrumentLoad(error == nil) - return cfg, error -} - type loaderFunc func(path string, fileType string, expandEnvVars bool, target *Config) error func applyIntegrationValuesFromFlagset(fs *flag.FlagSet, args []string, path string, cfg *Config) error { diff --git a/internal/static/config/config_test.go b/internal/static/config/config_test.go index 8b7e7aef72..364f2f2513 100644 --- a/internal/static/config/config_test.go +++ b/internal/static/config/config_test.go @@ -13,13 +13,11 @@ import ( "github.com/grafana/agent/internal/static/config/encoder" "github.com/grafana/agent/internal/static/metrics" "github.com/grafana/agent/internal/static/metrics/instance" - "github.com/grafana/agent/internal/static/server" "github.com/grafana/agent/internal/util" commonCfg "github.com/prometheus/common/config" "github.com/prometheus/common/model" promCfg "github.com/prometheus/prometheus/config" "github.com/prometheus/prometheus/model/labels" - "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" "gopkg.in/yaml.v2" ) @@ -457,71 +455,18 @@ func TestAgent_OmitEmptyFields(t *testing.T) { require.Equal(t, "{}\n", string(yml)) } -func TestAgentManagement_MergeEffectiveConfig(t *testing.T) { - initialCfg := ` -server: - log_level: info -logs: - positions_directory: /tmp -agent_management: - host: "localhost" - basic_auth: - username: "initial_user" - protocol: "http" - polling_interval: "1m" - remote_configuration: - namespace: "new_namespace" - cache_location: "/etc"` - - remoteCfg := ` -server: - log_level: debug -metrics: - wal_directory: /tmp - global: - scrape_interval: 5m -integrations: - scrape_integrations: true - -agent_management: - host: "localhost:80" - basic_auth: - username: "new_user" - protocol: "http" - polling_interval: "10s" - remote_configuration: - namespace: "new_namespace" - cache_location: "/etc"` - - var ic, rc Config - err := LoadBytes([]byte(initialCfg), false, &ic) - assert.NoError(t, err) - err = LoadBytes([]byte(remoteCfg), false, &rc) - assert.NoError(t, err) - - // keep a copy of the initial config's agent management block to ensure it 
isn't
-	// overwritten by the remote config's
-	initialAgentManagement := ic.AgentManagement
-	mergeEffectiveConfig(&ic, &rc)
-
-	// agent_management configuration should not be overwritten by the remote config
-	assert.Equal(t, initialAgentManagement, ic.AgentManagement)
-
-	// since these elements are purposefully different for the previous portion of the test,
-	// unset them before comparing the rest of the config
-	ic.AgentManagement = AgentManagementConfig{}
-	rc.AgentManagement = AgentManagementConfig{}
-
-	assert.True(t, util.CompareYAML(ic, rc))
-}
-
 func TestConfig_EmptyServerConfigFails(t *testing.T) {
 	// Since we are testing defaults via config.Load, we need a file instead of a string.
 	// This test file has an empty server stanza, we expect default values out.
-	defaultServerCfg := server.DefaultConfig()
-	logger := server.NewLogger(&defaultServerCfg)
 	fs := flag.NewFlagSet("", flag.ExitOnError)
-	_, err := Load(fs, []string{"--config.file", "./testdata/server_empty.yml"}, logger)
+
+	_, err := LoadFromFunc(fs, []string{"--config.file", "./testdata/server_empty.yml"}, func(path, fileType string, expandEnvVars bool, target *Config) error {
+		bb, err := os.ReadFile(path)
+		if err != nil {
+			return err
+		}
+		return LoadBytes(bb, expandEnvVars, target)
+	})
 	require.Error(t, err)
 }
 
diff --git a/internal/static/config/integrations.go b/internal/static/config/integrations.go
index c4ebb70ec3..f0d2652e6d 100644
--- a/internal/static/config/integrations.go
+++ b/internal/static/config/integrations.go
@@ -4,15 +4,11 @@ import (
 	"fmt"
 	"reflect"
 
-	"github.com/go-kit/log"
-	"github.com/gorilla/mux"
 	v1 "github.com/grafana/agent/internal/static/integrations"
 	v2 "github.com/grafana/agent/internal/static/integrations/v2"
 	"github.com/grafana/agent/internal/static/metrics"
 	"github.com/grafana/agent/internal/static/server"
 	"github.com/grafana/agent/internal/util"
-	"github.com/prometheus/statsd_exporter/pkg/level"
-	"golang.org/x/exp/maps"
 	"gopkg.in/yaml.v2"
 )
 
@@ -129,64 +125,3 @@ func (c *VersionedIntegrations) setVersion(v IntegrationsVersion) error {
 		panic(fmt.Sprintf("unknown integrations version %d", c.Version))
 	}
 }
-
-// EnabledIntegrations returns a slice of enabled integrations
-func (c *VersionedIntegrations) EnabledIntegrations() []string {
-	integrations := map[string]struct{}{}
-	if c.ConfigV1 != nil {
-		for _, integration := range c.ConfigV1.Integrations {
-			integrations[integration.Name()] = struct{}{}
-		}
-	}
-	if c.ConfigV2 != nil {
-		for _, integration := range c.ConfigV2.Configs {
-			integrations[integration.Name()] = struct{}{}
-		}
-	}
-	return maps.Keys(integrations)
-}
-
-// IntegrationsGlobals is a global struct shared across integrations.
-type IntegrationsGlobals = v2.Globals
-
-// Integrations is an abstraction over both the v1 and v2 systems.
-type Integrations interface {
-	ApplyConfig(*VersionedIntegrations, IntegrationsGlobals) error
-	WireAPI(*mux.Router)
-	Stop()
-}
-
-// NewIntegrations creates a new subsystem. globals should be provided regardless
-// of the integrations version in use. globals.SubsystemOpts will be automatically
-// set if cfg.Version is set to IntegrationsVersion2.
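-//
-// Based on cfg.Version it constructs either a v1 Manager or a v2 Subsystem and
-// wraps it to satisfy the Integrations interface. A minimal usage sketch
-// (caller-side names here are hypothetical):
-//
-//	integ, err := NewIntegrations(logger, &cfg.Integrations, globals)
-//	if err != nil {
-//		return err
-//	}
-//	integ.WireAPI(router)
-//	defer integ.Stop()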
-func NewIntegrations(logger log.Logger, cfg *VersionedIntegrations, globals IntegrationsGlobals) (Integrations, error) { - if cfg.Version != IntegrationsVersion2 { - instance, err := v1.NewManager(*cfg.ConfigV1, logger, globals.Metrics.InstanceManager(), globals.Metrics.Validate) - if err != nil { - return nil, err - } - return &v1Integrations{Manager: instance}, nil - } - - level.Warn(logger).Log("msg", "integrations-next is enabled. integrations-next is subject to change") - - globals.SubsystemOpts = *cfg.ConfigV2 - instance, err := v2.NewSubsystem(logger, globals) - if err != nil { - return nil, err - } - return &v2Integrations{Subsystem: instance}, nil -} - -type v1Integrations struct{ *v1.Manager } - -func (s *v1Integrations) ApplyConfig(cfg *VersionedIntegrations, _ IntegrationsGlobals) error { - return s.Manager.ApplyConfig(*cfg.ConfigV1) -} - -type v2Integrations struct{ *v2.Subsystem } - -func (s *v2Integrations) ApplyConfig(cfg *VersionedIntegrations, globals IntegrationsGlobals) error { - globals.SubsystemOpts = *cfg.ConfigV2 - return s.Subsystem.ApplyConfig(globals) -} diff --git a/internal/static/config/integrations_test.go b/internal/static/config/integrations_test.go index b0854f3219..d3537997e0 100644 --- a/internal/static/config/integrations_test.go +++ b/internal/static/config/integrations_test.go @@ -2,7 +2,6 @@ package config import ( "flag" - "sort" "testing" "github.com/stretchr/testify/require" @@ -46,74 +45,6 @@ integrations: require.NotNil(t, c.Integrations.ConfigV2) } -func TestEnabledIntegrations_v1(t *testing.T) { - cfg := ` -metrics: - wal_directory: /tmp/wal - -integrations: - agent: - enabled: true - node_exporter: - enabled: true` - - fs := flag.NewFlagSet("test", flag.ExitOnError) - c, err := LoadFromFunc(fs, []string{"-config.file", "test"}, func(_, _ string, _ bool, c *Config) error { - return LoadBytes([]byte(cfg), false, c) - }) - require.NoError(t, err) - - actual := c.Integrations.EnabledIntegrations() - sort.Strings(actual) - expected := []string{"agent", "node_exporter"} - sort.Strings(expected) - require.Equal(t, actual, expected) -} - -func TestEnabledIntegrations_v2(t *testing.T) { - cfg := ` -metrics: - wal_directory: /tmp/wal - -integrations: - agent: - autoscrape: - enable: false - node_exporter: - autoscrape: - enable: false` - - fs := flag.NewFlagSet("test", flag.ExitOnError) - c, err := LoadFromFunc(fs, []string{"-config.file", "test", "-enable-features=integrations-next"}, func(_, _ string, _ bool, c *Config) error { - return LoadBytes([]byte(cfg), false, c) - }) - require.NoError(t, err) - - actual := c.Integrations.EnabledIntegrations() - sort.Strings(actual) - expected := []string{"agent", "node_exporter"} - sort.Strings(expected) - require.Equal(t, actual, expected) -} - -func TestEnabledIntegrations_v2MultipleInstances(t *testing.T) { - cfg := ` -metrics: - wal_directory: /tmp/wal - -integrations: - redis_configs: - - redis_addr: "redis-0:6379" - - redis_addr: "redis-1:6379"` - - fs := flag.NewFlagSet("test", flag.ExitOnError) - c, err := LoadFromFunc(fs, []string{"-config.file", "test", "-enable-features=integrations-next"}, func(_, _ string, _ bool, c *Config) error { - return LoadBytes([]byte(cfg), false, c) - }) - require.NoError(t, err) - require.Equal(t, c.Integrations.EnabledIntegrations(), []string{"redis"}) -} - func TestSetVersionDoesNotOverrideExistingV1Integrations(t *testing.T) { cfg := ` integrations: diff --git a/internal/static/config/remote_config.go b/internal/static/config/remote_config.go deleted file mode 100644 
index 6b23c89ea1..0000000000 --- a/internal/static/config/remote_config.go +++ /dev/null @@ -1,145 +0,0 @@ -package config - -import ( - "fmt" - "io" - "net/http" - "net/url" - "time" - - "github.com/grafana/agent/internal/static/config/instrumentation" - "github.com/prometheus/common/config" -) - -// supported remote config provider schemes -const ( - httpScheme = "http" - httpsScheme = "https" -) - -// remoteOpts struct contains agent remote config options -type remoteOpts struct { - url *url.URL - HTTPClientConfig *config.HTTPClientConfig - headers map[string]string -} - -// remoteProvider interface should be implemented by config providers -type remoteProvider interface { - retrieve() ([]byte, http.Header, error) -} - -// newRemoteProvider constructs a new remote configuration provider. The rawURL is parsed -// and a provider is constructed based on the URL's scheme. -func newRemoteProvider(rawURL string, opts *remoteOpts) (remoteProvider, error) { - u, err := url.Parse(rawURL) - if err != nil { - return nil, fmt.Errorf("error parsing rawURL %s: %w", rawURL, err) - } - if opts == nil { - // Default provider opts - opts = &remoteOpts{} - } - opts.url = u - - switch u.Scheme { - case "": - // if no scheme, assume local file path, return nil and let caller handle. - return nil, nil - case httpScheme, httpsScheme: - httpP, err := newHTTPProvider(opts) - if err != nil { - return nil, fmt.Errorf("error constructing httpProvider: %w", err) - } - return httpP, nil - default: - return nil, fmt.Errorf("remote config scheme not supported: %s", u.Scheme) - } -} - -// Remote Config Providers -// httpProvider - http/https provider -type httpProvider struct { - myURL *url.URL - headers map[string]string - httpClient *http.Client -} - -// newHTTPProvider constructs a new httpProvider -func newHTTPProvider(opts *remoteOpts) (*httpProvider, error) { - httpClientConfig := config.HTTPClientConfig{} - if opts.HTTPClientConfig != nil { - err := opts.HTTPClientConfig.Validate() - if err != nil { - return nil, err - } - httpClientConfig = *opts.HTTPClientConfig - } - httpClient, err := config.NewClientFromConfig(httpClientConfig, "remote-config") - if err != nil { - return nil, err - } - return &httpProvider{ - myURL: opts.url, - httpClient: httpClient, - headers: opts.headers, - }, nil -} - -type retryAfterError struct { - retryAfter time.Duration -} - -func (r retryAfterError) Error() string { - return fmt.Sprintf("server indicated to retry after %s", r.retryAfter) -} - -type notModifiedError struct{} - -func (n notModifiedError) Error() string { - return "server indicated no changes" -} - -// retrieve implements remoteProvider and fetches the config -func (p httpProvider) retrieve() ([]byte, http.Header, error) { - req, err := http.NewRequest(http.MethodGet, p.myURL.String(), nil) - if err != nil { - return nil, nil, fmt.Errorf("error creating request: %w", err) - } - for header, headerVal := range p.headers { - req.Header.Set(header, headerVal) - } - response, err := p.httpClient.Do(req) - if err != nil { - instrumentation.InstrumentRemoteConfigFetchError() - return nil, nil, fmt.Errorf("request failed: %w", err) - } - defer response.Body.Close() - - instrumentation.InstrumentRemoteConfigFetch(response.StatusCode) - - if response.StatusCode == http.StatusTooManyRequests || response.StatusCode == http.StatusServiceUnavailable { - retryAfter := response.Header.Get("Retry-After") - if retryAfter == "" { - return nil, nil, fmt.Errorf("server indicated to retry, but no Retry-After header was provided") - } - 
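-		// Note: the value is parsed as a Go duration string (e.g. "30s"), not the
-		// delay-seconds integer form that the HTTP Retry-After header normally carries.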
		retryAfterDuration, err := time.ParseDuration(retryAfter)
-		if err != nil {
-			return nil, nil, fmt.Errorf("server indicated to retry, but Retry-After header was not a valid duration: %w", err)
-		}
-		return nil, nil, retryAfterError{retryAfter: retryAfterDuration}
-	}
-
-	if response.StatusCode == http.StatusNotModified {
-		return nil, nil, notModifiedError{}
-	}
-
-	if response.StatusCode/100 != 2 {
-		return nil, nil, fmt.Errorf("error fetching config: status code: %d", response.StatusCode)
-	}
-	bb, err := io.ReadAll(response.Body)
-	if err != nil {
-		return nil, nil, err
-	}
-	return bb, response.Header, nil
-}
diff --git a/internal/static/config/remote_config_test.go b/internal/static/config/remote_config_test.go
deleted file mode 100644
index f8b5b046ce..0000000000
--- a/internal/static/config/remote_config_test.go
+++ /dev/null
@@ -1,155 +0,0 @@
-package config
-
-import (
-	"fmt"
-	"net/http"
-	"net/http/httptest"
-	"os"
-	"testing"
-
-	"github.com/prometheus/common/config"
-	"github.com/stretchr/testify/assert"
-	"github.com/stretchr/testify/require"
-)
-
-const configPath = "/agent.yml"
-
-func TestRemoteConfigHTTP(t *testing.T) {
-	testCfg := `
-metrics:
-  global:
-    scrape_timeout: 33s
-`
-
-	svr := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
-		if r.URL.Path == configPath {
-			_, _ = w.Write([]byte(testCfg))
-		}
-	}))
-
-	svrWithBasicAuth := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
-		user, pass, _ := r.BasicAuth()
-		if user != "foo" || pass != "bar" {
-			w.WriteHeader(http.StatusUnauthorized)
-			return
-		}
-		if r.URL.Path == configPath {
-			_, _ = w.Write([]byte(testCfg))
-		}
-	}))
-
-	svrWithHeaders := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
-		if r.URL.Path == configPath {
-			w.Header().Add("X-Test-Header", "test")
-			w.Header().Add("X-Other-Header", "test2")
-			_, _ = w.Write([]byte(testCfg))
-		}
-	}))
-
-	tempDir := t.TempDir()
-	err := os.WriteFile(fmt.Sprintf("%s/password-file.txt", tempDir), []byte("bar"), 0644)
-	require.NoError(t, err)
-
-	passwdFileCfg := &config.HTTPClientConfig{
-		BasicAuth: &config.BasicAuth{
-			Username:     "foo",
-			PasswordFile: fmt.Sprintf("%s/password-file.txt", tempDir),
-		},
-	}
-	dir, err := os.Getwd()
-	require.NoError(t, err)
-	passwdFileCfg.SetDirectory(dir)
-
-	type args struct {
-		rawURL string
-		opts   *remoteOpts
-	}
-	tests := []struct {
-		name        string
-		args        args
-		want        []byte
-		wantErr     bool
-		wantHeaders map[string][]string
-	}{
-		{
-			name: "httpScheme config",
-			args: args{
-				rawURL: fmt.Sprintf("%s/agent.yml", svr.URL),
-			},
-			want:    []byte(testCfg),
-			wantErr: false,
-		},
-		{
-			name: "httpScheme config with basic auth",
-			args: args{
-				rawURL: fmt.Sprintf("%s/agent.yml", svrWithBasicAuth.URL),
-				opts: &remoteOpts{
-					HTTPClientConfig: &config.HTTPClientConfig{
-						BasicAuth: &config.BasicAuth{
-							Username: "foo",
-							Password: "bar",
-						},
-					},
-				},
-			},
-			want:    []byte(testCfg),
-			wantErr: false,
-		},
-		{
-			name: "httpScheme config with basic auth password file",
-			args: args{
-				rawURL: fmt.Sprintf("%s/agent.yml", svrWithBasicAuth.URL),
-				opts: &remoteOpts{
-					HTTPClientConfig: passwdFileCfg,
-				},
-			},
-			want:    []byte(testCfg),
-			wantErr: false,
-		},
-		{
-			name: "unsupported scheme throws error",
-			args: args{
-				rawURL: "ssh://unsupported/scheme",
-			},
-			want:    nil,
-			wantErr: true,
-		},
-		{
-			name: "invalid url throws error",
-			args: args{
-				rawURL: "://invalid/url",
-			},
-			want:    nil,
-			wantErr: true,
-		},
-		{
-			name: "response
headers are returned", - args: args{ - rawURL: fmt.Sprintf("%s/agent.yml", svrWithHeaders.URL), - }, - want: []byte(testCfg), - wantErr: false, - wantHeaders: map[string][]string{ - "X-Test-Header": {"test"}, - "X-Other-Header": {"test2"}, - }, - }, - } - - for _, tt := range tests { - t.Run(tt.name, func(t *testing.T) { - rc, err := newRemoteProvider(tt.args.rawURL, tt.args.opts) - if tt.wantErr { - assert.Error(t, err) - return - } - assert.NoError(t, err) - bb, header, err := rc.retrieve() - assert.NoError(t, err) - assert.Equal(t, string(tt.want), string(bb)) - for k, v := range tt.wantHeaders { - assert.Equal(t, v, header[k]) - } - }) - } -} diff --git a/internal/static/integrations/cadvisor/cadvisor_stub.go b/internal/static/integrations/cadvisor/cadvisor_stub.go index 80a038b85d..35bdd3455b 100644 --- a/internal/static/integrations/cadvisor/cadvisor_stub.go +++ b/internal/static/integrations/cadvisor/cadvisor_stub.go @@ -3,7 +3,11 @@ package cadvisor //nolint:golint import ( + "context" + "net/http" + "github.com/grafana/agent/internal/static/integrations" + "github.com/grafana/agent/internal/static/integrations/config" "github.com/go-kit/log" "github.com/go-kit/log/level" @@ -12,5 +16,24 @@ import ( // NewIntegration creates a new cadvisor integration func (c *Config) NewIntegration(logger log.Logger) (integrations.Integration, error) { level.Warn(logger).Log("msg", "the cadvisor integration only works on linux; enabling it on other platforms will do nothing") - return &integrations.StubIntegration{}, nil + return &stubIntegration{}, nil +} + +// stubIntegration implements a no-op integration for use on platforms not supported by an integration +type stubIntegration struct{} + +// MetricsHandler returns an http.NotFoundHandler to satisfy the Integration interface +func (i *stubIntegration) MetricsHandler() (http.Handler, error) { + return http.NotFoundHandler(), nil +} + +// ScrapeConfigs returns an empty list of scrape configs, since there is nothing to scrape +func (i *stubIntegration) ScrapeConfigs() []config.ScrapeConfig { + return []config.ScrapeConfig{} +} + +// Run just waits for the context to finish +func (i *stubIntegration) Run(ctx context.Context) error { + <-ctx.Done() + return ctx.Err() } diff --git a/internal/static/integrations/manager.go b/internal/static/integrations/manager.go index de22be37e8..59760b9c7f 100644 --- a/internal/static/integrations/manager.go +++ b/internal/static/integrations/manager.go @@ -1,39 +1,18 @@ package integrations import ( - "context" "fmt" - "net/http" - "path" - "strings" - "sync" "time" config_util "github.com/prometheus/common/config" - "github.com/go-kit/log" - "github.com/go-kit/log/level" - "github.com/gorilla/mux" "github.com/grafana/agent/internal/static/metrics" - "github.com/grafana/agent/internal/static/metrics/instance" - "github.com/grafana/agent/internal/static/metrics/instance/configstore" "github.com/grafana/agent/internal/static/server" - "github.com/grafana/agent/internal/util" - "github.com/prometheus/client_golang/prometheus" - "github.com/prometheus/client_golang/prometheus/promauto" "github.com/prometheus/common/model" promConfig "github.com/prometheus/prometheus/config" - "github.com/prometheus/prometheus/discovery" "github.com/prometheus/prometheus/model/relabel" ) -var ( - integrationAbnormalExits = promauto.NewCounterVec(prometheus.CounterOpts{ - Name: "agent_metrics_integration_abnormal_exits_total", - Help: "Total number of times an agent integration exited unexpectedly, causing it to be restarted.", - }, 
[]string{"integration_name"}) -) - var CurrentManagerConfig ManagerConfig = DefaultManagerConfig() // DefaultManagerConfig holds the default settings for integrations. @@ -153,400 +132,3 @@ func (c *ManagerConfig) ApplyDefaults(sflags *server.Flags, mcfg *metrics.Config return nil } - -// Manager manages a set of integrations and runs them. -type Manager struct { - logger log.Logger - - cfgMut sync.RWMutex - cfg ManagerConfig - - hostname string - - ctx context.Context - cancel context.CancelFunc - wg sync.WaitGroup - - im instance.Manager - validator configstore.Validator - - integrationsMut sync.RWMutex - integrations map[string]*integrationProcess - - handlerMut sync.Mutex - handlerCache map[string]handlerCacheEntry -} - -// NewManager creates a new integrations manager. NewManager must be given an -// InstanceManager which is responsible for accepting instance configs to -// scrape and send metrics from running integrations. -func NewManager(cfg ManagerConfig, logger log.Logger, im instance.Manager, validate configstore.Validator) (*Manager, error) { - ctx, cancel := context.WithCancel(context.Background()) - - m := &Manager{ - logger: logger, - - ctx: ctx, - cancel: cancel, - - im: im, - validator: validate, - - integrations: make(map[string]*integrationProcess, len(cfg.Integrations)), - - handlerCache: make(map[string]handlerCacheEntry), - } - - var err error - m.hostname, err = instance.Hostname() - if err != nil { - return nil, err - } - - if err := m.ApplyConfig(cfg); err != nil { - return nil, fmt.Errorf("failed applying config: %w", err) - } - return m, nil -} - -// ApplyConfig updates the configuration of the integrations subsystem. -func (m *Manager) ApplyConfig(cfg ManagerConfig) error { - var failed bool - - m.cfgMut.Lock() - defer m.cfgMut.Unlock() - - m.integrationsMut.Lock() - defer m.integrationsMut.Unlock() - - // The global prometheus config settings don't get applied to integrations until later. This - // causes us to skip reload when those settings change. - if util.CompareYAML(m.cfg, cfg) && util.CompareYAML(m.cfg.PrometheusGlobalConfig, cfg.PrometheusGlobalConfig) { - level.Debug(m.logger).Log("msg", "Integrations config is unchanged skipping apply") - return nil - } - level.Debug(m.logger).Log("msg", "Applying integrations config changes") - - select { - case <-m.ctx.Done(): - return fmt.Errorf("Manager already stopped") - default: - // No-op - } - - // Iterate over our integrations. New or changed integrations will be - // started, with their existing counterparts being shut down. - for _, ic := range cfg.Integrations { - if !ic.Common.Enabled { - continue - } - // Key is used to identify the instance of this integration within the - // instance manager and within our set of running integrations. - key := integrationKey(ic.Name()) - - // Look for an existing integration with the same key. If it exists and - // is unchanged, we have nothing to do. Otherwise, we're going to recreate - // it with the new settings, so we'll need to stop it. - if p, exist := m.integrations[key]; exist { - if util.CompareYAMLWithHook(p.cfg, ic, noScrubbedSecretsHook) { - continue - } - p.stop() - delete(m.integrations, key) - } - - l := log.With(m.logger, "integration", ic.Name()) - i, err := ic.NewIntegration(l) - if err != nil { - level.Error(m.logger).Log("msg", "failed to initialize integration. 
it will not run or be scraped", "integration", ic.Name(), "err", err)
-			failed = true
-
-			// If this integration was running before, its instance won't be cleaned
-			// up since it's now removed from the map. We need to clean it up here.
-			_ = m.im.DeleteConfig(key)
-			continue
-		}
-
-		// Find what instance label should be used to represent this integration.
-		var instanceKey string
-		if kp := ic.Common.InstanceKey; kp != nil {
-			// Common config takes precedence.
-			instanceKey = strings.TrimSpace(*kp)
-		} else {
-			instanceKey, err = ic.InstanceKey(fmt.Sprintf("%s:%d", m.hostname, cfg.ListenPort))
-			if err != nil {
-				level.Error(m.logger).Log("msg", "failed to get instance key for integration. it will not run or be scraped", "integration", ic.Name(), "err", err)
-				failed = true
-
-				// If this integration was running before, its instance won't be cleaned
-				// up since it's now removed from the map. We need to clean it up here.
-				_ = m.im.DeleteConfig(key)
-				continue
-			}
-		}
-
-		// Create, start, and register the new integration.
-		ctx, cancel := context.WithCancel(m.ctx)
-		p := &integrationProcess{
-			log:         m.logger,
-			cfg:         ic,
-			i:           i,
-			instanceKey: instanceKey,
-
-			ctx:  ctx,
-			stop: cancel,
-
-			wg:   &m.wg,
-			wait: m.instanceBackoff,
-		}
-		go p.Run()
-		m.integrations[key] = p
-	}
-
-	// Delete instances and processes that have been removed in between calls to
-	// ApplyConfig.
-	for key, process := range m.integrations {
-		foundConfig := false
-		for _, ic := range cfg.Integrations {
-			if integrationKey(ic.Name()) == key {
-				// If this is disabled then we should delete from integrations
-				if !ic.Common.Enabled {
-					break
-				}
-				foundConfig = true
-				break
-			}
-		}
-		if foundConfig {
-			continue
-		}
-
-		_ = m.im.DeleteConfig(key)
-		process.stop()
-		delete(m.integrations, key)
-	}
-
-	// Re-apply configs to our instance manager for all running integrations.
-	// Generated scrape configs may change in between calls to ApplyConfig even
-	// if the configs for the integration didn't.
-	for key, p := range m.integrations {
-		shouldCollect := cfg.ScrapeIntegrations
-		if common := p.cfg.Common; common.ScrapeIntegration != nil {
-			shouldCollect = *common.ScrapeIntegration
-		}
-
-		switch shouldCollect {
-		case true:
-			instanceConfig := m.instanceConfigForIntegration(p, cfg)
-			if err := m.validator(&instanceConfig); err != nil {
-				level.Error(p.log).Log("msg", "failed to validate generated scrape config for integration. integration will not be scraped", "err", err, "integration", p.cfg.Name())
-				failed = true
-				break
-			}
-
-			if err := m.im.ApplyConfig(instanceConfig); err != nil {
-				level.Error(p.log).Log("msg", "failed to apply integration. integration will not be scraped", "err", err, "integration", p.cfg.Name())
-				failed = true
-			}
-		case false:
-			// If a previous instance of the config was being scraped, we need to
-			// delete it here. Calling DeleteConfig when nothing is running is a safe
-			// operation.
-			_ = m.im.DeleteConfig(key)
-		}
-	}
-
-	m.cfg = cfg
-
-	if failed {
-		return fmt.Errorf("not all integrations were correctly updated")
-	}
-	return nil
-}
-
-func noScrubbedSecretsHook(in interface{}) (ok bool, out interface{}, err error) {
-	switch v := in.(type) {
-	case config_util.Secret:
-		return true, string(v), nil
-	case *config_util.URL:
-		return true, v.String(), nil
-	default:
-		return false, nil, nil
-	}
-}
-
-// integrationProcess is a running integration.
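-// Run supervises it: an exit with any error other than context.Canceled triggers
-// the wait callback (the manager's restart backoff) before the integration is
-// started again, while a clean or canceled exit ends the loop.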
-type integrationProcess struct {
-	log         log.Logger
-	ctx         context.Context
-	stop        context.CancelFunc
-	cfg         UnmarshaledConfig
-	instanceKey string // Value for the `instance` label
-	i           Integration
-
-	wg   *sync.WaitGroup
-	wait func(cfg Config, err error)
-}
-
-// Run runs the integration until the process is canceled.
-func (p *integrationProcess) Run() {
-	defer func() {
-		if r := recover(); r != nil {
-			err := fmt.Errorf("%v", r)
-			level.Error(p.log).Log("msg", "integration has panicked. THIS IS A BUG!", "err", err, "integration", p.cfg.Name())
-		}
-	}()
-
-	p.wg.Add(1)
-	defer p.wg.Done()
-
-	for {
-		err := p.i.Run(p.ctx)
-		if err != nil && err != context.Canceled {
-			p.wait(p.cfg, err)
-		} else {
-			level.Info(p.log).Log("msg", "stopped integration", "integration", p.cfg.Name())
-			break
-		}
-	}
-}
-
-func (m *Manager) instanceBackoff(cfg Config, err error) {
-	m.cfgMut.RLock()
-	defer m.cfgMut.RUnlock()
-
-	integrationAbnormalExits.WithLabelValues(cfg.Name()).Inc()
-	level.Error(m.logger).Log("msg", "integration stopped abnormally, restarting after backoff", "err", err, "integration", cfg.Name(), "backoff", m.cfg.IntegrationRestartBackoff)
-	time.Sleep(m.cfg.IntegrationRestartBackoff)
-}
-
-func (m *Manager) instanceConfigForIntegration(p *integrationProcess, cfg ManagerConfig) instance.Config {
-	common := p.cfg.Common
-	relabelConfigs := append(cfg.DefaultRelabelConfigs(p.instanceKey), common.RelabelConfigs...)
-
-	schema := "http"
-	// Check for HTTPS support
-	var httpClientConfig config_util.HTTPClientConfig
-	if cfg.ServerUsingTLS {
-		schema = "https"
-		httpClientConfig.TLSConfig = cfg.TLSConfig
-	}
-
-	var scrapeConfigs []*promConfig.ScrapeConfig
-
-	for _, isc := range p.i.ScrapeConfigs() {
-		sc := &promConfig.ScrapeConfig{
-			JobName:                 fmt.Sprintf("integrations/%s", isc.JobName),
-			MetricsPath:             path.Join("/integrations", p.cfg.Name(), isc.MetricsPath),
-			Params:                  isc.QueryParams,
-			Scheme:                  schema,
-			HonorLabels:             false,
-			HonorTimestamps:         true,
-			ScrapeInterval:          model.Duration(common.ScrapeInterval),
-			ScrapeTimeout:           model.Duration(common.ScrapeTimeout),
-			ServiceDiscoveryConfigs: m.scrapeServiceDiscovery(cfg),
-			RelabelConfigs:          relabelConfigs,
-			MetricRelabelConfigs:    common.MetricRelabelConfigs,
-			HTTPClientConfig:        httpClientConfig,
-		}
-
-		scrapeConfigs = append(scrapeConfigs, sc)
-	}
-
-	instanceCfg := instance.DefaultConfig
-	instanceCfg.Name = integrationKey(p.cfg.Name())
-	instanceCfg.ScrapeConfigs = scrapeConfigs
-	instanceCfg.RemoteWrite = cfg.PrometheusRemoteWrite
-	if common.WALTruncateFrequency > 0 {
-		instanceCfg.WALTruncateFrequency = common.WALTruncateFrequency
-	}
-	return instanceCfg
-}
-
-// integrationKey returns the key for an integration Config, used for its
-// instance name and name in the process cache.
-func integrationKey(name string) string {
-	return fmt.Sprintf("integration/%s", name)
-}
-
-func (m *Manager) scrapeServiceDiscovery(cfg ManagerConfig) discovery.Configs {
-	// A blank host somehow works, but it then requires a server name to be set under tls.
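-	// Fall back to loopback when no listen host is configured.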
- newHost := cfg.ListenHost - if newHost == "" { - newHost = "127.0.0.1" - } - localAddr := fmt.Sprintf("%s:%d", newHost, cfg.ListenPort) - labels := model.LabelSet{} - labels[model.LabelName("agent_hostname")] = model.LabelValue(m.hostname) - for k, v := range cfg.Labels { - labels[k] = v - } - - return discovery.Configs{ - discovery.StaticConfig{{ - Targets: []model.LabelSet{{model.AddressLabel: model.LabelValue(localAddr)}}, - Labels: labels, - }}, - } -} - -// WireAPI hooks up /metrics routes per-integration. -func (m *Manager) WireAPI(r *mux.Router) { - r.HandleFunc("/integrations/{name}/metrics", func(rw http.ResponseWriter, r *http.Request) { - m.integrationsMut.RLock() - defer m.integrationsMut.RUnlock() - - key := integrationKey(mux.Vars(r)["name"]) - handler := m.loadHandler(key) - handler.ServeHTTP(rw, r) - }) -} - -// loadHandler will perform a dynamic lookup of an HTTP handler for an -// integration. loadHandler should be called with a read lock on the -// integrations mutex. -func (m *Manager) loadHandler(key string) http.Handler { - m.handlerMut.Lock() - defer m.handlerMut.Unlock() - - // Search the integration by name to see if it's still running. - p, ok := m.integrations[key] - if !ok { - delete(m.handlerCache, key) - return http.NotFoundHandler() - } - - // Now look in the cache for a handler for the running process. - cacheEntry, ok := m.handlerCache[key] - if ok && cacheEntry.process == p { - return cacheEntry.handler - } - - // New integration process that hasn't been scraped before. Generate - // a handler for it and cache it. - handler, err := p.i.MetricsHandler() - if err != nil { - level.Error(m.logger).Log("msg", "could not create http handler for integration", "integration", p.cfg.Name(), "err", err) - return http.HandlerFunc(internalServiceError) - } - - cacheEntry = handlerCacheEntry{handler: handler, process: p} - m.handlerCache[key] = cacheEntry - return cacheEntry.handler -} - -func internalServiceError(w http.ResponseWriter, r *http.Request) { - http.Error(w, "500 Internal Server Error", http.StatusInternalServerError) -} - -// Stop stops the manager and all of its integrations. Blocks until all running -// integrations exit. -func (m *Manager) Stop() { - m.cancel() - m.wg.Wait() -} - -type handlerCacheEntry struct { - handler http.Handler - process *integrationProcess -} diff --git a/internal/static/integrations/manager_test.go b/internal/static/integrations/manager_test.go deleted file mode 100644 index e44dfb6c09..0000000000 --- a/internal/static/integrations/manager_test.go +++ /dev/null @@ -1,433 +0,0 @@ -package integrations - -import ( - "context" - "fmt" - "net/http" - "testing" - "time" - - "github.com/go-kit/log" - "github.com/grafana/agent/internal/static/integrations/config" - "github.com/grafana/agent/internal/static/metrics/instance" - "github.com/grafana/agent/internal/util" - "github.com/prometheus/client_golang/prometheus/promhttp" - "github.com/prometheus/common/model" - promConfig "github.com/prometheus/prometheus/config" - "github.com/prometheus/prometheus/model/labels" - "github.com/prometheus/prometheus/model/relabel" - "github.com/stretchr/testify/require" - "go.uber.org/atomic" - "gopkg.in/yaml.v2" -) - -const mockIntegrationName = "integration/mock" - -func noOpValidator(*instance.Config) error { return nil } - -// TestConfig_MarshalEmptyIntegrations ensures that an empty set of integrations -// can be marshaled correctly. 
-func TestConfig_MarshalEmptyIntegrations(t *testing.T) {
-	cfgText := `
-scrape_integrations: true
-replace_instance_label: true
-integration_restart_backoff: 5s
-use_hostname_label: true
-`
-	var (
-		cfg        ManagerConfig
-		listenPort = 12345
-		listenHost = "127.0.0.1"
-	)
-	require.NoError(t, yaml.Unmarshal([]byte(cfgText), &cfg))
-
-	// Listen port must be set before applying defaults. Normally applied by the
-	// config package.
-	cfg.ListenPort = listenPort
-	cfg.ListenHost = listenHost
-
-	outBytes, err := yaml.Marshal(cfg)
-	require.NoError(t, err, "Failed creating integration")
-	require.YAMLEq(t, cfgText, string(outBytes))
-}
-
-// Test that embedded integration fields in the struct can be unmarshaled and
-// remarshaled back out to text.
-func TestConfig_Remarshal(t *testing.T) {
-	RegisterIntegration(&testIntegrationA{})
-	cfgText := `
-scrape_integrations: true
-replace_instance_label: true
-integration_restart_backoff: 5s
-use_hostname_label: true
-test:
-  text: Hello, world!
-  truth: true
-`
-	var (
-		cfg        ManagerConfig
-		listenPort = 12345
-		listenHost = "127.0.0.1"
-	)
-	require.NoError(t, yaml.Unmarshal([]byte(cfgText), &cfg))
-
-	// Listen port must be set before applying defaults. Normally applied by the
-	// config package.
-	cfg.ListenPort = listenPort
-	cfg.ListenHost = listenHost
-
-	outBytes, err := yaml.Marshal(cfg)
-	require.NoError(t, err, "Failed creating integration")
-	require.YAMLEq(t, cfgText, string(outBytes))
-}
-
-func TestConfig_AddressRelabels(t *testing.T) {
-	cfgText := `
-agent:
-  enabled: true
-`
-
-	var (
-		cfg        ManagerConfig
-		listenPort = 12345
-		listenHost = "127.0.0.1"
-	)
-	require.NoError(t, yaml.Unmarshal([]byte(cfgText), &cfg))
-
-	// Listen port must be set before applying defaults. Normally applied by the
-	// config package.
-	cfg.ListenPort = listenPort
-	cfg.ListenHost = listenHost
-
-	expectHostname, _ := instance.Hostname()
-	relabels := cfg.DefaultRelabelConfigs(expectHostname + ":12345")
-
-	// Ensure that the relabel configs are functional
-	require.Len(t, relabels, 1)
-	result, _ := relabel.Process(labels.FromStrings("__address__", "127.0.0.1"), relabels...)
-
-	require.Equal(t, result.Get("instance"), expectHostname+":12345")
-}
-
-func TestManager_instanceConfigForIntegration(t *testing.T) {
-	mock := newMockIntegration()
-	icfg := mockConfig{Integration: mock}
-
-	im := instance.NewBasicManager(instance.DefaultBasicManagerConfig, log.NewNopLogger(), mockInstanceFactory)
-	m, err := NewManager(mockManagerConfig(), log.NewNopLogger(), im, noOpValidator)
-	require.NoError(t, err)
-	defer m.Stop()
-
-	p := &integrationProcess{instanceKey: "key", cfg: makeUnmarshaledConfig(icfg, true), i: mock}
-	cfg := m.instanceConfigForIntegration(p, mockManagerConfig())
-
-	// Validate that the generated MetricsPath is a valid URL path
-	require.Len(t, cfg.ScrapeConfigs, 1)
-	require.Equal(t, "/integrations/mock/metrics", cfg.ScrapeConfigs[0].MetricsPath)
-}
-
-func makeUnmarshaledConfig(cfg Config, enabled bool) UnmarshaledConfig {
-	return UnmarshaledConfig{Config: cfg, Common: config.Common{Enabled: enabled}}
-}
-
-// TestManager_NoIntegrationsScrape ensures that configs don't get generated
-// when the ScrapeIntegrations flag is disabled.
-func TestManager_NoIntegrationsScrape(t *testing.T) {
-	mock := newMockIntegration()
-	icfg := mockConfig{Integration: mock}
-
-	im := instance.NewBasicManager(instance.DefaultBasicManagerConfig, log.NewNopLogger(), mockInstanceFactory)
-
-	cfg := mockManagerConfig()
-	cfg.ScrapeIntegrations = false
-	cfg.Integrations = append(cfg.Integrations, makeUnmarshaledConfig(&icfg, true))
-
-	m, err := NewManager(cfg, log.NewNopLogger(), im, noOpValidator)
-	require.NoError(t, err)
-	defer m.Stop()
-
-	// Normally we'd use util.Eventually here, but since im.ListConfigs starts
-	// out with a length of zero, util.Eventually would immediately pass. Instead
-	// we want to wait for a bit to make sure that the length of ListConfigs
-	// doesn't become non-zero.
-	time.Sleep(time.Second)
-	require.Zero(t, len(im.ListConfigs()))
-}
-
-// TestManager_NoIntegrationScrape ensures that configs don't get generated
-// when the ScrapeIntegration flag is disabled on the integration.
-func TestManager_NoIntegrationScrape(t *testing.T) {
-	mock := newMockIntegration()
-	icfg := mockConfig{Integration: mock}
-	noScrape := false
-
-	im := instance.NewBasicManager(instance.DefaultBasicManagerConfig, log.NewNopLogger(), mockInstanceFactory)
-
-	cfg := mockManagerConfig()
-	cfg.Integrations = append(cfg.Integrations, UnmarshaledConfig{
-		Config: icfg,
-		Common: config.Common{ScrapeIntegration: &noScrape},
-	})
-
-	m, err := NewManager(cfg, log.NewNopLogger(), im, noOpValidator)
-	require.NoError(t, err)
-	defer m.Stop()
-
-	time.Sleep(time.Second)
-	require.Zero(t, len(im.ListConfigs()))
-}
-
-// TestManager_StartsIntegrations tests that, when given an integration to
-// launch, the Manager applies a config and runs the integration.
-func TestManager_StartsIntegrations(t *testing.T) {
-	mock := newMockIntegration()
-	icfg := mockConfig{Integration: mock}
-
-	cfg := mockManagerConfig()
-	cfg.Integrations = append(cfg.Integrations, makeUnmarshaledConfig(icfg, true))
-
-	im := instance.NewBasicManager(instance.DefaultBasicManagerConfig, log.NewNopLogger(), mockInstanceFactory)
-	m, err := NewManager(cfg, log.NewNopLogger(), im, noOpValidator)
-	require.NoError(t, err)
-	defer m.Stop()
-
-	util.Eventually(t, func(t require.TestingT) {
-		require.Equal(t, 1, len(im.ListConfigs()))
-	})
-
-	// Check that the instance was set to run
-	util.Eventually(t, func(t require.TestingT) {
-		require.Equal(t, 1, int(mock.startedCount.Load()))
-	})
-}
-
-func TestManager_RestartsIntegrations(t *testing.T) {
-	mock := newMockIntegration()
-	icfg := mockConfig{Integration: mock}
-
-	cfg := mockManagerConfig()
-	cfg.Integrations = append(cfg.Integrations, makeUnmarshaledConfig(icfg, true))
-
-	im := instance.NewBasicManager(instance.DefaultBasicManagerConfig, log.NewNopLogger(), mockInstanceFactory)
-	m, err := NewManager(cfg, log.NewNopLogger(), im, noOpValidator)
-	require.NoError(t, err)
-	defer m.Stop()
-
-	mock.err <- fmt.Errorf("I can't believe this horrible error happened")
-
-	util.Eventually(t, func(t require.TestingT) {
-		require.Equal(t, 2, int(mock.startedCount.Load()))
-	})
-}
-
-func TestManager_GracefulStop(t *testing.T) {
-	mock := newMockIntegration()
-	icfg := mockConfig{Integration: mock}
-
-	cfg := mockManagerConfig()
-	cfg.Integrations = append(cfg.Integrations, makeUnmarshaledConfig(icfg, true))
-
-	im := instance.NewBasicManager(instance.DefaultBasicManagerConfig, log.NewNopLogger(), mockInstanceFactory)
-	m, err := NewManager(cfg, log.NewNopLogger(), im, noOpValidator)
-	require.NoError(t, err)
-
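-	// Wait for the integration to have started once before stopping the manager.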
util.Eventually(t, func(t require.TestingT) { - require.Equal(t, 1, int(mock.startedCount.Load())) - }) - - m.Stop() - - time.Sleep(500 * time.Millisecond) - require.Equal(t, 1, int(mock.startedCount.Load()), "graceful shutdown should not have restarted the Integration") - - util.Eventually(t, func(t require.TestingT) { - require.Equal(t, false, mock.running.Load()) - }) -} - -func TestManager_IntegrationEnabledToDisabledReload(t *testing.T) { - mock := newMockIntegration() - icfg := mockConfig{Integration: mock} - cfg := mockManagerConfig() - cfg.Integrations = append(cfg.Integrations, makeUnmarshaledConfig(icfg, true)) - - im := instance.NewBasicManager(instance.DefaultBasicManagerConfig, log.NewNopLogger(), mockInstanceFactory) - m, err := NewManager(cfg, log.NewNopLogger(), im, noOpValidator) - require.NoError(t, err) - - // Test for Enabled -> Disabled - _ = m.ApplyConfig(generateMockConfigWithEnabledFlag(false)) - require.Len(t, m.integrations, 0, "Integration was disabled so should be removed from map") - _, err = m.im.GetInstance(mockIntegrationName) - require.Error(t, err, "This mock should not exist") - - // test for Disabled -> Enabled - _ = m.ApplyConfig(generateMockConfigWithEnabledFlag(true)) - require.Len(t, m.integrations, 1, "Integration was enabled so should be here") - _, err = m.im.GetInstance(mockIntegrationName) - require.NoError(t, err, "This mock should exist") - require.Len(t, m.im.ListInstances(), 1, "This instance should exist") -} - -func TestManager_IntegrationDisabledToEnabledReload(t *testing.T) { - mock := newMockIntegration() - icfg := mockConfig{Integration: mock} - - cfg := mockManagerConfig() - cfg.Integrations = append(cfg.Integrations, UnmarshaledConfig{ - Config: icfg, - Common: config.Common{Enabled: false}, - }) - - im := instance.NewBasicManager(instance.DefaultBasicManagerConfig, log.NewNopLogger(), mockInstanceFactory) - m, err := NewManager(cfg, log.NewNopLogger(), im, noOpValidator) - require.NoError(t, err) - require.Len(t, m.integrations, 0, "Integration was disabled so should be removed from map") - _, err = m.im.GetInstance(mockIntegrationName) - require.Error(t, err, "This mock should not exist") - - // test for Disabled -> Enabled - - _ = m.ApplyConfig(generateMockConfigWithEnabledFlag(true)) - require.Len(t, m.integrations, 1, "Integration was enabled so should be here") - _, err = m.im.GetInstance(mockIntegrationName) - require.NoError(t, err, "This mock should exist") - require.Len(t, m.im.ListInstances(), 1, "This instance should exist") -} - -type PromDefaultsValidator struct { - PrometheusGlobalConfig promConfig.GlobalConfig -} - -func (i *PromDefaultsValidator) validate(c *instance.Config) error { - instanceConfig := instance.GlobalConfig{ - Prometheus: i.PrometheusGlobalConfig, - } - return c.ApplyDefaults(instanceConfig) -} - -func TestManager_PromConfigChangeReloads(t *testing.T) { - mock := newMockIntegration() - icfg := mockConfig{Integration: mock} - - cfg := mockManagerConfig() - cfg.Integrations = append(cfg.Integrations, makeUnmarshaledConfig(icfg, true)) - - im := instance.NewBasicManager(instance.DefaultBasicManagerConfig, log.NewNopLogger(), mockInstanceFactory) - - startingPromConfig := mockPromConfigWithValues(model.Duration(30*time.Second), model.Duration(25*time.Second)) - cfg.PrometheusGlobalConfig = startingPromConfig - validator := PromDefaultsValidator{startingPromConfig} - - m, err := NewManager(cfg, log.NewNopLogger(), im, validator.validate) - require.NoError(t, err) - require.Len(t, m.im.ListConfigs(), 1, 
"Integration was enabled so should be here") - //The integration never has the prom config overrides happen so go after the running instance config instead - for _, c := range m.im.ListConfigs() { - for _, scrape := range c.ScrapeConfigs { - require.Equal(t, startingPromConfig.ScrapeInterval, scrape.ScrapeInterval) - require.Equal(t, startingPromConfig.ScrapeTimeout, scrape.ScrapeTimeout) - } - } - - newPromConfig := mockPromConfigWithValues(model.Duration(60*time.Second), model.Duration(55*time.Second)) - cfg.PrometheusGlobalConfig = newPromConfig - validator.PrometheusGlobalConfig = newPromConfig - - err = m.ApplyConfig(cfg) - require.NoError(t, err) - - require.Len(t, m.im.ListConfigs(), 1, "Integration was enabled so should be here") - //The integration never has the prom config overrides happen so go after the running instance config instead - for _, c := range m.im.ListConfigs() { - for _, scrape := range c.ScrapeConfigs { - require.Equal(t, newPromConfig.ScrapeInterval, scrape.ScrapeInterval) - require.Equal(t, newPromConfig.ScrapeTimeout, scrape.ScrapeTimeout) - } - } -} - -func generateMockConfigWithEnabledFlag(enabled bool) ManagerConfig { - enabledMock := newMockIntegration() - enabledConfig := mockConfig{Integration: enabledMock} - enabledManagerConfig := mockManagerConfig() - enabledManagerConfig.Integrations = append( - enabledManagerConfig.Integrations, - makeUnmarshaledConfig(enabledConfig, enabled), - ) - return enabledManagerConfig -} - -type mockConfig struct { - Integration *mockIntegration `yaml:"mock"` -} - -// Equal is used for cmp.Equal, since otherwise mockConfig can't be compared to itself. -func (c mockConfig) Equal(other mockConfig) bool { return c.Integration == other.Integration } - -func (c mockConfig) Name() string { return "mock" } -func (c mockConfig) InstanceKey(agentKey string) (string, error) { return agentKey, nil } - -func (c mockConfig) NewIntegration(_ log.Logger) (Integration, error) { - return c.Integration, nil -} - -type mockIntegration struct { - startedCount *atomic.Uint32 - running *atomic.Bool - err chan error -} - -func newMockIntegration() *mockIntegration { - return &mockIntegration{ - running: atomic.NewBool(true), - startedCount: atomic.NewUint32(0), - err: make(chan error), - } -} - -func (i *mockIntegration) MetricsHandler() (http.Handler, error) { - return promhttp.Handler(), nil -} - -func (i *mockIntegration) ScrapeConfigs() []config.ScrapeConfig { - return []config.ScrapeConfig{{ - JobName: "mock", - MetricsPath: "/metrics", - }} -} - -func (i *mockIntegration) Run(ctx context.Context) error { - i.startedCount.Inc() - i.running.Store(true) - defer i.running.Store(false) - - select { - case <-ctx.Done(): - return ctx.Err() - case err := <-i.err: - return err - } -} - -func mockInstanceFactory(_ instance.Config) (instance.ManagedInstance, error) { - return instance.NoOpInstance{}, nil -} - -func mockManagerConfig() ManagerConfig { - listenPort := 0 - listenHost := "127.0.0.1" - return ManagerConfig{ - ScrapeIntegrations: true, - IntegrationRestartBackoff: 0, - ListenPort: listenPort, - ListenHost: listenHost, - } -} - -func mockPromConfigWithValues(scrapeInterval model.Duration, scrapeTimeout model.Duration) promConfig.GlobalConfig { - return promConfig.GlobalConfig{ - ScrapeInterval: scrapeInterval, - ScrapeTimeout: scrapeTimeout, - } -} diff --git a/internal/static/integrations/stub_integration.go b/internal/static/integrations/stub_integration.go deleted file mode 100644 index 2d118ff82c..0000000000 --- 
a/internal/static/integrations/stub_integration.go +++ /dev/null @@ -1,27 +0,0 @@ -package integrations - -import ( - "context" - "net/http" - - "github.com/grafana/agent/internal/static/integrations/config" -) - -// StubIntegration implements a no-op integration for use on platforms not supported by an integration -type StubIntegration struct{} - -// MetricsHandler returns an http.NotFoundHandler to satisfy the Integration interface -func (i *StubIntegration) MetricsHandler() (http.Handler, error) { - return http.NotFoundHandler(), nil -} - -// ScrapeConfigs returns an empty list of scrape configs, since there is nothing to scrape -func (i *StubIntegration) ScrapeConfigs() []config.ScrapeConfig { - return []config.ScrapeConfig{} -} - -// Run just waits for the context to finish -func (i *StubIntegration) Run(ctx context.Context) error { - <-ctx.Done() - return ctx.Err() -} diff --git a/internal/static/integrations/v2/app_agent_receiver/app_agent_receiver.go b/internal/static/integrations/v2/app_agent_receiver/app_agent_receiver.go index f1bdd00adb..9145115fd5 100644 --- a/internal/static/integrations/v2/app_agent_receiver/app_agent_receiver.go +++ b/internal/static/integrations/v2/app_agent_receiver/app_agent_receiver.go @@ -1,191 +1,17 @@ package app_agent_receiver //nolint:golint import ( - "context" "fmt" - "net/http" "github.com/go-kit/log" - "github.com/go-kit/log/level" - "github.com/gorilla/mux" "github.com/grafana/agent/internal/static/integrations/v2" - "github.com/grafana/agent/internal/static/integrations/v2/metricsutils" - "github.com/grafana/agent/internal/static/traces/pushreceiver" - "github.com/grafana/dskit/instrument" - "github.com/grafana/dskit/middleware" - "github.com/prometheus/client_golang/prometheus" - "github.com/prometheus/client_golang/prometheus/promhttp" - "go.opentelemetry.io/collector/component" - "go.opentelemetry.io/collector/consumer" ) -type appAgentReceiverIntegration struct { - integrations.MetricsIntegration - appAgentReceiverHandler AppAgentReceiverHandler - logger log.Logger - conf *Config - reg prometheus.Registerer - - requestDurationCollector *prometheus.HistogramVec - receivedMessageSizeCollector *prometheus.HistogramVec - sentMessageSizeCollector *prometheus.HistogramVec - inflightRequestsCollector *prometheus.GaugeVec +func init() { + integrations.Register(&Config{}, integrations.TypeMultiplex) } -// Static typecheck tests -var ( - _ integrations.Integration = (*appAgentReceiverIntegration)(nil) - _ integrations.HTTPIntegration = (*appAgentReceiverIntegration)(nil) - _ integrations.MetricsIntegration = (*appAgentReceiverIntegration)(nil) -) - // NewIntegration converts this config into an instance of an integration func (c *Config) NewIntegration(l log.Logger, globals integrations.Globals) (integrations.Integration, error) { - reg := prometheus.NewRegistry() - sourcemapLogger := log.With(l, "subcomponent", "sourcemaps") - sourcemapStore := NewSourceMapStore(sourcemapLogger, c.SourceMaps, reg, nil, nil) - - receiverMetricsExporter := NewReceiverMetricsExporter(reg) - - var exp = []AppAgentReceiverExporter{ - receiverMetricsExporter, - } - - if len(c.LogsInstance) > 0 { - getLogsInstance := func() (logsInstance, error) { - instance := globals.Logs.Instance(c.LogsInstance) - if instance == nil { - return nil, fmt.Errorf("logs instance \"%s\" not found", c.LogsInstance) - } - return instance, nil - } - - if _, err := getLogsInstance(); err != nil { - return nil, err - } - - lokiExporter := NewLogsExporter( - l, - LogsExporterConfig{ - 
GetLogsInstance: getLogsInstance, - Labels: c.LogsLabels, - SendEntryTimeout: c.LogsSendTimeout, - }, - sourcemapStore, - ) - exp = append(exp, lokiExporter) - } - - if len(c.TracesInstance) > 0 { - getTracesConsumer := func() (consumer.Traces, error) { - tracesInstance := globals.Tracing.Instance(c.TracesInstance) - if tracesInstance == nil { - return nil, fmt.Errorf("traces instance \"%s\" not found", c.TracesInstance) - } - factory := tracesInstance.GetFactory(component.KindReceiver, pushreceiver.TypeStr) - if factory == nil { - return nil, fmt.Errorf("push receiver factory not found for traces instance \"%s\"", c.TracesInstance) - } - consumer := factory.(*pushreceiver.Factory).Consumer - if consumer == nil { - return nil, fmt.Errorf("consumer not set for push receiver factory on traces instance \"%s\"", c.TracesInstance) - } - return consumer, nil - } - if _, err := getTracesConsumer(); err != nil { - return nil, err - } - tracesExporter := NewTracesExporter(getTracesConsumer) - exp = append(exp, tracesExporter) - } - - handler := NewAppAgentReceiverHandler(c, exp, reg) - - metricsIntegration, err := metricsutils.NewMetricsHandlerIntegration(l, c, c.Common, globals, promhttp.HandlerFor(reg, promhttp.HandlerOpts{})) - if err != nil { - return nil, err - } - - requestDurationCollector := prometheus.NewHistogramVec(prometheus.HistogramOpts{ - Name: "app_agent_receiver_request_duration_seconds", - Help: "Time (in seconds) spent serving HTTP requests.", - Buckets: instrument.DefBuckets, - }, []string{"method", "route", "status_code", "ws"}) - reg.MustRegister(requestDurationCollector) - - receivedMessageSizeCollector := prometheus.NewHistogramVec(prometheus.HistogramOpts{ - Name: "app_agent_receiver_request_message_bytes", - Help: "Size (in bytes) of messages received in the request.", - Buckets: middleware.BodySizeBuckets, - }, []string{"method", "route"}) - reg.MustRegister(receivedMessageSizeCollector) - - sentMessageSizeCollector := prometheus.NewHistogramVec(prometheus.HistogramOpts{ - Name: "app_agent_receiver_response_message_bytes", - Help: "Size (in bytes) of messages sent in response.", - Buckets: middleware.BodySizeBuckets, - }, []string{"method", "route"}) - reg.MustRegister(sentMessageSizeCollector) - - inflightRequestsCollector := prometheus.NewGaugeVec(prometheus.GaugeOpts{ - Name: "app_agent_receiver_inflight_requests", - Help: "Current number of inflight requests.", - }, []string{"method", "route"}) - reg.MustRegister(inflightRequestsCollector) - - return &appAgentReceiverIntegration{ - MetricsIntegration: metricsIntegration, - appAgentReceiverHandler: handler, - logger: l, - conf: c, - reg: reg, - - requestDurationCollector: requestDurationCollector, - receivedMessageSizeCollector: receivedMessageSizeCollector, - sentMessageSizeCollector: sentMessageSizeCollector, - inflightRequestsCollector: inflightRequestsCollector, - }, nil -} - -// RunIntegration implements Integration -func (i *appAgentReceiverIntegration) RunIntegration(ctx context.Context) error { - r := mux.NewRouter() - r.Handle("/collect", i.appAgentReceiverHandler.HTTPHandler(i.logger)).Methods("POST", "OPTIONS") - - mw := middleware.Instrument{ - RouteMatcher: r, - Duration: i.requestDurationCollector, - RequestBodySize: i.receivedMessageSizeCollector, - ResponseBodySize: i.sentMessageSizeCollector, - InflightRequests: i.inflightRequestsCollector, - } - - srv := &http.Server{ - Addr: fmt.Sprintf("%s:%d", i.conf.Server.Host, i.conf.Server.Port), - Handler: mw.Wrap(r), - } - errChan := make(chan error, 1) - - 
go func() { - level.Info(i.logger).Log("msg", "starting app agent receiver", "host", i.conf.Server.Host, "port", i.conf.Server.Port) - if err := srv.ListenAndServe(); err != http.ErrServerClosed { - errChan <- err - } - }() - - select { - case <-ctx.Done(): - if err := srv.Shutdown(ctx); err != nil { - return err - } - case err := <-errChan: - close(errChan) - return err - } - - return nil -} - -func init() { - integrations.Register(&Config{}, integrations.TypeMultiplex) + return nil, fmt.Errorf("app_agent_receiver integration code has been replaced by faro.receiver component") } diff --git a/internal/static/integrations/v2/app_agent_receiver/app_agent_receiver_test.go b/internal/static/integrations/v2/app_agent_receiver/app_agent_receiver_test.go deleted file mode 100644 index f44db4c038..0000000000 --- a/internal/static/integrations/v2/app_agent_receiver/app_agent_receiver_test.go +++ /dev/null @@ -1,169 +0,0 @@ -package app_agent_receiver - -import ( - "bytes" - "context" - "fmt" - "io" - "net/http" - "strings" - "testing" - "time" - - "github.com/grafana/agent/internal/static/integrations/v2" - "github.com/grafana/agent/internal/static/server" - "github.com/grafana/agent/internal/static/traces" - "github.com/grafana/agent/internal/static/traces/traceutils" - "github.com/grafana/agent/internal/util" - "github.com/phayes/freeport" - "github.com/prometheus/client_golang/prometheus" - "github.com/stretchr/testify/assert" - "github.com/stretchr/testify/require" - "go.opentelemetry.io/collector/pdata/ptrace" - "gopkg.in/yaml.v2" -) - -func Test_ReceiveTracesAndRemoteWrite(t *testing.T) { - var err error - - // - // Prepare the traces instance - // - tracesCh := make(chan ptrace.Traces) - tracesAddr := traceutils.NewTestServer(t, func(t ptrace.Traces) { - tracesCh <- t - }) - - tracesCfgText := util.Untab(fmt.Sprintf(` -configs: -- name: TEST_TRACES - receivers: - jaeger: - protocols: - thrift_compact: - remote_write: - - endpoint: %s - insecure: true - batch: - timeout: 100ms - send_batch_size: 1 - `, tracesAddr)) - - var tracesCfg traces.Config - dec := yaml.NewDecoder(strings.NewReader(tracesCfgText)) - dec.SetStrict(true) - err = dec.Decode(&tracesCfg) - require.NoError(t, err) - - traces, err := traces.New(nil, nil, prometheus.NewRegistry(), tracesCfg, &server.HookLogger{}) - require.NoError(t, err) - t.Cleanup(traces.Stop) - - // - // Prepare the app_agent_receiver integration - // - integrationPort, err := freeport.GetFreePort() - require.NoError(t, err) - - var integrationCfg Config - cb := fmt.Sprintf(` -instance: TEST_APP_AGENT_RECEIVER -server: - cors_allowed_origins: - - '*' - host: '0.0.0.0' - max_allowed_payload_size: 5e+07 - port: %d - rate_limiting: - burstiness: 100 - enabled: true - rps: 100 -sourcemaps: - download: true -traces_instance: TEST_TRACES -`, integrationPort) - err = yaml.Unmarshal([]byte(cb), &integrationCfg) - require.NoError(t, err) - - logger := util.TestLogger(t) - globals := integrations.Globals{ - Tracing: traces, - } - - integration, err := integrationCfg.NewIntegration(logger, globals) - require.NoError(t, err) - - ctx := context.Background() - t.Cleanup(func() { ctx.Done() }) - // - // Start the app_agent_receiver integration - // - go func() { - err = integration.RunIntegration(ctx) - require.NoError(t, err) - }() - - // - // Send data to the integration's /collect endpoint - // - const PAYLOAD = ` -{ - "traces": { - "resourceSpans": [{ - "scopeSpans": [{ - "spans": [{ - "name": "TestSpan", - "attributes": [{ - "key": "foo", - "value": { "intValue": 
"11111" } - }, - { - "key": "boo", - "value": { "intValue": "22222" } - }, - { - "key": "user.email", - "value": { "stringValue": "user@email.com" } - }] - }] - }] - }] - }, - "logs": [], - "exceptions": [], - "measurements": [], - "meta": {} -} -` - - integrationURL := fmt.Sprintf("http://127.0.0.1:%d/collect", integrationPort) - - var httpResponse *http.Response - require.EventuallyWithT(t, func(c *assert.CollectT) { - req, err := http.NewRequest("POST", integrationURL, bytes.NewBuffer([]byte(PAYLOAD))) - assert.NoError(c, err) - - httpResponse, err = http.DefaultClient.Do(req) - assert.NoError(c, err) - }, 5*time.Second, 250*time.Millisecond) - - // - // Check that the data was received by the integration - // - resBody, err := io.ReadAll(httpResponse.Body) - require.NoError(t, err) - require.Equal(t, "ok", string(resBody[:])) - - require.Equal(t, http.StatusAccepted, httpResponse.StatusCode) - - // - // Check that the traces subsystem remote wrote the integration - // - select { - case <-time.After(10 * time.Second): - require.Fail(t, "failed to receive a span after 10 seconds") - case tr := <-tracesCh: - require.Equal(t, 1, tr.SpanCount()) - // Nothing to do, send succeeded. - } -} diff --git a/internal/static/integrations/v2/app_agent_receiver/handler.go b/internal/static/integrations/v2/app_agent_receiver/handler.go deleted file mode 100644 index c430e90993..0000000000 --- a/internal/static/integrations/v2/app_agent_receiver/handler.go +++ /dev/null @@ -1,126 +0,0 @@ -package app_agent_receiver - -import ( - "context" - "sync" - - "crypto/subtle" - "encoding/json" - "net/http" - - "github.com/go-kit/log" - "github.com/go-kit/log/level" - "github.com/prometheus/client_golang/prometheus" - "github.com/rs/cors" - "golang.org/x/time/rate" -) - -const apiKeyHeader = "x-api-key" - -type AppAgentReceiverExporter interface { - Name() string - Export(ctx context.Context, payload Payload) error -} - -// AppAgentReceiverHandler struct controls the data ingestion http handler of the receiver -type AppAgentReceiverHandler struct { - exporters []AppAgentReceiverExporter - config *Config - rateLimiter *rate.Limiter - exporterErrorsCollector *prometheus.CounterVec -} - -// NewAppAgentReceiverHandler creates a new AppReceiver instance based on the given configuration -func NewAppAgentReceiverHandler(conf *Config, exporters []AppAgentReceiverExporter, reg prometheus.Registerer) AppAgentReceiverHandler { - var rateLimiter *rate.Limiter - if conf.Server.RateLimiting.Enabled { - var rps float64 - if conf.Server.RateLimiting.RPS > 0 { - rps = conf.Server.RateLimiting.RPS - } - - var b int - if conf.Server.RateLimiting.Burstiness > 0 { - b = conf.Server.RateLimiting.Burstiness - } - rateLimiter = rate.NewLimiter(rate.Limit(rps), b) - } - - exporterErrorsCollector := prometheus.NewCounterVec(prometheus.CounterOpts{ - Name: "app_agent_receiver_exporter_errors_total", - Help: "Total number of errors produced by a receiver exporter", - }, []string{"exporter"}) - - reg.MustRegister(exporterErrorsCollector) - - return AppAgentReceiverHandler{ - exporters: exporters, - config: conf, - rateLimiter: rateLimiter, - exporterErrorsCollector: exporterErrorsCollector, - } -} - -// HTTPHandler is the http.Handler for the receiver. It will do the following -// 0. Enable CORS for the configured hosts -// 1. Check if the request should be rate limited -// 2. Verify that the payload size is within limits -// 3. Start two go routines for exporters processing and exporting data respectively -// 4. 
Respond with 202 once all the work is done -func (ar *AppAgentReceiverHandler) HTTPHandler(logger log.Logger) http.Handler { - var handler http.Handler = http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { - // Check rate limiting state - if ar.config.Server.RateLimiting.Enabled { - if ok := ar.rateLimiter.Allow(); !ok { - http.Error(w, http.StatusText(http.StatusTooManyRequests), http.StatusTooManyRequests) - return - } - } - - // check API key if one is provided - if len(ar.config.Server.APIKey) > 0 && subtle.ConstantTimeCompare([]byte(r.Header.Get(apiKeyHeader)), []byte(ar.config.Server.APIKey)) == 0 { - http.Error(w, "api key not provided or incorrect", http.StatusUnauthorized) - return - } - - // Verify content length. We trust net/http to give us the correct number - if ar.config.Server.MaxAllowedPayloadSize > 0 && r.ContentLength > ar.config.Server.MaxAllowedPayloadSize { - http.Error(w, http.StatusText(http.StatusRequestEntityTooLarge), http.StatusRequestEntityTooLarge) - return - } - - var p Payload - err := json.NewDecoder(r.Body).Decode(&p) - if err != nil { - http.Error(w, err.Error(), http.StatusBadRequest) - return - } - - var wg sync.WaitGroup - - for _, exporter := range ar.exporters { - wg.Add(1) - go func(exp AppAgentReceiverExporter) { - defer wg.Done() - if err := exp.Export(r.Context(), p); err != nil { - level.Error(logger).Log("msg", "exporter error", "exporter", exp.Name(), "error", err) - ar.exporterErrorsCollector.WithLabelValues(exp.Name()).Inc() - } - }(exporter) - } - - wg.Wait() - w.WriteHeader(http.StatusAccepted) - _, _ = w.Write([]byte("ok")) - }) - - if len(ar.config.Server.CORSAllowedOrigins) > 0 { - c := cors.New(cors.Options{ - AllowedOrigins: ar.config.Server.CORSAllowedOrigins, - AllowedHeaders: []string{apiKeyHeader, "content-type", "x-faro-session-id"}, - }) - handler = c.Handler(handler) - } - - return handler -} diff --git a/internal/static/integrations/v2/app_agent_receiver/handler_test.go b/internal/static/integrations/v2/app_agent_receiver/handler_test.go deleted file mode 100644 index ac0e5438c8..0000000000 --- a/internal/static/integrations/v2/app_agent_receiver/handler_test.go +++ /dev/null @@ -1,356 +0,0 @@ -package app_agent_receiver - -import ( - "bytes" - "context" - "errors" - "net/http" - "net/http/httptest" - "testing" - - "github.com/go-kit/log" - "github.com/stretchr/testify/require" - - "github.com/prometheus/client_golang/prometheus" -) - -const PAYLOAD = ` -{ - "traces": { - "resourceSpans": [] - }, - "logs": [], - "exceptions": [], - "measurements": [], - "meta": {} -} -` - -type TestExporter struct { - name string - broken bool - payloads []Payload -} - -func (te *TestExporter) Name() string { - return te.name -} - -func (te *TestExporter) Export(ctx context.Context, payload Payload) error { - if te.broken { - return errors.New("this exporter is broken") - } - te.payloads = append(te.payloads, payload) - return nil -} - -func TestMultipleExportersAllSucceed(t *testing.T) { - req, err := http.NewRequest("POST", "/collect", bytes.NewBuffer([]byte(PAYLOAD))) - - reg := prometheus.NewRegistry() - - require.NoError(t, err) - - exporter1 := TestExporter{ - name: "exporter1", - broken: false, - payloads: []Payload{}, - } - exporter2 := TestExporter{ - name: "exporter2", - broken: false, - payloads: []Payload{}, - } - - conf := &Config{} - - fr := NewAppAgentReceiverHandler(conf, []AppAgentReceiverExporter{&exporter1, &exporter2}, reg) - handler := fr.HTTPHandler(log.NewNopLogger()) - - rr := httptest.NewRecorder() - - 
handler.ServeHTTP(rr, req) - - require.Equal(t, http.StatusAccepted, rr.Result().StatusCode) - - require.Len(t, exporter1.payloads, 1) - require.Len(t, exporter2.payloads, 1) -} - -func TestMultipleExportersOneFails(t *testing.T) { - req, err := http.NewRequest("POST", "/collect", bytes.NewBuffer([]byte(PAYLOAD))) - - require.NoError(t, err) - - reg := prometheus.NewRegistry() - - exporter1 := TestExporter{ - name: "exporter1", - broken: true, - payloads: []Payload{}, - } - exporter2 := TestExporter{ - name: "exporter2", - broken: false, - payloads: []Payload{}, - } - - conf := &Config{} - - fr := NewAppAgentReceiverHandler(conf, []AppAgentReceiverExporter{&exporter1, &exporter2}, reg) - handler := fr.HTTPHandler(log.NewNopLogger()) - - rr := httptest.NewRecorder() - - handler.ServeHTTP(rr, req) - - metrics, err := reg.Gather() - require.NoError(t, err) - - metric := metrics[0] - require.Equal(t, "app_agent_receiver_exporter_errors_total", *metric.Name) - require.Len(t, metric.Metric, 1) - require.Equal(t, 1.0, *metric.Metric[0].Counter.Value) - require.Len(t, metric.Metric[0].Label, 1) - require.Equal(t, *metric.Metric[0].Label[0].Value, "exporter1") - require.Len(t, metrics, 1) - require.Equal(t, http.StatusAccepted, rr.Result().StatusCode) - require.Len(t, exporter1.payloads, 0) - require.Len(t, exporter2.payloads, 1) -} - -func TestMultipleExportersAllFail(t *testing.T) { - req, err := http.NewRequest("POST", "/collect", bytes.NewBuffer([]byte(PAYLOAD))) - - reg := prometheus.NewRegistry() - - require.NoError(t, err) - - exporter1 := TestExporter{ - name: "exporter1", - broken: true, - payloads: []Payload{}, - } - exporter2 := TestExporter{ - name: "exporter2", - broken: true, - payloads: []Payload{}, - } - - conf := &Config{} - - fr := NewAppAgentReceiverHandler(conf, []AppAgentReceiverExporter{&exporter1, &exporter2}, reg) - handler := fr.HTTPHandler(log.NewNopLogger()) - - rr := httptest.NewRecorder() - - handler.ServeHTTP(rr, req) - - metrics, err := reg.Gather() - require.NoError(t, err) - - require.Len(t, metrics, 1) - metric := metrics[0] - - require.Equal(t, "app_agent_receiver_exporter_errors_total", *metric.Name) - require.Len(t, metric.Metric, 2) - require.Equal(t, 1.0, *metric.Metric[0].Counter.Value) - require.Equal(t, 1.0, *metric.Metric[1].Counter.Value) - require.Len(t, metric.Metric[0].Label, 1) - require.Len(t, metric.Metric[1].Label, 1) - require.Equal(t, *metric.Metric[0].Label[0].Value, "exporter1") - require.Equal(t, *metric.Metric[1].Label[0].Value, "exporter2") - require.Equal(t, http.StatusAccepted, rr.Result().StatusCode) - require.Len(t, exporter1.payloads, 0) - require.Len(t, exporter2.payloads, 0) -} - -func TestNoContentLengthLimitSet(t *testing.T) { - req, err := http.NewRequest("POST", "/collect", bytes.NewBuffer([]byte(PAYLOAD))) - require.NoError(t, err) - reg := prometheus.NewRegistry() - - conf := &Config{} - - req.ContentLength = 89348593894 - - fr := NewAppAgentReceiverHandler(conf, []AppAgentReceiverExporter{}, reg) - handler := fr.HTTPHandler(nil) - - rr := httptest.NewRecorder() - - handler.ServeHTTP(rr, req) - - require.Equal(t, http.StatusAccepted, rr.Result().StatusCode) -} - -func TestLargePayload(t *testing.T) { - req, err := http.NewRequest("POST", "/collect", bytes.NewBuffer([]byte(PAYLOAD))) - require.NoError(t, err) - reg := prometheus.NewRegistry() - - conf := &Config{ - Server: ServerConfig{ - MaxAllowedPayloadSize: 10, - }, - } - - fr := NewAppAgentReceiverHandler(conf, []AppAgentReceiverExporter{}, reg) - handler := 
fr.HTTPHandler(nil) - - rr := httptest.NewRecorder() - - handler.ServeHTTP(rr, req) - require.Equal(t, http.StatusRequestEntityTooLarge, rr.Result().StatusCode) -} - -func TestAPIKeyRequiredButNotProvided(t *testing.T) { - req, err := http.NewRequest("POST", "/collect", bytes.NewBuffer([]byte(PAYLOAD))) - - if err != nil { - t.Fatal(err) - } - - conf := &Config{ - Server: ServerConfig{ - APIKey: "foo", - }, - } - - fr := NewAppAgentReceiverHandler(conf, nil, prometheus.NewRegistry()) - handler := fr.HTTPHandler(nil) - - rr := httptest.NewRecorder() - - handler.ServeHTTP(rr, req) - require.Equal(t, http.StatusUnauthorized, rr.Result().StatusCode) -} - -func TestAPIKeyWrong(t *testing.T) { - req, err := http.NewRequest("POST", "/collect", bytes.NewBuffer([]byte(PAYLOAD))) - req.Header.Set("x-api-key", "bar") - - if err != nil { - t.Fatal(err) - } - - conf := &Config{ - Server: ServerConfig{ - APIKey: "foo", - }, - } - - fr := NewAppAgentReceiverHandler(conf, nil, prometheus.NewRegistry()) - handler := fr.HTTPHandler(nil) - - rr := httptest.NewRecorder() - - handler.ServeHTTP(rr, req) - require.Equal(t, http.StatusUnauthorized, rr.Result().StatusCode) -} - -func TestAPIKeyCorrect(t *testing.T) { - req, err := http.NewRequest("POST", "/collect", bytes.NewBuffer([]byte(PAYLOAD))) - req.Header.Set("x-api-key", "foo") - - if err != nil { - t.Fatal(err) - } - - conf := &Config{ - Server: ServerConfig{ - APIKey: "foo", - }, - } - - fr := NewAppAgentReceiverHandler(conf, nil, prometheus.NewRegistry()) - handler := fr.HTTPHandler(nil) - - rr := httptest.NewRecorder() - - handler.ServeHTTP(rr, req) - require.Equal(t, http.StatusAccepted, rr.Result().StatusCode) -} - -func TestRateLimiterNoReject(t *testing.T) { - req, err := http.NewRequest("POST", "/collect", bytes.NewBuffer([]byte(PAYLOAD))) - - if err != nil { - t.Fatal(err) - } - - conf := &Config{ - Server: ServerConfig{ - RateLimiting: RateLimitingConfig{ - Burstiness: 10, - RPS: 10, - Enabled: true, - }, - }, - } - - fr := NewAppAgentReceiverHandler(conf, nil, prometheus.NewRegistry()) - handler := fr.HTTPHandler(nil) - - rr := httptest.NewRecorder() - - handler.ServeHTTP(rr, req) - require.Equal(t, http.StatusAccepted, rr.Result().StatusCode) -} - -func TestRateLimiterReject(t *testing.T) { - conf := &Config{ - Server: ServerConfig{ - RateLimiting: RateLimitingConfig{ - Burstiness: 2, - RPS: 1, - Enabled: true, - }, - }, - } - - fr := NewAppAgentReceiverHandler(conf, nil, prometheus.NewRegistry()) - handler := fr.HTTPHandler(nil) - - makeRequest := func() *httptest.ResponseRecorder { - req, err := http.NewRequest("POST", "/collect", bytes.NewBuffer([]byte(PAYLOAD))) - require.NoError(t, err) - rr := httptest.NewRecorder() - handler.ServeHTTP(rr, req) - return rr - } - - r1 := makeRequest() - r2 := makeRequest() - r3 := makeRequest() - - require.Equal(t, http.StatusAccepted, r1.Result().StatusCode) - require.Equal(t, http.StatusAccepted, r2.Result().StatusCode) - require.Equal(t, http.StatusTooManyRequests, r3.Result().StatusCode) -} - -func TestRateLimiterDisabled(t *testing.T) { - req, err := http.NewRequest("POST", "/collect", bytes.NewBuffer([]byte(PAYLOAD))) - - if err != nil { - t.Fatal(err) - } - - conf := &Config{ - Server: ServerConfig{ - RateLimiting: RateLimitingConfig{ - Burstiness: 0, - RPS: 0, - Enabled: false, - }, - }, - } - - fr := NewAppAgentReceiverHandler(conf, nil, prometheus.NewRegistry()) - handler := fr.HTTPHandler(nil) - - rr := httptest.NewRecorder() - - handler.ServeHTTP(rr, req) - require.Equal(t, 
http.StatusAccepted, rr.Result().StatusCode) -} diff --git a/internal/static/integrations/v2/app_agent_receiver/logs_exporter.go b/internal/static/integrations/v2/app_agent_receiver/logs_exporter.go deleted file mode 100644 index 31295a5060..0000000000 --- a/internal/static/integrations/v2/app_agent_receiver/logs_exporter.go +++ /dev/null @@ -1,140 +0,0 @@ -package app_agent_receiver - -import ( - "context" - "fmt" - "time" - - kitlog "github.com/go-kit/log" - "github.com/go-kit/log/level" - "github.com/go-logfmt/logfmt" - "github.com/grafana/agent/internal/static/logs" - "github.com/grafana/loki/clients/pkg/promtail/api" - "github.com/grafana/loki/pkg/logproto" - prommodel "github.com/prometheus/common/model" -) - -// logsInstance is an interface with capability to send log entries -type logsInstance interface { - SendEntry(entry api.Entry, dur time.Duration) bool -} - -// logsInstanceGetter is a function that returns a LogsInstance to send log entries to -type logsInstanceGetter func() (logsInstance, error) - -// LogsExporterConfig holds the configuration of the logs exporter -type LogsExporterConfig struct { - SendEntryTimeout time.Duration - GetLogsInstance logsInstanceGetter - Labels map[string]string -} - -// LogsExporter will send logs & errors to loki -type LogsExporter struct { - getLogsInstance logsInstanceGetter - sendEntryTimeout time.Duration - logger kitlog.Logger - labels map[string]string - sourceMapStore SourceMapStore -} - -// NewLogsExporter creates a new logs exporter with the given -// configuration -func NewLogsExporter(logger kitlog.Logger, conf LogsExporterConfig, sourceMapStore SourceMapStore) AppAgentReceiverExporter { - return &LogsExporter{ - logger: logger, - getLogsInstance: conf.GetLogsInstance, - sendEntryTimeout: conf.SendEntryTimeout, - labels: conf.Labels, - sourceMapStore: sourceMapStore, - } -} - -// Name of the exporter, for logging purposes -func (le *LogsExporter) Name() string { - return "logs exporter" -} - -// Export implements the AppDataExporter interface -func (le *LogsExporter) Export(ctx context.Context, payload Payload) error { - meta := payload.Meta.KeyVal() - - var err error - - // log events - for _, logItem := range payload.Logs { - kv := logItem.KeyVal() - MergeKeyVal(kv, meta) - err = le.sendKeyValsToLogsPipeline(kv) - } - - // exceptions - for _, exception := range payload.Exceptions { - transformedException := TransformException(le.sourceMapStore, le.logger, &exception, payload.Meta.App.Release) - kv := transformedException.KeyVal() - MergeKeyVal(kv, meta) - err = le.sendKeyValsToLogsPipeline(kv) - } - - // measurements - for _, measurement := range payload.Measurements { - kv := measurement.KeyVal() - MergeKeyVal(kv, meta) - err = le.sendKeyValsToLogsPipeline(kv) - } - - // events - for _, event := range payload.Events { - kv := event.KeyVal() - MergeKeyVal(kv, meta) - err = le.sendKeyValsToLogsPipeline(kv) - } - - return err -} - -func (le *LogsExporter) sendKeyValsToLogsPipeline(kv *KeyVal) error { - line, err := logfmt.MarshalKeyvals(KeyValToInterfaceSlice(kv)...) 
- if err != nil { - level.Error(le.logger).Log("msg", "failed to logfmt a frontend log event", "err", err) - return err - } - instance, err := le.getLogsInstance() - if err != nil { - return err - } - sent := instance.SendEntry(api.Entry{ - Labels: le.labelSet(kv), - Entry: logproto.Entry{ - Timestamp: time.Now(), - Line: string(line), - }, - }, le.sendEntryTimeout) - if !sent { - level.Warn(le.logger).Log("msg", "failed to log frontend log event to logs pipeline") - return fmt.Errorf("failed to send app event to logs pipeline") - } - return nil -} - -func (le *LogsExporter) labelSet(kv *KeyVal) prommodel.LabelSet { - set := make(prommodel.LabelSet, len(le.labels)) - - for k, v := range le.labels { - if len(v) > 0 { - set[prommodel.LabelName(k)] = prommodel.LabelValue(v) - } else { - if val, ok := kv.Get(k); ok { - set[prommodel.LabelName(k)] = prommodel.LabelValue(fmt.Sprint(val)) - } - } - } - - return set -} - -// Static typecheck tests -var ( - _ AppAgentReceiverExporter = (*LogsExporter)(nil) - _ logsInstance = (*logs.Instance)(nil) -) diff --git a/internal/static/integrations/v2/app_agent_receiver/logs_exporter_test.go b/internal/static/integrations/v2/app_agent_receiver/logs_exporter_test.go deleted file mode 100644 index 784e2c85bf..0000000000 --- a/internal/static/integrations/v2/app_agent_receiver/logs_exporter_test.go +++ /dev/null @@ -1,120 +0,0 @@ -package app_agent_receiver - -import ( - "context" - "encoding/json" - "os" - "testing" - "time" - - kitlog "github.com/go-kit/log" - "github.com/grafana/loki/clients/pkg/promtail/api" - prommodel "github.com/prometheus/common/model" - - "github.com/stretchr/testify/require" -) - -func loadTestPayload(t *testing.T) Payload { - t.Helper() - // Safe to disable, this is a test. - // nolint:gosec - content, err := os.ReadFile("./testdata/payload.json") - require.NoError(t, err, "expected to be able to read file") - require.True(t, len(content) > 0) - var payload Payload - err = json.Unmarshal(content, &payload) - require.NoError(t, err) - return payload -} - -type testLogsInstance struct { - Entries []api.Entry -} - -func (i *testLogsInstance) SendEntry(entry api.Entry, dur time.Duration) bool { - i.Entries = append(i.Entries, entry) - return true -} - -type MockSourceMapStore struct{} - -func (store *MockSourceMapStore) GetSourceMap(sourceURL string, release string) (*SourceMap, error) { - return nil, nil -} - -func TestExportLogs(t *testing.T) { - ctx := context.Background() - inst := &testLogsInstance{ - Entries: []api.Entry{}, - } - - logger := kitlog.NewNopLogger() - - logsExporter := NewLogsExporter( - logger, - LogsExporterConfig{ - GetLogsInstance: func() (logsInstance, error) { return inst, nil }, - Labels: map[string]string{ - "app": "frontend", - "kind": "", - }, - SendEntryTimeout: 100, - }, - &MockSourceMapStore{}, - ) - - payload := loadTestPayload(t) - - err := logsExporter.Export(ctx, payload) - require.NoError(t, err) - - require.Len(t, inst.Entries, 6) - - // log1 - require.Equal(t, prommodel.LabelSet{ - prommodel.LabelName("app"): prommodel.LabelValue("frontend"), - prommodel.LabelName("kind"): prommodel.LabelValue("log"), - }, inst.Entries[0].Labels) - expectedLine := "timestamp=\"2021-09-30 10:46:17.68 +0000 UTC\" kind=log message=\"opened pricing page\" level=info context_component=AppRoot context_page=Pricing traceID=abcd spanID=def sdk_name=grafana-frontend-agent sdk_version=1.0.0 app_name=testapp app_release=0.8.2 app_version=abcdefg app_environment=production user_email=geralt@kaermorhen.org user_id=123 
user_username=domasx2 user_attr_foo=bar session_id=abcd session_attr_time_elapsed=100s page_url=https://example.com/page browser_name=chrome browser_version=88.12.1 browser_os=linux browser_mobile=false view_name=foobar" - require.Equal(t, expectedLine, inst.Entries[0].Line) - - // log2 - require.Equal(t, prommodel.LabelSet{ - prommodel.LabelName("app"): prommodel.LabelValue("frontend"), - prommodel.LabelName("kind"): prommodel.LabelValue("log"), - }, inst.Entries[1].Labels) - expectedLine = "timestamp=\"2021-09-30 10:46:17.68 +0000 UTC\" kind=log message=\"loading price list\" level=trace context_component=AppRoot context_page=Pricing traceID=abcd spanID=ghj sdk_name=grafana-frontend-agent sdk_version=1.0.0 app_name=testapp app_release=0.8.2 app_version=abcdefg app_environment=production user_email=geralt@kaermorhen.org user_id=123 user_username=domasx2 user_attr_foo=bar session_id=abcd session_attr_time_elapsed=100s page_url=https://example.com/page browser_name=chrome browser_version=88.12.1 browser_os=linux browser_mobile=false view_name=foobar" - require.Equal(t, expectedLine, inst.Entries[1].Line) - - // exception - require.Equal(t, prommodel.LabelSet{ - prommodel.LabelName("app"): prommodel.LabelValue("frontend"), - prommodel.LabelName("kind"): prommodel.LabelValue("exception"), - }, inst.Entries[2].Labels) - expectedLine = "timestamp=\"2021-09-30 10:46:17.68 +0000 UTC\" kind=exception type=Error value=\"Cannot read property 'find' of undefined\" stacktrace=\"Error: Cannot read property 'find' of undefined\\n at ? (http://fe:3002/static/js/vendors~main.chunk.js:8639:42)\\n at dispatchAction (http://fe:3002/static/js/vendors~main.chunk.js:268095:9)\\n at scheduleUpdateOnFiber (http://fe:3002/static/js/vendors~main.chunk.js:273726:13)\\n at flushSyncCallbackQueue (http://fe:3002/static/js/vendors~main.chunk.js:263362:7)\\n at flushSyncCallbackQueueImpl (http://fe:3002/static/js/vendors~main.chunk.js:263374:13)\\n at runWithPriority$1 (http://fe:3002/static/js/vendors~main.chunk.js:263325:14)\\n at unstable_runWithPriority (http://fe:3002/static/js/vendors~main.chunk.js:291265:16)\\n at ? (http://fe:3002/static/js/vendors~main.chunk.js:263379:30)\\n at performSyncWorkOnRoot (http://fe:3002/static/js/vendors~main.chunk.js:274126:22)\\n at renderRootSync (http://fe:3002/static/js/vendors~main.chunk.js:274509:11)\\n at workLoopSync (http://fe:3002/static/js/vendors~main.chunk.js:274543:9)\\n at performUnitOfWork (http://fe:3002/static/js/vendors~main.chunk.js:274606:16)\\n at beginWork$1 (http://fe:3002/static/js/vendors~main.chunk.js:275746:18)\\n at beginWork (http://fe:3002/static/js/vendors~main.chunk.js:270944:20)\\n at updateFunctionComponent (http://fe:3002/static/js/vendors~main.chunk.js:269291:24)\\n at renderWithHooks (http://fe:3002/static/js/vendors~main.chunk.js:266969:22)\\n at ? (http://fe:3002/static/js/main.chunk.js:2600:74)\\n at useGetBooksQuery (http://fe:3002/static/js/main.chunk.js:1299:65)\\n at Module.useQuery (http://fe:3002/static/js/vendors~main.chunk.js:8495:85)\\n at useBaseQuery (http://fe:3002/static/js/vendors~main.chunk.js:8656:83)\\n at useDeepMemo (http://fe:3002/static/js/vendors~main.chunk.js:8696:14)\\n at ? 
(http://fe:3002/static/js/vendors~main.chunk.js:8657:55)\\n at QueryData.execute (http://fe:3002/static/js/vendors~main.chunk.js:7883:47)\\n at QueryData.getExecuteResult (http://fe:3002/static/js/vendors~main.chunk.js:7944:23)\\n at QueryData._this.getQueryResult (http://fe:3002/static/js/vendors~main.chunk.js:7790:19)\\n at new ApolloError (http://fe:3002/static/js/vendors~main.chunk.js:5164:24)\" hash=2735541995122471342 sdk_name=grafana-frontend-agent sdk_version=1.0.0 app_name=testapp app_release=0.8.2 app_version=abcdefg app_environment=production user_email=geralt@kaermorhen.org user_id=123 user_username=domasx2 user_attr_foo=bar session_id=abcd session_attr_time_elapsed=100s page_url=https://example.com/page browser_name=chrome browser_version=88.12.1 browser_os=linux browser_mobile=false view_name=foobar" - require.Equal(t, expectedLine, inst.Entries[2].Line) - - // measurement - require.Equal(t, prommodel.LabelSet{ - prommodel.LabelName("app"): prommodel.LabelValue("frontend"), - prommodel.LabelName("kind"): prommodel.LabelValue("measurement"), - }, inst.Entries[3].Labels) - expectedLine = "timestamp=\"2021-09-30 10:46:17.68 +0000 UTC\" kind=measurement type=foobar ttfb=14.000000 ttfcp=22.120000 ttfp=20.120000 traceID=abcd spanID=def context_hello=world sdk_name=grafana-frontend-agent sdk_version=1.0.0 app_name=testapp app_release=0.8.2 app_version=abcdefg app_environment=production user_email=geralt@kaermorhen.org user_id=123 user_username=domasx2 user_attr_foo=bar session_id=abcd session_attr_time_elapsed=100s page_url=https://example.com/page browser_name=chrome browser_version=88.12.1 browser_os=linux browser_mobile=false view_name=foobar" - require.Equal(t, expectedLine, inst.Entries[3].Line) - - // event 1 - require.Equal(t, prommodel.LabelSet{ - prommodel.LabelName("app"): prommodel.LabelValue("frontend"), - prommodel.LabelName("kind"): prommodel.LabelValue("event"), - }, inst.Entries[4].Labels) - expectedLine = "timestamp=\"2021-09-30 10:46:17.68 +0000 UTC\" kind=event event_name=click_login_button event_domain=frontend event_data_foo=bar event_data_one=two traceID=abcd spanID=def sdk_name=grafana-frontend-agent sdk_version=1.0.0 app_name=testapp app_release=0.8.2 app_version=abcdefg app_environment=production user_email=geralt@kaermorhen.org user_id=123 user_username=domasx2 user_attr_foo=bar session_id=abcd session_attr_time_elapsed=100s page_url=https://example.com/page browser_name=chrome browser_version=88.12.1 browser_os=linux browser_mobile=false view_name=foobar" - require.Equal(t, expectedLine, inst.Entries[4].Line) - - // event 2 - require.Equal(t, prommodel.LabelSet{ - prommodel.LabelName("app"): prommodel.LabelValue("frontend"), - prommodel.LabelName("kind"): prommodel.LabelValue("event"), - }, inst.Entries[5].Labels) - expectedLine = "timestamp=\"2021-09-30 10:46:17.68 +0000 UTC\" kind=event event_name=click_reset_password_button sdk_name=grafana-frontend-agent sdk_version=1.0.0 app_name=testapp app_release=0.8.2 app_version=abcdefg app_environment=production user_email=geralt@kaermorhen.org user_id=123 user_username=domasx2 user_attr_foo=bar session_id=abcd session_attr_time_elapsed=100s page_url=https://example.com/page browser_name=chrome browser_version=88.12.1 browser_os=linux browser_mobile=false view_name=foobar" - require.Equal(t, expectedLine, inst.Entries[5].Line) -} diff --git a/internal/static/integrations/v2/app_agent_receiver/payload.go b/internal/static/integrations/v2/app_agent_receiver/payload.go deleted file mode 100644 index 
ca91a8842d..0000000000 --- a/internal/static/integrations/v2/app_agent_receiver/payload.go +++ /dev/null @@ -1,420 +0,0 @@ -package app_agent_receiver - -import ( - "fmt" - "sort" - "strconv" - "strings" - "time" - - "go.opentelemetry.io/collector/pdata/pcommon" - "go.opentelemetry.io/collector/pdata/ptrace" - - "github.com/zeebo/xxh3" -) - -// Payload is the body of the receiver request -type Payload struct { - Exceptions []Exception `json:"exceptions,omitempty"` - Logs []Log `json:"logs,omitempty"` - Measurements []Measurement `json:"measurements,omitempty"` - Events []Event `json:"events,omitempty"` - Meta Meta `json:"meta,omitempty"` - Traces *Traces `json:"traces,omitempty"` -} - -// Frame struct represents a single stacktrace frame -type Frame struct { - Function string `json:"function,omitempty"` - Module string `json:"module,omitempty"` - Filename string `json:"filename,omitempty"` - Lineno int `json:"lineno,omitempty"` - Colno int `json:"colno,omitempty"` -} - -// String function converts a Frame into a human readable string -func (frame Frame) String() string { - module := "" - if len(frame.Module) > 0 { - module = frame.Module + "|" - } - return fmt.Sprintf("\n at %s (%s%s:%v:%v)", frame.Function, module, frame.Filename, frame.Lineno, frame.Colno) -} - -// Stacktrace is a collection of Frames -type Stacktrace struct { - Frames []Frame `json:"frames,omitempty"` -} - -// Exception struct controls all the data regarding an exception -type Exception struct { - Type string `json:"type,omitempty"` - Value string `json:"value,omitempty"` - Stacktrace *Stacktrace `json:"stacktrace,omitempty"` - Timestamp time.Time `json:"timestamp"` - Trace TraceContext `json:"trace,omitempty"` - Context ExceptionContext `json:"context,omitempty"` -} - -// Message string is concatenating of the Exception.Type and Exception.Value -func (e Exception) Message() string { - return fmt.Sprintf("%s: %s", e.Type, e.Value) -} - -// String is the string representation of an Exception -func (e Exception) String() string { - var stacktrace = e.Message() - if e.Stacktrace != nil { - for _, frame := range e.Stacktrace.Frames { - stacktrace += frame.String() - } - } - return stacktrace -} - -// KeyVal representation of the exception object -func (e Exception) KeyVal() *KeyVal { - kv := NewKeyVal() - KeyValAdd(kv, "timestamp", e.Timestamp.String()) - KeyValAdd(kv, "kind", "exception") - KeyValAdd(kv, "type", e.Type) - KeyValAdd(kv, "value", e.Value) - KeyValAdd(kv, "stacktrace", e.String()) - KeyValAdd(kv, "hash", strconv.FormatUint(xxh3.HashString(e.Value), 10)) - MergeKeyValWithPrefix(kv, KeyValFromMap(e.Context), "context_") - MergeKeyVal(kv, e.Trace.KeyVal()) - return kv -} - -// ExceptionContext is a string to string map structure that -// represents the context of an exception -type ExceptionContext map[string]string - -// TraceContext holds trace id and span id associated to an entity (log, exception, measurement...). -type TraceContext struct { - TraceID string `json:"trace_id"` - SpanID string `json:"span_id"` -} - -// KeyVal representation of the trace context object. -func (tc TraceContext) KeyVal() *KeyVal { - retv := NewKeyVal() - KeyValAdd(retv, "traceID", tc.TraceID) - KeyValAdd(retv, "spanID", tc.SpanID) - return retv -} - -// Traces wraps the otel traces model. -type Traces struct { - ptrace.Traces -} - -// UnmarshalJSON unmarshals Traces model. 
-func (t *Traces) UnmarshalJSON(b []byte) error { - unmarshaler := &ptrace.JSONUnmarshaler{} - td, err := unmarshaler.UnmarshalTraces(b) - if err != nil { - return err - } - *t = Traces{td} - return nil -} - -// MarshalJSON marshals Traces model to json. -func (t Traces) MarshalJSON() ([]byte, error) { - marshaler := &ptrace.JSONMarshaler{} - return marshaler.MarshalTraces(t.Traces) -} - -// SpanSlice unpacks Traces entity into a slice of Spans. -func (t Traces) SpanSlice() []ptrace.Span { - spans := make([]ptrace.Span, 0) - rss := t.ResourceSpans() - for i := 0; i < rss.Len(); i++ { - rs := rss.At(i) - ilss := rs.ScopeSpans() - for j := 0; j < ilss.Len(); j++ { - s := ilss.At(j).Spans() - for si := 0; si < s.Len(); si++ { - spans = append(spans, s.At(si)) - } - } - } - return spans -} - -// SpanToKeyVal returns KeyVal representation of a Span. -func SpanToKeyVal(s ptrace.Span) *KeyVal { - kv := NewKeyVal() - if s.StartTimestamp() > 0 { - KeyValAdd(kv, "timestamp", s.StartTimestamp().AsTime().String()) - } - if s.EndTimestamp() > 0 { - KeyValAdd(kv, "end_timestamp", s.StartTimestamp().AsTime().String()) - } - KeyValAdd(kv, "kind", "span") - KeyValAdd(kv, "traceID", s.TraceID().String()) - KeyValAdd(kv, "spanID", s.SpanID().String()) - KeyValAdd(kv, "span_kind", s.Kind().String()) - KeyValAdd(kv, "name", s.Name()) - KeyValAdd(kv, "parent_spanID", s.ParentSpanID().String()) - s.Attributes().Range(func(k string, v pcommon.Value) bool { - KeyValAdd(kv, "attr_"+k, fmt.Sprintf("%v", v)) - return true - }) - - return kv -} - -// LogLevel is log level enum for incoming app logs -type LogLevel string - -const ( - // LogLevelTrace is "trace" - LogLevelTrace LogLevel = "trace" - // LogLevelDebug is "debug" - LogLevelDebug LogLevel = "debug" - // LogLevelInfo is "info" - LogLevelInfo LogLevel = "info" - // LogLevelWarning is "warning" - LogLevelWarning LogLevel = "warning" - // LogLevelError is "error" - LogLevelError LogLevel = "error" -) - -// LogContext is a string to string map structure that -// represents the context of a log message -type LogContext map[string]string - -// Log struct controls the data that come into a Log message -type Log struct { - Message string `json:"message,omitempty"` - LogLevel LogLevel `json:"level,omitempty"` - Context LogContext `json:"context,omitempty"` - Timestamp time.Time `json:"timestamp"` - Trace TraceContext `json:"trace,omitempty"` -} - -// KeyVal representation of a Log object -func (l Log) KeyVal() *KeyVal { - kv := NewKeyVal() - KeyValAdd(kv, "timestamp", l.Timestamp.String()) - KeyValAdd(kv, "kind", "log") - KeyValAdd(kv, "message", l.Message) - KeyValAdd(kv, "level", string(l.LogLevel)) - MergeKeyValWithPrefix(kv, KeyValFromMap(l.Context), "context_") - MergeKeyVal(kv, l.Trace.KeyVal()) - return kv -} - -// MeasurementContext is a string to string map structure that -// represents the context of a log message -type MeasurementContext map[string]string - -// Measurement holds the data for user provided measurements -type Measurement struct { - Type string `json:"type,omitempty"` - Values map[string]float64 `json:"values,omitempty"` - Timestamp time.Time `json:"timestamp,omitempty"` - Trace TraceContext `json:"trace,omitempty"` - Context MeasurementContext `json:"context,omitempty"` -} - -// KeyVal representation of the exception object -func (m Measurement) KeyVal() *KeyVal { - kv := NewKeyVal() - - KeyValAdd(kv, "timestamp", m.Timestamp.String()) - KeyValAdd(kv, "kind", "measurement") - KeyValAdd(kv, "type", m.Type) - - keys := make([]string, 0, 
len(m.Values)) - for k := range m.Values { - keys = append(keys, k) - } - sort.Strings(keys) - for _, k := range keys { - KeyValAdd(kv, k, fmt.Sprintf("%f", m.Values[k])) - } - MergeKeyVal(kv, m.Trace.KeyVal()) - MergeKeyValWithPrefix(kv, KeyValFromMap(m.Context), "context_") - return kv -} - -// SDK holds metadata about the app agent that produced the event -type SDK struct { - Name string `json:"name,omitempty"` - Version string `json:"version,omitempty"` - Integrations []SDKIntegration `json:"integrations,omitempty"` -} - -// KeyVal produces key->value representation of Sdk metadata -func (sdk SDK) KeyVal() *KeyVal { - kv := NewKeyVal() - KeyValAdd(kv, "name", sdk.Name) - KeyValAdd(kv, "version", sdk.Version) - - if len(sdk.Integrations) > 0 { - integrations := make([]string, len(sdk.Integrations)) - - for i, integration := range sdk.Integrations { - integrations[i] = integration.String() - } - - KeyValAdd(kv, "integrations", strings.Join(integrations, ",")) - } - - return kv -} - -// SDKIntegration holds metadata about a plugin/integration on the app agent that collected and sent the event -type SDKIntegration struct { - Name string `json:"name,omitempty"` - Version string `json:"version,omitempty"` -} - -func (i SDKIntegration) String() string { - return fmt.Sprintf("%s:%s", i.Name, i.Version) -} - -// User holds metadata about the user related to an app event -type User struct { - Email string `json:"email,omitempty"` - ID string `json:"id,omitempty"` - Username string `json:"username,omitempty"` - Attributes map[string]string `json:"attributes,omitempty"` -} - -// KeyVal produces a key->value representation User metadata -func (u User) KeyVal() *KeyVal { - kv := NewKeyVal() - KeyValAdd(kv, "email", u.Email) - KeyValAdd(kv, "id", u.ID) - KeyValAdd(kv, "username", u.Username) - MergeKeyValWithPrefix(kv, KeyValFromMap(u.Attributes), "attr_") - return kv -} - -// Meta holds metadata about an app event -type Meta struct { - SDK SDK `json:"sdk,omitempty"` - App App `json:"app,omitempty"` - User User `json:"user,omitempty"` - Session Session `json:"session,omitempty"` - Page Page `json:"page,omitempty"` - Browser Browser `json:"browser,omitempty"` - View View `json:"view,omitempty"` -} - -// KeyVal produces key->value representation of the app event metadata -func (m Meta) KeyVal() *KeyVal { - kv := NewKeyVal() - MergeKeyValWithPrefix(kv, m.SDK.KeyVal(), "sdk_") - MergeKeyValWithPrefix(kv, m.App.KeyVal(), "app_") - MergeKeyValWithPrefix(kv, m.User.KeyVal(), "user_") - MergeKeyValWithPrefix(kv, m.Session.KeyVal(), "session_") - MergeKeyValWithPrefix(kv, m.Page.KeyVal(), "page_") - MergeKeyValWithPrefix(kv, m.Browser.KeyVal(), "browser_") - MergeKeyValWithPrefix(kv, m.View.KeyVal(), "view_") - return kv -} - -// Session holds metadata about the browser session the event originates from -type Session struct { - ID string `json:"id,omitempty"` - Attributes map[string]string `json:"attributes,omitempty"` -} - -// KeyVal produces key->value representation of the Session metadata -func (s Session) KeyVal() *KeyVal { - kv := NewKeyVal() - KeyValAdd(kv, "id", s.ID) - MergeKeyValWithPrefix(kv, KeyValFromMap(s.Attributes), "attr_") - return kv -} - -// Page holds metadata about the web page event originates from -type Page struct { - ID string `json:"id,omitempty"` - URL string `json:"url,omitempty"` - Attributes map[string]string `json:"attributes,omitempty"` -} - -// KeyVal produces key->val representation of Page metadata -func (p Page) KeyVal() *KeyVal { - kv := NewKeyVal() - KeyValAdd(kv, "id", 
p.ID) - KeyValAdd(kv, "url", p.URL) - MergeKeyValWithPrefix(kv, KeyValFromMap(p.Attributes), "attr_") - return kv -} - -// App holds metadata about the application event originates from -type App struct { - Name string `json:"name,omitempty"` - Release string `json:"release,omitempty"` - Version string `json:"version,omitempty"` - Environment string `json:"environment,omitempty"` -} - -// Event holds RUM event data -type Event struct { - Name string `json:"name"` - Domain string `json:"domain,omitempty"` - Attributes map[string]string `json:"attributes,omitempty"` - Timestamp time.Time `json:"timestamp,omitempty"` - Trace TraceContext `json:"trace,omitempty"` -} - -// KeyVal produces key -> value representation of Event metadata -func (e Event) KeyVal() *KeyVal { - kv := NewKeyVal() - KeyValAdd(kv, "timestamp", e.Timestamp.String()) - KeyValAdd(kv, "kind", "event") - KeyValAdd(kv, "event_name", e.Name) - KeyValAdd(kv, "event_domain", e.Domain) - if e.Attributes != nil { - MergeKeyValWithPrefix(kv, KeyValFromMap(e.Attributes), "event_data_") - } - MergeKeyVal(kv, e.Trace.KeyVal()) - return kv -} - -// KeyVal produces key-> value representation of App metadata -func (a App) KeyVal() *KeyVal { - kv := NewKeyVal() - KeyValAdd(kv, "name", a.Name) - KeyValAdd(kv, "release", a.Release) - KeyValAdd(kv, "version", a.Version) - KeyValAdd(kv, "environment", a.Environment) - return kv -} - -// Browser holds metadata about a client's browser -type Browser struct { - Name string `json:"name,omitempty"` - Version string `json:"version,omitempty"` - OS string `json:"os,omitempty"` - Mobile bool `json:"mobile,omitempty"` -} - -// KeyVal produces key->value representation of the Browser metadata -func (b Browser) KeyVal() *KeyVal { - kv := NewKeyVal() - KeyValAdd(kv, "name", b.Name) - KeyValAdd(kv, "version", b.Version) - KeyValAdd(kv, "os", b.OS) - KeyValAdd(kv, "mobile", fmt.Sprintf("%v", b.Mobile)) - return kv -} - -// View holds metadata about a view -type View struct { - Name string `json:"name,omitempty"` -} - -func (v View) KeyVal() *KeyVal { - kv := NewKeyVal() - KeyValAdd(kv, "name", v.Name) - return kv -} diff --git a/internal/static/integrations/v2/app_agent_receiver/payload_test.go b/internal/static/integrations/v2/app_agent_receiver/payload_test.go deleted file mode 100644 index b66792547a..0000000000 --- a/internal/static/integrations/v2/app_agent_receiver/payload_test.go +++ /dev/null @@ -1,142 +0,0 @@ -package app_agent_receiver - -import ( - "encoding/json" - "os" - "path/filepath" - "testing" - "time" - - "github.com/stretchr/testify/require" -) - -func loadTestData(t *testing.T, file string) []byte { - t.Helper() - // Safe to disable, this is a test. 
- // nolint:gosec - content, err := os.ReadFile(filepath.Join("testdata", file)) - require.NoError(t, err, "expected to be able to read file") - require.True(t, len(content) > 0) - return content -} - -func TestUnmarshalPayloadJSON(t *testing.T) { - content := loadTestData(t, "payload.json") - var payload Payload - err := json.Unmarshal(content, &payload) - require.NoError(t, err) - - now, err := time.Parse("2006-01-02T15:04:05Z0700", "2021-09-30T10:46:17.680Z") - require.NoError(t, err) - - require.Equal(t, Meta{ - SDK: SDK{ - Name: "grafana-frontend-agent", - Version: "1.0.0", - }, - App: App{ - Name: "testapp", - Release: "0.8.2", - Version: "abcdefg", - Environment: "production", - }, - User: User{ - Username: "domasx2", - ID: "123", - Email: "geralt@kaermorhen.org", - Attributes: map[string]string{"foo": "bar"}, - }, - Session: Session{ - ID: "abcd", - Attributes: map[string]string{"time_elapsed": "100s"}, - }, - Page: Page{ - URL: "https://example.com/page", - }, - Browser: Browser{ - Name: "chrome", - Version: "88.12.1", - OS: "linux", - Mobile: false, - }, - View: View{ - Name: "foobar", - }, - }, payload.Meta) - - require.Len(t, payload.Exceptions, 1) - require.Len(t, payload.Exceptions[0].Stacktrace.Frames, 26) - require.Equal(t, "Error", payload.Exceptions[0].Type) - require.Equal(t, "Cannot read property 'find' of undefined", payload.Exceptions[0].Value) - require.EqualValues(t, ExceptionContext{"ReactError": "Annoying Error", "component": "ReactErrorBoundary"}, payload.Exceptions[0].Context) - - require.Equal(t, []Log{ - { - Message: "opened pricing page", - LogLevel: LogLevelInfo, - Context: map[string]string{ - "component": "AppRoot", - "page": "Pricing", - }, - Timestamp: now, - Trace: TraceContext{ - TraceID: "abcd", - SpanID: "def", - }, - }, - { - Message: "loading price list", - LogLevel: LogLevelTrace, - Context: map[string]string{ - "component": "AppRoot", - "page": "Pricing", - }, - Timestamp: now, - Trace: TraceContext{ - TraceID: "abcd", - SpanID: "ghj", - }, - }, - }, payload.Logs) - - require.Equal(t, []Event{ - { - Name: "click_login_button", - Domain: "frontend", - Timestamp: now, - Attributes: map[string]string{ - "foo": "bar", - "one": "two", - }, - Trace: TraceContext{ - TraceID: "abcd", - SpanID: "def", - }, - }, - { - Name: "click_reset_password_button", - Timestamp: now, - }, - }, payload.Events) - - require.Len(t, payload.Measurements, 1) - - require.Equal(t, []Measurement{ - { - Type: "foobar", - Values: map[string]float64{ - "ttfp": 20.12, - "ttfcp": 22.12, - "ttfb": 14, - }, - Timestamp: now, - Trace: TraceContext{ - TraceID: "abcd", - SpanID: "def", - }, - Context: MeasurementContext{ - "hello": "world", - }, - }, - }, payload.Measurements) -} diff --git a/internal/static/integrations/v2/app_agent_receiver/receiver_metrics_exporter.go b/internal/static/integrations/v2/app_agent_receiver/receiver_metrics_exporter.go deleted file mode 100644 index ea74c97fdf..0000000000 --- a/internal/static/integrations/v2/app_agent_receiver/receiver_metrics_exporter.go +++ /dev/null @@ -1,61 +0,0 @@ -package app_agent_receiver - -import ( - "context" - - "github.com/prometheus/client_golang/prometheus" -) - -// ReceiverMetricsExporter is an app agent receiver exporter that will capture metrics -// about counts of logs, exceptions, measurements, traces being ingested -type ReceiverMetricsExporter struct { - totalLogs prometheus.Counter - totalMeasurements prometheus.Counter - totalExceptions prometheus.Counter - totalEvents prometheus.Counter -} - -// 
NewReceiverMetricsExporter creates a new ReceiverMetricsExporter -func NewReceiverMetricsExporter(reg prometheus.Registerer) AppAgentReceiverExporter { - exp := &ReceiverMetricsExporter{ - totalLogs: prometheus.NewCounter(prometheus.CounterOpts{ - Name: "app_agent_receiver_logs_total", - Help: "Total number of ingested logs", - }), - totalMeasurements: prometheus.NewCounter(prometheus.CounterOpts{ - Name: "app_agent_receiver_measurements_total", - Help: "Total number of ingested measurements", - }), - totalExceptions: prometheus.NewCounter(prometheus.CounterOpts{ - Name: "app_agent_receiver_exceptions_total", - Help: "Total number of ingested exceptions", - }), - totalEvents: prometheus.NewCounter(prometheus.CounterOpts{ - Name: "app_agent_receiver_events_total", - Help: "Total number of ingested events", - }), - } - - reg.MustRegister(exp.totalLogs, exp.totalExceptions, exp.totalMeasurements, exp.totalEvents) - - return exp -} - -// Name of the exporter, for logging purposes -func (re *ReceiverMetricsExporter) Name() string { - return "receiver metrics exporter" -} - -// Export implements the AppDataExporter interface -func (re *ReceiverMetricsExporter) Export(ctx context.Context, payload Payload) error { - re.totalExceptions.Add(float64(len(payload.Exceptions))) - re.totalLogs.Add(float64(len(payload.Logs))) - re.totalMeasurements.Add(float64(len(payload.Measurements))) - re.totalEvents.Add(float64(len(payload.Events))) - return nil -} - -// Static typecheck tests -var ( - _ AppAgentReceiverExporter = (*ReceiverMetricsExporter)(nil) -) diff --git a/internal/static/integrations/v2/app_agent_receiver/receiver_metrics_test.go b/internal/static/integrations/v2/app_agent_receiver/receiver_metrics_test.go deleted file mode 100644 index 5fde03caad..0000000000 --- a/internal/static/integrations/v2/app_agent_receiver/receiver_metrics_test.go +++ /dev/null @@ -1,141 +0,0 @@ -package app_agent_receiver - -import ( - "context" - "fmt" - "testing" - - "github.com/prometheus/client_golang/prometheus" - - "github.com/stretchr/testify/require" -) - -type metricAssertion struct { - name string - value float64 -} - -func testcase(t *testing.T, payload Payload, assertions []metricAssertion) { - ctx := context.Background() - - reg := prometheus.NewRegistry() - - exporter := NewReceiverMetricsExporter(reg) - - err := exporter.Export(ctx, payload) - require.NoError(t, err) - - metrics, err := reg.Gather() - require.NoError(t, err) - - for _, assertion := range assertions { - found := false - for _, metric := range metrics { - if *metric.Name == assertion.name { - found = true - require.Len(t, metric.Metric, 1) - val := metric.Metric[0].Counter.Value - require.Equal(t, assertion.value, *val) - break - } - } - if !found { - require.Fail(t, fmt.Sprintf("metric [%s] not found", assertion.name)) - } - } -} - -func TestReceiverMetricsExport(t *testing.T) { - var payload Payload - payload.Logs = make([]Log, 2) - payload.Measurements = make([]Measurement, 3) - payload.Exceptions = make([]Exception, 4) - payload.Events = make([]Event, 5) - testcase(t, payload, []metricAssertion{ - { - name: "app_agent_receiver_logs_total", - value: 2, - }, - { - name: "app_agent_receiver_measurements_total", - value: 3, - }, - { - name: "app_agent_receiver_exceptions_total", - value: 4, - }, - { - name: "app_agent_receiver_events_total", - value: 5, - }, - }) -} - -func TestReceiverMetricsExportLogsOnly(t *testing.T) { - var payload Payload - payload.Logs = []Log{ - {}, - {}, - } - testcase(t, payload, []metricAssertion{ - { - name: 
"app_agent_receiver_logs_total", - value: 2, - }, - { - name: "app_agent_receiver_measurements_total", - value: 0, - }, - { - name: "app_agent_receiver_exceptions_total", - value: 0, - }, - }) -} - -func TestReceiverMetricsExportExceptionsOnly(t *testing.T) { - var payload Payload - payload.Exceptions = []Exception{ - {}, - {}, - {}, - {}, - } - testcase(t, payload, []metricAssertion{ - { - name: "app_agent_receiver_logs_total", - value: 0, - }, - { - name: "app_agent_receiver_measurements_total", - value: 0, - }, - { - name: "app_agent_receiver_exceptions_total", - value: 4, - }, - }) -} - -func TestReceiverMetricsExportMeasurementsOnly(t *testing.T) { - var payload Payload - payload.Measurements = []Measurement{ - {}, - {}, - {}, - } - testcase(t, payload, []metricAssertion{ - { - name: "app_agent_receiver_logs_total", - value: 0, - }, - { - name: "app_agent_receiver_measurements_total", - value: 3, - }, - { - name: "app_agent_receiver_exceptions_total", - value: 0, - }, - }) -} diff --git a/internal/static/integrations/v2/app_agent_receiver/sourcemaps.go b/internal/static/integrations/v2/app_agent_receiver/sourcemaps.go deleted file mode 100644 index fe8935dd0b..0000000000 --- a/internal/static/integrations/v2/app_agent_receiver/sourcemaps.go +++ /dev/null @@ -1,357 +0,0 @@ -package app_agent_receiver - -import ( - "bytes" - "fmt" - "io" - "io/fs" - "net/http" - "net/url" - "os" - "path/filepath" - "regexp" - "strings" - "sync" - "text/template" - - "github.com/go-kit/log" - "github.com/go-kit/log/level" - "github.com/go-sourcemap/sourcemap" - "github.com/prometheus/client_golang/prometheus" - "github.com/vincent-petithory/dataurl" -) - -// SourceMapStore is interface for a sourcemap service capable of transforming -// minified source locations to original source location -type SourceMapStore interface { - GetSourceMap(sourceURL string, release string) (*SourceMap, error) -} - -type httpClient interface { - Get(url string) (resp *http.Response, err error) -} - -// FileService is interface for a service that can be used to load source maps -// from file system -type fileService interface { - Stat(name string) (fs.FileInfo, error) - ReadFile(name string) ([]byte, error) -} - -type osFileService struct{} - -func (s *osFileService) Stat(name string) (fs.FileInfo, error) { - return os.Stat(name) -} - -func (s *osFileService) ReadFile(name string) ([]byte, error) { - return os.ReadFile(name) -} - -var reSourceMap = "//[#@]\\s(source(?:Mapping)?URL)=\\s*(?P\\S+)\r?\n?$" - -// SourceMap is a wrapper for go-sourcemap consumer -type SourceMap struct { - consumer *sourcemap.Consumer -} - -type sourceMapMetrics struct { - cacheSize *prometheus.CounterVec - downloads *prometheus.CounterVec - fileReads *prometheus.CounterVec -} - -type sourcemapFileLocation struct { - SourceMapFileLocation - pathTemplate *template.Template -} - -// RealSourceMapStore is an implementation of SourceMapStore -// that can download source maps or read them from file system -type RealSourceMapStore struct { - sync.Mutex - l log.Logger - httpClient httpClient - fileService fileService - config SourceMapConfig - cache map[string]*SourceMap - fileLocations []*sourcemapFileLocation - metrics *sourceMapMetrics -} - -// NewSourceMapStore creates an instance of SourceMapStore. 
-// httpClient and fileService will be instantiated to defaults if nil is provided -func NewSourceMapStore(l log.Logger, config SourceMapConfig, reg prometheus.Registerer, httpClient httpClient, fileService fileService) SourceMapStore { - if httpClient == nil { - httpClient = &http.Client{ - Timeout: config.DownloadTimeout, - } - } - - if fileService == nil { - fileService = &osFileService{} - } - - metrics := &sourceMapMetrics{ - cacheSize: prometheus.NewCounterVec(prometheus.CounterOpts{ - Name: "app_agent_receiver_sourcemap_cache_size", - Help: "number of items in source map cache, per origin", - }, []string{"origin"}), - downloads: prometheus.NewCounterVec(prometheus.CounterOpts{ - Name: "app_agent_receiver_sourcemap_downloads_total", - Help: "downloads by the source map service", - }, []string{"origin", "http_status"}), - fileReads: prometheus.NewCounterVec(prometheus.CounterOpts{ - Name: "app_agent_receiver_sourcemap_file_reads_total", - Help: "source map file reads from file system, by origin and status", - }, []string{"origin", "status"}), - } - reg.MustRegister(metrics.cacheSize, metrics.downloads, metrics.fileReads) - - fileLocations := []*sourcemapFileLocation{} - - for _, configLocation := range config.FileSystem { - tpl, err := template.New(configLocation.Path).Parse(configLocation.Path) - if err != nil { - panic(err) - } - - fileLocations = append(fileLocations, &sourcemapFileLocation{ - SourceMapFileLocation: configLocation, - pathTemplate: tpl, - }) - } - - return &RealSourceMapStore{ - l: l, - httpClient: httpClient, - fileService: fileService, - config: config, - cache: make(map[string]*SourceMap), - metrics: metrics, - fileLocations: fileLocations, - } -} - -func (store *RealSourceMapStore) downloadFileContents(url string) ([]byte, error) { - resp, err := store.httpClient.Get(url) - if err != nil { - store.metrics.downloads.WithLabelValues(getOrigin(url), "?").Inc() - return nil, err - } - defer resp.Body.Close() - store.metrics.downloads.WithLabelValues(getOrigin(url), fmt.Sprint(resp.StatusCode)).Inc() - if resp.StatusCode != 200 { - return nil, fmt.Errorf("unexpected status %v", resp.StatusCode) - } - body, err := io.ReadAll(resp.Body) - if err != nil { - return nil, err - } - return body, nil -} - -func (store *RealSourceMapStore) downloadSourceMapContent(sourceURL string) (content []byte, resolvedSourceMapURL string, err error) { - level.Debug(store.l).Log("msg", "attempting to download source file", "url", sourceURL) - - result, err := store.downloadFileContents(sourceURL) - if err != nil { - level.Debug(store.l).Log("msg", "failed to download source file", "url", sourceURL, "err", err) - return nil, "", err - } - r := regexp.MustCompile(reSourceMap) - match := r.FindAllStringSubmatch(string(result), -1) - if len(match) == 0 { - level.Debug(store.l).Log("msg", "no source map url found in source", "url", sourceURL) - return nil, "", nil - } - sourceMapURL := match[len(match)-1][2] - - // inline sourcemap - if strings.HasPrefix(sourceMapURL, "data:") { - dataURL, err := dataurl.DecodeString(sourceMapURL) - if err != nil { - level.Debug(store.l).Log("msg", "failed to parse inline source map data url", "url", sourceURL, "err", err) - return nil, "", err - } - - level.Info(store.l).Log("msg", "successfully parsed inline source map data url", "url", sourceURL) - return dataURL.Data, sourceURL + ".map", nil - } - // remote sourcemap - resolvedSourceMapURL = sourceMapURL - - // if url is relative, attempt to resolve absolute - if !strings.HasPrefix(resolvedSourceMapURL, 
"http") { - base, err := url.Parse(sourceURL) - if err != nil { - level.Debug(store.l).Log("msg", "failed to parse source url", "url", sourceURL, "err", err) - return nil, "", err - } - relative, err := url.Parse(sourceMapURL) - if err != nil { - level.Debug(store.l).Log("msg", "failed to parse source map url", "url", sourceURL, "sourceMapURL", sourceMapURL, "err", err) - return nil, "", err - } - resolvedSourceMapURL = base.ResolveReference(relative).String() - level.Debug(store.l).Log("msg", "resolved absolute source map url", "url", sourceURL, "sourceMapURL", resolvedSourceMapURL) - } - level.Debug(store.l).Log("msg", "attempting to download source map file", "url", resolvedSourceMapURL) - result, err = store.downloadFileContents(resolvedSourceMapURL) - if err != nil { - level.Debug(store.l).Log("failed to download source map file", "url", resolvedSourceMapURL, "err", err) - return nil, "", err - } - return result, resolvedSourceMapURL, nil -} - -func (store *RealSourceMapStore) getSourceMapFromFileSystem(sourceURL string, release string, fileconf *sourcemapFileLocation) (content []byte, sourceMapURL string, err error) { - if len(sourceURL) == 0 || !strings.HasPrefix(sourceURL, fileconf.MinifiedPathPrefix) || strings.HasSuffix(sourceURL, "/") { - return nil, "", nil - } - - var rootPath bytes.Buffer - - err = fileconf.pathTemplate.Execute(&rootPath, struct{ Release string }{Release: cleanFilePathPart(release)}) - if err != nil { - return nil, "", err - } - - pathParts := []string{rootPath.String()} - for _, part := range strings.Split(strings.TrimPrefix(strings.Split(sourceURL, "?")[0], fileconf.MinifiedPathPrefix), "/") { - if len(part) > 0 && part != "." && part != ".." { - pathParts = append(pathParts, part) - } - } - mapFilePath := filepath.Join(pathParts...) 
+ ".map" - - if _, err := store.fileService.Stat(mapFilePath); err != nil { - store.metrics.fileReads.WithLabelValues(getOrigin(sourceURL), "not_found").Inc() - level.Debug(store.l).Log("msg", "source map not found on filesystem", "url", sourceURL, "file_path", mapFilePath) - return nil, "", nil - } - level.Debug(store.l).Log("msg", "source map found on filesystem", "url", mapFilePath, "file_path", mapFilePath) - - content, err = store.fileService.ReadFile(mapFilePath) - if err != nil { - store.metrics.fileReads.WithLabelValues(getOrigin(sourceURL), "error").Inc() - } else { - store.metrics.fileReads.WithLabelValues(getOrigin(sourceURL), "ok").Inc() - } - return content, sourceURL, err -} - -func (store *RealSourceMapStore) getSourceMapContent(sourceURL string, release string) (content []byte, sourceMapURL string, err error) { - //attempt to find in fs - for _, fileconf := range store.fileLocations { - content, sourceMapURL, err = store.getSourceMapFromFileSystem(sourceURL, release, fileconf) - if content != nil || err != nil { - return content, sourceMapURL, err - } - } - - //attempt to download - if strings.HasPrefix(sourceURL, "http") && urlMatchesOrigins(sourceURL, store.config.DownloadFromOrigins) { - return store.downloadSourceMapContent(sourceURL) - } - return nil, "", nil -} - -// GetSourceMap returns sourcemap for a given source url -func (store *RealSourceMapStore) GetSourceMap(sourceURL string, release string) (*SourceMap, error) { - store.Lock() - defer store.Unlock() - - cacheKey := fmt.Sprintf("%s__%s", sourceURL, release) - - if smap, ok := store.cache[cacheKey]; ok { - return smap, nil - } - content, sourceMapURL, err := store.getSourceMapContent(sourceURL, release) - if err != nil || content == nil { - store.cache[cacheKey] = nil - return nil, err - } - if content != nil { - consumer, err := sourcemap.Parse(sourceMapURL, content) - if err != nil { - store.cache[cacheKey] = nil - level.Debug(store.l).Log("msg", "failed to parse source map", "url", sourceMapURL, "release", release, "err", err) - return nil, err - } - level.Info(store.l).Log("msg", "successfully parsed source map", "url", sourceMapURL, "release", release) - smap := &SourceMap{ - consumer: consumer, - } - store.cache[cacheKey] = smap - store.metrics.cacheSize.WithLabelValues(getOrigin(sourceURL)).Inc() - return smap, nil - } - return nil, nil -} - -// ResolveSourceLocation resolves minified source location to original source location -func ResolveSourceLocation(store SourceMapStore, frame *Frame, release string) (*Frame, error) { - smap, err := store.GetSourceMap(frame.Filename, release) - if err != nil { - return nil, err - } - if smap == nil { - return nil, nil - } - - file, function, line, col, ok := smap.consumer.Source(frame.Lineno, frame.Colno) - if !ok { - return nil, nil - } - // unfortunately in many cases go-sourcemap fails to determine the original function name. - // not a big issue as long as file, line and column are correct - if len(function) == 0 { - function = "?" 
- } - return &Frame{ - Filename: file, - Lineno: line, - Colno: col, - Function: function, - }, nil -} - -// TransformException will attempt to resolve all minified source locations in the stacktrace with original source locations -func TransformException(store SourceMapStore, log log.Logger, ex *Exception, release string) *Exception { - if ex.Stacktrace == nil { - return ex - } - frames := []Frame{} - - for _, frame := range ex.Stacktrace.Frames { - mappedFrame, err := ResolveSourceLocation(store, &frame, release) - if err != nil { - level.Error(log).Log("msg", "Error resolving stack trace frame source location", "err", err) - frames = append(frames, frame) - } else if mappedFrame != nil { - frames = append(frames, *mappedFrame) - } else { - frames = append(frames, frame) - } - } - - return &Exception{ - Type: ex.Type, - Value: ex.Value, - Stacktrace: &Stacktrace{Frames: frames}, - Timestamp: ex.Timestamp, - } -} - -func cleanFilePathPart(x string) string { - return strings.TrimLeft(strings.ReplaceAll(strings.ReplaceAll(x, "\\", ""), "/", ""), ".") -} - -func getOrigin(URL string) string { - parsed, err := url.Parse(URL) - if err != nil { - return "?" - } - return fmt.Sprintf("%s://%s", parsed.Scheme, parsed.Host) -} diff --git a/internal/static/integrations/v2/app_agent_receiver/sourcemaps_test.go b/internal/static/integrations/v2/app_agent_receiver/sourcemaps_test.go deleted file mode 100644 index e9f7a5bfd6..0000000000 --- a/internal/static/integrations/v2/app_agent_receiver/sourcemaps_test.go +++ /dev/null @@ -1,495 +0,0 @@ -package app_agent_receiver - -import ( - "bytes" - "errors" - "io" - "io/fs" - "net/http" - "path/filepath" - "testing" - - "github.com/go-kit/log" - "github.com/prometheus/client_golang/prometheus" - "github.com/stretchr/testify/require" -) - -type mockHTTPClient struct { - responses []struct { - *http.Response - error - } - requests []string -} - -func (cl *mockHTTPClient) Get(url string) (resp *http.Response, err error) { - if len(cl.responses) > len(cl.requests) { - r := cl.responses[len(cl.requests)] - cl.requests = append(cl.requests, url) - return r.Response, r.error - } - return nil, errors.New("mockHTTPClient got more requests than expected") -} - -type mockFileService struct { - files map[string][]byte - stats []string - reads []string -} - -func (s *mockFileService) Stat(name string) (fs.FileInfo, error) { - s.stats = append(s.stats, name) - _, ok := s.files[name] - if !ok { - return nil, errors.New("file not found") - } - return nil, nil -} - -func (s *mockFileService) ReadFile(name string) ([]byte, error) { - s.reads = append(s.reads, name) - content, ok := s.files[name] - if ok { - return content, nil - } - return nil, errors.New("file not found") -} - -func newResponseFromTestData(t *testing.T, file string) *http.Response { - return &http.Response{ - Body: io.NopCloser(bytes.NewReader(loadTestData(t, file))), - StatusCode: 200, - } -} - -func mockException() *Exception { - return &Exception{ - Stacktrace: &Stacktrace{ - Frames: []Frame{ - { - Colno: 6, - Filename: "http://localhost:1234/foo.js", - Function: "eval", - Lineno: 5, - }, - { - Colno: 5, - Filename: "http://localhost:1234/foo.js", - Function: "callUndefined", - Lineno: 6, - }, - }, - }, - } -} - -func Test_RealSourceMapStore_DownloadSuccess(t *testing.T) { - conf := SourceMapConfig{ - Download: true, - DownloadFromOrigins: []string{"*"}, - } - - httpClient := &mockHTTPClient{ - responses: []struct { - *http.Response - error - }{ - {newResponseFromTestData(t, "foo.js"), nil}, - 
{newResponseFromTestData(t, "foo.js.map"), nil}, - }, - } - - logger := log.NewNopLogger() - - sourceMapStore := NewSourceMapStore(logger, conf, prometheus.NewRegistry(), httpClient, &mockFileService{}) - - exception := mockException() - - transformed := TransformException(sourceMapStore, logger, exception, "123") - - require.Equal(t, []string{"http://localhost:1234/foo.js", "http://localhost:1234/foo.js.map"}, httpClient.requests) - - expected := &Exception{ - Stacktrace: &Stacktrace{ - Frames: []Frame{ - { - Colno: 37, - Filename: "/__parcel_source_root/demo/src/actions.ts", - Function: "?", - Lineno: 6, - }, - { - Colno: 2, - Filename: "/__parcel_source_root/demo/src/actions.ts", - Function: "?", - Lineno: 7, - }, - }, - }, - } - - require.Equal(t, *expected, *transformed) -} - -func Test_RealSourceMapStore_DownloadError(t *testing.T) { - conf := SourceMapConfig{ - Download: true, - DownloadFromOrigins: []string{"*"}, - } - - resp := &http.Response{ - StatusCode: 500, - Body: io.NopCloser(bytes.NewReader([]byte{})), - } - - httpClient := &mockHTTPClient{ - responses: []struct { - *http.Response - error - }{ - {resp, nil}, - }, - } - - logger := log.NewNopLogger() - - sourceMapStore := NewSourceMapStore(logger, conf, prometheus.NewRegistry(), httpClient, &mockFileService{}) - - exception := mockException() - - transformed := TransformException(sourceMapStore, logger, exception, "123") - - require.Equal(t, []string{"http://localhost:1234/foo.js"}, httpClient.requests) - require.Equal(t, exception, transformed) -} - -func Test_RealSourceMapStore_DownloadHTTPOriginFiltering(t *testing.T) { - conf := SourceMapConfig{ - Download: true, - DownloadFromOrigins: []string{"http://bar.com/"}, - } - - httpClient := &mockHTTPClient{ - responses: []struct { - *http.Response - error - }{ - {newResponseFromTestData(t, "foo.js"), nil}, - {newResponseFromTestData(t, "foo.js.map"), nil}, - }, - } - - logger := log.NewNopLogger() - - sourceMapStore := NewSourceMapStore(logger, conf, prometheus.NewRegistry(), httpClient, &mockFileService{}) - - exception := &Exception{ - Stacktrace: &Stacktrace{ - Frames: []Frame{ - { - Colno: 6, - Filename: "http://foo.com/foo.js", - Function: "eval", - Lineno: 5, - }, - { - Colno: 5, - Filename: "http://bar.com/foo.js", - Function: "callUndefined", - Lineno: 6, - }, - }, - }, - } - - transformed := TransformException(sourceMapStore, logger, exception, "123") - - require.Equal(t, []string{"http://bar.com/foo.js", "http://bar.com/foo.js.map"}, httpClient.requests) - - expected := &Exception{ - Stacktrace: &Stacktrace{ - Frames: []Frame{ - { - Colno: 6, - Filename: "http://foo.com/foo.js", - Function: "eval", - Lineno: 5, - }, - { - Colno: 2, - Filename: "/__parcel_source_root/demo/src/actions.ts", - Function: "?", - Lineno: 7, - }, - }, - }, - } - - require.Equal(t, *expected, *transformed) -} - -func Test_RealSourceMapStore_ReadFromFileSystem(t *testing.T) { - conf := SourceMapConfig{ - Download: false, - FileSystem: []SourceMapFileLocation{ - { - MinifiedPathPrefix: "http://foo.com/", - Path: filepath.FromSlash("/var/build/latest/"), - }, - { - MinifiedPathPrefix: "http://bar.com/", - Path: filepath.FromSlash("/var/build/{{ .Release }}/"), - }, - }, - } - - mapFile := loadTestData(t, "foo.js.map") - - fileService := &mockFileService{ - files: map[string][]byte{ - filepath.FromSlash("/var/build/latest/foo.js.map"): mapFile, - filepath.FromSlash("/var/build/123/foo.js.map"): mapFile, - }, - } - - logger := log.NewNopLogger() - - sourceMapStore := NewSourceMapStore(logger, 
conf, prometheus.NewRegistry(), &mockHTTPClient{}, fileService) - - exception := &Exception{ - Stacktrace: &Stacktrace{ - Frames: []Frame{ - { - Colno: 6, - Filename: "http://foo.com/foo.js", - Function: "eval", - Lineno: 5, - }, - { - Colno: 6, - Filename: "http://foo.com/bar.js", - Function: "eval", - Lineno: 5, - }, - { - Colno: 5, - Filename: "http://bar.com/foo.js", - Function: "callUndefined", - Lineno: 6, - }, - { - Colno: 5, - Filename: "http://baz.com/foo.js", - Function: "callUndefined", - Lineno: 6, - }, - }, - }, - } - - transformed := TransformException(sourceMapStore, logger, exception, "123") - - require.Equal(t, []string{ - filepath.FromSlash("/var/build/latest/foo.js.map"), - filepath.FromSlash("/var/build/latest/bar.js.map"), - filepath.FromSlash("/var/build/123/foo.js.map"), - }, fileService.stats) - require.Equal(t, []string{ - filepath.FromSlash("/var/build/latest/foo.js.map"), - filepath.FromSlash("/var/build/123/foo.js.map"), - }, fileService.reads) - - expected := &Exception{ - Stacktrace: &Stacktrace{ - Frames: []Frame{ - { - Colno: 37, - Filename: "/__parcel_source_root/demo/src/actions.ts", - Function: "?", - Lineno: 6, - }, - { - Colno: 6, - Filename: "http://foo.com/bar.js", - Function: "eval", - Lineno: 5, - }, - { - Colno: 2, - Filename: "/__parcel_source_root/demo/src/actions.ts", - Function: "?", - Lineno: 7, - }, - { - Colno: 5, - Filename: "http://baz.com/foo.js", - Function: "callUndefined", - Lineno: 6, - }, - }, - }, - } - - require.Equal(t, *expected, *transformed) -} - -func Test_RealSourceMapStore_ReadFromFileSystemAndDownload(t *testing.T) { - conf := SourceMapConfig{ - Download: true, - DownloadFromOrigins: []string{"*"}, - FileSystem: []SourceMapFileLocation{ - { - MinifiedPathPrefix: "http://foo.com/", - Path: filepath.FromSlash("/var/build/latest/"), - }, - }, - } - - mapFile := loadTestData(t, "foo.js.map") - - fileService := &mockFileService{ - files: map[string][]byte{ - filepath.FromSlash("/var/build/latest/foo.js.map"): mapFile, - }, - } - - httpClient := &mockHTTPClient{ - responses: []struct { - *http.Response - error - }{ - {newResponseFromTestData(t, "foo.js"), nil}, - {newResponseFromTestData(t, "foo.js.map"), nil}, - }, - } - - logger := log.NewNopLogger() - - sourceMapStore := NewSourceMapStore(logger, conf, prometheus.NewRegistry(), httpClient, fileService) - - exception := &Exception{ - Stacktrace: &Stacktrace{ - Frames: []Frame{ - { - Colno: 6, - Filename: "http://foo.com/foo.js", - Function: "eval", - Lineno: 5, - }, - { - Colno: 5, - Filename: "http://bar.com/foo.js", - Function: "callUndefined", - Lineno: 6, - }, - }, - }, - } - - transformed := TransformException(sourceMapStore, logger, exception, "123") - - require.Equal(t, []string{filepath.FromSlash("/var/build/latest/foo.js.map")}, fileService.stats) - require.Equal(t, []string{filepath.FromSlash("/var/build/latest/foo.js.map")}, fileService.reads) - require.Equal(t, []string{"http://bar.com/foo.js", "http://bar.com/foo.js.map"}, httpClient.requests) - - expected := &Exception{ - Stacktrace: &Stacktrace{ - Frames: []Frame{ - { - Colno: 37, - Filename: "/__parcel_source_root/demo/src/actions.ts", - Function: "?", - Lineno: 6, - }, - { - Colno: 2, - Filename: "/__parcel_source_root/demo/src/actions.ts", - Function: "?", - Lineno: 7, - }, - }, - }, - } - - require.Equal(t, *expected, *transformed) -} - -func Test_RealSourceMapStore_FilepathSanitized(t *testing.T) { - conf := SourceMapConfig{ - Download: false, - FileSystem: []SourceMapFileLocation{ - { - MinifiedPathPrefix: 
"http://foo.com/", - Path: filepath.FromSlash("/var/build/latest/"), - }, - }, - } - - fileService := &mockFileService{} - - logger := log.NewNopLogger() - - sourceMapStore := NewSourceMapStore(logger, conf, prometheus.NewRegistry(), &mockHTTPClient{}, fileService) - - exception := &Exception{ - Stacktrace: &Stacktrace{ - Frames: []Frame{ - { - Colno: 6, - Filename: "http://foo.com/../../../etc/passwd", - Function: "eval", - Lineno: 5, - }, - }, - }, - } - - transformed := TransformException(sourceMapStore, logger, exception, "123") - - require.Equal(t, []string{ - filepath.FromSlash("/var/build/latest/etc/passwd.map"), - }, fileService.stats) - require.Len(t, fileService.reads, 0) - - require.Equal(t, *exception, *transformed) -} - -func Test_RealSourceMapStore_FilepathQueryParamsOmitted(t *testing.T) { - conf := SourceMapConfig{ - Download: false, - FileSystem: []SourceMapFileLocation{ - { - MinifiedPathPrefix: "http://foo.com/", - Path: filepath.FromSlash("/var/build/latest/"), - }, - }, - } - - fileService := &mockFileService{} - - logger := log.NewNopLogger() - - sourceMapStore := NewSourceMapStore(logger, conf, prometheus.NewRegistry(), &mockHTTPClient{}, fileService) - - exception := &Exception{ - Stacktrace: &Stacktrace{ - Frames: []Frame{ - { - Colno: 6, - Filename: "http://foo.com/static/foo.js?v=1233", - Function: "eval", - Lineno: 5, - }, - }, - }, - } - - transformed := TransformException(sourceMapStore, logger, exception, "123") - - require.Equal(t, []string{ - filepath.FromSlash("/var/build/latest/static/foo.js.map"), - }, fileService.stats) - require.Len(t, fileService.reads, 0) - - require.Equal(t, *exception, *transformed) -} diff --git a/internal/static/integrations/v2/app_agent_receiver/testdata/foo.js b/internal/static/integrations/v2/app_agent_receiver/testdata/foo.js deleted file mode 100644 index b38652a4ee..0000000000 --- a/internal/static/integrations/v2/app_agent_receiver/testdata/foo.js +++ /dev/null @@ -1,39 +0,0 @@ -function throwError() { - throw new Error('This is a thrown error'); -} -function callUndefined() { - // eslint-disable-next-line no-eval - eval('test();'); -} -function callConsole(method) { - // eslint-disable-next-line no-console - console[method](`This is a console ${method} message`); -} -function fetchError() { - fetch('http://localhost:12345', { - method: 'POST' - }); -} -function promiseReject() { - new Promise((_accept, reject)=>{ - reject('This is a rejected promise'); - }); -} -function fetchSuccess() { - fetch('http://localhost:1234'); -} -function sendCustomMetric() { - window.grafanaJavaScriptAgent.api.pushMeasurement({ - type: 'custom', - values: { - my_custom_metric: Math.random() - } - }); -} -window.addEventListener('load', ()=>{ - window.grafanaJavaScriptAgent.api.pushLog([ - 'Manual event from Home' - ]); -}); - -//# sourceMappingURL=foo.js.map diff --git a/internal/static/integrations/v2/app_agent_receiver/testdata/foo.js.map b/internal/static/integrations/v2/app_agent_receiver/testdata/foo.js.map deleted file mode 100644 index 0cd4998974..0000000000 --- a/internal/static/integrations/v2/app_agent_receiver/testdata/foo.js.map +++ /dev/null @@ -1 +0,0 @@ 
-{"mappings":"SAAS,UAAU,GAAG,CAAC;IACrB,KAAK,CAAC,GAAG,CAAC,KAAK,CAAC,CAAwB;AAC1C,CAAC;SAEQ,aAAa,GAAG,CAAC;IACxB,EAAmC,AAAnC,iCAAmC;IACnC,IAAI,CAAC,CAAS;AAChB,CAAC;SAEQ,WAAW,CAAC,MAAmD,EAAE,CAAC;IACzE,EAAsC,AAAtC,oCAAsC;IACtC,OAAO,CAAC,MAAM,GAAG,kBAAkB,EAAE,MAAM,CAAC,QAAQ;AACtD,CAAC;SAEQ,UAAU,GAAG,CAAC;IACrB,KAAK,CAAC,CAAwB,yBAAE,CAAC;QAC/B,MAAM,EAAE,CAAM;IAChB,CAAC;AACH,CAAC;SAEQ,aAAa,GAAG,CAAC;IACxB,GAAG,CAAC,OAAO,EAAE,OAAO,EAAE,MAAM,GAAK,CAAC;QAChC,MAAM,CAAC,CAA4B;IACrC,CAAC;AACH,CAAC;SAEQ,YAAY,GAAG,CAAC;IACvB,KAAK,CAAC,CAAuB;AAC/B,CAAC;SAEQ,gBAAgB,GAAG,CAAC;IAC1B,MAAM,CAAS,sBAAsB,CAAC,GAAG,CAAC,eAAe,CAAC,CAAC;QAC1D,IAAI,EAAE,CAAQ;QACd,MAAM,EAAE,CAAC;YACP,gBAAgB,EAAE,IAAI,CAAC,MAAM;QAC/B,CAAC;IACH,CAAC;AACH,CAAC;AAED,MAAM,CAAC,gBAAgB,CAAC,CAAM,WAAQ,CAAC;IACpC,MAAM,CAAS,sBAAsB,CAAC,GAAG,CAAC,OAAO,CAAC,CAAC;QAAA,CAAwB;IAAA,CAAC;AAC/E,CAAC","sources":["demo/src/actions.ts"],"sourcesContent":["function throwError() {\n throw new Error('This is a thrown error');\n}\n\nfunction callUndefined() {\n // eslint-disable-next-line no-eval\n eval('test();');\n}\n\nfunction callConsole(method: 'trace' | 'info' | 'log' | 'warn' | 'error') {\n // eslint-disable-next-line no-console\n console[method](`This is a console ${method} message`);\n}\n\nfunction fetchError() {\n fetch('http://localhost:12345', {\n method: 'POST',\n });\n}\n\nfunction promiseReject() {\n new Promise((_accept, reject) => {\n reject('This is a rejected promise');\n });\n}\n\nfunction fetchSuccess() {\n fetch('http://localhost:1234');\n}\n\nfunction sendCustomMetric() {\n (window as any).grafanaJavaScriptAgent.api.pushMeasurement({\n type: 'custom',\n values: {\n my_custom_metric: Math.random(),\n },\n });\n}\n\nwindow.addEventListener('load', () => {\n (window as any).grafanaJavaScriptAgent.api.pushLog(['Manual event from Home']);\n});\n"],"names":[],"version":3,"file":"index.28a7d598.js.map","sourceRoot":"/__parcel_source_root/"} \ No newline at end of file diff --git a/internal/static/integrations/v2/app_agent_receiver/testdata/payload.json b/internal/static/integrations/v2/app_agent_receiver/testdata/payload.json deleted file mode 100644 index b6ac7efce0..0000000000 --- a/internal/static/integrations/v2/app_agent_receiver/testdata/payload.json +++ /dev/null @@ -1,330 +0,0 @@ -{ - "logs": [ - { - "message": "opened pricing page", - "level": "info", - "context": { - "component": "AppRoot", - "page": "Pricing" - }, - "timestamp": "2021-09-30T10:46:17.680Z", - "trace": { - "trace_id": "abcd", - "span_id": "def" - } - }, - { - "message": "loading price list", - "level": "trace", - "context": { - "component": "AppRoot", - "page": "Pricing" - }, - "timestamp": "2021-09-30T10:46:17.680Z", - "trace": { - "trace_id": "abcd", - "span_id": "ghj" - } - } - ], - "exceptions": [ - { - "type": "Error", - "value": "Cannot read property 'find' of undefined", - "stacktrace": { - "frames": [ - { - "colno": 42, - "filename": "http://fe:3002/static/js/vendors~main.chunk.js", - "function": "?", - "in_app": true, - "lineno": 8639 - }, - { - "colno": 9, - "filename": "http://fe:3002/static/js/vendors~main.chunk.js", - "function": "dispatchAction", - "in_app": true, - "lineno": 268095 - }, - { - "colno": 13, - "filename": "http://fe:3002/static/js/vendors~main.chunk.js", - "function": "scheduleUpdateOnFiber", - "in_app": true, - "lineno": 273726 - }, - { - "colno": 7, - "filename": "http://fe:3002/static/js/vendors~main.chunk.js", - "function": "flushSyncCallbackQueue", - "in_app": true, - "lineno": 263362 - }, - { - "colno": 13, - "filename": 
"http://fe:3002/static/js/vendors~main.chunk.js", - "function": "flushSyncCallbackQueueImpl", - "in_app": true, - "lineno": 263374 - }, - { - "colno": 14, - "filename": "http://fe:3002/static/js/vendors~main.chunk.js", - "function": "runWithPriority$1", - "lineno": 263325 - }, - { - "colno": 16, - "filename": "http://fe:3002/static/js/vendors~main.chunk.js", - "function": "unstable_runWithPriority", - "lineno": 291265 - }, - { - "colno": 30, - "filename": "http://fe:3002/static/js/vendors~main.chunk.js", - "function": "?", - "lineno": 263379 - }, - { - "colno": 22, - "filename": "http://fe:3002/static/js/vendors~main.chunk.js", - "function": "performSyncWorkOnRoot", - "lineno": 274126 - }, - { - "colno": 11, - "filename": "http://fe:3002/static/js/vendors~main.chunk.js", - "function": "renderRootSync", - "lineno": 274509 - }, - { - "colno": 9, - "filename": "http://fe:3002/static/js/vendors~main.chunk.js", - "function": "workLoopSync", - "lineno": 274543 - }, - { - "colno": 16, - "filename": "http://fe:3002/static/js/vendors~main.chunk.js", - "function": "performUnitOfWork", - "lineno": 274606 - }, - { - "colno": 18, - "filename": "http://fe:3002/static/js/vendors~main.chunk.js", - "function": "beginWork$1", - "in_app": true, - "lineno": 275746 - }, - { - "colno": 20, - "filename": "http://fe:3002/static/js/vendors~main.chunk.js", - "function": "beginWork", - "lineno": 270944 - }, - { - "colno": 24, - "filename": "http://fe:3002/static/js/vendors~main.chunk.js", - "function": "updateFunctionComponent", - "lineno": 269291 - }, - { - "colno": 22, - "filename": "http://fe:3002/static/js/vendors~main.chunk.js", - "function": "renderWithHooks", - "lineno": 266969 - }, - { - "colno": 74, - "filename": "http://fe:3002/static/js/main.chunk.js", - "function": "?", - "in_app": true, - "lineno": 2600 - }, - { - "colno": 65, - "filename": "http://fe:3002/static/js/main.chunk.js", - "function": "useGetBooksQuery", - "lineno": 1299 - }, - { - "colno": 85, - "filename": "http://fe:3002/static/js/vendors~main.chunk.js", - "function": "Module.useQuery", - "lineno": 8495 - }, - { - "colno": 83, - "filename": "http://fe:3002/static/js/vendors~main.chunk.js", - "function": "useBaseQuery", - "in_app": true, - "lineno": 8656 - }, - { - "colno": 14, - "filename": "http://fe:3002/static/js/vendors~main.chunk.js", - "function": "useDeepMemo", - "lineno": 8696 - }, - { - "colno": 55, - "filename": "http://fe:3002/static/js/vendors~main.chunk.js", - "function": "?", - "lineno": 8657 - }, - { - "colno": 47, - "filename": "http://fe:3002/static/js/vendors~main.chunk.js", - "function": "QueryData.execute", - "in_app": true, - "lineno": 7883 - }, - { - "colno": 23, - "filename": "http://fe:3002/static/js/vendors~main.chunk.js", - "function": "QueryData.getExecuteResult", - "lineno": 7944 - }, - { - "colno": 19, - "filename": "http://fe:3002/static/js/vendors~main.chunk.js", - "function": "QueryData._this.getQueryResult", - "lineno": 7790 - }, - { - "colno": 24, - "filename": "http://fe:3002/static/js/vendors~main.chunk.js", - "function": "new ApolloError", - "in_app": true, - "lineno": 5164 - } - ] - }, - "timestamp": "2021-09-30T10:46:17.680Z", - "trace": { - "trace_id": "abcd", - "span_id": "def" - }, - "context": { - "component": "ReactErrorBoundary", - "ReactError": "Annoying Error" - } - } - ], - "measurements": [ - { - "type": "foobar", - "values": { - "ttfp": 20.12, - "ttfcp": 22.12, - "ttfb": 14 - }, - "timestamp": "2021-09-30T10:46:17.680Z", - "trace": { - "trace_id": "abcd", - "span_id": "def" - }, - 
"context": { - "hello": "world" - } - } - ], - "events": [ - { - "name": "click_login_button", - "domain": "frontend", - "attributes": { - "foo": "bar", - "one": "two" - }, - "timestamp": "2021-09-30T10:46:17.680Z", - "trace": { - "trace_id": "abcd", - "span_id": "def" - } - }, - { - "name": "click_reset_password_button", - "timestamp": "2021-09-30T10:46:17.680Z" - } - ], - "meta": { - "sdk": { - "name": "grafana-frontend-agent", - "version": "1.0.0" - }, - "app": { - "name": "testapp", - "release": "0.8.2", - "version": "abcdefg", - "environment": "production" - }, - "user": { - "username": "domasx2", - "id": "123", - "email": "geralt@kaermorhen.org", - "attributes": { - "foo": "bar" - } - }, - "session": { - "id": "abcd", - "attributes": { - "time_elapsed": "100s" - } - }, - "page": { - "url": "https://example.com/page" - }, - "browser": { - "name": "chrome", - "version": "88.12.1", - "os": "linux", - "mobile": false - }, - "view": { - "name": "foobar" - } - }, - "traces": { - "resourceSpans": [ - { - "resource": { - "attributes": [ - { - "key": "host.name", - "value": { - "stringValue": "testHost" - } - } - ] - }, - "instrumentationLibrarySpans": [ - { - "instrumentationLibrary": { - "name": "name", - "version": "version" - }, - "spans": [ - { - "traceId": "", - "spanId": "", - "parentSpanId": "", - "name": "testSpan", - "status": {} - }, - { - "traceId": "", - "spanId": "", - "parentSpanId": "", - "name": "testSpan2", - "status": {} - } - ] - } - ] - } - ] - } -} diff --git a/internal/static/integrations/v2/app_agent_receiver/testdata/payload_2.json b/internal/static/integrations/v2/app_agent_receiver/testdata/payload_2.json deleted file mode 100644 index eb8b18e565..0000000000 --- a/internal/static/integrations/v2/app_agent_receiver/testdata/payload_2.json +++ /dev/null @@ -1,393 +0,0 @@ -{ - "logs": [ - { - "message": "opened pricing page", - "level": "info", - "context": { - "component": "AppRoot", - "page": "Pricing" - }, - "timestamp": "2021-09-30T10:46:17.680Z", - "trace": { - "trace_id": "abcd", - "span_id": "def" - } - }, - { - "message": "loading price list", - "level": "trace", - "context": { - "component": "AppRoot", - "page": "Pricing" - }, - "timestamp": "2021-09-30T10:46:17.680Z", - "trace": { - "trace_id": "abcd", - "span_id": "ghj" - } - } - ], - "exceptions": [ - { - "type": "Error", - "value": "Cannot read property 'find' of undefined", - "stacktrace": { - "frames": [ - { - "colno": 42, - "filename": "http://fe:3002/static/js/vendors~main.chunk.js", - "function": "?", - "in_app": true, - "lineno": 8639 - }, - { - "colno": 9, - "filename": "http://fe:3002/static/js/vendors~main.chunk.js", - "function": "dispatchAction", - "in_app": true, - "lineno": 268095 - }, - { - "colno": 13, - "filename": "http://fe:3002/static/js/vendors~main.chunk.js", - "function": "scheduleUpdateOnFiber", - "in_app": true, - "lineno": 273726 - }, - { - "colno": 7, - "filename": "http://fe:3002/static/js/vendors~main.chunk.js", - "function": "flushSyncCallbackQueue", - "in_app": true, - "lineno": 263362 - }, - { - "colno": 13, - "filename": "http://fe:3002/static/js/vendors~main.chunk.js", - "function": "flushSyncCallbackQueueImpl", - "in_app": true, - "lineno": 263374 - }, - { - "colno": 14, - "filename": "http://fe:3002/static/js/vendors~main.chunk.js", - "function": "runWithPriority$1", - "lineno": 263325 - }, - { - "colno": 16, - "filename": "http://fe:3002/static/js/vendors~main.chunk.js", - "function": "unstable_runWithPriority", - "lineno": 291265 - }, - { - "colno": 30, - "filename": 
"http://fe:3002/static/js/vendors~main.chunk.js", - "function": "?", - "lineno": 263379 - }, - { - "colno": 22, - "filename": "http://fe:3002/static/js/vendors~main.chunk.js", - "function": "performSyncWorkOnRoot", - "lineno": 274126 - }, - { - "colno": 11, - "filename": "http://fe:3002/static/js/vendors~main.chunk.js", - "function": "renderRootSync", - "lineno": 274509 - }, - { - "colno": 9, - "filename": "http://fe:3002/static/js/vendors~main.chunk.js", - "function": "workLoopSync", - "lineno": 274543 - }, - { - "colno": 16, - "filename": "http://fe:3002/static/js/vendors~main.chunk.js", - "function": "performUnitOfWork", - "lineno": 274606 - }, - { - "colno": 18, - "filename": "http://fe:3002/static/js/vendors~main.chunk.js", - "function": "beginWork$1", - "in_app": true, - "lineno": 275746 - }, - { - "colno": 20, - "filename": "http://fe:3002/static/js/vendors~main.chunk.js", - "function": "beginWork", - "lineno": 270944 - }, - { - "colno": 24, - "filename": "http://fe:3002/static/js/vendors~main.chunk.js", - "function": "updateFunctionComponent", - "lineno": 269291 - }, - { - "colno": 22, - "filename": "http://fe:3002/static/js/vendors~main.chunk.js", - "function": "renderWithHooks", - "lineno": 266969 - }, - { - "colno": 74, - "filename": "http://fe:3002/static/js/main.chunk.js", - "function": "?", - "in_app": true, - "lineno": 2600 - }, - { - "colno": 65, - "filename": "http://fe:3002/static/js/main.chunk.js", - "function": "useGetBooksQuery", - "lineno": 1299 - }, - { - "colno": 85, - "filename": "http://fe:3002/static/js/vendors~main.chunk.js", - "function": "Module.useQuery", - "lineno": 8495 - }, - { - "colno": 83, - "filename": "http://fe:3002/static/js/vendors~main.chunk.js", - "function": "useBaseQuery", - "in_app": true, - "lineno": 8656 - }, - { - "colno": 14, - "filename": "http://fe:3002/static/js/vendors~main.chunk.js", - "function": "useDeepMemo", - "lineno": 8696 - }, - { - "colno": 55, - "filename": "http://fe:3002/static/js/vendors~main.chunk.js", - "function": "?", - "lineno": 8657 - }, - { - "colno": 47, - "filename": "http://fe:3002/static/js/vendors~main.chunk.js", - "function": "QueryData.execute", - "in_app": true, - "lineno": 7883 - }, - { - "colno": 23, - "filename": "http://fe:3002/static/js/vendors~main.chunk.js", - "function": "QueryData.getExecuteResult", - "lineno": 7944 - }, - { - "colno": 19, - "filename": "http://fe:3002/static/js/vendors~main.chunk.js", - "function": "QueryData._this.getQueryResult", - "lineno": 7790 - }, - { - "colno": 24, - "filename": "http://fe:3002/static/js/vendors~main.chunk.js", - "function": "new ApolloError", - "in_app": true, - "lineno": 5164 - } - ] - }, - "timestamp": "2021-09-30T10:46:17.680Z", - "trace": { - "trace_id": "abcd", - "span_id": "def" - } - } - ], - "measurements": [ - { - "values": { - "ttfp": 20.12, - "ttfcp": 22.12, - "ttfb": 14 - }, - "type": "page load", - "timestamp": "2021-09-30T10:46:17.680Z", - "trace": { - "trace_id": "abcd", - "span_id": "def" - } - } - ], - "meta": { - "sdk": { - "name": "grafana-frontend-agent", - "version": "1.0.0" - }, - "app": { - "name": "testapp", - "release": "0.8.2", - "version": "abcdefg", - "environment": "production" - }, - "user": { - "username": "domasx2", - "attributes": { - "foo": "bar" - } - }, - "session": { - "id": "abcd", - "attributes": { - "time_elapsed": "100s" - } - }, - "page": { - "url": "https://example.com/page" - }, - "browser": { - "name": "chrome", - "version": "88.12.1", - "os": "linux", - "mobile": false - }, - "view": { - "name": "foobar" - } - 
}, - "traces": { - "resourceSpans": [ - { - "resource": { - "attributes": [ - { - "key": "service.name", - "value": { - "stringValue": "unknown_service" - } - }, - { - "key": "telemetry.sdk.language", - "value": { - "stringValue": "webjs" - } - }, - { - "key": "telemetry.sdk.name", - "value": { - "stringValue": "opentelemetry" - } - }, - { - "key": "telemetry.sdk.version", - "value": { - "stringValue": "1.0.1" - } - } - ], - "droppedAttributesCount": 0 - }, - "instrumentationLibrarySpans": [ - { - "spans": [ - { - "traceId": "2d6f18da2663c7e477df23d8a8ad95b7", - "spanId": "50e64e3fac969cbb", - "parentSpanId": "9d9da6529d56706c", - "name": "documentFetch", - "kind": 1, - "startTimeUnixNano": 1646228314336100000, - "endTimeUnixNano": 1646228314351000000, - "attributes": [ - { - "key": "component", - "value": { - "stringValue": "document-load" - } - }, - { - "key": "http.response_content_length", - "value": { - "intValue": 1326 - } - } - ], - "droppedAttributesCount": 0, - "events": [ - { - "timeUnixNano": 1646228314336100000, - "name": "fetchStart", - "attributes": [], - "droppedAttributesCount": 0 - }, - { - "timeUnixNano": 1646228314342000000, - "name": "domainLookupStart", - "attributes": [], - "droppedAttributesCount": 0 - }, - { - "timeUnixNano": 1646228314342000000, - "name": "domainLookupEnd", - "attributes": [], - "droppedAttributesCount": 0 - }, - { - "timeUnixNano": 1646228314342000000, - "name": "connectStart", - "attributes": [], - "droppedAttributesCount": 0 - }, - { - "timeUnixNano": 1646228314330100000, - "name": "secureConnectionStart", - "attributes": [], - "droppedAttributesCount": 0 - }, - { - "timeUnixNano": 1646228314342500000, - "name": "connectEnd", - "attributes": [], - "droppedAttributesCount": 0 - }, - { - "timeUnixNano": 1646228314342700000, - "name": "requestStart", - "attributes": [], - "droppedAttributesCount": 0 - }, - { - "timeUnixNano": 1646228314347000000, - "name": "responseStart", - "attributes": [], - "droppedAttributesCount": 0 - }, - { - "timeUnixNano": 1646228314351000000, - "name": "responseEnd", - "attributes": [], - "droppedAttributesCount": 0 - } - ], - "droppedEventsCount": 0, - "status": { - "code": 0 - }, - "links": [], - "droppedLinksCount": 0 - } - ], - "instrumentationLibrary": { - "name": "@opentelemetry/instrumentation-document-load", - "version": "0.27.1" - } - } - ] - } - ] - } -} diff --git a/internal/static/integrations/v2/app_agent_receiver/traces_exporter.go b/internal/static/integrations/v2/app_agent_receiver/traces_exporter.go deleted file mode 100644 index 941f829452..0000000000 --- a/internal/static/integrations/v2/app_agent_receiver/traces_exporter.go +++ /dev/null @@ -1,41 +0,0 @@ -package app_agent_receiver - -import ( - "context" - - "go.opentelemetry.io/collector/consumer" -) - -type tracesConsumerGetter func() (consumer.Traces, error) - -// TracesExporter will send traces to a traces instance -type TracesExporter struct { - getTracesConsumer tracesConsumerGetter -} - -// NewTracesExporter creates a trace exporter for the app agent receiver. 
-func NewTracesExporter(getTracesConsumer tracesConsumerGetter) AppAgentReceiverExporter { - return &TracesExporter{getTracesConsumer} -} - -// Name of the exporter, for logging purposes -func (te *TracesExporter) Name() string { - return "traces exporter" -} - -// Export implements the AppDataExporter interface -func (te *TracesExporter) Export(ctx context.Context, payload Payload) error { - if payload.Traces == nil { - return nil - } - consumer, err := te.getTracesConsumer() - if err != nil { - return err - } - return consumer.ConsumeTraces(ctx, payload.Traces.Traces) -} - -// Static typecheck tests -var ( - _ AppAgentReceiverExporter = (*TracesExporter)(nil) -) diff --git a/internal/static/integrations/v2/app_agent_receiver/traces_test.go b/internal/static/integrations/v2/app_agent_receiver/traces_test.go deleted file mode 100644 index 3e46227c45..0000000000 --- a/internal/static/integrations/v2/app_agent_receiver/traces_test.go +++ /dev/null @@ -1,53 +0,0 @@ -package app_agent_receiver - -import ( - "context" - "errors" - "testing" - - "github.com/stretchr/testify/require" - "go.opentelemetry.io/collector/consumer" - "go.opentelemetry.io/collector/pdata/ptrace" -) - -type mockTracesConsumer struct { - consumed []ptrace.Traces -} - -func (c *mockTracesConsumer) Capabilities() consumer.Capabilities { - return consumer.Capabilities{MutatesData: false} -} - -func (c *mockTracesConsumer) ConsumeTraces(ctx context.Context, td ptrace.Traces) error { - c.consumed = append(c.consumed, td) - return nil -} - -func Test_exportTraces_success(t *testing.T) { - ctx := context.Background() - tracesConsumer := &mockTracesConsumer{} - exporter := NewTracesExporter(func() (consumer.Traces, error) { return tracesConsumer, nil }) - payload := loadTestPayload(t) - err := exporter.Export(ctx, payload) - require.NoError(t, err) - require.Len(t, tracesConsumer.consumed, 1) -} - -func Test_exportTraces_noTracesInpayload(t *testing.T) { - ctx := context.Background() - tracesConsumer := &mockTracesConsumer{consumed: nil} - exporter := NewTracesExporter(func() (consumer.Traces, error) { return tracesConsumer, nil }) - payload := loadTestPayload(t) - payload.Traces = nil - err := exporter.Export(ctx, payload) - require.NoError(t, err) - require.Len(t, tracesConsumer.consumed, 0) -} - -func Test_exportTraces_noConsumer(t *testing.T) { - ctx := context.Background() - exporter := NewTracesExporter(func() (consumer.Traces, error) { return nil, errors.New("it dont work") }) - payload := loadTestPayload(t) - err := exporter.Export(ctx, payload) - require.Error(t, err, "it don't work") -} diff --git a/internal/static/integrations/v2/app_agent_receiver/utils.go b/internal/static/integrations/v2/app_agent_receiver/utils.go deleted file mode 100644 index e716cb2043..0000000000 --- a/internal/static/integrations/v2/app_agent_receiver/utils.go +++ /dev/null @@ -1,84 +0,0 @@ -package app_agent_receiver - -import ( - "fmt" - "sort" - - "github.com/grafana/agent/internal/util/wildcard" - om "github.com/wk8/go-ordered-map" -) - -// KeyVal is an ordered map of string to interface -type KeyVal = om.OrderedMap - -// NewKeyVal creates new empty KeyVal -func NewKeyVal() *KeyVal { - return om.New() -} - -// KeyValFromMap will instantiate KeyVal from a map[string]string -func KeyValFromMap(m map[string]string) *KeyVal { - kv := NewKeyVal() - keys := make([]string, 0, len(m)) - for k := range m { - keys = append(keys, k) - } - sort.Strings(keys) - for _, k := range keys { - KeyValAdd(kv, k, m[k]) - } - return kv -} - -// MergeKeyVal will 
merge source in target -func MergeKeyVal(target *KeyVal, source *KeyVal) { - for el := source.Oldest(); el != nil; el = el.Next() { - target.Set(el.Key, el.Value) - } -} - -// MergeKeyValWithPrefix will merge source in target, adding a prefix to each key being merged in -func MergeKeyValWithPrefix(target *KeyVal, source *KeyVal, prefix string) { - for el := source.Oldest(); el != nil; el = el.Next() { - target.Set(fmt.Sprintf("%s%s", prefix, el.Key), el.Value) - } -} - -// KeyValAdd adds a key + value string pair to kv -func KeyValAdd(kv *KeyVal, key string, value string) { - if len(value) > 0 { - kv.Set(key, value) - } -} - -// KeyValToInterfaceSlice converts KeyVal to []interface{}, typically used for logging -func KeyValToInterfaceSlice(kv *KeyVal) []interface{} { - slice := make([]interface{}, kv.Len()*2) - idx := 0 - for el := kv.Oldest(); el != nil; el = el.Next() { - slice[idx] = el.Key - idx++ - slice[idx] = el.Value - idx++ - } - return slice -} - -// KeyValToInterfaceMap converts KeyVal to map[string]interface -func KeyValToInterfaceMap(kv *KeyVal) map[string]interface{} { - retv := make(map[string]interface{}) - for el := kv.Oldest(); el != nil; el = el.Next() { - retv[fmt.Sprint(el.Key)] = el.Value - } - return retv -} - -// URLMatchesOrigins returns true if URL matches at least one of origin prefix. Wildcard '*' and '?' supported -func urlMatchesOrigins(URL string, origins []string) bool { - for _, origin := range origins { - if origin == "*" || wildcard.Match(origin+"*", URL) { - return true - } - } - return false -} diff --git a/internal/static/integrations/v2/app_agent_receiver/utils_test.go b/internal/static/integrations/v2/app_agent_receiver/utils_test.go deleted file mode 100644 index 6e32dd9626..0000000000 --- a/internal/static/integrations/v2/app_agent_receiver/utils_test.go +++ /dev/null @@ -1,36 +0,0 @@ -package app_agent_receiver - -import ( - "testing" - - "github.com/stretchr/testify/require" -) - -func testCase(t *testing.T, URL string, origins []string, expected bool) { - result := urlMatchesOrigins(URL, origins) - require.Equal(t, expected, result) -} - -func Test_Origin_WildcardAlwaysMatches(t *testing.T) { - testCase(t, "http://example.com/static/foo.js", []string{"https://foo.com/", "*"}, true) -} - -func Test_Origin_Matches(t *testing.T) { - testCase(t, "http://example.com/static/foo.js", []string{"https://foo.com/", "http://example.com/"}, true) -} - -func Test_Origin_MatchesWithWildcard(t *testing.T) { - testCase(t, "http://foo.bar.com/static/foo.js", []string{"https://foo.com/", "http://*.bar.com/"}, true) -} - -func Test_Origin_DoesNotMatch(t *testing.T) { - testCase(t, "http://example.com/static/foo.js", []string{"https://foo.com/", "http://test.com/"}, false) -} - -func Test_Origin_DoesNotMatchWithWildcard(t *testing.T) { - testCase(t, "http://foo.bar.com/static/foo.js", []string{"https://foo.com/", "http://*.baz.com/"}, false) -} - -func Test_Origin_MatchesWithWildcardNoProtocol(t *testing.T) { - testCase(t, "http://foo.bar.com/static/foo.js", []string{"https://foo.com/", "*.bar.com/"}, true) -} diff --git a/internal/static/integrations/v2/autoscrape/appender.go b/internal/static/integrations/v2/autoscrape/appender.go deleted file mode 100644 index 04be1c7d6a..0000000000 --- a/internal/static/integrations/v2/autoscrape/appender.go +++ /dev/null @@ -1,42 +0,0 @@ -package autoscrape - -import ( - "fmt" - - "github.com/prometheus/prometheus/model/exemplar" - "github.com/prometheus/prometheus/model/histogram" - 
"github.com/prometheus/prometheus/model/labels" - "github.com/prometheus/prometheus/model/metadata" - "github.com/prometheus/prometheus/storage" -) - -// failedAppender is used as the appender when an instance couldn't be found. -type failedAppender struct { - instanceName string -} - -var _ storage.Appender = (*failedAppender)(nil) - -func (fa *failedAppender) Append(ref storage.SeriesRef, l labels.Labels, t int64, v float64) (storage.SeriesRef, error) { - return 0, fmt.Errorf("no such instance %s", fa.instanceName) -} - -func (fa *failedAppender) Commit() error { - return fmt.Errorf("no such instance %s", fa.instanceName) -} - -func (fa *failedAppender) Rollback() error { - return fmt.Errorf("no such instance %s", fa.instanceName) -} - -func (fa *failedAppender) AppendExemplar(ref storage.SeriesRef, l labels.Labels, e exemplar.Exemplar) (storage.SeriesRef, error) { - return 0, fmt.Errorf("no such instance %s", fa.instanceName) -} - -func (fa *failedAppender) UpdateMetadata(ref storage.SeriesRef, l labels.Labels, m metadata.Metadata) (storage.SeriesRef, error) { - return 0, fmt.Errorf("no such instance %s", fa.instanceName) -} - -func (fa *failedAppender) AppendHistogram(ref storage.SeriesRef, l labels.Labels, t int64, h *histogram.Histogram, fh *histogram.FloatHistogram) (storage.SeriesRef, error) { - return 0, fmt.Errorf("no such instance %s", fa.instanceName) -} diff --git a/internal/static/integrations/v2/autoscrape/autoscrape.go b/internal/static/integrations/v2/autoscrape/autoscrape.go index 8d1bd02ae0..5415f269ac 100644 --- a/internal/static/integrations/v2/autoscrape/autoscrape.go +++ b/internal/static/integrations/v2/autoscrape/autoscrape.go @@ -2,22 +2,9 @@ package autoscrape import ( - "context" - "sync" - - "github.com/go-kit/log" - "github.com/go-kit/log/level" - "github.com/grafana/agent/internal/static/metrics" - "github.com/grafana/agent/internal/static/metrics/instance" - "github.com/grafana/agent/internal/static/server" - "github.com/oklog/run" - config_util "github.com/prometheus/common/config" "github.com/prometheus/common/model" prom_config "github.com/prometheus/prometheus/config" - "github.com/prometheus/prometheus/discovery" "github.com/prometheus/prometheus/model/relabel" - "github.com/prometheus/prometheus/scrape" - "github.com/prometheus/prometheus/storage" ) // DefaultGlobal holds default values for Global. @@ -53,262 +40,9 @@ type Config struct { MetricRelabelConfigs []*relabel.Config `yaml:"metric_relabel_configs,omitempty"` // Relabel individual autoscrape metrics } -// InstanceStore is used to find instances to send metrics to. It is a subset -// of the pkg/metrics/instance.Manager interface. -type InstanceStore interface { - // GetInstance retrieves a ManagedInstance by name. - GetInstance(name string) (instance.ManagedInstance, error) -} - // ScrapeConfig bind a Prometheus scrape config with an instance to send // scraped metrics to. type ScrapeConfig struct { Instance string Config prom_config.ScrapeConfig } - -// Scraper is a metrics autoscraper. -type Scraper struct { - ctx context.Context - cancel context.CancelFunc - - log log.Logger - is InstanceStore - - // Prometheus doesn't pass contextual information at scrape time that could - // be used to change the behavior of generating an appender. This means that - // it's not yet possible for us to just run a single SD + scrape manager for - // all of our integrations, and we instead need to launch a pair of each for - // every instance we're writing to. 
- - iscrapersMut sync.RWMutex - iscrapers map[string]*instanceScraper - dialerFunc server.DialContextFunc -} - -// NewScraper creates a new autoscraper. Scraper will run until Stop is called. -// Instances to send scraped metrics to will be looked up via im. Scraping will -// use the provided dialerFunc to make connections if non-nil. -func NewScraper(l log.Logger, is InstanceStore, dialerFunc server.DialContextFunc) *Scraper { - l = log.With(l, "component", "autoscraper") - - ctx, cancel := context.WithCancel(context.Background()) - - s := &Scraper{ - ctx: ctx, - cancel: cancel, - - log: l, - is: is, - iscrapers: map[string]*instanceScraper{}, - dialerFunc: dialerFunc, - } - return s -} - -// ApplyConfig will apply the given jobs. An error will be returned for any -// jobs that failed to be applied. -func (s *Scraper) ApplyConfig(jobs []*ScrapeConfig) error { - s.iscrapersMut.Lock() - defer s.iscrapersMut.Unlock() - - var firstError error - saveError := func(e error) { - if firstError == nil { - firstError = e - } - } - - // Shard our jobs by target instance. - shardedJobs := map[string][]*prom_config.ScrapeConfig{} - for _, j := range jobs { - _, err := s.is.GetInstance(j.Instance) - if err != nil { - level.Error(s.log).Log("msg", "cannot autoscrape integration", "name", j.Config.JobName, "err", err) - saveError(err) - continue - } - - shardedJobs[j.Instance] = append(shardedJobs[j.Instance], &j.Config) - } - - // Then pass the jobs to instanceScraper, creating them if we need to. - for instance, jobs := range shardedJobs { - is, ok := s.iscrapers[instance] - if !ok { - is = newInstanceScraper(s.ctx, s.log, s.is, instance, config_util.DialContextFunc(s.dialerFunc)) - s.iscrapers[instance] = is - } - if err := is.ApplyConfig(jobs); err != nil { - // Not logging here; is.ApplyConfig already logged the errors. - saveError(err) - } - } - - // Garbage collect: If there's a key in s.scrapers that wasn't in - // shardedJobs, stop that unused scraper. - for instance, is := range s.iscrapers { - _, current := shardedJobs[instance] - if !current { - is.Stop() - delete(s.iscrapers, instance) - } - } - - return firstError -} - -// TargetsActive returns the set of active scrape targets for all target -// instances. -func (s *Scraper) TargetsActive() map[string]metrics.TargetSet { - s.iscrapersMut.RLock() - defer s.iscrapersMut.RUnlock() - - allTargets := make(map[string]metrics.TargetSet, len(s.iscrapers)) - for instance, is := range s.iscrapers { - allTargets[instance] = is.sm.TargetsActive() - } - return allTargets -} - -// Stop stops the Scraper. -func (s *Scraper) Stop() { - s.iscrapersMut.Lock() - defer s.iscrapersMut.Unlock() - - for instance, is := range s.iscrapers { - is.Stop() - delete(s.iscrapers, instance) - } - - s.cancel() -} - -// instanceScraper is a Scraper which always sends to the same instance. -type instanceScraper struct { - log log.Logger - - sd *discovery.Manager - sm *scrape.Manager - cancel context.CancelFunc - exited chan struct{} -} - -// newInstanceScraper runs a new instanceScraper. Must be stopped by calling -// Stop. 
-func newInstanceScraper( - ctx context.Context, - l log.Logger, - s InstanceStore, - instanceName string, - dialerFunc config_util.DialContextFunc, -) *instanceScraper { - - ctx, cancel := context.WithCancel(ctx) - l = log.With(l, "target_instance", instanceName) - - sdOpts := []func(*discovery.Manager){ - discovery.Name("autoscraper/" + instanceName), - discovery.HTTPClientOptions( - // If dialerFunc is nil, scrape.NewManager will use Go's default dialer. - config_util.WithDialContextFunc(dialerFunc), - ), - } - sd := discovery.NewManager(ctx, l, sdOpts...) - sm := scrape.NewManager(&scrape.Options{ - HTTPClientOptions: []config_util.HTTPClientOption{ - // If dialerFunc is nil, scrape.NewManager will use Go's default dialer. - config_util.WithDialContextFunc(dialerFunc), - }, - }, l, &agentAppender{ - inst: instanceName, - is: s, - }) - - is := &instanceScraper{ - log: l, - - sd: sd, - sm: sm, - cancel: cancel, - exited: make(chan struct{}), - } - - go is.run() - return is -} - -type agentAppender struct { - inst string - is InstanceStore -} - -func (aa *agentAppender) Appender(ctx context.Context) storage.Appender { - mi, err := aa.is.GetInstance(aa.inst) - if err != nil { - return &failedAppender{instanceName: aa.inst} - } - return mi.Appender(ctx) -} - -func (is *instanceScraper) run() { - defer close(is.exited) - var rg run.Group - - rg.Add(func() error { - // Service discovery will stop whenever our parent context is canceled or - // if is.cancel is called. - err := is.sd.Run() - if err != nil { - level.Error(is.log).Log("msg", "autoscrape service discovery exited with error", "err", err) - } - return err - }, func(_ error) { - is.cancel() - }) - - rg.Add(func() error { - err := is.sm.Run(is.sd.SyncCh()) - if err != nil { - level.Error(is.log).Log("msg", "autoscrape scrape manager exited with error", "err", err) - } - return err - }, func(_ error) { - is.sm.Stop() - }) - - _ = rg.Run() -} - -func (is *instanceScraper) ApplyConfig(jobs []*prom_config.ScrapeConfig) error { - var firstError error - saveError := func(e error) { - if firstError == nil && e != nil { - firstError = e - } - } - - var ( - scrapeConfigs = make([]*prom_config.ScrapeConfig, 0, len(jobs)) - sdConfigs = make(map[string]discovery.Configs, len(jobs)) - ) - for _, job := range jobs { - sdConfigs[job.JobName] = job.ServiceDiscoveryConfigs - scrapeConfigs = append(scrapeConfigs, job) - } - if err := is.sd.ApplyConfig(sdConfigs); err != nil { - level.Error(is.log).Log("msg", "error when applying SD to autoscraper", "err", err) - saveError(err) - } - if err := is.sm.ApplyConfig(&prom_config.Config{ScrapeConfigs: scrapeConfigs}); err != nil { - level.Error(is.log).Log("msg", "error when applying jobs to scraper", "err", err) - saveError(err) - } - - return firstError -} - -func (is *instanceScraper) Stop() { - is.cancel() - <-is.exited -} diff --git a/internal/static/integrations/v2/autoscrape/autoscrape_test.go b/internal/static/integrations/v2/autoscrape/autoscrape_test.go deleted file mode 100644 index 9aaa148ecd..0000000000 --- a/internal/static/integrations/v2/autoscrape/autoscrape_test.go +++ /dev/null @@ -1,118 +0,0 @@ -package autoscrape - -import ( - "context" - "net/http/httptest" - "testing" - "time" - - "github.com/grafana/agent/internal/static/metrics/instance" - "github.com/grafana/agent/internal/util" - "github.com/prometheus/client_golang/prometheus/promhttp" - "github.com/prometheus/common/model" - prom_config "github.com/prometheus/prometheus/config" - "github.com/prometheus/prometheus/discovery" - 
"github.com/prometheus/prometheus/model/exemplar" - "github.com/prometheus/prometheus/model/histogram" - "github.com/prometheus/prometheus/model/labels" - "github.com/prometheus/prometheus/model/metadata" - "github.com/prometheus/prometheus/storage" - "github.com/stretchr/testify/assert" - "github.com/stretchr/testify/require" - "go.uber.org/atomic" -) - -// TestAutoscrape is a basic end-to-end test of the autoscraper. -func TestAutoscrape(t *testing.T) { - srv := httptest.NewServer(promhttp.Handler()) - defer srv.Close() - - wt := util.NewWaitTrigger() - - noop := noOpAppender - noop.AppendFunc = func(ref storage.SeriesRef, l labels.Labels, t int64, v float64) (storage.SeriesRef, error) { - wt.Trigger() - return noOpAppender.AppendFunc(ref, l, t, v) - } - - im := instance.MockManager{ - GetInstanceFunc: func(name string) (instance.ManagedInstance, error) { - assert.Equal(t, t.Name(), name) - return &mockInstance{app: &noop}, nil - }, - } - as := NewScraper(util.TestLogger(t), im, nil) - defer as.Stop() - - err := as.ApplyConfig([]*ScrapeConfig{{ - Instance: t.Name(), - Config: func() prom_config.ScrapeConfig { - cfg := prom_config.DefaultScrapeConfig - cfg.JobName = t.Name() - cfg.ScrapeInterval = model.Duration(time.Second) - cfg.ScrapeTimeout = model.Duration(time.Second / 2) - cfg.ServiceDiscoveryConfigs = discovery.Configs{ - discovery.StaticConfig{{ - Targets: []model.LabelSet{{ - model.AddressLabel: model.LabelValue(srv.Listener.Addr().String()), - }}, - Source: t.Name(), - }}, - } - return cfg - }(), - }}) - require.NoError(t, err, "failed to apply configs") - - // NOTE(rfratto): SD won't start sending targets until after 5 seconds. We'll - // need to at least wait that long. - time.Sleep(5 * time.Second) - - require.NoError(t, wt.Wait(5*time.Second), "timed out waiting for scrape") -} - -var globalRef atomic.Uint64 -var noOpAppender = mockAppender{ - AppendFunc: func(ref storage.SeriesRef, l labels.Labels, t int64, v float64) (storage.SeriesRef, error) { - return storage.SeriesRef(globalRef.Inc()), nil - }, - CommitFunc: func() error { return nil }, - RollbackFunc: func() error { return nil }, - AppendExemplarFunc: func(ref storage.SeriesRef, l labels.Labels, e exemplar.Exemplar) (storage.SeriesRef, error) { - return storage.SeriesRef(globalRef.Inc()), nil - }, - AppendHistogramFunc: func(ref storage.SeriesRef, l labels.Labels, t int64, h *histogram.Histogram, fh *histogram.FloatHistogram) (storage.SeriesRef, error) { - return storage.SeriesRef(globalRef.Inc()), nil - }, -} - -type mockAppender struct { - AppendFunc func(ref storage.SeriesRef, l labels.Labels, t int64, v float64) (storage.SeriesRef, error) - CommitFunc func() error - RollbackFunc func() error - AppendExemplarFunc func(ref storage.SeriesRef, l labels.Labels, e exemplar.Exemplar) (storage.SeriesRef, error) - UpdateMetadataFunc func(ref storage.SeriesRef, l labels.Labels, m metadata.Metadata) (storage.SeriesRef, error) - AppendHistogramFunc func(ref storage.SeriesRef, l labels.Labels, t int64, h *histogram.Histogram, fh *histogram.FloatHistogram) (storage.SeriesRef, error) -} - -func (ma *mockAppender) Append(ref storage.SeriesRef, l labels.Labels, t int64, v float64) (storage.SeriesRef, error) { - return ma.AppendFunc(ref, l, t, v) -} -func (ma *mockAppender) Commit() error { return ma.CommitFunc() } -func (ma *mockAppender) Rollback() error { return ma.RollbackFunc() } -func (ma *mockAppender) AppendExemplar(ref storage.SeriesRef, l labels.Labels, e exemplar.Exemplar) (storage.SeriesRef, error) { - return 
ma.AppendExemplarFunc(ref, l, e) -} -func (ma *mockAppender) UpdateMetadata(ref storage.SeriesRef, l labels.Labels, m metadata.Metadata) (storage.SeriesRef, error) { - return ma.UpdateMetadataFunc(ref, l, m) -} -func (ma *mockAppender) AppendHistogram(ref storage.SeriesRef, l labels.Labels, t int64, h *histogram.Histogram, fh *histogram.FloatHistogram) (storage.SeriesRef, error) { - return ma.AppendHistogramFunc(ref, l, t, h, fh) -} - -type mockInstance struct { - instance.NoOpInstance - app storage.Appender -} - -func (mi *mockInstance) Appender(ctx context.Context) storage.Appender { return mi.app } diff --git a/internal/static/integrations/v2/controller.go b/internal/static/integrations/v2/controller.go deleted file mode 100644 index b01a666119..0000000000 --- a/internal/static/integrations/v2/controller.go +++ /dev/null @@ -1,444 +0,0 @@ -package integrations - -import ( - "context" - "errors" - "fmt" - "net/http" - "net/url" - "path" - "sort" - "strings" - "sync" - - "github.com/go-kit/log" - "github.com/go-kit/log/level" - "github.com/gorilla/mux" - "github.com/grafana/agent/internal/static/integrations/v2/autoscrape" - "github.com/prometheus/prometheus/discovery" - http_sd "github.com/prometheus/prometheus/discovery/http" - "go.uber.org/atomic" -) - -// controllerConfig holds a set of integration configs. -type controllerConfig []Config - -// controller manages a set of integrations. -type controller struct { - logger log.Logger - - mut sync.Mutex - cfg controllerConfig - globals Globals - integrations []*controlledIntegration // Running integrations - - runIntegrations chan []*controlledIntegration // Schedule integrations to run -} - -// newController creates a new Controller. Controller is intended to be -// embedded inside of integrations that may want to multiplex other -// integrations. -func newController(l log.Logger, cfg controllerConfig, globals Globals) (*controller, error) { - c := &controller{ - logger: l, - runIntegrations: make(chan []*controlledIntegration, 1), - } - if err := c.UpdateController(cfg, globals); err != nil { - return nil, err - } - return c, nil -} - -// run starts the controller and blocks until ctx is canceled. -func (c *controller) run(ctx context.Context) { - pool := newWorkerPool(ctx, c.logger) - defer pool.Close() - - for { - select { - case <-ctx.Done(): - level.Debug(c.logger).Log("msg", "controller exiting") - return - case newIntegrations := <-c.runIntegrations: - pool.Reload(newIntegrations) - } - } -} - -// controlledIntegration is a running Integration. A running integration is -// identified uniquely by its id. -type controlledIntegration struct { - id integrationID - i Integration - c Config // Config that generated i. Used for changing to see if a config changed. - running atomic.Bool -} - -func (ci *controlledIntegration) Running() bool { - return ci.running.Load() -} - -// integrationID uses a tuple of Name and Identifier to uniquely identify an -// integration. -type integrationID struct{ Name, Identifier string } - -func (id integrationID) String() string { - return fmt.Sprintf("%s/%s", id.Name, id.Identifier) -} - -// UpdateController updates the Controller with new Controller and -// IntegrationOptions. -// -// UpdateController updates running integrations. Extensions can be -// recalculated by calling relevant methods like Handler or Targets. 
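
Before the function below, a compact sketch of the reconcile pattern it implements: unchanged configs are reused, integrations that support dynamic updates are updated in place, and everything else is recreated. The types and names here are simplified stand-ins for illustration, not the interfaces from this patch:

    package main

    import "fmt"

    // config and integration are simplified stand-ins for the Config and
    // Integration interfaces the controller manages.
    type config struct{ Value string }

    type integration struct {
        cfg       config
        updatable bool // whether the running integration supports dynamic updates
    }

    // reconcile mirrors the flow of UpdateController: reuse unchanged
    // integrations, update in place when supported, otherwise (re)create.
    func reconcile(running map[string]*integration, want map[string]config) {
        for id, cfg := range want {
            cur, ok := running[id]
            switch {
            case ok && cur.cfg == cfg:
                fmt.Printf("%s: unchanged, reusing\n", id)
            case ok && cur.updatable:
                cur.cfg = cfg
                fmt.Printf("%s: updated in place\n", id)
            default:
                running[id] = &integration{cfg: cfg}
                fmt.Printf("%s: (re)created\n", id)
            }
        }
    }

    func main() {
        running := map[string]*integration{"node_exporter": {cfg: config{"a"}, updatable: true}}
        reconcile(running, map[string]config{"node_exporter": {"b"}})
    }

The real loop below additionally rejects duplicate singletons and duplicate (name, identifier) pairs before anything is scheduled.
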
-func (c *controller) UpdateController(cfg controllerConfig, globals Globals) error {
-    c.mut.Lock()
-    defer c.mut.Unlock()
-
-    // Ensure that no singleton integration is defined twice
-    var (
-        duplicatedSingletons []string
-        singletonSet         = make(map[string]struct{})
-    )
-    for _, cfg := range cfg {
-        t, _ := RegisteredType(cfg)
-        if t != TypeSingleton {
-            continue
-        }
-
-        if _, exists := singletonSet[cfg.Name()]; exists {
-            duplicatedSingletons = append(duplicatedSingletons, cfg.Name())
-            continue
-        }
-        singletonSet[cfg.Name()] = struct{}{}
-    }
-    if len(duplicatedSingletons) == 1 {
-        return fmt.Errorf("integration %q may only be defined once", duplicatedSingletons[0])
-    } else if len(duplicatedSingletons) > 1 {
-        list := strings.Join(duplicatedSingletons, ", ")
-        return fmt.Errorf("the following integrations may only be defined once each: %s", list)
-    }
-
-    integrationIDMap := map[integrationID]struct{}{}
-
-    integrations := make([]*controlledIntegration, 0, len(cfg))
-
-NextConfig:
-    for _, ic := range cfg {
-        name := ic.Name()
-
-        identifier, err := ic.Identifier(globals)
-        if err != nil {
-            return fmt.Errorf("could not build identifier for integration %q: %w", name, err)
-        }
-
-        if err := ic.ApplyDefaults(globals); err != nil {
-            return fmt.Errorf("failed to apply defaults for %s/%s: %w", name, identifier, err)
-        }
-
-        id := integrationID{Name: name, Identifier: identifier}
-        if _, exist := integrationIDMap[id]; exist {
-            return fmt.Errorf("multiple instance names %q in integration %q", identifier, name)
-        }
-        integrationIDMap[id] = struct{}{}
-
-        // Now that we know the ID for an integration, we can check to see if it's
-        // running and can be dynamically updated.
-        for _, ci := range c.integrations {
-            if ci.id != id {
-                continue
-            }
-
-            // If the configs haven't changed, then we don't need to do anything.
-            if CompareConfigs(ci.c, ic) {
-                integrations = append(integrations, ci)
-                continue NextConfig
-            }
-
-            if ui, ok := ci.i.(UpdateIntegration); ok {
-                if err := ui.ApplyConfig(ic, globals); errors.Is(err, ErrInvalidUpdate) {
-                    level.Warn(c.logger).Log("msg", "failed to dynamically update integration; will recreate", "integration", name, "instance", identifier, "err", err)
-                    break
-                } else if err != nil {
-                    return fmt.Errorf("failed to update %s integration %q: %w", name, identifier, err)
-                } else {
-                    // Update succeeded; re-use the running one and go to the next
-                    // integration to process.
-                    integrations = append(integrations, ci)
-                    continue NextConfig
-                }
-            }
-
-            // We found the integration to update: we can stop this loop now.
-            break
-        }
-
-        logger := log.With(c.logger, "integration", name, "instance", identifier)
-        integration, err := ic.NewIntegration(logger, globals)
-        if err != nil {
-            return fmt.Errorf("failed to construct %s integration %q: %w", name, identifier, err)
-        }
-
-        // Create a new controlled integration.
-        integrations = append(integrations, &controlledIntegration{
-            id: id,
-            i:  integration,
-            c:  ic,
-        })
-    }
-
-    // Schedule integrations to run
-    c.runIntegrations <- integrations
-
-    c.cfg = cfg
-    c.globals = globals
-    c.integrations = integrations
-    return nil
-}
-
-// Handler returns an HTTP handler for the controller and its integrations.
-// Handler will pass through requests to other running integrations. Handler
-// always returns an http.Handler regardless of error.
-//
-// Handler is expensive to compute and should only be done after reloading the
-// config.
-func (c *controller) Handler(prefix string) (http.Handler, error) {
-    var firstErr error
-    saveFirstErr := func(err error) {
-        if firstErr == nil {
-            firstErr = err
-        }
-    }
-
-    r := mux.NewRouter()
-
-    err := c.forEachIntegration(prefix, func(ci *controlledIntegration, iprefix string) {
-        id := ci.id
-
-        i, ok := ci.i.(HTTPIntegration)
-        if !ok {
-            return
-        }
-
-        handler, err := i.Handler(iprefix + "/")
-        if err != nil {
-            saveFirstErr(fmt.Errorf("could not generate HTTP handler for %s integration %q: %w", id.Name, id.Identifier, err))
-            return
-        } else if handler == nil {
-            return
-        }
-
-        // Anything that matches integrationPrefix is passed to the handler. The
-        // two registrations below are separate because if one instance name is a
-        // prefix of another (e.g., localhost and localhost2), localhost2 would
-        // never be called: localhost would always take precedence. Adding a
-        // trailing / fixes that, but to keep the old behavior we also need
-        // /localhost without the trailing slash to keep working, hence the
-        // second HandleFunc below this one.
-        // https://github.com/grafana/agent/issues/1718
-        hfunc := func(rw http.ResponseWriter, r *http.Request) {
-            if !ci.Running() {
-                http.Error(rw, fmt.Sprintf("%s integration instance %q not running", id.Name, id.Identifier), http.StatusServiceUnavailable)
-                return
-            }
-            handler.ServeHTTP(rw, r)
-        }
-        r.PathPrefix(iprefix + "/").HandlerFunc(hfunc)
-        // Handle calling the iprefix itself
-        r.HandleFunc(iprefix, hfunc)
-    })
-    if err != nil {
-        level.Warn(c.logger).Log("msg", "error when iterating over integrations to build HTTP handlers", "err", err)
-    }
-
-    // TODO(rfratto): navigation page for exact prefix match
-
-    return r, firstErr
-}
-
-// forEachIntegration calculates the prefix for each integration and calls f.
-// prefix will not end in /.
-func (c *controller) forEachIntegration(basePrefix string, f func(ci *controlledIntegration, iprefix string)) error {
-    c.mut.Lock()
-    defer c.mut.Unlock()
-
-    // Pre-populate a mapping of integration name -> identifier. If there are
-    // two instances of the same integration, we want to ensure unique routing.
-    //
-    // This special logic is done for backwards compatibility with the original
-    // design of integrations.
-    identifiersMap := map[string][]string{}
-    for _, i := range c.integrations {
-        identifiersMap[i.id.Name] = append(identifiersMap[i.id.Name], i.id.Identifier)
-    }
-
-    usedPrefixes := map[string]struct{}{}
-
-    for _, ci := range c.integrations {
-        id := ci.id
-        multipleInstances := len(identifiersMap[id.Name]) > 1
-
-        var integrationPrefix string
-        if multipleInstances {
-            // i.e., /integrations/mysqld_exporter/server-a
-            integrationPrefix = path.Join(basePrefix, id.Name, id.Identifier)
-        } else {
-            // i.e., /integrations/node_exporter
-            integrationPrefix = path.Join(basePrefix, id.Name)
-        }
-
-        f(ci, integrationPrefix)
-
-        if _, exist := usedPrefixes[integrationPrefix]; exist {
-            return fmt.Errorf("BUG: duplicate integration prefix %q", integrationPrefix)
-        }
-        usedPrefixes[integrationPrefix] = struct{}{}
-    }
-    return nil
-}
-
-// Targets returns the current set of targets across all integrations. Use opts
-// to customize which targets are returned.
-func (c *controller) Targets(ep Endpoint, opts TargetOptions) []*targetGroup {
-    // Grab the integrations as fast as possible. We don't want to spend too much
-    // time holding the mutex.
- type prefixedMetricsIntegration struct { - id integrationID - i MetricsIntegration - ep Endpoint - } - var mm []prefixedMetricsIntegration - - err := c.forEachIntegration(ep.Prefix, func(ci *controlledIntegration, iprefix string) { - // Best effort liveness check. They might stop running when we request - // their targets, which is fine, but we should save as much work as we - // can. - if !ci.Running() { - return - } - if mi, ok := ci.i.(MetricsIntegration); ok { - ep := Endpoint{Host: ep.Host, Prefix: iprefix} - mm = append(mm, prefixedMetricsIntegration{id: ci.id, i: mi, ep: ep}) - } - }) - if err != nil { - level.Warn(c.logger).Log("msg", "error when iterating over integrations to get targets", "err", err) - } - - var tgs []*targetGroup - for _, mi := range mm { - // If we're looking for a subset of integrations, filter out anything that doesn't match. - if len(opts.Integrations) > 0 && !stringSliceContains(opts.Integrations, mi.id.Name) { - continue - } - // If we're looking for a specific instance, filter out anything that doesn't match. - if opts.Instance != "" && mi.id.Identifier != opts.Instance { - continue - } - - for _, tgt := range mi.i.Targets(mi.ep) { - tgs = append(tgs, (*targetGroup)(tgt)) - } - } - sort.Slice(tgs, func(i, j int) bool { - return tgs[i].Source < tgs[j].Source - }) - return tgs -} - -func stringSliceContains(ss []string, s string) bool { - for _, check := range ss { - if check == s { - return true - } - } - return false -} - -// TargetOptions controls which targets should be returned by the subsystem. -type TargetOptions struct { - // Integrations is the set of integrations to return. An empty slice will - // default to returning all integrations. - Integrations []string - // Instance matches a specific instance from all integrations. An empty - // string will match any instance. - Instance string -} - -// TargetOptionsFromParams creates TargetOptions from parsed URL query parameters. -func TargetOptionsFromParams(u url.Values) (TargetOptions, error) { - var to TargetOptions - - rawIntegrations := u.Get("integrations") - if rawIntegrations != "" { - rawIntegrations, err := url.QueryUnescape(rawIntegrations) - if err != nil { - return to, fmt.Errorf("invalid value for integrations: %w", err) - } - to.Integrations = strings.Split(rawIntegrations, ",") - } - - rawInstance := u.Get("instance") - if rawInstance != "" { - rawInstance, err := url.QueryUnescape(rawInstance) - if err != nil { - return to, fmt.Errorf("invalid value for instance: %w", err) - } - to.Instance = rawInstance - } - - return to, nil -} - -// ToParams will convert to into URL query parameters. -func (to TargetOptions) ToParams() url.Values { - p := make(url.Values) - if len(to.Integrations) != 0 { - p.Set("integrations", url.QueryEscape(strings.Join(to.Integrations, ","))) - } - if to.Instance != "" { - p.Set("instance", url.QueryEscape(to.Instance)) - } - return p -} - -// ScrapeConfigs returns a set of scrape configs to use for self-scraping. -// sdConfig should contain the full URL where the integrations SD API is -// exposed. ScrapeConfigs will inject unique query parameters per integration -// to limit what will be discovered. -func (c *controller) ScrapeConfigs(prefix string, sdConfig *http_sd.SDConfig) []*autoscrape.ScrapeConfig { - // Grab the integrations as fast as possible. We don't want to spend too much - // time holding the mutex. 
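
As an aside on the TargetOptions helpers above: ToParams escapes values with QueryEscape before url.Values encodes them again, and TargetOptionsFromParams undoes both layers with ParseQuery plus QueryUnescape. A minimal round-trip sketch, using a local stand-in struct rather than the real TargetOptions:

    package main

    import (
        "fmt"
        "net/url"
        "strings"
    )

    // targetOptions is a local stand-in with the same query keys as TargetOptions.
    type targetOptions struct {
        Integrations []string
        Instance     string
    }

    func (to targetOptions) toParams() url.Values {
        p := make(url.Values)
        if len(to.Integrations) != 0 {
            p.Set("integrations", url.QueryEscape(strings.Join(to.Integrations, ",")))
        }
        if to.Instance != "" {
            p.Set("instance", url.QueryEscape(to.Instance))
        }
        return p
    }

    func main() {
        opts := targetOptions{Integrations: []string{"mysqld_exporter"}, Instance: "server-a"}
        query := opts.toParams().Encode()
        fmt.Println(query) // instance=server-a&integrations=mysqld_exporter

        // The SD endpoint reverses both layers: ParseQuery, then QueryUnescape.
        parsed, _ := url.ParseQuery(query)
        raw, _ := url.QueryUnescape(parsed.Get("integrations"))
        fmt.Println(strings.Split(raw, ","), parsed.Get("instance"))
    }
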
- type prefixedMetricsIntegration struct { - id integrationID - i MetricsIntegration - prefix string - } - var mm []prefixedMetricsIntegration - - err := c.forEachIntegration(prefix, func(ci *controlledIntegration, iprefix string) { - if mi, ok := ci.i.(MetricsIntegration); ok { - mm = append(mm, prefixedMetricsIntegration{id: ci.id, i: mi, prefix: iprefix}) - } - }) - if err != nil { - level.Warn(c.logger).Log("msg", "error when iterating over integrations to get scrape configs", "err", err) - } - - var cfgs []*autoscrape.ScrapeConfig - for _, mi := range mm { - // sdConfig will be pointing to the targets API. By default, this returns absolutely everything. - // We want to use the query parameters to inform the API to only return - // specific targets. - opts := TargetOptions{ - Integrations: []string{mi.id.Name}, - Instance: mi.id.Identifier, - } - - integrationSDConfig := *sdConfig - integrationSDConfig.URL = sdConfig.URL + "?" + opts.ToParams().Encode() - sds := discovery.Configs{&integrationSDConfig} - cfgs = append(cfgs, mi.i.ScrapeConfigs(sds)...) - } - sort.Slice(cfgs, func(i, j int) bool { - return cfgs[i].Config.JobName < cfgs[j].Config.JobName - }) - return cfgs -} diff --git a/internal/static/integrations/v2/controller_httpintegration_test.go b/internal/static/integrations/v2/controller_httpintegration_test.go deleted file mode 100644 index ee817a1c53..0000000000 --- a/internal/static/integrations/v2/controller_httpintegration_test.go +++ /dev/null @@ -1,259 +0,0 @@ -package integrations - -import ( - "fmt" - "io" - "net/http" - "net/http/httptest" - "strings" - "testing" - - "github.com/go-kit/log" - "github.com/gorilla/mux" - "github.com/grafana/agent/internal/util" - "github.com/stretchr/testify/require" -) - -// -// Tests for controller's utilization of the HTTPIntegration interface. -// - -func Test_controller_Handler_Sync(t *testing.T) { - httpConfigFromID := func(t *testing.T, name, identifier string) Config { - t.Helper() - - cfg := mockConfigNameTuple(t, name, identifier) - cfg.NewIntegrationFunc = func(log.Logger, Globals) (Integration, error) { - i := mockHTTPIntegration{ - Integration: NoOpIntegration, - HandlerFunc: func(prefix string) (http.Handler, error) { - return http.HandlerFunc(func(rw http.ResponseWriter, _ *http.Request) { - // We should never reach here since we don't run the integrations. - rw.WriteHeader(http.StatusBadRequest) - }), nil - }, - } - return i, nil - } - - return cfg - } - - cfg := controllerConfig{httpConfigFromID(t, "foo", "bar")} - ctrl, err := newController(util.TestLogger(t), cfg, Globals{}) - require.NoError(t, err) - - handler, err := ctrl.Handler("/integrations/") - require.NoError(t, err) - - srv := httptest.NewServer(handler) - - resp, err := srv.Client().Get(srv.URL + "/integrations/foo/bar") - require.NoError(t, err) - require.Equal(t, http.StatusServiceUnavailable, resp.StatusCode) -} - -// Test_controller_HTTPIntegration_Prefixes ensures that the controller will assign -// appropriate prefixes to HTTPIntegrations. 
-func Test_controller_HTTPIntegration_Prefixes(t *testing.T) { - httpConfigFromID := func(t *testing.T, prefixes *[]string, name, identifier string) Config { - t.Helper() - - cfg := mockConfigNameTuple(t, name, identifier) - cfg.NewIntegrationFunc = func(log.Logger, Globals) (Integration, error) { - i := mockHTTPIntegration{ - Integration: NoOpIntegration, - HandlerFunc: func(prefix string) (http.Handler, error) { - *prefixes = append(*prefixes, prefix) - return http.NotFoundHandler(), nil - }, - } - return i, nil - } - - return cfg - } - - t.Run("fully unique", func(t *testing.T) { - var prefixes []string - - ctrl, err := newController( - util.TestLogger(t), - controllerConfig{ - httpConfigFromID(t, &prefixes, "foo", "bar"), - httpConfigFromID(t, &prefixes, "fizz", "buzz"), - httpConfigFromID(t, &prefixes, "hello", "world"), - }, - Globals{}, - ) - require.NoError(t, err) - _ = newSyncController(t, ctrl) - - _, err = ctrl.Handler("/integrations/") - require.NoError(t, err) - - expect := []string{ - "/integrations/foo/", - "/integrations/fizz/", - "/integrations/hello/", - } - require.ElementsMatch(t, prefixes, expect) - }) - - t.Run("multiple instances", func(t *testing.T) { - var prefixes []string - - ctrl, err := newController( - util.TestLogger(t), - controllerConfig{ - httpConfigFromID(t, &prefixes, "foo", "bar"), - httpConfigFromID(t, &prefixes, "foo", "buzz"), - httpConfigFromID(t, &prefixes, "hello", "world"), - }, - Globals{}, - ) - require.NoError(t, err) - _ = newSyncController(t, ctrl) - - _, err = ctrl.Handler("/integrations/") - require.NoError(t, err) - - expect := []string{ - "/integrations/foo/bar/", - "/integrations/foo/buzz/", - "/integrations/hello/", - } - require.ElementsMatch(t, prefixes, expect) - }) -} - -// Test_controller_HTTPIntegration_Routing ensures that the controller will route -// requests to the appropriate integration. 
-func Test_controller_HTTPIntegration_Routing(t *testing.T) { - httpConfigFromID := func(t *testing.T, name, identifier string) Config { - t.Helper() - - cfg := mockConfigNameTuple(t, name, identifier) - cfg.NewIntegrationFunc = func(log.Logger, Globals) (Integration, error) { - i := mockHTTPIntegration{ - Integration: NoOpIntegration, - HandlerFunc: func(prefix string) (http.Handler, error) { - return http.HandlerFunc(func(rw http.ResponseWriter, r *http.Request) { - fmt.Fprintf(rw, "prefix=%s, path=%s", prefix, r.URL.Path) - }), nil - }, - } - return i, nil - } - - return cfg - } - - ctrl, err := newController( - util.TestLogger(t), - controllerConfig{ - httpConfigFromID(t, "foo", "bar"), - httpConfigFromID(t, "foo", "buzz"), - httpConfigFromID(t, "hello", "world"), - }, - Globals{}, - ) - require.NoError(t, err) - _ = newSyncController(t, ctrl) - - handler, err := ctrl.Handler("/integrations/") - require.NoError(t, err) - - srv := httptest.NewServer(handler) - - getResponse := func(t *testing.T, path string) string { - t.Helper() - resp, err := srv.Client().Get(srv.URL + path) - require.NoError(t, err) - defer resp.Body.Close() - - var sb strings.Builder - _, err = io.Copy(&sb, resp.Body) - require.NoError(t, err) - return sb.String() - } - - tt := []struct { - path, expect string - }{ - {"/integrations/foo/bar", "prefix=/integrations/foo/bar/, path=/integrations/foo/bar"}, - {"/integrations/foo/bar/", "prefix=/integrations/foo/bar/, path=/integrations/foo/bar/"}, - {"/integrations/foo/bar/extra", "prefix=/integrations/foo/bar/, path=/integrations/foo/bar/extra"}, - } - - for _, tc := range tt { - require.Equal(t, tc.expect, getResponse(t, tc.path)) - } -} - -// Test_controller_HTTPIntegration_NestedRouting ensures that the controller -// will work with nested routers. 
-func Test_controller_HTTPIntegration_NestedRouting(t *testing.T) { - cfg := mockConfigNameTuple(t, "test", "test") - cfg.NewIntegrationFunc = func(log.Logger, Globals) (Integration, error) { - i := mockHTTPIntegration{ - Integration: NoOpIntegration, - HandlerFunc: func(prefix string) (http.Handler, error) { - r := mux.NewRouter() - r.StrictSlash(true) - - r.HandleFunc(prefix, func(rw http.ResponseWriter, r *http.Request) { - fmt.Fprintf(rw, "prefix=%s, path=%s", prefix, r.URL.Path) - }) - - r.HandleFunc(prefix+"greet", func(rw http.ResponseWriter, _ *http.Request) { - fmt.Fprintf(rw, "Hello, world!") - }) - return r, nil - }, - } - return i, nil - } - - ctrl, err := newController(util.TestLogger(t), controllerConfig{cfg}, Globals{}) - require.NoError(t, err) - _ = newSyncController(t, ctrl) - - handler, err := ctrl.Handler("/integrations/") - require.NoError(t, err) - - srv := httptest.NewServer(handler) - - getResponse := func(t *testing.T, path string) string { - t.Helper() - resp, err := srv.Client().Get(srv.URL + path) - require.NoError(t, err) - defer resp.Body.Close() - - var sb strings.Builder - _, err = io.Copy(&sb, resp.Body) - require.NoError(t, err) - return sb.String() - } - - tt := []struct { - path, expect string - }{ - {"/integrations/test", "prefix=/integrations/test/, path=/integrations/test/"}, - {"/integrations/test/", "prefix=/integrations/test/, path=/integrations/test/"}, - {"/integrations/test/greet", "Hello, world!"}, - } - - for _, tc := range tt { - require.Equal(t, tc.expect, getResponse(t, tc.path)) - } -} - -type mockHTTPIntegration struct { - Integration - HandlerFunc func(prefix string) (http.Handler, error) -} - -func (m mockHTTPIntegration) Handler(prefix string) (http.Handler, error) { - return m.HandlerFunc(prefix) -} diff --git a/internal/static/integrations/v2/controller_metricsintegration_test.go b/internal/static/integrations/v2/controller_metricsintegration_test.go deleted file mode 100644 index 67222e61fe..0000000000 --- a/internal/static/integrations/v2/controller_metricsintegration_test.go +++ /dev/null @@ -1,184 +0,0 @@ -package integrations - -import ( - "context" - nethttp "net/http" - "testing" - - "github.com/go-kit/log" - "github.com/grafana/agent/internal/static/integrations/v2/autoscrape" - "github.com/grafana/agent/internal/util" - "github.com/prometheus/common/model" - prom_config "github.com/prometheus/prometheus/config" - "github.com/prometheus/prometheus/discovery" - "github.com/prometheus/prometheus/discovery/http" - "github.com/prometheus/prometheus/discovery/targetgroup" - "github.com/stretchr/testify/require" -) - -// -// Tests for controller's utilization of the MetricsIntegration interface. 
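
The mockHTTPIntegration type above uses a common Go testing trick: embed the interface to inherit default behavior, then override only the method under test. A self-contained sketch of the same pattern with hypothetical names:

    package main

    import "fmt"

    // greeter stands in for the Integration/HTTPIntegration interfaces.
    type greeter interface{ Greet() string }

    type defaultGreeter struct{}

    func (defaultGreeter) Greet() string { return "hello" }

    // mockGreeter embeds the interface for default behavior and overrides only
    // the method under test, like mockHTTPIntegration does with HandlerFunc.
    type mockGreeter struct {
        greeter
        GreetFunc func() string
    }

    func (m mockGreeter) Greet() string { return m.GreetFunc() }

    func main() {
        m := mockGreeter{greeter: defaultGreeter{}, GreetFunc: func() string { return "mocked" }}
        fmt.Println(m.Greet()) // mocked
    }
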
-// - -func Test_controller_MetricsIntegration_Targets(t *testing.T) { - integrationWithTarget := func(targetName string) Integration { - return mockMetricsIntegration{ - HTTPIntegration: newWaitStartedIntegration(), - TargetsFunc: func(Endpoint) []*targetgroup.Group { - return []*targetgroup.Group{{ - Targets: []model.LabelSet{{model.AddressLabel: model.LabelValue(targetName)}}, - }} - }, - ScrapeConfigsFunc: func(c discovery.Configs) []*autoscrape.ScrapeConfig { return nil }, - } - } - - integrations := []Config{ - mockConfigNameTuple(t, "a", "instanceA").WithNewIntegrationFunc(func(l log.Logger, g Globals) (Integration, error) { - return integrationWithTarget("a"), nil - }), - mockConfigNameTuple(t, "b", "instanceB").WithNewIntegrationFunc(func(l log.Logger, g Globals) (Integration, error) { - return integrationWithTarget("b"), nil - }), - } - - // waitIntegrations starts a controller and waits for all of its integrations - // to run. - waitIntegrations := func(t *testing.T, ctrl *controller) { - t.Helper() - _ = newSyncController(t, ctrl) - err := ctrl.forEachIntegration("/", func(ci *controlledIntegration, _ string) { - wsi := ci.i.(mockMetricsIntegration).HTTPIntegration.(*waitStartedIntegration) - _ = wsi.trigger.WaitContext(context.Background()) - }) - require.NoError(t, err) - } - - t.Run("All", func(t *testing.T) { - ctrl, err := newController( - util.TestLogger(t), - controllerConfig(integrations), - Globals{}, - ) - require.NoError(t, err) - waitIntegrations(t, ctrl) - - result := ctrl.Targets(Endpoint{Prefix: "/"}, TargetOptions{}) - expect := []*targetGroup{ - {Targets: []model.LabelSet{{model.AddressLabel: "a"}}}, - {Targets: []model.LabelSet{{model.AddressLabel: "b"}}}, - } - require.Equal(t, expect, result) - }) - - t.Run("All by Integration", func(t *testing.T) { - ctrl, err := newController( - util.TestLogger(t), - controllerConfig(integrations), - Globals{}, - ) - require.NoError(t, err) - waitIntegrations(t, ctrl) - - result := ctrl.Targets(Endpoint{Prefix: "/"}, TargetOptions{ - Integrations: []string{"a", "b"}, - }) - expect := []*targetGroup{ - {Targets: []model.LabelSet{{model.AddressLabel: "a"}}}, - {Targets: []model.LabelSet{{model.AddressLabel: "b"}}}, - } - require.Equal(t, expect, result) - }) - - t.Run("Specific Integration", func(t *testing.T) { - ctrl, err := newController( - util.TestLogger(t), - controllerConfig(integrations), - Globals{}, - ) - require.NoError(t, err) - waitIntegrations(t, ctrl) - - result := ctrl.Targets(Endpoint{Prefix: "/"}, TargetOptions{ - Integrations: []string{"a"}, - }) - expect := []*targetGroup{ - {Targets: []model.LabelSet{{model.AddressLabel: "a"}}}, - } - require.Equal(t, expect, result) - }) -} - -func Test_controller_MetricsIntegration_ScrapeConfig(t *testing.T) { - integrationWithTarget := func(targetName string) Integration { - return mockMetricsIntegration{ - HTTPIntegration: NoOpIntegration, - ScrapeConfigsFunc: func(c discovery.Configs) []*autoscrape.ScrapeConfig { - return []*autoscrape.ScrapeConfig{{ - Instance: "default", - Config: prom_config.ScrapeConfig{JobName: targetName}, - }} - }, - } - } - - integrations := []Config{ - mockConfigNameTuple(t, "a", "instanceA").WithNewIntegrationFunc(func(l log.Logger, g Globals) (Integration, error) { - return integrationWithTarget("a"), nil - }), - mockConfigNameTuple(t, "b", "instanceB").WithNewIntegrationFunc(func(l log.Logger, g Globals) (Integration, error) { - return integrationWithTarget("b"), nil - }), - } - - ctrl, err := newController( - util.TestLogger(t), - 
controllerConfig(integrations), - Globals{}, - ) - require.NoError(t, err) - _ = newSyncController(t, ctrl) - - result := ctrl.ScrapeConfigs("/", &http.DefaultSDConfig) - expect := []*autoscrape.ScrapeConfig{ - {Instance: "default", Config: prom_config.ScrapeConfig{JobName: "a"}}, - {Instance: "default", Config: prom_config.ScrapeConfig{JobName: "b"}}, - } - require.Equal(t, expect, result) -} - -// -// Tests for controller's utilization of the MetricsIntegration interface. -// - -type waitStartedIntegration struct { - trigger *util.WaitTrigger -} - -func newWaitStartedIntegration() *waitStartedIntegration { - return &waitStartedIntegration{trigger: util.NewWaitTrigger()} -} - -func (i *waitStartedIntegration) RunIntegration(ctx context.Context) error { - i.trigger.Trigger() - <-ctx.Done() - return nil -} - -func (i *waitStartedIntegration) Handler(prefix string) (nethttp.Handler, error) { - return nil, nil -} - -type mockMetricsIntegration struct { - HTTPIntegration - TargetsFunc func(ep Endpoint) []*targetgroup.Group - ScrapeConfigsFunc func(discovery.Configs) []*autoscrape.ScrapeConfig -} - -func (m mockMetricsIntegration) Targets(ep Endpoint) []*targetgroup.Group { - return m.TargetsFunc(ep) -} - -func (m mockMetricsIntegration) ScrapeConfigs(cfgs discovery.Configs) []*autoscrape.ScrapeConfig { - return m.ScrapeConfigsFunc(cfgs) -} diff --git a/internal/static/integrations/v2/controller_test.go b/internal/static/integrations/v2/controller_test.go deleted file mode 100644 index dcae71c56f..0000000000 --- a/internal/static/integrations/v2/controller_test.go +++ /dev/null @@ -1,286 +0,0 @@ -package integrations - -import ( - "context" - "strings" - "sync" - "testing" - - "github.com/go-kit/log" - "github.com/grafana/agent/internal/util" - "github.com/stretchr/testify/require" - "go.uber.org/atomic" -) - -// -// Tests for Controller's utilization of the core Integration interface. -// - -// Test_controller_UniqueIdentifier ensures that integrations must not share a (name, id) tuple. -func Test_controller_UniqueIdentifier(t *testing.T) { - controllerFromConfigs := func(t *testing.T, cc []Config) (*controller, error) { - t.Helper() - return newController(util.TestLogger(t), controllerConfig(cc), Globals{}) - } - - t.Run("different name, identifier", func(t *testing.T) { - _, err := controllerFromConfigs(t, []Config{ - mockConfigNameTuple(t, "foo", "bar"), - mockConfigNameTuple(t, "fizz", "buzz"), - }) - require.NoError(t, err) - }) - - t.Run("same name, different identifier", func(t *testing.T) { - _, err := controllerFromConfigs(t, []Config{ - mockConfigNameTuple(t, "foo", "bar"), - mockConfigNameTuple(t, "foo", "buzz"), - }) - require.NoError(t, err) - }) - - t.Run("same name, same identifier", func(t *testing.T) { - _, err := controllerFromConfigs(t, []Config{ - mockConfigNameTuple(t, "foo", "bar"), - mockConfigNameTuple(t, "foo", "bar"), - }) - require.Error(t, err, `multiple instance names "bar" in integration "foo"`) - }) -} - -// Test_controller_RunsIntegration ensures that integrations -// run. -func Test_controller_RunsIntegration(t *testing.T) { - var wg sync.WaitGroup - wg.Add(1) - - ctx, cancel := context.WithCancel(context.Background()) - - ctrl, err := newController( - util.TestLogger(t), - controllerConfig{ - mockConfigForIntegration(t, FuncIntegration(func(ctx context.Context) error { - defer wg.Done() - cancel() - <-ctx.Done() - return nil - })), - }, - Globals{}, - ) - require.NoError(t, err, "failed to create controller") - - // Run the controller. 
The controller should immediately run our fake integration - // which will cancel ctx and cause ctrl to exit. - ctrl.run(ctx) - - // Make sure that our integration exited too. - wg.Wait() -} - -// Test_controller_ConfigChanges ensures that integrations only get restarted -// when configs are no longer equal. -func Test_controller_ConfigChanges(t *testing.T) { - tc := func(t *testing.T, changed bool) (timesRan uint64) { - t.Helper() - - var integrationsWg sync.WaitGroup - var starts atomic.Uint64 - - mockIntegration := FuncIntegration(func(ctx context.Context) error { - integrationsWg.Done() - starts.Inc() - <-ctx.Done() - return nil - }) - - cfg := controllerConfig{ - mockConfig{ - NameFunc: func() string { return mockIntegrationName }, - ConfigEqualsFunc: func(Config) bool { return !changed }, - ApplyDefaultsFunc: func(g Globals) error { return nil }, - IdentifierFunc: func(Globals) (string, error) { - return mockIntegrationName, nil - }, - NewIntegrationFunc: func(log.Logger, Globals) (Integration, error) { - integrationsWg.Add(1) - return mockIntegration, nil - }, - }, - } - - globals := Globals{} - ctrl, err := newController(util.TestLogger(t), cfg, globals) - require.NoError(t, err, "failed to create controller") - - sc := newSyncController(t, ctrl) - require.NoError(t, sc.UpdateController(cfg, globals), "failed to re-apply config") - - // Wait for our integrations to have been started - integrationsWg.Wait() - - sc.Stop() - return starts.Load() - } - - t.Run("Unchanged", func(t *testing.T) { - starts := tc(t, false) - require.Equal(t, uint64(1), starts, "integration should only have started exactly once") - }) - - t.Run("Changed", func(t *testing.T) { - starts := tc(t, true) - require.Equal(t, uint64(2), starts, "integration should have started exactly twice") - }) -} - -func Test_controller_SingletonCheck(t *testing.T) { - var integrationsWg sync.WaitGroup - var starts atomic.Uint64 - - mockIntegration := FuncIntegration(func(ctx context.Context) error { - integrationsWg.Done() - starts.Inc() - <-ctx.Done() - return nil - }) - c1 := mockConfig{ - NameFunc: func() string { return mockIntegrationName }, - ConfigEqualsFunc: func(Config) bool { return true }, - ApplyDefaultsFunc: func(g Globals) error { return nil }, - IdentifierFunc: func(Globals) (string, error) { - return mockIntegrationName, nil - }, - NewIntegrationFunc: func(log.Logger, Globals) (Integration, error) { - integrationsWg.Add(1) - return mockIntegration, nil - }, - } - configMap := make(map[Config]Type) - configMap[&c1] = TypeSingleton - setRegistered(t, configMap) - cfg := controllerConfig{ - c1, - c1, - } - - globals := Globals{} - _, err := newController(util.TestLogger(t), cfg, globals) - require.Error(t, err) - require.True(t, strings.Contains(err.Error(), `integration "mock" may only be defined once`)) -} - -type syncController struct { - inner *controller - pool *workerPool -} - -// newSyncController pairs an unstarted controller with a manually managed -// worker pool to synchronously apply integrations. -func newSyncController(t *testing.T, inner *controller) *syncController { - t.Helper() - - sc := &syncController{ - inner: inner, - pool: newWorkerPool(context.Background(), inner.logger), - } - - // There's always immediately one queued integration set from any - // successfully created controller. 
- sc.refresh() - return sc -} - -func (sc *syncController) refresh() { - sc.inner.mut.Lock() - defer sc.inner.mut.Unlock() - - newIntegrations := <-sc.inner.runIntegrations - sc.pool.Reload(newIntegrations) - sc.inner.integrations = newIntegrations -} - -func (sc *syncController) UpdateController(c controllerConfig, g Globals) error { - err := sc.inner.UpdateController(c, g) - if err != nil { - return err - } - sc.refresh() - return nil -} - -func (sc *syncController) Stop() { - sc.pool.Close() -} - -const mockIntegrationName = "mock" - -type mockConfig struct { - NameFunc func() string - ApplyDefaultsFunc func(Globals) error - ConfigEqualsFunc func(Config) bool - IdentifierFunc func(Globals) (string, error) - NewIntegrationFunc func(log.Logger, Globals) (Integration, error) -} - -func (mc mockConfig) Name() string { - return mc.NameFunc() -} - -func (mc mockConfig) ConfigEquals(c Config) bool { - if mc.ConfigEqualsFunc != nil { - return mc.ConfigEqualsFunc(c) - } - return false -} - -func (mc mockConfig) ApplyDefaults(g Globals) error { - return mc.ApplyDefaultsFunc(g) -} - -func (mc mockConfig) Identifier(g Globals) (string, error) { - return mc.IdentifierFunc(g) -} - -func (mc mockConfig) NewIntegration(l log.Logger, g Globals) (Integration, error) { - return mc.NewIntegrationFunc(l, g) -} - -func (mc mockConfig) WithNewIntegrationFunc(f func(log.Logger, Globals) (Integration, error)) mockConfig { - return mockConfig{ - NameFunc: mc.NameFunc, - ApplyDefaultsFunc: mc.ApplyDefaultsFunc, - ConfigEqualsFunc: mc.ConfigEqualsFunc, - IdentifierFunc: mc.IdentifierFunc, - NewIntegrationFunc: f, - } -} - -func mockConfigNameTuple(t *testing.T, name, id string) mockConfig { - t.Helper() - - return mockConfig{ - NameFunc: func() string { return name }, - IdentifierFunc: func(_ Globals) (string, error) { return id, nil }, - ApplyDefaultsFunc: func(g Globals) error { return nil }, - NewIntegrationFunc: func(log.Logger, Globals) (Integration, error) { - return NoOpIntegration, nil - }, - } -} - -// mockConfigForIntegration returns a Config that will always return i. -func mockConfigForIntegration(t *testing.T, i Integration) mockConfig { - t.Helper() - - return mockConfig{ - NameFunc: func() string { return mockIntegrationName }, - ApplyDefaultsFunc: func(g Globals) error { return nil }, - IdentifierFunc: func(Globals) (string, error) { - return mockIntegrationName, nil - }, - NewIntegrationFunc: func(log.Logger, Globals) (Integration, error) { - return i, nil - }, - } -} diff --git a/internal/static/integrations/v2/controller_updateintegration_test.go b/internal/static/integrations/v2/controller_updateintegration_test.go deleted file mode 100644 index 4f0940ed6d..0000000000 --- a/internal/static/integrations/v2/controller_updateintegration_test.go +++ /dev/null @@ -1,79 +0,0 @@ -package integrations - -import ( - "context" - "sync" - "testing" - - "github.com/go-kit/log" - "github.com/grafana/agent/internal/util" - "github.com/stretchr/testify/require" - "go.uber.org/atomic" -) - -// -// Tests for controller's utilization of the UpdateIntegration interface. -// - -// Test_controller_UpdateIntegration ensures that the controller will call -// UpdateIntegration for integrations that support it. 
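
The test below exercises an optional-interface pattern: the controller type-asserts a running integration against UpdateIntegration, and only falls back to recreating it when the assertion fails or the update is rejected. A minimal sketch with stand-in types, not the real interfaces:

    package main

    import "fmt"

    type integration interface{ Run() }

    // updatable is the optional capability, detected with a type assertion.
    type updatable interface{ ApplyConfig(cfg string) error }

    type static struct{}

    func (static) Run() {}

    type dynamic struct{}

    func (dynamic) Run()                         {}
    func (dynamic) ApplyConfig(cfg string) error { return nil }

    func apply(i integration, cfg string) {
        if u, ok := i.(updatable); ok && u.ApplyConfig(cfg) == nil {
            fmt.Println("updated in place")
            return
        }
        fmt.Println("recreate required")
    }

    func main() {
        apply(static{}, "x")  // recreate required
        apply(dynamic{}, "x") // updated in place
    }
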
-func Test_controller_UpdateIntegration(t *testing.T) { - var ( - integrationStartWg sync.WaitGroup - applies, starts atomic.Uint64 - ) - - mockIntegration := mockUpdateIntegration{ - Integration: FuncIntegration(func(ctx context.Context) error { - starts.Inc() - integrationStartWg.Done() - <-ctx.Done() - return nil - }), - ApplyConfigFunc: func(Config, Globals) error { - applies.Inc() - return nil - }, - } - - cfg := controllerConfig{ - mockConfig{ - NameFunc: func() string { return mockIntegrationName }, - ConfigEqualsFunc: func(Config) bool { return false }, - ApplyDefaultsFunc: func(g Globals) error { return nil }, - IdentifierFunc: func(Globals) (string, error) { - return mockIntegrationName, nil - }, - NewIntegrationFunc: func(log.Logger, Globals) (Integration, error) { - integrationStartWg.Add(1) - return mockIntegration, nil - }, - }, - } - - ctrl, err := newController(util.TestLogger(t), cfg, Globals{}) - require.NoError(t, err, "failed to create controller") - - sc := newSyncController(t, ctrl) - - // Wait for our integration to start. - integrationStartWg.Wait() - - // Try to apply again. - require.NoError(t, sc.UpdateController(cfg, ctrl.globals), "failed to re-apply config") - integrationStartWg.Wait() - - sc.Stop() - - require.Equal(t, uint64(1), applies.Load(), "dynamic reload should have occurred") - require.Equal(t, uint64(1), starts.Load(), "restart should not have occurred") -} - -type mockUpdateIntegration struct { - Integration - ApplyConfigFunc func(Config, Globals) error -} - -func (m mockUpdateIntegration) ApplyConfig(c Config, g Globals) error { - return m.ApplyConfigFunc(c, g) -} diff --git a/internal/static/integrations/v2/eventhandler/eventhandler.go b/internal/static/integrations/v2/eventhandler/eventhandler.go deleted file mode 100644 index efb94e206d..0000000000 --- a/internal/static/integrations/v2/eventhandler/eventhandler.go +++ /dev/null @@ -1,472 +0,0 @@ -// Package eventhandler watches for Kubernetes Event objects and hands them off to -// Agent's Logs subsystem (embedded promtail) -package eventhandler - -import ( - "context" - "encoding/json" - "fmt" - "os" - "path/filepath" - "strings" - "sync" - "time" - - v1 "k8s.io/api/core/v1" - "k8s.io/client-go/informers" - "k8s.io/client-go/kubernetes" - "k8s.io/client-go/rest" - "k8s.io/client-go/tools/cache" - "k8s.io/client-go/tools/clientcmd" - "k8s.io/client-go/util/homedir" - - "github.com/go-kit/log" - "github.com/go-kit/log/level" - "github.com/grafana/agent/internal/static/integrations/v2" - "github.com/grafana/agent/internal/static/logs" - "github.com/grafana/loki/clients/pkg/promtail/api" - "github.com/grafana/loki/pkg/logproto" - "github.com/prometheus/common/model" - "github.com/prometheus/prometheus/model/labels" -) - -const ( - cacheFileMode = 0600 - logFormatJson = "json" - logFormatFmt = "logfmt" -) - -// EventHandler watches for Kubernetes Event objects and hands them off to -// Agent's logs subsystem (embedded promtail). -type EventHandler struct { - LogsClient *logs.Logs - LogsInstance string - Log log.Logger - CachePath string - LastEvent *ShippedEvents - InitEvent *ShippedEvents - EventInformer cache.SharedIndexInformer - SendTimeout time.Duration - ticker *time.Ticker - instance string - extraLabels labels.Labels - logFormat string - sync.Mutex -} - -// ShippedEvents stores a timestamp and map of event ResourceVersions shipped for that timestamp. -// Used to avoid double-shipping events upon restart. 
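
For reference, the cache described here is a single small JSON object per file. A sketch with a local stand-in type (same JSON tags as the struct below) that reproduces the testdata file removed later in this patch:

    package main

    import (
        "encoding/json"
        "fmt"
        "time"
    )

    // shippedEvents mirrors the shape and JSON tags of ShippedEvents below.
    type shippedEvents struct {
        Timestamp time.Time           `json:"ts"`
        RvMap     map[string]struct{} `json:"resourceVersion"`
    }

    func main() {
        e := shippedEvents{
            Timestamp: time.Date(2022, 1, 26, 13, 39, 40, 0, time.FixedZone("", -5*3600)),
            RvMap:     map[string]struct{}{"58588": {}},
        }
        buf, _ := json.Marshal(e)
        fmt.Println(string(buf))
        // {"ts":"2022-01-26T13:39:40-05:00","resourceVersion":{"58588":{}}}
    }
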
-type ShippedEvents struct { - // shipped event's timestamp - Timestamp time.Time `json:"ts"` - // map of event RVs (resource versions) already "shipped" (handed off) for this timestamp. - // this is to handle the case of a timestamp having multiple events, - // which happens quite frequently. - RvMap map[string]struct{} `json:"resourceVersion"` -} - -func newEventHandler(l log.Logger, globals integrations.Globals, c *Config) (integrations.Integration, error) { - var ( - config *rest.Config - err error - factory informers.SharedInformerFactory - id string - ) - - // Try using KubeconfigPath or inClusterConfig - config, err = clientcmd.BuildConfigFromFlags("", c.KubeconfigPath) - if err != nil { - level.Error(l).Log("msg", "Loading from KubeconfigPath or inClusterConfig failed", "err", err) - // Trying default home location - if home := homedir.HomeDir(); home != "" { - kubeconfigPath := filepath.Join(home, ".kube", "config") - config, err = clientcmd.BuildConfigFromFlags("", kubeconfigPath) - if err != nil { - level.Error(l).Log("msg", "Could not load a kubeconfig", "err", err) - return nil, err - } - } else { - err = fmt.Errorf("could not load a kubeconfig") - return nil, err - } - } - - clientset, err := kubernetes.NewForConfig(config) - if err != nil { - return nil, err - } - - // get an informer - if c.Namespace == "" { - factory = informers.NewSharedInformerFactory(clientset, time.Duration(c.InformerResync)*time.Second) - } else { - factory = informers.NewSharedInformerFactoryWithOptions(clientset, time.Duration(c.InformerResync)*time.Second, informers.WithNamespace(c.Namespace)) - } - - eventInformer := factory.Core().V1().Events().Informer() - id, _ = c.Identifier(globals) - - eh := &EventHandler{ - LogsClient: globals.Logs, - LogsInstance: c.LogsInstance, - Log: l, - CachePath: c.CachePath, - EventInformer: eventInformer, - SendTimeout: time.Duration(c.SendTimeout) * time.Second, - instance: id, - extraLabels: c.ExtraLabels, - logFormat: c.LogFormat, - } - // set the resource handler fns - if err := eh.initInformer(eventInformer); err != nil { - return nil, err - } - eh.ticker = time.NewTicker(time.Duration(c.FlushInterval) * time.Second) - return eh, nil -} - -// Initialize informer by setting event handler fns -func (eh *EventHandler) initInformer(eventsInformer cache.SharedIndexInformer) error { - _, err := eventsInformer.AddEventHandler(cache.ResourceEventHandlerFuncs{ - AddFunc: eh.addEvent, - UpdateFunc: eh.updateEvent, - DeleteFunc: eh.deleteEvent, - }) - return err -} - -// Handles new event objects -func (eh *EventHandler) addEvent(obj interface{}) { - event, _ := obj.(*v1.Event) - - err := eh.handleEvent(event) - if err != nil { - level.Error(eh.Log).Log("msg", "Error handling event", "err", err, "event", event) - } -} - -// Handles event object updates. 
Note that this gets triggered on informer resyncs and also
-// when events occur more than once (in which case .count is incremented)
-func (eh *EventHandler) updateEvent(objOld interface{}, objNew interface{}) {
-    eOld, _ := objOld.(*v1.Event)
-    eNew, _ := objNew.(*v1.Event)
-
-    if eOld.GetResourceVersion() == eNew.GetResourceVersion() {
-        // ignore resync updates
-        level.Debug(eh.Log).Log("msg", "Event RV didn't change, ignoring", "eRV", eNew.ResourceVersion)
-        return
-    }
-
-    err := eh.handleEvent(eNew)
-    if err != nil {
-        level.Error(eh.Log).Log("msg", "Error handling event", "err", err, "event", eNew)
-    }
-}
-
-func (eh *EventHandler) handleEvent(event *v1.Event) error {
-    eventTs := getTimestamp(event)
-
-    // if event is older than the one stored in cache on startup, we've shipped it
-    if eventTs.Before(eh.InitEvent.Timestamp) {
-        return nil
-    }
-    // if event is equal and is in map, we've shipped it
-    if eventTs.Equal(eh.InitEvent.Timestamp) {
-        if _, ok := eh.InitEvent.RvMap[event.ResourceVersion]; ok {
-            return nil
-        }
-    }
-
-    labels, msg, err := eh.extractEvent(event)
-    if err != nil {
-        return err
-    }
-
-    entry := newEntry(msg, eventTs, labels)
-    ok := eh.LogsClient.Instance(eh.LogsInstance).SendEntry(entry, eh.SendTimeout)
-    if !ok {
-        err = fmt.Errorf("msg=%s entry=%s", "error handing entry off to promtail", entry)
-        return err
-    }
-
-    // update cache with new "last" event
-    err = eh.updateLastEvent(event, eventTs)
-    if err != nil {
-        return err
-    }
-    return nil
-}
-
-// Called when event objects are removed from etcd; we can safely ignore this
-func (eh *EventHandler) deleteEvent(obj interface{}) {
-}
-
-// extract data from event fields and create labels, etc.
-// TODO: ship JSON blobs and allow users to configure using pipelines etc.
-// instead of hardcoding labels here
-func (eh *EventHandler) extractEvent(event *v1.Event) (model.LabelSet, string, error) {
-    var (
-        msg      strings.Builder
-        fields   = make(map[string]any)
-        labels   = make(model.LabelSet)
-        appender = appendTextMsg
-    )
-
-    if eh.logFormat == logFormatJson {
-        appender = appendJsonMsg
-    }
-
-    obj := event.InvolvedObject
-    if obj.Name == "" {
-        return nil, "", fmt.Errorf("no involved object for event")
-    }
-    appender(&msg, fields, "name", obj.Name, "%s")
-
-    labels[model.LabelName("namespace")] = model.LabelValue(obj.Namespace)
-    // TODO(hjet) omit "kubernetes"
-    labels[model.LabelName("job")] = model.LabelValue("integrations/kubernetes/eventhandler")
-    labels[model.LabelName("instance")] = model.LabelValue(eh.instance)
-    labels[model.LabelName("agent_hostname")] = model.LabelValue(eh.instance)
-    for _, lbl := range eh.extraLabels {
-        labels[model.LabelName(lbl.Name)] = model.LabelValue(lbl.Value)
-    }
-
-    // we add these fields to the log line to reduce label bloat and cardinality
-    if obj.Kind != "" {
-        appender(&msg, fields, "kind", obj.Kind, "%s")
-    }
-    if event.Action != "" {
-        appender(&msg, fields, "action", event.Action, "%s")
-    }
-    if obj.APIVersion != "" {
-        appender(&msg, fields, "objectAPIversion", obj.APIVersion, "%s")
-    }
-    if obj.ResourceVersion != "" {
-        appender(&msg, fields, "objectRV", obj.ResourceVersion, "%s")
-    }
-    if event.ResourceVersion != "" {
-        appender(&msg, fields, "eventRV", event.ResourceVersion, "%s")
-    }
-    if event.ReportingInstance != "" {
-        appender(&msg, fields, "reportinginstance", event.ReportingInstance, "%s")
-    }
-    if event.ReportingController != "" {
-        appender(&msg, fields, "reportingcontroller", event.ReportingController, "%s")
-    }
-    if event.Source.Component != "" {
-
appender(&msg, fields, "sourcecomponent", event.Source.Component, "%s") - } - if event.Source.Host != "" { - appender(&msg, fields, "sourcehost", event.Source.Host, "%s") - } - if event.Reason != "" { - appender(&msg, fields, "reason", event.Reason, "%s") - } - if event.Type != "" { - appender(&msg, fields, "type", event.Type, "%s") - } - if event.Count != 0 { - appender(&msg, fields, "count", event.Count, "%d") - } - - appender(&msg, fields, "msg", event.Message, "%q") - - if eh.logFormat == logFormatJson { - bb, err := json.Marshal(fields) - if err != nil { - return nil, "", fmt.Errorf("failed to marshal Event to JSON: %w", err) - } - msg.WriteString(string(bb)) - } - - return labels, strings.TrimSpace(msg.String()), nil -} - -// Appends the "fields" map with an entry for the provided event field -// Signatures of "appendJsonMsg" and "appendTextMsg" must match -func appendJsonMsg(msg *strings.Builder, fields map[string]any, key string, value any, format string) { - fields[key] = value -} - -// Appends the message builder with the provided event field -// Signatures of "appendJsonMsg" and "appendTextMsg" must match -func appendTextMsg(msg *strings.Builder, fields map[string]any, key string, value any, format string) { - msg.WriteString(key) - msg.WriteByte('=') - msg.WriteString(fmt.Sprintf(format, value)) - msg.WriteByte(' ') -} - -func getTimestamp(event *v1.Event) time.Time { - if !event.LastTimestamp.IsZero() { - return event.LastTimestamp.Time - } - return event.EventTime.Time -} - -func newEntry(msg string, ts time.Time, labels model.LabelSet) api.Entry { - entry := logproto.Entry{Timestamp: ts, Line: msg} - return api.Entry{Labels: labels, Entry: entry} -} - -// maintain "last event" state -func (eh *EventHandler) updateLastEvent(e *v1.Event, eventTs time.Time) error { - eh.Lock() - defer eh.Unlock() - - eventRv := e.ResourceVersion - - if eh.LastEvent == nil { - // startup - eh.LastEvent = &ShippedEvents{Timestamp: eventTs, RvMap: make(map[string]struct{})} - eh.LastEvent.RvMap[eventRv] = struct{}{} - return nil - } - - // if timestamp is the same, add to map - if eh.LastEvent != nil && eventTs.Equal(eh.LastEvent.Timestamp) { - eh.LastEvent.RvMap[eventRv] = struct{}{} - return nil - } - - // if timestamp is different, create a new ShippedEvents struct - eh.LastEvent = &ShippedEvents{Timestamp: eventTs, RvMap: make(map[string]struct{})} - eh.LastEvent.RvMap[eventRv] = struct{}{} - return nil -} - -func (eh *EventHandler) writeOutLastEvent() error { - level.Info(eh.Log).Log("msg", "Flushing last event to disk") - - eh.Lock() - defer eh.Unlock() - - if eh.LastEvent == nil { - level.Info(eh.Log).Log("msg", "No last event to flush, returning") - return nil - } - - temp := eh.CachePath + "-new" - buf, err := json.Marshal(&eh.LastEvent) - if err != nil { - return err - } - - err = os.WriteFile(temp, buf, os.FileMode(cacheFileMode)) - if err != nil { - return err - } - - if err = os.Rename(temp, eh.CachePath); err != nil { - return err - } - level.Info(eh.Log).Log("msg", "Flushed last event to disk") - return nil -} - -// RunIntegration runs the eventhandler integration -func (eh *EventHandler) RunIntegration(ctx context.Context) error { - var wg sync.WaitGroup - - ctx, cancel := context.WithCancel(ctx) - defer cancel() - - // Quick check to make sure logs instance exists - if i := eh.LogsClient.Instance(eh.LogsInstance); i == nil { - level.Error(eh.Log).Log("msg", "Logs instance not configured", "instance", eh.LogsInstance) - cancel() - } - - cacheDir := filepath.Dir(eh.CachePath) - if 
err := os.MkdirAll(cacheDir, 0755); err != nil { - level.Error(eh.Log).Log("msg", "Failed to create cache dir", "err", err) - cancel() - } - - // cache file to store events shipped (prevents double shipping on restart) - cacheFile, err := os.OpenFile(eh.CachePath, os.O_RDWR|os.O_CREATE, cacheFileMode) - if err != nil { - level.Error(eh.Log).Log("msg", "Failed to open or create cache file", "err", err) - cancel() - } - - // attempt to read last timestamp from cache file into a ShippedEvents struct - initEvent, err := readInitEvent(cacheFile, eh.Log) - if err != nil { - level.Error(eh.Log).Log("msg", "Failed to read last event from cache file", "err", err) - cancel() - } - eh.InitEvent = initEvent - - if err = cacheFile.Close(); err != nil { - level.Error(eh.Log).Log("msg", "Failed to close cache file", "err", err) - cancel() - } - - go func() { - level.Info(eh.Log).Log("msg", "Waiting for cache to sync (initial List of events)") - isSynced := cache.WaitForCacheSync(ctx.Done(), eh.EventInformer.HasSynced) - if !isSynced { - level.Error(eh.Log).Log("msg", "Failed to sync informer cache") - // maybe want to bail here - return - } - level.Info(eh.Log).Log("msg", "Informer cache synced") - }() - - // start the informer - // technically we should prob use the factory here, but since we - // only have one informer atm, this likely doesn't matter - go eh.EventInformer.Run(ctx.Done()) - - // wait for last event to flush before returning - wg.Add(1) - go func() { - defer wg.Done() - eh.runTicker(ctx.Done()) - }() - wg.Wait() - - return nil -} - -// write out last event every FlushInterval -func (eh *EventHandler) runTicker(stopCh <-chan struct{}) { - for { - select { - case <-stopCh: - if err := eh.writeOutLastEvent(); err != nil { - level.Error(eh.Log).Log("msg", "Failed to flush last event", "err", err) - } - return - case <-eh.ticker.C: - if err := eh.writeOutLastEvent(); err != nil { - level.Error(eh.Log).Log("msg", "Failed to flush last event", "err", err) - } - } - } -} - -func readInitEvent(file *os.File, logger log.Logger) (*ShippedEvents, error) { - var ( - initEvent = new(ShippedEvents) - ) - - stat, err := file.Stat() - if err != nil { - return nil, err - } - if stat.Size() == 0 { - level.Info(logger).Log("msg", "Cache file empty, setting zero-valued initEvent") - return initEvent, nil - } - - dec := json.NewDecoder(file) - err = dec.Decode(&initEvent) - if err != nil { - err = fmt.Errorf("could not read init event from cache: %s. 
Please delete the cache file", err) - return nil, err - } - level.Info(logger).Log("msg", "Loaded init event from cache file", "initEventTime", initEvent.Timestamp) - return initEvent, nil -} diff --git a/internal/static/integrations/v2/eventhandler/eventhandler_test.go b/internal/static/integrations/v2/eventhandler/eventhandler_test.go deleted file mode 100644 index 9d6e08a32a..0000000000 --- a/internal/static/integrations/v2/eventhandler/eventhandler_test.go +++ /dev/null @@ -1,54 +0,0 @@ -package eventhandler - -import ( - "os" - "testing" - "time" - - v1 "k8s.io/api/core/v1" - - "github.com/go-kit/log" - "github.com/stretchr/testify/require" -) - -func TestCacheLoad(t *testing.T) { - l := log.NewNopLogger() - testTime, _ := time.Parse(time.RFC3339, "2022-01-26T13:39:40-05:00") - expectedEvents := &ShippedEvents{ - Timestamp: testTime, - RvMap: map[string]struct{}{"58588": {}}, - } - cacheFile, err := os.OpenFile("testdata/eventhandler.cache", os.O_RDWR|os.O_CREATE, cacheFileMode) - require.NoError(t, err, "Failed to open test eventhandler cache file") - actualEvents, err := readInitEvent(cacheFile, l) - require.NoError(t, err, "Failed to parse last event from eventhandler cache file") - require.Equal(t, expectedEvents, actualEvents) -} - -func TestExtractEventJson(t *testing.T) { - var eh = new(EventHandler) - eh.logFormat = logFormatJson - var event = new(v1.Event) - event.InvolvedObject = v1.ObjectReference{ - Name: "test-object", - } - event.Message = "Event Message" - - _, msg, err := eh.extractEvent(event) - require.NoError(t, err, "Failed to extract test event") - require.Equal(t, "{\"msg\":\"Event Message\",\"name\":\"test-object\"}", msg) -} - -func TestExtractEventText(t *testing.T) { - var eh = new(EventHandler) - eh.logFormat = "logfmt" - var event = new(v1.Event) - event.InvolvedObject = v1.ObjectReference{ - Name: "test-object", - } - event.Message = "Event Message" - - _, msg, err := eh.extractEvent(event) - require.NoError(t, err, "Failed to extract test event") - require.Equal(t, "name=test-object msg=\"Event Message\"", msg) -} diff --git a/internal/static/integrations/v2/eventhandler/integration.go b/internal/static/integrations/v2/eventhandler/integration.go index caba0084e2..4453aeefce 100644 --- a/internal/static/integrations/v2/eventhandler/integration.go +++ b/internal/static/integrations/v2/eventhandler/integration.go @@ -1,6 +1,8 @@ package eventhandler import ( + "context" + "github.com/go-kit/log" "github.com/grafana/agent/internal/static/integrations/v2" "github.com/prometheus/prometheus/model/labels" @@ -13,7 +15,7 @@ var DefaultConfig = Config{ LogsInstance: "default", InformerResync: 120, FlushInterval: 10, - LogFormat: logFormatFmt, + LogFormat: "logfmt", } // Config configures the eventhandler integration @@ -71,9 +73,20 @@ func (c *Config) Identifier(globals integrations.Globals) (string, error) { // NewIntegration converts this config into an instance of an integration. func (c *Config) NewIntegration(l log.Logger, globals integrations.Globals) (integrations.Integration, error) { - return newEventHandler(l, globals, c) + // NOTE(rfratto): the eventhandler integration is never run, and all the + // logic has been moved to the loki.source.kubernetes_events component. + // + // This function is never called, but still exists for config conversion. 
+ return stubIntegration{}, nil } func init() { integrations.Register(&Config{}, integrations.TypeSingleton) } + +type stubIntegration struct{} + +func (stubIntegration) RunIntegration(ctx context.Context) error { + <-ctx.Done() + return nil +} diff --git a/internal/static/integrations/v2/eventhandler/testdata/eventhandler.cache b/internal/static/integrations/v2/eventhandler/testdata/eventhandler.cache deleted file mode 100644 index dc5814de08..0000000000 --- a/internal/static/integrations/v2/eventhandler/testdata/eventhandler.cache +++ /dev/null @@ -1 +0,0 @@ -{"ts":"2022-01-26T13:39:40-05:00","resourceVersion":{"58588":{}}} \ No newline at end of file diff --git a/internal/static/integrations/v2/integrations.go b/internal/static/integrations/v2/integrations.go index 1896280e3f..3c5ba8d3ab 100644 --- a/internal/static/integrations/v2/integrations.go +++ b/internal/static/integrations/v2/integrations.go @@ -26,10 +26,7 @@ import ( "github.com/go-kit/log" "github.com/grafana/agent/internal/static/integrations/v2/autoscrape" - "github.com/grafana/agent/internal/static/logs" - "github.com/grafana/agent/internal/static/metrics" "github.com/grafana/agent/internal/static/server" - "github.com/grafana/agent/internal/static/traces" "github.com/prometheus/prometheus/discovery" "github.com/prometheus/prometheus/discovery/targetgroup" ) @@ -84,14 +81,6 @@ type Globals struct { // TODO(rfratto): flag to override identifier at agent level? AgentIdentifier string - // Some integrations may wish to interact with various subsystems for their - // implementation if the desired behavior is not supported natively by the - // integration manager. - - Metrics *metrics.Agent // Metrics subsystem - Logs *logs.Logs // Logs subsystem - Tracing *traces.Traces // Traces subsystem - // Options the integrations subsystem is using. SubsystemOpts SubsystemOptions // BaseURL to use to invoke methods against the embedded HTTP server. diff --git a/internal/static/integrations/v2/subsystem.go b/internal/static/integrations/v2/subsystem.go index ca128a1926..ce501b37c6 100644 --- a/internal/static/integrations/v2/subsystem.go +++ b/internal/static/integrations/v2/subsystem.go @@ -1,19 +1,8 @@ package integrations import ( - "context" - "encoding/json" - "fmt" - "net/http" - "sync" - "time" - - "github.com/go-kit/log" - "github.com/gorilla/mux" "github.com/grafana/agent/internal/static/integrations/v2/autoscrape" "github.com/grafana/agent/internal/static/metrics" - "github.com/prometheus/common/model" - http_sd "github.com/prometheus/prometheus/discovery/http" ) const ( @@ -76,172 +65,3 @@ func (o *SubsystemOptions) UnmarshalYAML(unmarshal func(interface{}) error) erro *o = DefaultSubsystemOptions return UnmarshalYAML(o, unmarshal) } - -// Subsystem runs the integrations subsystem, managing a set of integrations. -type Subsystem struct { - logger log.Logger - - mut sync.RWMutex - globals Globals - apiHandler http.Handler // generated from controller - autoscraper *autoscrape.Scraper - - ctrl *controller - stopController context.CancelFunc - controllerExited chan struct{} -} - -// NewSubsystem creates and starts a new integrations Subsystem. Every field in -// IntegrationOptions must be filled out. 
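The deleted Subsystem below wires shutdown with a common Go lifecycle idiom: a cancel function stops the background controller, and a dedicated channel reports when it has fully exited. A minimal sketch of that pattern with hypothetical names:

package main

import (
	"context"
	"fmt"
	"time"
)

// lifecycle reduces the Subsystem shutdown wiring to its two moving parts:
// stopController (the cancel func) and controllerExited (the channel).
type lifecycle struct {
	stop   context.CancelFunc
	exited chan struct{}
}

func start(run func(ctx context.Context)) *lifecycle {
	ctx, cancel := context.WithCancel(context.Background())
	exited := make(chan struct{})
	go func() {
		run(ctx)
		close(exited) // signal watchers that the loop is fully done
	}()
	return &lifecycle{stop: cancel, exited: exited}
}

// Stop blocks until the background loop has returned, mirroring how
// Subsystem.Stop waits on controllerExited after calling stopController.
func (l *lifecycle) Stop() {
	l.stop()
	<-l.exited
}

func main() {
	l := start(func(ctx context.Context) {
		<-ctx.Done()
		time.Sleep(10 * time.Millisecond) // simulated cleanup work
	})
	l.Stop()
	fmt.Println("controller exited")
}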
-func NewSubsystem(l log.Logger, globals Globals) (*Subsystem, error) { - autoscraper := autoscrape.NewScraper(l, globals.Metrics.InstanceManager(), globals.DialContextFunc) - - l = log.With(l, "component", "integrations") - - ctrl, err := newController(l, controllerConfig(globals.SubsystemOpts.Configs), globals) - if err != nil { - autoscraper.Stop() - return nil, err - } - - ctx, cancel := context.WithCancel(context.Background()) - - ctrlExited := make(chan struct{}) - go func() { - ctrl.run(ctx) - close(ctrlExited) - }() - - s := &Subsystem{ - logger: l, - - globals: globals, - autoscraper: autoscraper, - - ctrl: ctrl, - stopController: cancel, - controllerExited: ctrlExited, - } - if err := s.ApplyConfig(globals); err != nil { - cancel() - autoscraper.Stop() - return nil, err - } - return s, nil -} - -// ApplyConfig updates the configuration of the integrations subsystem. -func (s *Subsystem) ApplyConfig(globals Globals) error { - const prefix = "/integrations/" - - s.mut.Lock() - defer s.mut.Unlock() - - if err := s.ctrl.UpdateController(controllerConfig(globals.SubsystemOpts.Configs), globals); err != nil { - return fmt.Errorf("error applying integrations: %w", err) - } - - var firstErr error - saveFirstErr := func(err error) { - if firstErr == nil { - firstErr = err - } - } - - // Set up HTTP wiring - { - handler, err := s.ctrl.Handler(prefix) - if err != nil { - saveFirstErr(fmt.Errorf("HTTP handler update failed: %w", err)) - } - s.apiHandler = handler - } - - // Set up self-scraping - { - httpSDConfig := http_sd.DefaultSDConfig - httpSDConfig.RefreshInterval = model.Duration(time.Second * 5) // TODO(rfratto): make configurable? - - apiURL := globals.CloneAgentBaseURL() - apiURL.Path = IntegrationsSDEndpoint - httpSDConfig.URL = apiURL.String() - - scrapeConfigs := s.ctrl.ScrapeConfigs(prefix, &httpSDConfig) - if err := s.autoscraper.ApplyConfig(scrapeConfigs); err != nil { - saveFirstErr(fmt.Errorf("configuring autoscraper failed: %w", err)) - } - } - - s.globals = globals - return firstErr -} - -// WireAPI hooks up integration endpoints to r. -func (s *Subsystem) WireAPI(r *mux.Router) { - const prefix = "/integrations" - r.PathPrefix(prefix).HandlerFunc(func(rw http.ResponseWriter, r *http.Request) { - s.mut.RLock() - handler := s.apiHandler - s.mut.RUnlock() - - if handler == nil { - rw.WriteHeader(http.StatusServiceUnavailable) - fmt.Fprintf(rw, "Integrations HTTP endpoints not yet available") - return - } - handler.ServeHTTP(rw, r) - }) - - r.HandleFunc(IntegrationsSDEndpoint, func(rw http.ResponseWriter, r *http.Request) { - targetOptions, err := TargetOptionsFromParams(r.URL.Query()) - if err != nil { - http.Error(rw, fmt.Sprintf("invalid query parameters: %s", err), http.StatusBadRequest) - return - } - - rw.Header().Set("Content-Type", "application/json") - rw.WriteHeader(http.StatusOK) - - tgs := s.ctrl.Targets(Endpoint{ - Host: r.Host, - Prefix: prefix, - }, targetOptions) - - // Normalize targets. We may have targets in the group with non-address - // labels. These need to be retained, so we'll just split everything up - // into multiple groups. - // - // TODO(rfratto): optimize to remove redundant groups - finalTgs := []*targetGroup{} - for _, group := range tgs { - for _, target := range group.Targets { - // Create the final labels for the group. This will be everything from - // the group and the target (except for model.AddressLabel). Labels - // from target take precedence labels from over group. 
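The normalization loop continuing below splits each discovered group into single-target groups so per-target labels survive HTTP SD. A simplified sketch of that transformation, using plain string maps in place of model.LabelSet and "__address__" in place of model.AddressLabel:

package main

import "fmt"

type group struct {
	Targets []map[string]string
	Labels  map[string]string
}

// normalize mirrors the loop above: one output group per target, with the
// target's labels merged over the group's labels and the address kept as
// the sole target entry.
func normalize(g group) []group {
	var out []group
	for _, target := range g.Targets {
		labels := map[string]string{}
		for k, v := range g.Labels {
			labels[k] = v
		}
		for k, v := range target {
			if k != "__address__" {
				labels[k] = v // target labels win over group labels
			}
		}
		out = append(out, group{
			Targets: []map[string]string{{"__address__": target["__address__"]}},
			Labels:  labels,
		})
	}
	return out
}

func main() {
	g := group{
		Targets: []map[string]string{{"__address__": "127.0.0.1:9090", "job": "a"}},
		Labels:  map[string]string{"env": "dev"},
	}
	fmt.Println(normalize(g))
}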
- groupLabels := group.Labels.Merge(target) - delete(groupLabels, model.AddressLabel) - - finalTgs = append(finalTgs, &targetGroup{ - Targets: []model.LabelSet{{model.AddressLabel: target[model.AddressLabel]}}, - Labels: groupLabels, - }) - } - } - - enc := json.NewEncoder(rw) - _ = enc.Encode(finalTgs) - }) - - r.HandleFunc(IntegrationsAutoscrapeTargetsEndpoint, func(rw http.ResponseWriter, r *http.Request) { - allTargets := s.autoscraper.TargetsActive() - metrics.ListTargetsHandler(allTargets).ServeHTTP(rw, r) - }) -} - -// Stop stops the manager and all running integrations. Blocks until all -// running integrations exit. -func (s *Subsystem) Stop() { - s.autoscraper.Stop() - s.stopController() - <-s.controllerExited -} diff --git a/internal/static/integrations/v2/targetgroup.go b/internal/static/integrations/v2/targetgroup.go deleted file mode 100644 index 4400105c4f..0000000000 --- a/internal/static/integrations/v2/targetgroup.go +++ /dev/null @@ -1,28 +0,0 @@ -package integrations - -import ( - "encoding/json" - - "github.com/prometheus/common/model" - "github.com/prometheus/prometheus/discovery/targetgroup" -) - -// targetGroup implements json.Marshaler for targetgroup.Group. This is -// required do to an issue with Prometheus: HTTP SD expects to be unmarshaled -// as JSON, but the form it expects to unmarshal the target groups in is not the form -// it marshals out to JSON as. -type targetGroup targetgroup.Group - -func (tg *targetGroup) MarshalJSON() ([]byte, error) { - g := &struct { - Targets []string `json:"targets"` - Labels model.LabelSet `json:"labels,omitempty"` - }{ - Targets: make([]string, 0, len(tg.Targets)), - Labels: tg.Labels, - } - for _, t := range tg.Targets { - g.Targets = append(g.Targets, string(t[model.AddressLabel])) - } - return json.Marshal(g) -} diff --git a/internal/static/integrations/v2/workers.go b/internal/static/integrations/v2/workers.go deleted file mode 100644 index 4315710c49..0000000000 --- a/internal/static/integrations/v2/workers.go +++ /dev/null @@ -1,122 +0,0 @@ -package integrations - -import ( - "context" - "sync" - - "github.com/go-kit/log" - "github.com/go-kit/log/level" -) - -type workerPool struct { - log log.Logger - parentCtx context.Context - - mut sync.Mutex - workers map[*controlledIntegration]worker - - runningWorkers sync.WaitGroup -} - -type worker struct { - ci *controlledIntegration - stop context.CancelFunc - exited chan struct{} -} - -func newWorkerPool(ctx context.Context, l log.Logger) *workerPool { - return &workerPool{ - log: l, - parentCtx: ctx, - - workers: make(map[*controlledIntegration]worker), - } -} - -func (p *workerPool) Reload(newIntegrations []*controlledIntegration) { - p.mut.Lock() - defer p.mut.Unlock() - - level.Debug(p.log).Log("msg", "updating running integrations", "prev_count", len(p.workers), "new_count", len(newIntegrations)) - - // Shut down workers whose integrations have gone away. - var stopped []worker - for ci, w := range p.workers { - var found bool - for _, current := range newIntegrations { - if ci == current { - found = true - break - } - } - if !found { - w.stop() - stopped = append(stopped, w) - } - } - for _, w := range stopped { - // Wait for stopped integrations to fully exit. We do this in a separate - // loop so context cancellations can be handled simultaneously, allowing - // the wait to complete faster. - <-w.exited - } - - // Spawn new workers for integrations that don't have them. 
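The deleted targetgroup.go above exists because Prometheus HTTP SD consumes a different JSON shape than targetgroup.Group produces. A small sketch of the wire format that targetGroup.MarshalJSON emits, bare address strings plus one label set:

package main

import (
	"encoding/json"
	"fmt"
)

// httpSDGroup is the shape HTTP SD expects to unmarshal, matching the
// anonymous struct in the deleted MarshalJSON.
type httpSDGroup struct {
	Targets []string          `json:"targets"`
	Labels  map[string]string `json:"labels,omitempty"`
}

func main() {
	out, _ := json.Marshal(httpSDGroup{
		Targets: []string{"127.0.0.1:12345"},
		Labels:  map[string]string{"job": "integrations/agent"},
	})
	fmt.Println(string(out)) // {"targets":["127.0.0.1:12345"],"labels":{"job":"integrations/agent"}}
}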
- for _, current := range newIntegrations { - if _, workerExists := p.workers[current]; workerExists { - continue - } - // This integration doesn't have an existing worker; schedule a new one. - p.scheduleWorker(current) - } -} - -func (p *workerPool) Close() { - p.mut.Lock() - defer p.mut.Unlock() - - level.Debug(p.log).Log("msg", "stopping all integrations") - - defer p.runningWorkers.Wait() - for _, w := range p.workers { - w.stop() - } -} - -func (p *workerPool) scheduleWorker(ci *controlledIntegration) { - p.runningWorkers.Add(1) - - ctx, cancel := context.WithCancel(p.parentCtx) - - w := worker{ - ci: ci, - stop: cancel, - exited: make(chan struct{}), - } - p.workers[ci] = w - - go func() { - ci.running.Store(true) - - // When the integration stops running, we want to free any of our - // resources that will notify watchers waiting for the worker to stop. - // - // Afterwards, we'll block until we remove ourselves from the map; having - // a worker remove itself on shutdown allows exited integrations to - // re-start when the config is reloaded. - defer func() { - ci.running.Store(false) - close(w.exited) - p.runningWorkers.Done() - - p.mut.Lock() - defer p.mut.Unlock() - delete(p.workers, ci) - }() - - err := ci.i.RunIntegration(ctx) - if err != nil { - level.Error(p.log).Log("msg", "integration exited with error", "id", ci.id, "err", err) - } - }() -} diff --git a/internal/static/logs/http.go b/internal/static/logs/http.go deleted file mode 100644 index b3e7a00d88..0000000000 --- a/internal/static/logs/http.go +++ /dev/null @@ -1,84 +0,0 @@ -package logs - -import ( - "net/http" - "sort" - - "github.com/go-kit/log/level" - "github.com/gorilla/mux" - "github.com/grafana/agent/internal/static/metrics/cluster/configapi" - "github.com/grafana/loki/clients/pkg/promtail/targets/target" - "github.com/prometheus/common/model" -) - -// WireAPI adds API routes to the provided mux router. -func (l *Logs) WireAPI(r *mux.Router) { - r.HandleFunc("/agent/api/v1/logs/instances", l.ListInstancesHandler).Methods("GET") - r.HandleFunc("/agent/api/v1/logs/targets", l.ListTargetsHandler).Methods("GET") -} - -// ListInstancesHandler writes the set of currently running instances to the http.ResponseWriter. -func (l *Logs) ListInstancesHandler(w http.ResponseWriter, _ *http.Request) { - instances := l.instances - instanceNames := make([]string, 0, len(instances)) - for instance := range instances { - instanceNames = append(instanceNames, instance) - } - sort.Strings(instanceNames) - - err := configapi.WriteResponse(w, http.StatusOK, instanceNames) - if err != nil { - level.Error(l.l).Log("msg", "failed to write response", "err", err) - } -} - -// ListTargetsHandler retrieves the full set of targets across all instances and shows -// information on them. 
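The handlers below all reply with the same success envelope that the deleted tests assert on: {"status":"success","data":[...]}. A sketch of that response helper, with writeResponse as a hypothetical stand-in for configapi.WriteResponse:

package main

import (
	"encoding/json"
	"fmt"
	"net/http"
	"net/http/httptest"
)

// writeResponse wraps any payload in the status/data envelope used by the
// agent's config API responses.
func writeResponse(w http.ResponseWriter, code int, data interface{}) error {
	w.Header().Set("Content-Type", "application/json")
	w.WriteHeader(code)
	return json.NewEncoder(w).Encode(struct {
		Status string      `json:"status"`
		Data   interface{} `json:"data"`
	}{Status: "success", Data: data})
}

func main() {
	rr := httptest.NewRecorder()
	_ = writeResponse(rr, http.StatusOK, []string{"instance-a"})
	fmt.Print(rr.Body.String()) // {"status":"success","data":["instance-a"]}
}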
-func (l *Logs) ListTargetsHandler(w http.ResponseWriter, r *http.Request) { - instances := l.instances - allTargets := make(map[string]TargetSet, len(instances)) - for instName, inst := range instances { - allTargets[instName] = inst.promtail.ActiveTargets() - } - listTargetsHandler(allTargets).ServeHTTP(w, r) -} - -func listTargetsHandler(targets map[string]TargetSet) http.Handler { - return http.HandlerFunc(func(rw http.ResponseWriter, _ *http.Request) { - resp := ListTargetsResponse{} - for instance, tset := range targets { - for key, targets := range tset { - for _, tgt := range targets { - resp = append(resp, TargetInfo{ - InstanceName: instance, - TargetGroup: key, - Type: tgt.Type(), - DiscoveredLabels: tgt.DiscoveredLabels(), - Labels: tgt.Labels(), - Ready: tgt.Ready(), - Details: tgt.Details(), - }) - } - } - } - _ = configapi.WriteResponse(rw, http.StatusOK, resp) - }) -} - -// TargetSet is a set of targets for an individual scraper. -type TargetSet map[string][]target.Target - -// ListTargetsResponse is returned by the ListTargetsHandler. -type ListTargetsResponse []TargetInfo - -// TargetInfo describes a specific target. -type TargetInfo struct { - InstanceName string `json:"instance"` - TargetGroup string `json:"target_group"` - - Type target.TargetType `json:"type"` - Labels model.LabelSet `json:"labels"` - DiscoveredLabels model.LabelSet `json:"discovered_labels"` - Ready bool `json:"ready"` - Details interface{} `json:"details"` -} diff --git a/internal/static/logs/http_test.go b/internal/static/logs/http_test.go deleted file mode 100644 index e37110f205..0000000000 --- a/internal/static/logs/http_test.go +++ /dev/null @@ -1,177 +0,0 @@ -package logs - -import ( - "net/http" - "net/http/httptest" - "strings" - "testing" - - "github.com/grafana/agent/internal/util" - "github.com/grafana/loki/clients/pkg/promtail/targets/target" - "github.com/prometheus/client_golang/prometheus" - "github.com/prometheus/common/model" - "github.com/stretchr/testify/require" - "gopkg.in/yaml.v2" -) - -func TestAgent_ListInstancesHandler(t *testing.T) { - cfgText := util.Untab(` -configs: -- name: instance-a - positions: - filename: /tmp/positions.yaml - clients: - - url: http://127.0.0.1:80/loki/api/v1/push - `) - - var cfg Config - - logger := util.TestLogger(t) - l, err := New(prometheus.NewRegistry(), &cfg, logger, false) - require.NoError(t, err) - defer l.Stop() - - r := httptest.NewRequest("GET", "/agent/api/v1/logs/instances", nil) - - t.Run("no instances", func(t *testing.T) { - rr := httptest.NewRecorder() - l.ListInstancesHandler(rr, r) - expect := `{"status":"success","data":[]}` - require.Equal(t, expect, rr.Body.String()) - }) - - dec := yaml.NewDecoder(strings.NewReader(cfgText)) - dec.SetStrict(true) - require.NoError(t, dec.Decode(&cfg)) - t.Run("non-empty", func(t *testing.T) { - require.NoError(t, l.ApplyConfig(&cfg, false)) - - expect := `{"status":"success","data":["instance-a"]}` - - util.Eventually(t, func(t require.TestingT) { - rr := httptest.NewRecorder() - l.ListInstancesHandler(rr, r) - require.Equal(t, expect, rr.Body.String()) - }) - }) -} - -func TestAgent_ListTargetsHandler(t *testing.T) { - cfgText := util.Untab(` -configs: -- name: instance-a - positions: - filename: /tmp/positions.yaml - clients: - - url: http://127.0.0.1:80/loki/api/v1/push - `) - - var cfg Config - dec := yaml.NewDecoder(strings.NewReader(cfgText)) - dec.SetStrict(true) - require.NoError(t, dec.Decode(&cfg)) - - logger := util.TestLogger(t) - l, err := New(prometheus.NewRegistry(), &cfg, 
logger, false) - require.NoError(t, err) - defer l.Stop() - - r := httptest.NewRequest("GET", "/agent/api/v1/logs/targets", nil) - - t.Run("scrape manager not ready", func(t *testing.T) { - rr := httptest.NewRecorder() - l.ListTargetsHandler(rr, r) - expect := `{"status": "success", "data": []}` - require.JSONEq(t, expect, rr.Body.String()) - require.Equal(t, http.StatusOK, rr.Result().StatusCode) - }) - - t.Run("scrape manager targets", func(t *testing.T) { - rr := httptest.NewRecorder() - targets := map[string]TargetSet{ - "instance-a": mockActiveTargets(), - } - listTargetsHandler(targets).ServeHTTP(rr, r) - expect := `{ - "status": "success", - "data": [ - { - "instance": "instance-a", - "target_group": "varlogs", - "type": "File", - "labels": { - "job": "varlogs" - }, - "discovered_labels": { - "__address__": "localhost", - "__path__": "/var/log/*log", - "job": "varlogs" - }, - "ready": true, - "details": { - "/var/log/alternatives.log": 13386, - "/var/log/apport.log": 0, - "/var/log/auth.log": 37009, - "/var/log/bootstrap.log": 107347, - "/var/log/dpkg.log": 374420, - "/var/log/faillog": 0, - "/var/log/fontconfig.log": 11629, - "/var/log/gpu-manager.log": 1541, - "/var/log/kern.log": 782582, - "/var/log/lastlog": 0, - "/var/log/syslog": 788450 - } - } - ] - }` - require.JSONEq(t, expect, rr.Body.String()) - require.Equal(t, http.StatusOK, rr.Result().StatusCode) - }) -} - -func mockActiveTargets() map[string][]target.Target { - return map[string][]target.Target{ - "varlogs": {&mockTarget{}}, - } -} - -type mockTarget struct { -} - -func (mt *mockTarget) Type() target.TargetType { - return target.TargetType("File") -} - -func (mt *mockTarget) DiscoveredLabels() model.LabelSet { - return map[model.LabelName]model.LabelValue{ - "__address__": "localhost", - "__path__": "/var/log/*log", - "job": "varlogs", - } -} - -func (mt *mockTarget) Labels() model.LabelSet { - return map[model.LabelName]model.LabelValue{ - "job": "varlogs", - } -} - -func (mt *mockTarget) Ready() bool { - return true -} - -func (mt *mockTarget) Details() interface{} { - return map[string]int{ - "/var/log/alternatives.log": 13386, - "/var/log/apport.log": 0, - "/var/log/auth.log": 37009, - "/var/log/bootstrap.log": 107347, - "/var/log/dpkg.log": 374420, - "/var/log/faillog": 0, - "/var/log/fontconfig.log": 11629, - "/var/log/gpu-manager.log": 1541, - "/var/log/kern.log": 782582, - "/var/log/lastlog": 0, - "/var/log/syslog": 788450, - } -} diff --git a/internal/static/logs/logs.go b/internal/static/logs/logs.go index 2d6c478fe5..8dd2035341 100644 --- a/internal/static/logs/logs.go +++ b/internal/static/logs/logs.go @@ -2,142 +2,20 @@ package logs import ( - "fmt" - "os" - "path/filepath" - "sync" - "time" _ "time/tzdata" // embed timezone data - "github.com/go-kit/log" - "github.com/go-kit/log/level" - "github.com/grafana/agent/internal/agentseed" "github.com/grafana/agent/internal/useragent" - "github.com/grafana/agent/internal/util" - "github.com/grafana/loki/clients/pkg/promtail" - "github.com/grafana/loki/clients/pkg/promtail/api" "github.com/grafana/loki/clients/pkg/promtail/client" "github.com/grafana/loki/clients/pkg/promtail/config" "github.com/grafana/loki/clients/pkg/promtail/server" - "github.com/grafana/loki/clients/pkg/promtail/targets/file" "github.com/grafana/loki/clients/pkg/promtail/wal" "github.com/grafana/loki/pkg/tracing" - "github.com/prometheus/client_golang/prometheus" ) func init() { client.UserAgent = useragent.Get() } -// Logs is a Logs log collection. 
It uses multiple distinct sets of Logs -// Promtail agents to collect logs and send them to a Logs server. -type Logs struct { - mut sync.Mutex - - reg prometheus.Registerer - l log.Logger - instances map[string]*Instance -} - -// New creates and starts Loki log collection. -func New(reg prometheus.Registerer, c *Config, l log.Logger, dryRun bool) (*Logs, error) { - logs := &Logs{ - instances: make(map[string]*Instance), - reg: reg, - l: log.With(l, "component", "logs"), - } - if err := logs.ApplyConfig(c, dryRun); err != nil { - return nil, err - } - return logs, nil -} - -// ApplyConfig updates Logs with a new Config. -func (l *Logs) ApplyConfig(c *Config, dryRun bool) error { - l.mut.Lock() - defer l.mut.Unlock() - - if c == nil { - c = &Config{} - } - - newInstances := make(map[string]*Instance, len(c.Configs)) - - for _, ic := range c.Configs { - // If an old instance existed, update it and move it to the new map. - if old, ok := l.instances[ic.Name]; ok { - err := old.ApplyConfig(ic, c.Global, dryRun) - if err != nil { - return err - } - - newInstances[ic.Name] = old - continue - } - - inst, err := NewInstance(l.reg, ic, c.Global, l.l, dryRun) - if err != nil { - return fmt.Errorf("unable to apply config for %s: %w", ic.Name, err) - } - newInstances[ic.Name] = inst - } - - // Any promtail in l.instances that isn't in newInstances has been removed - // from the config. Stop them before replacing the map. - for key, i := range l.instances { - if _, exist := newInstances[key]; exist { - continue - } - i.Stop() - } - l.instances = newInstances - - return nil -} - -// Stop stops the log collector. -func (l *Logs) Stop() { - l.mut.Lock() - defer l.mut.Unlock() - - for _, i := range l.instances { - i.Stop() - } -} - -// Instance is used to retrieve a named Logs instance -func (l *Logs) Instance(name string) *Instance { - l.mut.Lock() - defer l.mut.Unlock() - - return l.instances[name] -} - -// Instance is an individual Logs instance. -type Instance struct { - mut sync.Mutex - - cfg *InstanceConfig - log log.Logger - reg *util.Unregisterer - - promtail *promtail.Promtail -} - -// NewInstance creates and starts a Logs instance. -func NewInstance(reg prometheus.Registerer, c *InstanceConfig, g GlobalConfig, l log.Logger, dryRun bool) (*Instance, error) { - instReg := prometheus.WrapRegistererWith(prometheus.Labels{"logs_config": c.Name}, reg) - - inst := Instance{ - reg: util.WrapWithUnregisterer(instReg), - log: log.With(l, "logs_config", c.Name), - } - if err := inst.ApplyConfig(c, g, dryRun); err != nil { - return nil, err - } - return &inst, nil -} - // DefaultConfig returns a default config for a Logs instance. func DefaultConfig() config.Config { return config.Config{ @@ -146,103 +24,3 @@ func DefaultConfig() config.Config { WAL: wal.Config{Enabled: false}, } } - -// ApplyConfig will apply a new InstanceConfig. If the config hasn't changed, -// then nothing will happen, otherwise the old Promtail will be stopped and -// then replaced with a new one. -func (i *Instance) ApplyConfig(c *InstanceConfig, g GlobalConfig, dryRun bool) error { - i.mut.Lock() - defer i.mut.Unlock() - - // No-op if the configs haven't changed. - if util.CompareYAML(c, i.cfg) { - level.Debug(i.log).Log("msg", "instance config hasn't changed, not recreating Promtail") - return nil - } - i.cfg = c - - positionsDir := filepath.Dir(c.PositionsConfig.PositionsFile) - err := os.MkdirAll(positionsDir, 0775) - if err != nil { - level.Warn(i.log).Log("msg", "failed to create the positions directory. 
logs may be unable to save their position", "path", positionsDir, "err", err) - } - - if i.promtail != nil { - i.promtail.Shutdown() - i.promtail = nil - } - - // Unregister all existing metrics before trying to create a new instance. - if !i.reg.UnregisterAll() { - // If UnregisterAll fails, we need to abort, otherwise the new promtail - // would try to re-register an existing metric and might panic. - return fmt.Errorf("failed to unregister all metrics from previous promtail. THIS IS A BUG") - } - - if len(c.ClientConfigs) == 0 { - level.Debug(i.log).Log("msg", "skipping creation of a promtail because no client_configs are present") - return nil - } - - uid := agentseed.Get().UID - for i := range c.ClientConfigs { - // ClientConfigs is a slice of struct, so we set values with the index - if c.ClientConfigs[i].Headers == nil { - c.ClientConfigs[i].Headers = map[string]string{} - } - c.ClientConfigs[i].Headers[agentseed.HeaderName] = uid - } - - clientMetrics := client.NewMetrics(i.reg) - cfg := DefaultConfig() - cfg.Global = config.GlobalConfig{ - FileWatch: file.WatchConfig{ - MinPollFrequency: g.FileWatch.MinPollFrequency, - MaxPollFrequency: g.FileWatch.MaxPollFrequency, - }, - } - cfg.ClientConfigs = c.ClientConfigs - cfg.PositionsConfig = c.PositionsConfig - cfg.ScrapeConfig = c.ScrapeConfig - cfg.TargetConfig = c.TargetConfig - cfg.LimitsConfig = c.LimitsConfig - - p, err := promtail.New(cfg, nil, clientMetrics, dryRun, promtail.WithLogger(i.log), promtail.WithRegisterer(i.reg)) - if err != nil { - return fmt.Errorf("unable to create logs instance: %w", err) - } - - i.promtail = p - return nil -} - -// SendEntry passes an entry to the internal promtail client and returns true if successfully sent. It is -// best effort and not guaranteed to succeed. -func (i *Instance) SendEntry(entry api.Entry, dur time.Duration) bool { - i.mut.Lock() - defer i.mut.Unlock() - - // promtail is nil it has been stopped - if i.promtail != nil { - // send non blocking so we don't block the mutex. this is best effort - select { - case i.promtail.Client().Chan() <- entry: - return true - case <-time.After(dur): - } - } - - return false -} - -// Stop stops the Promtail instance. 
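SendEntry above is deliberately best effort: it attempts a channel send but abandons it after a deadline so the caller (and the mutex it holds) is never blocked indefinitely. The same pattern in isolation:

package main

import (
	"fmt"
	"time"
)

// trySend attempts a send on ch, giving up after timeout, exactly as
// SendEntry does with the promtail client channel.
func trySend[T any](ch chan<- T, v T, timeout time.Duration) bool {
	select {
	case ch <- v:
		return true
	case <-time.After(timeout):
		return false
	}
}

func main() {
	ch := make(chan string) // unbuffered and never read: the send must time out
	fmt.Println(trySend(ch, "entry", 50*time.Millisecond)) // false
}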
-func (i *Instance) Stop() { - i.mut.Lock() - defer i.mut.Unlock() - - if i.promtail != nil { - i.promtail.Shutdown() - i.promtail = nil - } - i.reg.UnregisterAll() -} diff --git a/internal/static/logs/logs_test.go b/internal/static/logs/logs_test.go deleted file mode 100644 index 255c99b55f..0000000000 --- a/internal/static/logs/logs_test.go +++ /dev/null @@ -1,206 +0,0 @@ -//go:build !race - -package logs - -import ( - "fmt" - "net" - "net/http" - "os" - "path/filepath" - "strings" - "testing" - "time" - - "github.com/grafana/loki/pkg/loghttp/push" - - "github.com/go-kit/log" - "github.com/grafana/agent/internal/util" - "github.com/grafana/loki/pkg/logproto" - "github.com/prometheus/client_golang/prometheus" - "github.com/stretchr/testify/require" - "gopkg.in/yaml.v2" -) - -func TestLogs_NilConfig(t *testing.T) { - l, err := New(prometheus.NewRegistry(), nil, util.TestLogger(t), false) - require.NoError(t, err) - require.NoError(t, l.ApplyConfig(nil, false)) - - defer l.Stop() -} - -func TestLogs(t *testing.T) { - // - // Create a temporary file to tail - // - positionsDir := t.TempDir() - - tmpFile, err := os.CreateTemp(os.TempDir(), "*.log") - require.NoError(t, err) - t.Cleanup(func() { - _ = os.RemoveAll(tmpFile.Name()) - }) - - // - // Listen for push requests and pass them through to a channel - // - pushes := make(chan *logproto.PushRequest) - - lis, err := net.Listen("tcp", "127.0.0.1:0") - require.NoError(t, err) - t.Cleanup(func() { - require.NoError(t, lis.Close()) - }) - go func() { - _ = http.Serve(lis, http.HandlerFunc(func(rw http.ResponseWriter, r *http.Request) { - req, err := push.ParseRequest(log.NewNopLogger(), "user_id", r, nil, nil, push.ParseLokiRequest) - require.NoError(t, err) - - pushes <- req - _, _ = rw.Write(nil) - })) - }() - - // - // Launch Loki so it starts tailing the file and writes to our server. - // - cfgText := util.Untab(fmt.Sprintf(` -positions_directory: %s -configs: -- name: default - clients: - - url: http://%s/loki/api/v1/push - batchwait: 50ms - batchsize: 1 - scrape_configs: - - job_name: system - static_configs: - - targets: [localhost] - labels: - job: test - __path__: %s - `, positionsDir, lis.Addr().String(), tmpFile.Name())) - - var cfg Config - dec := yaml.NewDecoder(strings.NewReader(cfgText)) - dec.SetStrict(true) - require.NoError(t, dec.Decode(&cfg)) - require.NoError(t, cfg.ApplyDefaults()) - logger := log.NewSyncLogger(log.NewNopLogger()) - l, err := New(prometheus.NewRegistry(), &cfg, logger, false) - require.NoError(t, err) - defer l.Stop() - - // - // Write a log line and wait for it to come through. - // - fmt.Fprintf(tmpFile, "Hello, world!\n") - select { - case <-time.After(time.Second * 30): - require.FailNow(t, "timed out waiting for data to be pushed") - case req := <-pushes: - require.Equal(t, "Hello, world!", req.Streams[0].Entries[0].Line) - } - - // - // Apply a new config and write a new line. 
- // - cfgText = util.Untab(fmt.Sprintf(` -positions_directory: %s -configs: -- name: default - clients: - - url: http://%s/loki/api/v1/push - batchwait: 50ms - batchsize: 5 - scrape_configs: - - job_name: system - static_configs: - - targets: [localhost] - labels: - job: test-2 - __path__: %s - `, positionsDir, lis.Addr().String(), tmpFile.Name())) - - var newCfg Config - dec = yaml.NewDecoder(strings.NewReader(cfgText)) - dec.SetStrict(true) - require.NoError(t, dec.Decode(&newCfg)) - require.NoError(t, newCfg.ApplyDefaults()) - require.NoError(t, l.ApplyConfig(&newCfg, false)) - - fmt.Fprintf(tmpFile, "Hello again!\n") - select { - case <-time.After(time.Second * 30): - require.FailNow(t, "timed out waiting for data to be pushed") - case req := <-pushes: - require.Equal(t, "Hello again!", req.Streams[0].Entries[0].Line) - } - - t.Run("update to nil", func(t *testing.T) { - // Applying a nil config should remove all instances. - err := l.ApplyConfig(nil, false) - require.NoError(t, err) - require.Len(t, l.instances, 0) - }) - - t.Run("re-apply previous config", func(t *testing.T) { - // Applying a nil config should remove all instances. - l.ApplyConfig(nil, false) - - // Re-Apply the previous config and write a new line. - var newCfg Config - dec = yaml.NewDecoder(strings.NewReader(cfgText)) - dec.SetStrict(true) - require.NoError(t, dec.Decode(&newCfg)) - require.NoError(t, newCfg.ApplyDefaults()) - require.NoError(t, l.ApplyConfig(&newCfg, false)) - - fmt.Fprintf(tmpFile, "Hello again!\n") - select { - case <-time.After(time.Second * 30): - require.FailNow(t, "timed out waiting for data to be pushed") - case req := <-pushes: - require.Equal(t, "Hello again!", req.Streams[0].Entries[0].Line) - } - }) -} - -func TestLogs_PositionsDirectory(t *testing.T) { - // - // Create a temporary file to tail - // - positionsDir := t.TempDir() - - // - // Launch Loki so it starts tailing the file and writes to our server. 
- // - cfgText := util.Untab(fmt.Sprintf(` -positions_directory: %[1]s/positions -configs: -- name: instance-a - clients: - - url: http://127.0.0.1:80/loki/api/v1/push -- name: instance-b - positions: - filename: %[1]s/other-positions/instance.yml - clients: - - url: http://127.0.0.1:80/loki/api/v1/push - `, positionsDir)) - - var cfg Config - dec := yaml.NewDecoder(strings.NewReader(cfgText)) - dec.SetStrict(true) - require.NoError(t, dec.Decode(&cfg)) - require.NoError(t, cfg.ApplyDefaults()) - logger := util.TestLogger(t) - l, err := New(prometheus.NewRegistry(), &cfg, logger, false) - require.NoError(t, err) - defer l.Stop() - - _, err = os.Stat(filepath.Join(positionsDir, "positions")) - require.NoError(t, err, "default shared positions directory did not get created") - _, err = os.Stat(filepath.Join(positionsDir, "other-positions")) - require.NoError(t, err, "instance-specific positions directory did not get created") -} diff --git a/internal/static/metrics/agent.go b/internal/static/metrics/agent.go index 0c2c745300..6ef123c1ec 100644 --- a/internal/static/metrics/agent.go +++ b/internal/static/metrics/agent.go @@ -7,29 +7,21 @@ import ( "errors" "flag" "fmt" - "sync" "time" - "github.com/go-kit/log" - "github.com/go-kit/log/level" - "github.com/prometheus/client_golang/prometheus" - "go.uber.org/atomic" - "google.golang.org/grpc" - "github.com/grafana/agent/internal/static/metrics/cluster" "github.com/grafana/agent/internal/static/metrics/cluster/client" "github.com/grafana/agent/internal/static/metrics/instance" "github.com/grafana/agent/internal/util" - "github.com/prometheus/prometheus/discovery" ) // DefaultConfig is the default settings for the Prometheus-lite client. var DefaultConfig = Config{ Global: instance.DefaultGlobalConfig, - InstanceRestartBackoff: instance.DefaultBasicManagerConfig.InstanceRestartBackoff, + InstanceRestartBackoff: 5 * time.Second, WALDir: "data-agent/", - WALCleanupAge: DefaultCleanupAge, - WALCleanupPeriod: DefaultCleanupPeriod, + WALCleanupAge: 12 * time.Hour, + WALCleanupPeriod: 30 * time.Minute, ServiceConfig: cluster.DefaultConfig, ServiceClientConfig: client.DefaultConfig, InstanceMode: instance.DefaultMode, @@ -123,257 +115,3 @@ func (c *Config) RegisterFlagsWithPrefix(prefix string, f *flag.FlagSet) { c.ServiceConfig.RegisterFlagsWithPrefix(prefix+"service.", f) c.ServiceClientConfig.RegisterFlagsWithPrefix(prefix, f) } - -// Agent is an agent for collecting Prometheus metrics. It acts as a -// Prometheus-lite; only running the service discovery, remote_write, and WAL -// components of Prometheus. It is broken down into a series of Instances, each -// of which perform metric collection. -type Agent struct { - mut sync.RWMutex - cfg Config - logger log.Logger - reg prometheus.Registerer - - // Store both the basic manager and the modal manager, so we can update their - // settings independently. Only the ModalManager should be used for mutating - // configs. - bm *instance.BasicManager - mm *instance.ModalManager - cleaner *WALCleaner - - instanceFactory instanceFactory - - cluster *cluster.Cluster - - stopped bool - stopOnce sync.Once - actor chan func() - - initialBootDone atomic.Bool -} - -// New creates and starts a new Agent. -func New(reg prometheus.Registerer, cfg Config, logger log.Logger) (*Agent, error) { - // This registers discovery metrics with the default registry which should be the reg specified above. 
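newInstance below relies on prometheus.WrapRegistererWith so that every metric an instance registers transparently carries an instance_name (or instance_group_name) label. A minimal sketch of that wrapping; the metric name is hypothetical:

package main

import (
	"github.com/prometheus/client_golang/prometheus"
	"github.com/prometheus/client_golang/prometheus/promauto"
)

func main() {
	// Everything registered through reg is exported with
	// instance_name="instance_a" attached.
	reg := prometheus.WrapRegistererWith(
		prometheus.Labels{"instance_name": "instance_a"},
		prometheus.NewRegistry(),
	)
	promauto.With(reg).NewCounter(prometheus.CounterOpts{
		Name: "example_operations_total",
		Help: "Hypothetical counter used only for this sketch.",
	}).Inc()
}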
- discovery.RegisterMetrics() - return newAgent(reg, cfg, logger, defaultInstanceFactory) -} - -func newAgent(reg prometheus.Registerer, cfg Config, logger log.Logger, fact instanceFactory) (*Agent, error) { - a := &Agent{ - logger: log.With(logger, "agent", "prometheus"), - instanceFactory: fact, - reg: reg, - actor: make(chan func(), 1), - } - - a.bm = instance.NewBasicManager(instance.BasicManagerConfig{ - InstanceRestartBackoff: cfg.InstanceRestartBackoff, - }, a.logger, a.newInstance) - - var err error - a.mm, err = instance.NewModalManager(a.reg, a.logger, a.bm, cfg.InstanceMode) - if err != nil { - return nil, fmt.Errorf("failed to create modal instance manager: %w", err) - } - - a.cluster, err = cluster.New(a.logger, reg, cfg.ServiceConfig, a.mm, a.Validate) - if err != nil { - return nil, err - } - - if err := a.ApplyConfig(cfg); err != nil { - return nil, err - } - go a.run() - return a, nil -} - -// newInstance creates a new Instance given a config. -func (a *Agent) newInstance(c instance.Config) (instance.ManagedInstance, error) { - a.mut.RLock() - defer a.mut.RUnlock() - - // Controls the label - instanceLabel := "instance_name" - if a.cfg.InstanceMode == instance.ModeShared { - instanceLabel = "instance_group_name" - } - - reg := prometheus.WrapRegistererWith(prometheus.Labels{ - instanceLabel: c.Name, - }, a.reg) - - return a.instanceFactory(reg, c, a.cfg.WALDir, a.logger) -} - -// Validate will validate the incoming Config and mutate it to apply defaults. -func (a *Agent) Validate(c *instance.Config) error { - a.mut.RLock() - defer a.mut.RUnlock() - - if a.cfg.WALDir == "" { - return fmt.Errorf("no wal_directory configured") - } - - if err := c.ApplyDefaults(a.cfg.Global); err != nil { - return fmt.Errorf("failed to apply defaults to %q: %w", c.Name, err) - } - return nil -} - -// ApplyConfig applies config changes to the Agent. -func (a *Agent) ApplyConfig(cfg Config) error { - a.mut.Lock() - defer a.mut.Unlock() - - if util.CompareYAML(a.cfg, cfg) { - return nil - } - - if a.stopped { - return fmt.Errorf("agent stopped") - } - - // The ordering here is done to minimze the number of instances that need to - // be restarted. We update components from lowest to highest level: - // - // 1. WAL Cleaner - // 2. Basic manager - // 3. Modal Manager - // 4. Cluster - // 5. Local configs - - if a.cleaner != nil { - a.cleaner.Stop() - a.cleaner = nil - } - if cfg.WALDir != "" { - a.cleaner = NewWALCleaner( - a.logger, - a.mm, - cfg.WALDir, - cfg.WALCleanupAge, - cfg.WALCleanupPeriod, - ) - } - - a.bm.UpdateManagerConfig(instance.BasicManagerConfig{ - InstanceRestartBackoff: cfg.InstanceRestartBackoff, - }) - - if err := a.mm.SetMode(cfg.InstanceMode); err != nil { - return err - } - - if err := a.cluster.ApplyConfig(cfg.ServiceConfig); err != nil { - return fmt.Errorf("failed to apply cluster config: %w", err) - } - - // Queue an actor in the background to sync the instances. This is required - // because creating both this function and newInstance grab the mutex. - oldConfig := a.cfg - - a.actor <- func() { - a.syncInstances(oldConfig, cfg) - a.initialBootDone.Store(true) - } - - a.cfg = cfg - return nil -} - -// syncInstances syncs the state of the instance manager to newConfig by -// applying all configs from newConfig and deleting any configs from oldConfig -// that are not in newConfig. 
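ApplyConfig above short-circuits when the incoming config is identical to the current one, using util.CompareYAML. A sketch of what such a comparison presumably does, marshal both values and compare bytes; this is an assumption about the helper's implementation, grounded only in how it is used here:

package main

import (
	"bytes"
	"fmt"

	yaml "gopkg.in/yaml.v2"
)

// compareYAML treats two values as equal if they marshal to identical YAML,
// which makes reapplying an unchanged config a no-op.
func compareYAML(a, b interface{}) bool {
	ab, aerr := yaml.Marshal(a)
	bb, berr := yaml.Marshal(b)
	if aerr != nil || berr != nil {
		return false
	}
	return bytes.Equal(ab, bb)
}

func main() {
	type cfg struct{ WALDir string }
	fmt.Println(compareYAML(cfg{"data-agent/"}, cfg{"data-agent/"})) // true
	fmt.Println(compareYAML(cfg{"data-agent/"}, cfg{"other/"}))      // false
}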
-func (a *Agent) syncInstances(oldConfig, newConfig Config) { - // Apply the new configs - for _, c := range newConfig.Configs { - if err := a.mm.ApplyConfig(c); err != nil { - level.Error(a.logger).Log("msg", "failed to apply config", "name", c.Name, "err", err) - } - } - - // Remove any configs from oldConfig that aren't in newConfig. - for _, oc := range oldConfig.Configs { - foundConfig := false - for _, nc := range newConfig.Configs { - if nc.Name == oc.Name { - foundConfig = true - break - } - } - if foundConfig { - continue - } - - if err := a.mm.DeleteConfig(oc.Name); err != nil { - level.Error(a.logger).Log("msg", "failed to delete old config", "name", oc.Name, "err", err) - } - } -} - -// run calls received actor functions in the background. -func (a *Agent) run() { - for f := range a.actor { - f() - } -} - -// Ready returns true if both the agent and all instances -// spawned by a Manager have completed startup. -func (a *Agent) Ready() bool { - // Wait for the initial load to complete so the instance manager has at least - // the base set of expected instances. - if !a.initialBootDone.Load() { - return false - } - - for _, inst := range a.mm.ListInstances() { - if !inst.Ready() { - return false - } - } - - return true -} - -// WireGRPC wires gRPC services into the provided server. -func (a *Agent) WireGRPC(s *grpc.Server) { - a.cluster.WireGRPC(s) -} - -// Config returns the configuration of this Agent. -func (a *Agent) Config() Config { return a.cfg } - -// InstanceManager returns the instance manager used by this Agent. -func (a *Agent) InstanceManager() instance.Manager { return a.mm } - -// Stop stops the agent and all its instances. -func (a *Agent) Stop() { - a.mut.Lock() - defer a.mut.Unlock() - - // Close the actor channel to stop run. - a.stopOnce.Do(func() { - close(a.actor) - }) - - a.cluster.Stop() - - if a.cleaner != nil { - a.cleaner.Stop() - } - - // Only need to stop the ModalManager, which will pass through everything to the - // BasicManager. 
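The Agent serializes config syncs through its actor channel: work is queued as functions, the run goroutine above drains them one at a time, and Stop closes the channel exactly once. A reduction of that pattern:

package main

import (
	"fmt"
	"sync"
)

type actor struct {
	ch   chan func()
	once sync.Once
}

func newActor() *actor {
	a := &actor{ch: make(chan func(), 1)}
	go func() {
		// Mirrors Agent.run: execute queued functions until the
		// channel is closed.
		for f := range a.ch {
			f()
		}
	}()
	return a
}

func (a *actor) do(f func()) { a.ch <- f }
func (a *actor) stop()       { a.once.Do(func() { close(a.ch) }) }

func main() {
	done := make(chan struct{})
	a := newActor()
	a.do(func() { fmt.Println("synced instances"); close(done) })
	<-done
	a.stop()
}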
- a.mm.Stop() - - a.stopped = true -} - -type instanceFactory = func(reg prometheus.Registerer, cfg instance.Config, walDir string, logger log.Logger) (instance.ManagedInstance, error) - -func defaultInstanceFactory(reg prometheus.Registerer, cfg instance.Config, walDir string, logger log.Logger) (instance.ManagedInstance, error) { - return instance.New(reg, cfg, walDir, logger) -} diff --git a/internal/static/metrics/agent_test.go b/internal/static/metrics/agent_test.go index bd311a07cd..2d1d063b20 100644 --- a/internal/static/metrics/agent_test.go +++ b/internal/static/metrics/agent_test.go @@ -1,22 +1,11 @@ package metrics import ( - "context" "errors" - "fmt" - "net/http" - "sync" "testing" - "time" - "github.com/go-kit/log" "github.com/grafana/agent/internal/static/metrics/instance" - "github.com/grafana/agent/internal/util" - "github.com/prometheus/client_golang/prometheus" - "github.com/prometheus/prometheus/scrape" - "github.com/prometheus/prometheus/storage" "github.com/stretchr/testify/require" - "go.uber.org/atomic" "gopkg.in/yaml.v2" ) @@ -113,221 +102,6 @@ configs: require.Greater(t, int64(scrapeConfig.ScrapeInterval), int64(0)) } -func TestAgent(t *testing.T) { - // Launch two instances - cfg := Config{ - WALDir: "/tmp/wal", - Configs: []instance.Config{ - makeInstanceConfig("instance_a"), - makeInstanceConfig("instance_b"), - }, - InstanceRestartBackoff: time.Duration(0), - InstanceMode: instance.ModeDistinct, - } - - fact := newFakeInstanceFactory() - - a, err := newAgent(prometheus.NewRegistry(), cfg, log.NewNopLogger(), fact.factory) - require.NoError(t, err) - - util.Eventually(t, func(t require.TestingT) { - require.NotNil(t, fact.created) - require.Equal(t, 2, int(fact.created.Load())) - require.Equal(t, 2, len(a.mm.ListInstances())) - }) - - t.Run("instances should be running", func(t *testing.T) { - for _, mi := range fact.Mocks() { - // Each instance should have wait called on it - util.Eventually(t, func(t require.TestingT) { - require.True(t, mi.running.Load()) - }) - } - }) - - t.Run("instances should be restarted when stopped", func(t *testing.T) { - for _, mi := range fact.Mocks() { - util.Eventually(t, func(t require.TestingT) { - require.Equal(t, 1, int(mi.startedCount.Load())) - }) - } - - for _, mi := range fact.Mocks() { - mi.err <- fmt.Errorf("really bad error") - } - - for _, mi := range fact.Mocks() { - util.Eventually(t, func(t require.TestingT) { - require.Equal(t, 2, int(mi.startedCount.Load())) - }) - } - }) -} - -func TestAgent_NormalInstanceExits(t *testing.T) { - tt := []struct { - name string - simulateError error - }{ - {"no error", nil}, - {"context cancelled", context.Canceled}, - } - - cfg := Config{ - WALDir: "/tmp/wal", - Configs: []instance.Config{ - makeInstanceConfig("instance_a"), - makeInstanceConfig("instance_b"), - }, - InstanceRestartBackoff: time.Duration(0), - InstanceMode: instance.ModeDistinct, - } - - for _, tc := range tt { - t.Run(tc.name, func(t *testing.T) { - fact := newFakeInstanceFactory() - - a, err := newAgent(prometheus.NewRegistry(), cfg, log.NewNopLogger(), fact.factory) - require.NoError(t, err) - - util.Eventually(t, func(t require.TestingT) { - require.NotNil(t, fact.created) - require.Equal(t, 2, int(fact.created.Load())) - require.Equal(t, 2, len(a.mm.ListInstances())) - }) - for _, mi := range fact.Mocks() { - mi.err <- tc.simulateError - } - - time.Sleep(time.Millisecond * 100) - - // Get the new total amount of instances starts; value should - // be unchanged. 
- var startedCount int64 - for _, i := range fact.Mocks() { - startedCount += i.startedCount.Load() - } - - // There should only be two instances that started. If there's more, something - // restarted despite our error. - require.Equal(t, int64(2), startedCount, "instances should not have restarted") - }) - } -} - -func TestAgent_Stop(t *testing.T) { - // Launch two instances - cfg := Config{ - WALDir: "/tmp/wal", - Configs: []instance.Config{ - makeInstanceConfig("instance_a"), - makeInstanceConfig("instance_b"), - }, - InstanceRestartBackoff: time.Duration(0), - InstanceMode: instance.ModeDistinct, - } - - fact := newFakeInstanceFactory() - - a, err := newAgent(prometheus.NewRegistry(), cfg, log.NewNopLogger(), fact.factory) - require.NoError(t, err) - - util.Eventually(t, func(t require.TestingT) { - require.NotNil(t, fact.created) - require.Equal(t, 2, int(fact.created.Load())) - require.Equal(t, 2, len(a.mm.ListInstances())) - }) - - a.Stop() - - time.Sleep(time.Millisecond * 100) - - for _, mi := range fact.Mocks() { - require.False(t, mi.running.Load(), "instance should not have been restarted") - } -} - -type fakeInstance struct { - cfg instance.Config - - err chan error - startedCount *atomic.Int64 - running *atomic.Bool -} - -func (i *fakeInstance) Run(ctx context.Context) error { - i.startedCount.Inc() - i.running.Store(true) - defer i.running.Store(false) - - select { - case <-ctx.Done(): - return ctx.Err() - case err := <-i.err: - return err - } -} - -func (i *fakeInstance) Ready() bool { - return true -} - -func (i *fakeInstance) Update(_ instance.Config) error { - return instance.ErrInvalidUpdate{ - Inner: fmt.Errorf("can't dynamically update fakeInstance"), - } -} - -func (i *fakeInstance) TargetsActive() map[string][]*scrape.Target { - return nil -} - -func (i *fakeInstance) StorageDirectory() string { - return "" -} - -func (i *fakeInstance) WriteHandler() http.Handler { - return nil -} - -func (i *fakeInstance) Appender(ctx context.Context) storage.Appender { - return nil -} - -type fakeInstanceFactory struct { - mut sync.Mutex - mocks []*fakeInstance - - created *atomic.Int64 -} - -func newFakeInstanceFactory() *fakeInstanceFactory { - return &fakeInstanceFactory{created: atomic.NewInt64(0)} -} - -func (f *fakeInstanceFactory) Mocks() []*fakeInstance { - f.mut.Lock() - defer f.mut.Unlock() - return f.mocks -} - -func (f *fakeInstanceFactory) factory(_ prometheus.Registerer, cfg instance.Config, _ string, _ log.Logger) (instance.ManagedInstance, error) { - f.created.Add(1) - - f.mut.Lock() - defer f.mut.Unlock() - - inst := &fakeInstance{ - cfg: cfg, - running: atomic.NewBool(false), - startedCount: atomic.NewInt64(0), - err: make(chan error), - } - - f.mocks = append(f.mocks, inst) - return inst, nil -} - func makeInstanceConfig(name string) instance.Config { cfg := instance.DefaultConfig cfg.Name = name diff --git a/internal/static/metrics/cleaner.go b/internal/static/metrics/cleaner.go deleted file mode 100644 index 0bf577a5b6..0000000000 --- a/internal/static/metrics/cleaner.go +++ /dev/null @@ -1,271 +0,0 @@ -package metrics - -import ( - "fmt" - "os" - "path/filepath" - "time" - - "github.com/go-kit/log" - "github.com/go-kit/log/level" - "github.com/grafana/agent/internal/static/metrics/instance" - "github.com/grafana/agent/internal/static/metrics/wal" - "github.com/prometheus/client_golang/prometheus" - "github.com/prometheus/client_golang/prometheus/promauto" - promwal "github.com/prometheus/prometheus/tsdb/wlog" -) - -// Default settings for the WAL cleaner. 
-const ( - DefaultCleanupAge = 12 * time.Hour - DefaultCleanupPeriod = 30 * time.Minute -) - -var ( - discoveryError = promauto.NewCounterVec( - prometheus.CounterOpts{ - Name: "agent_metrics_cleaner_storage_error_total", - Help: "Errors encountered discovering local storage paths", - }, - []string{"storage"}, - ) - - segmentError = promauto.NewCounterVec( - prometheus.CounterOpts{ - Name: "agent_metrics_cleaner_segment_error_total", - Help: "Errors encountered finding most recent WAL segments", - }, - []string{"storage"}, - ) - - managedStorage = promauto.NewGauge( - prometheus.GaugeOpts{ - Name: "agent_metrics_cleaner_managed_storage", - Help: "Number of storage directories associated with managed instances", - }, - ) - - abandonedStorage = promauto.NewGauge( - prometheus.GaugeOpts{ - Name: "agent_metrics_cleaner_abandoned_storage", - Help: "Number of storage directories not associated with any managed instance", - }, - ) - - cleanupRunsSuccess = promauto.NewCounter( - prometheus.CounterOpts{ - Name: "agent_metrics_cleaner_success_total", - Help: "Number of successfully removed abandoned WALs", - }, - ) - - cleanupRunsErrors = promauto.NewCounter( - prometheus.CounterOpts{ - Name: "agent_metrics_cleaner_errors_total", - Help: "Number of errors removing abandoned WALs", - }, - ) - - cleanupTimes = promauto.NewHistogram( - prometheus.HistogramOpts{ - Name: "agent_metrics_cleaner_cleanup_seconds", - Help: "Time spent performing each periodic WAL cleanup", - }, - ) -) - -// lastModifiedFunc gets the last modified time of the most recent segment of a WAL -type lastModifiedFunc func(path string) (time.Time, error) - -func lastModified(path string) (time.Time, error) { - existing, err := promwal.Open(nil, path) - if err != nil { - return time.Time{}, err - } - - // We don't care if there are errors closing the abandoned WAL - defer func() { _ = existing.Close() }() - - _, last, err := promwal.Segments(existing.Dir()) - if err != nil { - return time.Time{}, fmt.Errorf("unable to open WAL: %w", err) - } - - if last == -1 { - return time.Time{}, fmt.Errorf("unable to determine most recent segment for %s", path) - } - - // full path to the most recent segment in this WAL - lastSegment := promwal.SegmentName(path, last) - segmentFile, err := os.Stat(lastSegment) - if err != nil { - return time.Time{}, fmt.Errorf("unable to determine mtime for %s segment: %w", lastSegment, err) - } - - return segmentFile.ModTime(), nil -} - -// WALCleaner periodically checks for Write Ahead Logs (WALs) that are not associated -// with any active instance.ManagedInstance and have not been written to in some configured -// amount of time and deletes them. -type WALCleaner struct { - logger log.Logger - instanceManager instance.Manager - walDirectory string - walLastModified lastModifiedFunc - minAge time.Duration - period time.Duration - done chan bool -} - -// NewWALCleaner creates a new cleaner that looks for abandoned WALs in the given -// directory and removes them if they haven't been modified in over minAge. 
Starts -// a goroutine to periodically run the cleanup method in a loop -func NewWALCleaner(logger log.Logger, manager instance.Manager, walDirectory string, minAge time.Duration, period time.Duration) *WALCleaner { - c := &WALCleaner{ - logger: log.With(logger, "component", "cleaner"), - instanceManager: manager, - walDirectory: filepath.Clean(walDirectory), - walLastModified: lastModified, - minAge: DefaultCleanupAge, - period: DefaultCleanupPeriod, - done: make(chan bool), - } - - if minAge > 0 { - c.minAge = minAge - } - - // We allow a period of 0 here because '0' means "don't run the task". This - // is handled by not running a ticker at all in the run method. - if period >= 0 { - c.period = period - } - - go c.run() - return c -} - -// getManagedStorage gets storage directories used for each ManagedInstance -func (c *WALCleaner) getManagedStorage(instances map[string]instance.ManagedInstance) map[string]bool { - out := make(map[string]bool) - - for _, inst := range instances { - out[inst.StorageDirectory()] = true - } - - return out -} - -// getAllStorage gets all storage directories under walDirectory -func (c *WALCleaner) getAllStorage() []string { - var out []string - - _ = filepath.Walk(c.walDirectory, func(p string, info os.FileInfo, err error) error { - if os.IsNotExist(err) { - // The root WAL directory doesn't exist. Maybe this Agent isn't responsible for any - // instances yet. Log at debug since this isn't a big deal. We'll just try to crawl - // the direction again on the next periodic run. - level.Debug(c.logger).Log("msg", "WAL storage path does not exist", "path", p, "err", err) - } else if err != nil { - // Just log any errors traversing the WAL directory. This will potentially result - // in a WAL (that has incorrect permissions or some similar problem) not being cleaned - // up. This is better than preventing *all* other WALs from being cleaned up. - discoveryError.WithLabelValues(p).Inc() - level.Warn(c.logger).Log("msg", "unable to traverse WAL storage path", "path", p, "err", err) - } else if info.IsDir() && filepath.Dir(p) == c.walDirectory { - // Single level below the root are instance storage directories (including WALs) - out = append(out, p) - } - - return nil - }) - - return out -} - -// getAbandonedStorage gets the full path of storage directories that aren't associated with -// an active instance and haven't been written to within a configured duration (usually several -// hours or more). -func (c *WALCleaner) getAbandonedStorage(all []string, managed map[string]bool, now time.Time) []string { - var out []string - - for _, dir := range all { - if managed[dir] { - level.Debug(c.logger).Log("msg", "active WAL", "name", dir) - continue - } - - walDir := wal.SubDirectory(dir) - mtime, err := c.walLastModified(walDir) - if err != nil { - segmentError.WithLabelValues(dir).Inc() - level.Warn(c.logger).Log("msg", "unable to find segment mtime of WAL", "name", dir, "err", err) - continue - } - - diff := now.Sub(mtime) - if diff > c.minAge { - // The last segment for this WAL was modified more than $minAge (positive number of hours) - // in the past. This makes it a candidate for deletion since it's also not associated with - // any Instances this agent knows about. 
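The decision being made at this point reduces to two predicates: the directory is not owned by any managed instance, and its newest segment is older than the configured minimum age. Stated as a stand-alone function:

package main

import (
	"fmt"
	"time"
)

// abandoned reports whether a WAL directory is a deletion candidate,
// matching the check in getAbandonedStorage.
func abandoned(managed map[string]bool, dir string, mtime, now time.Time, minAge time.Duration) bool {
	return !managed[dir] && now.Sub(mtime) > minAge
}

func main() {
	now := time.Now()
	managed := map[string]bool{"/wal/instance-1": true}
	old := now.Add(-13 * time.Hour)
	fmt.Println(abandoned(managed, "/wal/instance-1", old, now, 12*time.Hour))   // false: still managed
	fmt.Println(abandoned(managed, "/wal/old-instance", old, now, 12*time.Hour)) // true: unmanaged and stale
}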
- out = append(out, dir) - } - - level.Debug(c.logger).Log("msg", "abandoned WAL", "name", dir, "mtime", mtime, "diff", diff) - } - - return out -} - -// run cleans up abandoned WALs (if period != 0) in a loop periodically until stopped -func (c *WALCleaner) run() { - // A period of 0 means don't run a cleanup task - if c.period == 0 { - return - } - - ticker := time.NewTicker(c.period) - defer ticker.Stop() - - for { - select { - case <-c.done: - level.Debug(c.logger).Log("msg", "stopping cleaner...") - return - case <-ticker.C: - c.cleanup() - } - } -} - -// cleanup removes any abandoned and unused WAL directories. Note that it shouldn't be -// necessary to call this method explicitly in most cases since it will be run periodically -// in a goroutine (started when WALCleaner is created). -func (c *WALCleaner) cleanup() { - start := time.Now() - all := c.getAllStorage() - managed := c.getManagedStorage(c.instanceManager.ListInstances()) - abandoned := c.getAbandonedStorage(all, managed, time.Now()) - - managedStorage.Set(float64(len(managed))) - abandonedStorage.Set(float64(len(abandoned))) - - for _, a := range abandoned { - level.Info(c.logger).Log("msg", "deleting abandoned WAL", "name", a) - err := os.RemoveAll(a) - if err != nil { - level.Error(c.logger).Log("msg", "failed to delete abandoned WAL", "name", a, "err", err) - cleanupRunsErrors.Inc() - } else { - cleanupRunsSuccess.Inc() - } - } - - cleanupTimes.Observe(time.Since(start).Seconds()) -} - -// Stop the cleaner and any background tasks running -func (c *WALCleaner) Stop() { - close(c.done) -} diff --git a/internal/static/metrics/cleaner_test.go b/internal/static/metrics/cleaner_test.go deleted file mode 100644 index f8aeac7fa7..0000000000 --- a/internal/static/metrics/cleaner_test.go +++ /dev/null @@ -1,146 +0,0 @@ -package metrics - -import ( - "os" - "path/filepath" - "testing" - "time" - - "github.com/go-kit/log" - "github.com/grafana/agent/internal/static/metrics/instance" - "github.com/stretchr/testify/require" -) - -func TestWALCleaner_getAllStorageNoRoot(t *testing.T) { - walRoot := filepath.Join(os.TempDir(), "getAllStorageNoRoot") - logger := log.NewLogfmtLogger(os.Stderr) - cleaner := NewWALCleaner( - logger, - &instance.MockManager{}, - walRoot, - DefaultCleanupAge, - DefaultCleanupPeriod, - ) - - // Bogus WAL root that doesn't exist. 
Method should return no results - wals := cleaner.getAllStorage() - - require.Empty(t, wals) -} - -func TestWALCleaner_getAllStorageSuccess(t *testing.T) { - walRoot := t.TempDir() - - walDir := filepath.Join(walRoot, "instance-1") - err := os.MkdirAll(walDir, 0755) - require.NoError(t, err) - - logger := log.NewLogfmtLogger(os.Stderr) - cleaner := NewWALCleaner( - logger, - &instance.MockManager{}, - walRoot, - DefaultCleanupAge, - DefaultCleanupPeriod, - ) - wals := cleaner.getAllStorage() - - require.Equal(t, []string{walDir}, wals) -} - -func TestWALCleaner_getAbandonedStorageBeforeCutoff(t *testing.T) { - walRoot := t.TempDir() - - walDir := filepath.Join(walRoot, "instance-1") - err := os.MkdirAll(walDir, 0755) - require.NoError(t, err) - - all := []string{walDir} - managed := make(map[string]bool) - now := time.Now() - - logger := log.NewLogfmtLogger(os.Stderr) - cleaner := NewWALCleaner( - logger, - &instance.MockManager{}, - walRoot, - 5*time.Minute, - DefaultCleanupPeriod, - ) - - cleaner.walLastModified = func(path string) (time.Time, error) { - return now, nil - } - - // Last modification time on our WAL directory is the same as "now" - // so there shouldn't be any results even though it's not part of the - // set of "managed" directories. - abandoned := cleaner.getAbandonedStorage(all, managed, now) - require.Empty(t, abandoned) -} - -func TestWALCleaner_getAbandonedStorageAfterCutoff(t *testing.T) { - walRoot := t.TempDir() - - walDir := filepath.Join(walRoot, "instance-1") - err := os.MkdirAll(walDir, 0755) - require.NoError(t, err) - - all := []string{walDir} - managed := make(map[string]bool) - now := time.Now() - - logger := log.NewLogfmtLogger(os.Stderr) - cleaner := NewWALCleaner( - logger, - &instance.MockManager{}, - walRoot, - 5*time.Minute, - DefaultCleanupPeriod, - ) - - cleaner.walLastModified = func(path string) (time.Time, error) { - return now.Add(-30 * time.Minute), nil - } - - // Last modification time on our WAL directory is 30 minutes in the past - // compared to "now" and we've set the cutoff for our cleaner to be 5 - // minutes: our WAL directory should show up as abandoned - abandoned := cleaner.getAbandonedStorage(all, managed, now) - require.Equal(t, []string{walDir}, abandoned) -} - -func TestWALCleaner_cleanup(t *testing.T) { - walRoot := t.TempDir() - - walDir := filepath.Join(walRoot, "instance-1") - err := os.MkdirAll(walDir, 0755) - require.NoError(t, err) - - now := time.Now() - logger := log.NewLogfmtLogger(os.Stderr) - manager := &instance.MockManager{} - manager.ListInstancesFunc = func() map[string]instance.ManagedInstance { - return make(map[string]instance.ManagedInstance) - } - - cleaner := NewWALCleaner( - logger, - manager, - walRoot, - 5*time.Minute, - DefaultCleanupPeriod, - ) - - cleaner.walLastModified = func(path string) (time.Time, error) { - return now.Add(-30 * time.Minute), nil - } - - // Last modification time on our WAL directory is 30 minutes in the past - // compared to "now" and we've set the cutoff for our cleaner to be 5 - // minutes: our WAL directory should be removed since it's abandoned - cleaner.cleanup() - _, err = os.Stat(walDir) - require.Error(t, err) - require.True(t, os.IsNotExist(err)) -} diff --git a/internal/static/metrics/cluster/client/client.go b/internal/static/metrics/cluster/client/client.go index 1b90feb99f..b4180ab3b0 100644 --- a/internal/static/metrics/cluster/client/client.go +++ b/internal/static/metrics/cluster/client/client.go @@ -2,25 +2,12 @@ package client import ( "flag" - "io" "reflect" - 
"github.com/grafana/agent/internal/static/agentproto" "github.com/grafana/agent/internal/util" "github.com/grafana/dskit/grpcclient" - "github.com/grafana/dskit/middleware" - otgrpc "github.com/opentracing-contrib/go-grpc" - "github.com/opentracing/opentracing-go" - "google.golang.org/grpc" - "google.golang.org/grpc/credentials/insecure" ) -// ScrapingServiceClient wraps agentproto.ScrapingServiceClient with a Close method. -type ScrapingServiceClient interface { - agentproto.ScrapingServiceClient - io.Closer -} - var ( // DefaultConfig provides default Config values. DefaultConfig = *util.DefaultConfigFromFlags(&Config{}).(*Config) @@ -54,40 +41,3 @@ func (c *Config) RegisterFlags(f *flag.FlagSet) { func (c *Config) RegisterFlagsWithPrefix(prefix string, f *flag.FlagSet) { c.GRPCClientConfig.RegisterFlagsWithPrefix(prefix+"service-client", f) } - -// New returns a new scraping service client. -func New(cfg Config, addr string) (ScrapingServiceClient, error) { - opts := []grpc.DialOption{ - grpc.WithTransportCredentials(insecure.NewCredentials()), - grpc.WithDefaultCallOptions(cfg.GRPCClientConfig.CallOptions()...), - } - grpcDialOpts, err := cfg.GRPCClientConfig.DialOption(instrumentation()) - if err != nil { - return nil, err - } - opts = append(opts, grpcDialOpts...) - conn, err := grpc.Dial(addr, opts...) - if err != nil { - return nil, err - } - - return struct { - agentproto.ScrapingServiceClient - io.Closer - }{ - ScrapingServiceClient: agentproto.NewScrapingServiceClient(conn), - Closer: conn, - }, nil -} - -func instrumentation() ([]grpc.UnaryClientInterceptor, []grpc.StreamClientInterceptor) { - unary := []grpc.UnaryClientInterceptor{ - otgrpc.OpenTracingClientInterceptor(opentracing.GlobalTracer()), - middleware.ClientUserHeaderInterceptor, - } - stream := []grpc.StreamClientInterceptor{ - otgrpc.OpenTracingStreamClientInterceptor(opentracing.GlobalTracer()), - middleware.StreamClientUserHeaderInterceptor, - } - return unary, stream -} diff --git a/internal/static/metrics/cluster/cluster.go b/internal/static/metrics/cluster/cluster.go deleted file mode 100644 index 9ab498f7e4..0000000000 --- a/internal/static/metrics/cluster/cluster.go +++ /dev/null @@ -1,179 +0,0 @@ -package cluster - -import ( - "context" - "fmt" - "sync" - - "github.com/go-kit/log" - "github.com/go-kit/log/level" - "github.com/golang/protobuf/ptypes/empty" - "github.com/gorilla/mux" - "github.com/grafana/agent/internal/static/agentproto" - "github.com/grafana/agent/internal/static/metrics/instance" - "github.com/grafana/agent/internal/static/metrics/instance/configstore" - "github.com/grafana/agent/internal/util" - "github.com/prometheus/client_golang/prometheus" - "google.golang.org/grpc" -) - -// Cluster connects an Agent to other Agents and allows them to distribute -// workload. -type Cluster struct { - mut sync.RWMutex - - log log.Logger - cfg Config - baseValidation ValidationFunc - - // - // Internally, Cluster glues together four separate pieces of logic. - // See comments below to get an understanding of what is going on. - // - - // node manages membership in the cluster and performs cluster-wide reshards. - node *node - - // store connects to a configstore for changes. storeAPI is an HTTP API for it. - store *configstore.Remote - storeAPI *configstore.API - - // watcher watches the store and applies changes to an instance.Manager, - // triggering metrics to be collected and sent. configWatcher also does a - // complete refresh of its state on an interval. 
- watcher *configWatcher -} - -// New creates a new Cluster. -func New( - l log.Logger, - reg prometheus.Registerer, - cfg Config, - im instance.Manager, - validate ValidationFunc, -) (*Cluster, error) { - - l = log.With(l, "component", "cluster") - - var ( - c = &Cluster{log: l, cfg: cfg, baseValidation: validate} - err error - ) - - // Hold the lock for the initialization. This is necessary since newNode will - // eventually call Reshard, and we want c.watcher to be initialized when that - // happens. - c.mut.Lock() - defer c.mut.Unlock() - - c.node, err = newNode(reg, l, cfg, c) - if err != nil { - return nil, fmt.Errorf("failed to initialize node membership: %w", err) - } - - c.store, err = configstore.NewRemote(l, reg, cfg.KVStore.Config, cfg.Enabled) - if err != nil { - return nil, fmt.Errorf("failed to initialize configstore: %w", err) - } - c.storeAPI = configstore.NewAPI(l, c.store, c.storeValidate, cfg.APIEnableGetConfiguration) - reg.MustRegister(c.storeAPI) - - c.watcher, err = newConfigWatcher(l, cfg, c.store, im, c.node.Owns, validate) - if err != nil { - return nil, fmt.Errorf("failed to initialize configwatcher: %w", err) - } - - // NOTE(rfratto): ApplyConfig isn't necessary for the initialization but must - // be called for any changes to the configuration. - return c, nil -} - -func (c *Cluster) storeValidate(cfg *instance.Config) error { - c.mut.RLock() - defer c.mut.RUnlock() - - if err := c.baseValidation(cfg); err != nil { - return err - } - - if c.cfg.DangerousAllowReadingFiles { - return nil - } - - // If configs aren't allowed to read from the store, we need to make sure no - // configs coming in from the API set files for passwords. - return validateNofiles(cfg) -} - -// Reshard implements agentproto.ScrapingServiceServer, and syncs the state of -// configs with the configstore. -func (c *Cluster) Reshard(ctx context.Context, _ *agentproto.ReshardRequest) (*empty.Empty, error) { - c.mut.RLock() - defer c.mut.RUnlock() - - level.Info(c.log).Log("msg", "received reshard notification, requesting refresh") - c.watcher.RequestRefresh() - return &empty.Empty{}, nil -} - -// ApplyConfig applies configuration changes to Cluster. -func (c *Cluster) ApplyConfig(cfg Config) error { - c.mut.Lock() - defer c.mut.Unlock() - - if util.CompareYAML(c.cfg, cfg) { - return nil - } - - if err := c.node.ApplyConfig(cfg); err != nil { - return fmt.Errorf("failed to apply config to node membership: %w", err) - } - - if err := c.store.ApplyConfig(cfg.Lifecycler.RingConfig.KVStore, cfg.Enabled); err != nil { - return fmt.Errorf("failed to apply config to config store: %w", err) - } - - if err := c.watcher.ApplyConfig(cfg); err != nil { - return fmt.Errorf("failed to apply config to watcher: %w", err) - } - - c.cfg = cfg - - // Force a refresh so all the configs get updated with new defaults. - level.Info(c.log).Log("msg", "cluster config changed, queueing refresh") - c.watcher.RequestRefresh() - return nil -} - -// WireAPI injects routes into the provided mux router for the config -// management API. -func (c *Cluster) WireAPI(r *mux.Router) { - c.storeAPI.WireAPI(r) - c.node.WireAPI(r) -} - -// WireGRPC injects gRPC server handlers into the provided gRPC server. -func (c *Cluster) WireGRPC(srv *grpc.Server) { - agentproto.RegisterScrapingServiceServer(srv, c) -} - -// Stop stops the cluster and all of its dependencies. 
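
Cluster.ApplyConfig above (and node.ApplyConfig later in this patch) guards reloads by comparing the incoming config with the running one and returning early when nothing changed. A sketch of that guard, assuming reflect.DeepEqual as a stand-in for util.CompareYAML and a placeholder Settings type.

package main

import (
	"fmt"
	"reflect"
	"sync"
)

// Settings is a placeholder config type for this sketch.
type Settings struct{ Enabled bool }

type component struct {
	mut sync.Mutex
	cfg Settings
}

// ApplyConfig shows the guard used by Cluster.ApplyConfig above: compare the
// incoming config with the running one and return early on no change, so
// repeated reloads are idempotent. The real code compares marshaled YAML via
// util.CompareYAML; reflect.DeepEqual stands in here.
func (c *component) ApplyConfig(cfg Settings) error {
	c.mut.Lock()
	defer c.mut.Unlock()
	if reflect.DeepEqual(c.cfg, cfg) {
		return nil // nothing changed; skip the expensive restart path
	}
	// ... restart subcomponents with the new config ...
	c.cfg = cfg
	return nil
}

func main() {
	var c component
	fmt.Println(c.ApplyConfig(Settings{Enabled: true}))
}
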
-func (c *Cluster) Stop() {
- c.mut.Lock()
- defer c.mut.Unlock()
-
- deps := []struct {
- name string
- closer func() error
- }{
- {"node", c.node.Stop},
- {"config store", c.store.Close},
- {"config watcher", c.watcher.Stop},
- }
- for _, dep := range deps {
- err := dep.closer()
- if err != nil {
- level.Error(c.log).Log("msg", "failed to stop dependency", "dependency", dep.name, "err", err)
- }
- }
-}
diff --git a/internal/static/metrics/cluster/config_watcher.go b/internal/static/metrics/cluster/config_watcher.go
deleted file mode 100644
index 2544975c8d..0000000000
--- a/internal/static/metrics/cluster/config_watcher.go
+++ /dev/null
@@ -1,340 +0,0 @@
-package cluster
-
-import (
- "context"
- "fmt"
- "sync"
- "time"
-
- "github.com/go-kit/log"
- "github.com/go-kit/log/level"
- "github.com/grafana/agent/internal/static/metrics/instance"
- "github.com/grafana/agent/internal/static/metrics/instance/configstore"
- "github.com/grafana/agent/internal/util"
- "github.com/prometheus/client_golang/prometheus"
- "github.com/prometheus/client_golang/prometheus/promauto"
-)
-
-var (
- reshardDuration = promauto.NewHistogramVec(prometheus.HistogramOpts{
- Name: "agent_metrics_scraping_service_reshard_duration",
- Help: "How long it took for resharding to run.",
- }, []string{"success"})
-)
-
-// configWatcher connects to a configstore and will apply configs to an
-// instance.Manager.
-type configWatcher struct {
- log log.Logger
-
- mut sync.Mutex
- cfg Config
- stopped bool
- stop context.CancelFunc
-
- store configstore.Store
- im instance.Manager
- owns OwnershipFunc
- validate ValidationFunc
-
- refreshCh chan struct{}
- instanceMut sync.Mutex
- instances map[string]struct{}
-}
-
-// OwnershipFunc should determine if a given key is owned by the caller.
-type OwnershipFunc = func(key string) (bool, error)
-
-// ValidationFunc should validate a config.
-type ValidationFunc = func(*instance.Config) error
-
-// newConfigWatcher watches the store for changes and checks each config against
-// owns. It will also poll the configstore at a configurable interval.
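
Cluster.Stop above shuts its dependencies down in a fixed order and logs, rather than aborts on, individual failures, so one broken dependency cannot prevent the rest from closing. The same loop isolated into a runnable sketch; stopAll and the sample dependency names are illustrative.

package main

import (
	"fmt"
	"log"
)

// stopAll mirrors the shutdown loop in Cluster.Stop above: dependencies are
// stopped in a fixed order, and a failure in one is logged but does not stop
// the remaining closers from running.
func stopAll(deps []struct {
	name   string
	closer func() error
}) {
	for _, dep := range deps {
		if err := dep.closer(); err != nil {
			log.Printf("failed to stop dependency %q: %v", dep.name, err)
		}
	}
}

func main() {
	stopAll([]struct {
		name   string
		closer func() error
	}{
		{"node", func() error { return nil }},
		{"config store", func() error { return fmt.Errorf("kv unreachable") }},
		{"config watcher", func() error { return nil }}, // still runs despite the error above
	})
}
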
-func newConfigWatcher(log log.Logger, cfg Config, store configstore.Store, im instance.Manager, owns OwnershipFunc, validate ValidationFunc) (*configWatcher, error) {
- ctx, cancel := context.WithCancel(context.Background())
-
- w := &configWatcher{
- log: log,
-
- stop: cancel,
-
- store: store,
- im: im,
- owns: owns,
- validate: validate,
-
- refreshCh: make(chan struct{}, 1),
- instances: make(map[string]struct{}),
- }
- if err := w.ApplyConfig(cfg); err != nil {
- return nil, err
- }
- // Delay duration: this prevents a race condition; see the run method for details.
- delay := cfg.Lifecycler.HeartbeatPeriod * 3
- go w.run(ctx, delay)
- return w, nil
-}
-
-func (w *configWatcher) ApplyConfig(cfg Config) error {
- w.mut.Lock()
- defer w.mut.Unlock()
-
- if util.CompareYAML(w.cfg, cfg) {
- return nil
- }
-
- if w.stopped {
- return fmt.Errorf("configWatcher already stopped")
- }
-
- w.cfg = cfg
- return nil
-}
-
-func (w *configWatcher) run(ctx context.Context, delay time.Duration) {
- defer level.Info(w.log).Log("msg", "config watcher run loop exiting")
- // This is due to a race condition between the heartbeat and config ring in a very narrow set of circumstances
- // https://gist.github.com/mattdurham/c15f27de17a6da97bf2e6a870991c7f2
- time.Sleep(delay)
- lastReshard := time.Now()
-
- for {
- select {
- case <-ctx.Done():
- return
- case <-w.nextReshard(lastReshard):
- level.Debug(w.log).Log("msg", "reshard timer ticked, scheduling refresh")
- w.RequestRefresh()
- lastReshard = time.Now()
- case <-w.refreshCh:
- err := w.refresh(ctx)
- if err != nil {
- level.Error(w.log).Log("msg", "refresh failed", "err", err)
- }
- case ev := <-w.store.Watch():
- level.Debug(w.log).Log("msg", "handling event from config store")
- if err := w.handleEvent(ev); err != nil {
- level.Error(w.log).Log("msg", "failed to handle changed or deleted config", "key", ev.Key, "err", err)
- }
- }
- }
-}
-
-// nextReshard returns a channel that will receive a value when the reshard
-// interval has elapsed.
-func (w *configWatcher) nextReshard(lastReshard time.Time) <-chan time.Time {
- w.mut.Lock()
- nextReshard := lastReshard.Add(w.cfg.ReshardInterval)
- w.mut.Unlock()
-
- remaining := time.Until(nextReshard)
-
- // NOTE(rfratto): clamping to 0 isn't necessary for time.After,
- // but it makes the log message clearer to always use "0s" as
- // "next reshard will be scheduled immediately."
- if remaining < 0 {
- remaining = 0
- }
-
- level.Debug(w.log).Log("msg", "waiting for next reshard interval", "last_reshard", lastReshard, "next_reshard", nextReshard, "remaining", remaining)
- return time.After(remaining)
-}
-
-// RequestRefresh will queue a refresh. No more than one refresh can be queued at a time.
-func (w *configWatcher) RequestRefresh() {
- select {
- case w.refreshCh <- struct{}{}:
- level.Debug(w.log).Log("msg", "successfully scheduled a refresh")
- default:
- level.Debug(w.log).Log("msg", "ignoring request refresh: refresh already scheduled")
- }
-}
-
-// refresh reloads all configs from the configstore. Deleted configs will be
-// removed. refresh may not be called concurrently and must only be invoked from run.
-// Call RequestRefresh to queue a call to refresh.
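
RequestRefresh above coalesces refresh requests with a one-slot buffered channel and a non-blocking send: the first request fills the buffer, and any request arriving before the run loop drains it is dropped. A standalone sketch of that pattern; requester is a hypothetical name.

package main

import "fmt"

// requester coalesces refresh requests with a buffered channel of size one,
// matching RequestRefresh above: a full buffer means a refresh is already
// pending, so additional requests are deliberately dropped.
type requester struct{ ch chan struct{} }

func (r *requester) RequestRefresh() {
	select {
	case r.ch <- struct{}{}:
		fmt.Println("refresh scheduled")
	default:
		fmt.Println("refresh already pending; request coalesced")
	}
}

func main() {
	r := &requester{ch: make(chan struct{}, 1)}
	r.RequestRefresh() // fills the buffer
	r.RequestRefresh() // coalesced
	<-r.ch             // worker drains the signal
	r.RequestRefresh() // schedulable again
}
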
-func (w *configWatcher) refresh(ctx context.Context) (err error) { - w.mut.Lock() - enabled := w.cfg.Enabled - refreshTimeout := w.cfg.ReshardTimeout - w.mut.Unlock() - - if !enabled { - level.Debug(w.log).Log("msg", "refresh skipped because clustering is disabled") - return nil - } - level.Info(w.log).Log("msg", "starting refresh") - - if refreshTimeout > 0 { - var cancel context.CancelFunc - ctx, cancel = context.WithTimeout(ctx, refreshTimeout) - defer cancel() - } - - start := time.Now() - defer func() { - success := "1" - if err != nil { - success = "0" - } - duration := time.Since(start) - level.Info(w.log).Log("msg", "refresh finished", "duration", duration, "success", success, "err", err) - reshardDuration.WithLabelValues(success).Observe(duration.Seconds()) - }() - - // This is used to determine if the context was already exceeded before calling the kv provider - if err = ctx.Err(); err != nil { - level.Error(w.log).Log("msg", "context deadline exceeded before calling store.all", "err", err) - return err - } - deadline, _ := ctx.Deadline() - level.Debug(w.log).Log("msg", "deadline before store.all", "deadline", deadline) - configs, err := w.store.All(ctx, func(key string) bool { - owns, err := w.owns(key) - if err != nil { - level.Error(w.log).Log("msg", "failed to check for ownership, instance will be deleted if it is running", "key", key, "err", err) - return false - } - return owns - }) - level.Debug(w.log).Log("msg", "count of configs from store.all", "count", len(configs)) - - if err != nil { - return fmt.Errorf("failed to get configs from store: %w", err) - } - - var ( - keys = make(map[string]struct{}) - firstError error - ) - -Outer: - for { - select { - case <-ctx.Done(): - return ctx.Err() - case cfg, ok := <-configs: - // w.store.All will close configs when all of them have been read. - if !ok { - break Outer - } - - if err := w.handleEvent(configstore.WatchEvent{Key: cfg.Name, Config: &cfg}); err != nil { - level.Error(w.log).Log("msg", "failed to process changed config", "key", cfg.Name, "err", err) - if firstError == nil { - firstError = err - } - } - - keys[cfg.Name] = struct{}{} - } - } - - // Any config we used to be running that disappeared from this most recent - // iteration should be deleted. We hold the lock just for the duration of - // populating deleted because handleEvent also grabs a hold on the lock. - var deleted []string - w.instanceMut.Lock() - for key := range w.instances { - if _, exist := keys[key]; exist { - continue - } - deleted = append(deleted, key) - } - w.instanceMut.Unlock() - - // Send a deleted event for any key that has gone away. - for _, key := range deleted { - if err := w.handleEvent(configstore.WatchEvent{Key: key, Config: nil}); err != nil { - level.Error(w.log).Log("msg", "failed to process changed config", "key", key, "err", err) - } - } - - return firstError -} - -func (w *configWatcher) handleEvent(ev configstore.WatchEvent) error { - w.mut.Lock() - defer w.mut.Unlock() - - if w.stopped { - return fmt.Errorf("configWatcher stopped") - } - - w.instanceMut.Lock() - defer w.instanceMut.Unlock() - - owned, err := w.owns(ev.Key) - if err != nil { - level.Error(w.log).Log("msg", "failed to see if config is owned. instance will be deleted if it is running", "err", err) - } - - var ( - _, isRunning = w.instances[ev.Key] - isDeleted = ev.Config == nil - ) - - switch { - // Two deletion scenarios: - // 1. A config we're running got moved to a new owner. - // 2. 
A config we're running got deleted - case (isRunning && !owned) || (isDeleted && isRunning): - if isDeleted { - level.Info(w.log).Log("msg", "untracking deleted config", "key", ev.Key) - } else { - level.Info(w.log).Log("msg", "untracking config that changed owners", "key", ev.Key) - } - - err := w.im.DeleteConfig(ev.Key) - delete(w.instances, ev.Key) - if err != nil { - return fmt.Errorf("failed to delete: %w", err) - } - - case !isDeleted && owned: - if err := w.validate(ev.Config); err != nil { - return fmt.Errorf( - "failed to validate config. %[1]s cannot run until the global settings are adjusted or the config is adjusted to operate within the global constraints. error: %[2]w", - ev.Key, err, - ) - } - - if _, exist := w.instances[ev.Key]; !exist { - level.Info(w.log).Log("msg", "tracking new config", "key", ev.Key) - } - - if err := w.im.ApplyConfig(*ev.Config); err != nil { - return fmt.Errorf("failed to apply config: %w", err) - } - w.instances[ev.Key] = struct{}{} - } - - return nil -} - -// Stop stops the configWatcher. Cannot be called more than once. -func (w *configWatcher) Stop() error { - w.mut.Lock() - defer w.mut.Unlock() - - if w.stopped { - return fmt.Errorf("already stopped") - } - w.stop() - w.stopped = true - - // Shut down all the instances that this configWatcher managed. It *MUST* - // happen after w.stop() is called to prevent the run loop from applying any - // new configs. - w.instanceMut.Lock() - defer w.instanceMut.Unlock() - - for key := range w.instances { - if err := w.im.DeleteConfig(key); err != nil { - level.Warn(w.log).Log("msg", "failed deleting config on shutdown", "key", key, "err", err) - } - } - w.instances = make(map[string]struct{}) - - return nil -} diff --git a/internal/static/metrics/cluster/config_watcher_test.go b/internal/static/metrics/cluster/config_watcher_test.go deleted file mode 100644 index e91bffe5d8..0000000000 --- a/internal/static/metrics/cluster/config_watcher_test.go +++ /dev/null @@ -1,267 +0,0 @@ -package cluster - -import ( - "context" - "testing" - "time" - - "github.com/grafana/agent/internal/static/metrics/instance" - "github.com/grafana/agent/internal/static/metrics/instance/configstore" - "github.com/grafana/agent/internal/util" - "github.com/stretchr/testify/mock" - "github.com/stretchr/testify/require" -) - -func Test_configWatcher_Refresh(t *testing.T) { - var ( - log = util.TestLogger(t) - - cfg = DefaultConfig - store = configstore.Mock{ - WatchFunc: func() <-chan configstore.WatchEvent { - return make(chan configstore.WatchEvent) - }, - } - - im mockConfigManager - - validate = func(*instance.Config) error { return nil } - owned = func(key string) (bool, error) { return true, nil } - ) - cfg.Enabled = true - cfg.ReshardInterval = time.Hour - - w, err := newConfigWatcher(log, cfg, &store, &im, owned, validate) - require.NoError(t, err) - t.Cleanup(func() { _ = w.Stop() }) - - im.On("ApplyConfig", mock.Anything).Return(nil) - im.On("DeleteConfig", mock.Anything).Return(nil) - - // First: return a "hello" config. - store.AllFunc = func(ctx context.Context, keep func(key string) bool) (<-chan instance.Config, error) { - ch := make(chan instance.Config) - go func() { - ch <- instance.Config{Name: "hello"} - close(ch) - }() - return ch, nil - } - - err = w.refresh(context.Background()) - require.NoError(t, err) - - // Then: return a "new" config. 
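
The switch in handleEvent above boils down to two rules: untrack a running config when it was deleted or changed owners, and (re)apply a live config this node owns. A pure-function sketch of that decision table; decide is an illustrative name for logic that the real code interleaves with manager calls.

package main

import "fmt"

// decide reduces the switch in handleEvent above to a pure function so the
// two branches are easy to see: untrack a config we run when we lost
// ownership or it was deleted; (re)apply a live config we own.
func decide(isRunning, owned, isDeleted bool) string {
	switch {
	case (isRunning && !owned) || (isDeleted && isRunning):
		return "delete"
	case !isDeleted && owned:
		return "apply"
	default:
		return "ignore" // e.g. a deleted config we never ran
	}
}

func main() {
	fmt.Println(decide(true, false, false)) // moved to a new owner -> delete
	fmt.Println(decide(true, true, true))   // deleted while running -> delete
	fmt.Println(decide(false, true, false)) // new owned config -> apply
	fmt.Println(decide(false, false, true)) // deleted, never ran -> ignore
}
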
- store.AllFunc = func(ctx context.Context, keep func(key string) bool) (<-chan instance.Config, error) { - ch := make(chan instance.Config, 1) - go func() { - ch <- instance.Config{Name: "new"} - close(ch) - }() - return ch, nil - } - - err = w.refresh(context.Background()) - require.NoError(t, err) - - // "hello" and "new" should've been applied, and "hello" should've been deleted - // from the second refresh. - im.AssertCalled(t, "ApplyConfig", instance.Config{Name: "hello"}) - im.AssertCalled(t, "ApplyConfig", instance.Config{Name: "new"}) - im.AssertCalled(t, "DeleteConfig", "hello") -} - -func Test_configWatcher_handleEvent(t *testing.T) { - var ( - cfg = DefaultConfig - store = configstore.Mock{ - WatchFunc: func() <-chan configstore.WatchEvent { - return make(chan configstore.WatchEvent) - }, - } - - validate = func(*instance.Config) error { return nil } - - owned = func(key string) (bool, error) { return true, nil } - unowned = func(key string) (bool, error) { return false, nil } - ) - cfg.Enabled = true - - t.Run("new owned config", func(t *testing.T) { - var ( - log = util.TestLogger(t) - im mockConfigManager - ) - - w, err := newConfigWatcher(log, cfg, &store, &im, owned, validate) - require.NoError(t, err) - t.Cleanup(func() { _ = w.Stop() }) - - im.On("ApplyConfig", mock.Anything).Return(nil) - im.On("DeleteConfig", mock.Anything).Return(nil) - - err = w.handleEvent(configstore.WatchEvent{Key: "new", Config: &instance.Config{}}) - require.NoError(t, err) - - im.AssertNumberOfCalls(t, "ApplyConfig", 1) - }) - - t.Run("updated owned config", func(t *testing.T) { - var ( - log = util.TestLogger(t) - im mockConfigManager - ) - - w, err := newConfigWatcher(log, cfg, &store, &im, owned, validate) - require.NoError(t, err) - t.Cleanup(func() { _ = w.Stop() }) - - im.On("ApplyConfig", mock.Anything).Return(nil) - im.On("DeleteConfig", mock.Anything).Return(nil) - - // One for create, one for update - err = w.handleEvent(configstore.WatchEvent{Key: "update", Config: &instance.Config{}}) - require.NoError(t, err) - - err = w.handleEvent(configstore.WatchEvent{Key: "update", Config: &instance.Config{}}) - require.NoError(t, err) - - im.AssertNumberOfCalls(t, "ApplyConfig", 2) - }) - - t.Run("new unowned config", func(t *testing.T) { - var ( - log = util.TestLogger(t) - im mockConfigManager - ) - - w, err := newConfigWatcher(log, cfg, &store, &im, unowned, validate) - require.NoError(t, err) - t.Cleanup(func() { _ = w.Stop() }) - - im.On("ApplyConfig", mock.Anything).Return(nil) - im.On("DeleteConfig", mock.Anything).Return(nil) - - // One for create, one for update - err = w.handleEvent(configstore.WatchEvent{Key: "unowned", Config: &instance.Config{}}) - require.NoError(t, err) - - im.AssertNumberOfCalls(t, "ApplyConfig", 0) - }) - - t.Run("lost ownership", func(t *testing.T) { - var ( - log = util.TestLogger(t) - - im mockConfigManager - - isOwned = true - owns = func(key string) (bool, error) { return isOwned, nil } - ) - - w, err := newConfigWatcher(log, cfg, &store, &im, owns, validate) - require.NoError(t, err) - t.Cleanup(func() { _ = w.Stop() }) - - im.On("ApplyConfig", mock.Anything).Return(nil) - im.On("DeleteConfig", mock.Anything).Return(nil) - - // One for create, then one for ownership change - err = w.handleEvent(configstore.WatchEvent{Key: "disappear", Config: &instance.Config{}}) - require.NoError(t, err) - - // Mark the config as unowned. The re-apply should then delete it. 
- isOwned = false - - err = w.handleEvent(configstore.WatchEvent{Key: "disappear", Config: &instance.Config{}}) - require.NoError(t, err) - - im.AssertNumberOfCalls(t, "ApplyConfig", 1) - im.AssertNumberOfCalls(t, "DeleteConfig", 1) - }) - - t.Run("deleted running config", func(t *testing.T) { - var ( - log = util.TestLogger(t) - - im mockConfigManager - ) - - w, err := newConfigWatcher(log, cfg, &store, &im, owned, validate) - require.NoError(t, err) - t.Cleanup(func() { _ = w.Stop() }) - - im.On("ApplyConfig", mock.Anything).Return(nil) - im.On("DeleteConfig", mock.Anything).Return(nil) - - // One for create, then one for deleted. - err = w.handleEvent(configstore.WatchEvent{Key: "new-key", Config: &instance.Config{}}) - require.NoError(t, err) - - err = w.handleEvent(configstore.WatchEvent{Key: "new-key", Config: nil}) - require.NoError(t, err) - - im.AssertNumberOfCalls(t, "ApplyConfig", 1) - im.AssertNumberOfCalls(t, "DeleteConfig", 1) - }) -} - -func Test_configWatcher_nextReshard(t *testing.T) { - watcher := &configWatcher{ - log: util.TestLogger(t), - cfg: Config{ReshardInterval: time.Second}, - } - - t.Run("past time", func(t *testing.T) { - select { - case <-watcher.nextReshard(time.Time{}): - case <-time.After(250 * time.Millisecond): - require.FailNow(t, "nextReshard did not return an already ready channel") - } - }) - - t.Run("future time", func(t *testing.T) { - select { - case <-watcher.nextReshard(time.Now()): - case <-time.After(1500 * time.Millisecond): - require.FailNow(t, "nextReshard took too long to return") - } - }) -} - -type mockConfigManager struct { - mock.Mock -} - -func (m *mockConfigManager) GetInstance(name string) (instance.ManagedInstance, error) { - args := m.Mock.Called() - return args.Get(0).(instance.ManagedInstance), args.Error(1) -} - -func (m *mockConfigManager) ListInstances() map[string]instance.ManagedInstance { - args := m.Mock.Called() - return args.Get(0).(map[string]instance.ManagedInstance) -} - -// ListConfigs implements Manager. -func (m *mockConfigManager) ListConfigs() map[string]instance.Config { - args := m.Mock.Called() - return args.Get(0).(map[string]instance.Config) -} - -// ApplyConfig implements Manager. -func (m *mockConfigManager) ApplyConfig(c instance.Config) error { - args := m.Mock.Called(c) - return args.Error(0) -} - -// DeleteConfig implements Manager. -func (m *mockConfigManager) DeleteConfig(name string) error { - args := m.Mock.Called(name) - return args.Error(0) -} - -// Stop implements Manager. -func (m *mockConfigManager) Stop() { - m.Mock.Called() -} diff --git a/internal/static/metrics/cluster/configapi/types.go b/internal/static/metrics/cluster/configapi/types.go deleted file mode 100644 index bf16b72bdb..0000000000 --- a/internal/static/metrics/cluster/configapi/types.go +++ /dev/null @@ -1,73 +0,0 @@ -package configapi - -import ( - "encoding/json" - "fmt" - "net/http" -) - -// APIResponse is the base object returned for any API call. -// The Data field will be set to either nil or a value of -// another *Response type value from this package. -type APIResponse struct { - Status string `json:"status"` - Data interface{} `json:"data,omitempty"` -} - -// WriteTo writes the response to the given ResponseWriter with the provided -// statusCode. -func (r *APIResponse) WriteTo(w http.ResponseWriter, statusCode int) error { - bb, err := json.Marshal(r) - if err != nil { - // If we fail here, we should at least write a 500 back. 
- w.WriteHeader(http.StatusInternalServerError) - return err - } - - w.WriteHeader(statusCode) - n, err := w.Write(bb) - if err != nil { - return err - } else if n != len(bb) { - return fmt.Errorf("could not write full response. expected %d, wrote %d", len(bb), n) - } - - return nil -} - -// ErrorResponse is contained inside an APIResponse and returns -// an error string. Returned by any API call that can fail. -type ErrorResponse struct { - Error string `json:"error"` -} - -// ListConfigurationsResponse is contained inside an APIResponse -// and provides the list of configurations known to the KV store. -// Returned by ListConfigurations. -type ListConfigurationsResponse struct { - // Configs is the list of configuration names. - Configs []string `json:"configs"` -} - -// GetConfigurationResponse is contained inside an APIResponse -// and provides a single configuration known to the KV store. -// Returned by GetConfiguration. -type GetConfigurationResponse struct { - // Value is the stringified YAML configuration. - Value string `json:"value"` -} - -// WriteResponse writes a response object to the provided ResponseWriter w and with a -// status code of statusCode. resp is marshaled to JSON. -func WriteResponse(w http.ResponseWriter, statusCode int, resp interface{}) error { - apiResp := &APIResponse{Status: "success", Data: resp} - w.Header().Set("Content-Type", "application/json") - return apiResp.WriteTo(w, statusCode) -} - -// WriteError writes an error response back to the ResponseWriter. -func WriteError(w http.ResponseWriter, statusCode int, err error) error { - resp := &APIResponse{Status: "error", Data: &ErrorResponse{Error: err.Error()}} - w.Header().Set("Content-Type", "application/json") - return resp.WriteTo(w, statusCode) -} diff --git a/internal/static/metrics/cluster/node.go b/internal/static/metrics/cluster/node.go deleted file mode 100644 index fab9bc6b94..0000000000 --- a/internal/static/metrics/cluster/node.go +++ /dev/null @@ -1,381 +0,0 @@ -package cluster - -import ( - "context" - "fmt" - "hash/fnv" - "net/http" - "sync" - "time" - - "github.com/go-kit/log" - "github.com/go-kit/log/level" - "github.com/gorilla/mux" - pb "github.com/grafana/agent/internal/static/agentproto" - "github.com/grafana/agent/internal/static/metrics/cluster/client" - "github.com/grafana/agent/internal/util" - "github.com/grafana/dskit/backoff" - "github.com/grafana/dskit/kv" - "github.com/grafana/dskit/ring" - "github.com/grafana/dskit/services" - "github.com/grafana/dskit/user" - "github.com/prometheus/client_golang/prometheus" -) - -const ( - // agentKey is the key used for storing the hash ring. - agentKey = "agent" -) - -var backoffConfig = backoff.Config{ - MinBackoff: time.Second, - MaxBackoff: 2 * time.Minute, - MaxRetries: 10, -} - -// node manages membership within a ring. when a node joins or leaves the ring, -// it will inform other nodes to reshard their workloads. After a node joins -// the ring, it will inform the local service to reshard. -type node struct { - log log.Logger - reg *util.Unregisterer - srv pb.ScrapingServiceServer - - mut sync.RWMutex - cfg Config - ring *ring.Ring - lc *ring.Lifecycler - - exited bool - reload chan struct{} -} - -// newNode creates a new node and registers it to the ring. 
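
The configapi types above wrap every reply in a status-plus-data envelope, and WriteTo deliberately marshals before writing the status line so a marshal failure can still surface as a 500. A minimal sketch of the same ordering; envelope and writeJSON are stand-in names for APIResponse and WriteResponse.

package main

import (
	"encoding/json"
	"log"
	"net/http"
	"net/http/httptest"
)

// envelope mirrors APIResponse above: every reply carries a status string and
// an optional payload, so clients can branch on status uniformly.
type envelope struct {
	Status string      `json:"status"`
	Data   interface{} `json:"data,omitempty"`
}

func writeJSON(w http.ResponseWriter, code int, resp envelope) error {
	bb, err := json.Marshal(resp)
	if err != nil {
		// Marshal before writing the status line, so a failure here can
		// still be reported as a 500: the same ordering WriteTo uses above.
		w.WriteHeader(http.StatusInternalServerError)
		return err
	}
	w.Header().Set("Content-Type", "application/json")
	w.WriteHeader(code)
	_, err = w.Write(bb)
	return err
}

func main() {
	rec := httptest.NewRecorder()
	if err := writeJSON(rec, http.StatusOK, envelope{Status: "success", Data: []string{"configs"}}); err != nil {
		log.Fatal(err)
	}
	log.Println(rec.Code, rec.Body.String()) // 200 {"status":"success","data":["configs"]}
}
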
-func newNode(reg prometheus.Registerer, log log.Logger, cfg Config, s pb.ScrapingServiceServer) (*node, error) { - n := &node{ - reg: util.WrapWithUnregisterer(reg), - srv: s, - log: log, - - reload: make(chan struct{}, 1), - } - if err := n.ApplyConfig(cfg); err != nil { - return nil, err - } - go n.run() - return n, nil -} - -func (n *node) ApplyConfig(cfg Config) error { - n.mut.Lock() - defer n.mut.Unlock() - - ctx, cancel := context.WithTimeout(context.Background(), 5*time.Minute) - defer cancel() - - // Detect if the config changed. - if util.CompareYAML(n.cfg, cfg) { - return nil - } - - if n.exited { - return fmt.Errorf("node already exited") - } - - level.Info(n.log).Log("msg", "applying config") - - // Shut down old components before re-creating the updated ones. - n.reg.UnregisterAll() - - if n.lc != nil { - // Note that this will call performClusterReshard and will block until it - // completes. - err := services.StopAndAwaitTerminated(ctx, n.lc) - if err != nil { - return fmt.Errorf("failed to stop lifecycler: %w", err) - } - n.lc = nil - } - - if n.ring != nil { - err := services.StopAndAwaitTerminated(ctx, n.ring) - if err != nil { - return fmt.Errorf("failed to stop ring: %w", err) - } - n.ring = nil - } - - if !cfg.Enabled { - n.cfg = cfg - return nil - } - - r, err := newRing(cfg.Lifecycler.RingConfig, "agent_viewer", agentKey, n.reg, n.log) - if err != nil { - return fmt.Errorf("failed to create ring: %w", err) - } - - if err := services.StartAndAwaitRunning(context.Background(), r); err != nil { - return fmt.Errorf("failed to start ring: %w", err) - } - n.ring = r - - lc, err := ring.NewLifecycler(cfg.Lifecycler.LifecyclerConfig, n, "agent", agentKey, false, n.log, prometheus.WrapRegistererWithPrefix("agent_dskit_", n.reg)) - if err != nil { - return fmt.Errorf("failed to create lifecycler: %w", err) - } - if err := services.StartAndAwaitRunning(context.Background(), lc); err != nil { - if err := services.StopAndAwaitTerminated(ctx, r); err != nil { - level.Error(n.log).Log("msg", "failed to stop ring when returning error. next config reload will fail", "err", err) - } - return fmt.Errorf("failed to start lifecycler: %w", err) - } - n.lc = lc - - n.cfg = cfg - - // Reload and reshard the cluster. - n.reload <- struct{}{} - return nil -} - -// newRing creates a new Cortex Ring that ignores unhealthy nodes. -func newRing(cfg ring.Config, name, key string, reg prometheus.Registerer, log log.Logger) (*ring.Ring, error) { - codec := ring.GetCodec() - store, err := kv.NewClient( - cfg.KVStore, - codec, - kv.RegistererWithKVName(reg, name+"-ring"), - log, - ) - if err != nil { - return nil, err - } - return ring.NewWithStoreClientAndStrategy(cfg, name, key, store, ring.NewIgnoreUnhealthyInstancesReplicationStrategy(), prometheus.WrapRegistererWithPrefix("agent_dskit_", reg), log) -} - -// run waits for connection to the ring and kickstarts the join process. -func (n *node) run() { - for range n.reload { - n.mut.RLock() - - if err := n.performClusterReshard(context.Background(), true); err != nil { - level.Warn(n.log).Log("msg", "dynamic cluster reshard did not succeed", "err", err) - } - - n.mut.RUnlock() - } - - level.Info(n.log).Log("msg", "node run loop exiting") -} - -// performClusterReshard informs the cluster to immediately trigger a reshard -// of their workloads. if joining is true, the server provided to newNode will -// also be informed. 
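
node.ApplyConfig above swaps the ring and lifecycler by stopping the old service and awaiting termination before starting a replacement, so two copies of the same component never run at once. A sketch of that swap using dskit's services helpers, which this file already imports; replaceService and the service names are illustrative.

package main

import (
	"context"
	"log"

	"github.com/grafana/dskit/services"
)

// replaceService shows the swap pattern node.ApplyConfig above relies on:
// stop and await the old service before starting its replacement. The helper
// name is illustrative; the real code does this inline for ring and lifecycler.
func replaceService(ctx context.Context, prev, next services.Service) (services.Service, error) {
	if prev != nil {
		if err := services.StopAndAwaitTerminated(ctx, prev); err != nil {
			return nil, err
		}
	}
	if err := services.StartAndAwaitRunning(ctx, next); err != nil {
		return nil, err
	}
	return next, nil
}

func main() {
	mk := func(name string) services.Service {
		return services.NewIdleService(
			func(context.Context) error { log.Printf("%s started", name); return nil },
			func(error) error { log.Printf("%s stopped", name); return nil },
		)
	}

	ctx := context.Background()
	svc, err := replaceService(ctx, nil, mk("ring-v1"))
	if err != nil {
		log.Fatal(err)
	}
	if _, err := replaceService(ctx, svc, mk("ring-v2")); err != nil {
		log.Fatal(err)
	}
}
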
-func (n *node) performClusterReshard(ctx context.Context, joining bool) error {
- if n.ring == nil || n.lc == nil {
- level.Info(n.log).Log("msg", "node disabled, not resharding")
- return nil
- }
-
- if n.cfg.ClusterReshardEventTimeout > 0 {
- var cancel context.CancelFunc
- ctx, cancel = context.WithTimeout(ctx, n.cfg.ClusterReshardEventTimeout)
- defer cancel()
- }
-
- var (
- rs ring.ReplicationSet
- err error
- )
-
- backoff := backoff.New(ctx, backoffConfig)
- for backoff.Ongoing() {
- if ctx.Err() != nil {
- return ctx.Err()
- }
- rs, err = n.ring.GetAllHealthy(ring.Read)
- if err == nil {
- break
- }
- backoff.Wait()
- }
-
- if len(rs.Instances) > 0 {
- level.Info(n.log).Log("msg", "informing remote nodes to reshard")
- }
-
- // These are not in the goroutine below due to a potential race condition with n.lc.addr
- _, err = rs.Do(ctx, 500*time.Millisecond, func(c context.Context, id *ring.InstanceDesc) (interface{}, error) {
- // Skip over ourselves.
- if id.Addr == n.lc.Addr {
- return nil, nil
- }
-
- notifyCtx := user.InjectOrgID(c, "fake")
- return nil, n.notifyReshard(notifyCtx, id)
- })
-
- if err != nil {
- level.Error(n.log).Log("msg", "notifying other nodes failed", "err", err)
- }
-
- if joining {
- level.Info(n.log).Log("msg", "running local reshard")
- if _, err := n.srv.Reshard(ctx, &pb.ReshardRequest{}); err != nil {
- level.Warn(n.log).Log("msg", "dynamic local reshard did not succeed", "err", err)
- }
- }
- return err
-}
-
-// notifyReshard informs an individual node to reshard.
-func (n *node) notifyReshard(ctx context.Context, id *ring.InstanceDesc) error {
- cli, err := client.New(n.cfg.Client, id.Addr)
- if err != nil {
- return err
- }
- defer cli.Close()
-
- level.Info(n.log).Log("msg", "attempting to notify remote agent to reshard", "addr", id.Addr)
-
- backoff := backoff.New(ctx, backoffConfig)
- for backoff.Ongoing() {
- if ctx.Err() != nil {
- return ctx.Err()
- }
- _, err := cli.Reshard(ctx, &pb.ReshardRequest{})
- if err == nil {
- break
- }
-
- level.Warn(n.log).Log("msg", "reshard notification attempt failed", "addr", id.Addr, "err", err, "attempt", backoff.NumRetries())
- backoff.Wait()
- }
-
- return backoff.Err()
-}
-
-// WaitJoined waits for the node to join the cluster and enter the
-// ACTIVE state.
-func (n *node) WaitJoined(ctx context.Context) error {
- n.mut.RLock()
- defer n.mut.RUnlock()
-
- level.Info(n.log).Log("msg", "waiting for the node to join the cluster")
- defer level.Info(n.log).Log("msg", "node has joined the cluster")
-
- if n.ring == nil || n.lc == nil {
- return fmt.Errorf("node disabled")
- }
-
- return waitJoined(ctx, agentKey, n.ring.KVClient, n.lc.ID)
-}
-
-func waitJoined(ctx context.Context, key string, kvClient kv.Client, id string) error {
- kvClient.WatchKey(ctx, key, func(value interface{}) bool {
- if value == nil {
- return true
- }
-
- desc := value.(*ring.Desc)
- for ingID, ing := range desc.Ingesters {
- if ingID == id && ing.State == ring.ACTIVE {
- return false
- }
- }
-
- return true
- })
-
- return ctx.Err()
-}
-
-func (n *node) WireAPI(r *mux.Router) {
- r.HandleFunc("/debug/ring", func(rw http.ResponseWriter, r *http.Request) {
- n.mut.RLock()
- defer n.mut.RUnlock()
-
- if n.ring == nil {
- http.NotFoundHandler().ServeHTTP(rw, r)
- return
- }
-
- n.ring.ServeHTTP(rw, r)
- })
-}
-
-// Stop stops the node and cancels it from running. The node cannot be used
-// again once Stop is called.
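
notifyReshard above wraps its RPC in dskit's backoff loop: retry while Ongoing, break on success, Wait between attempts, and report the outcome through backoff.Err. The same shape isolated into a runnable sketch; retryNotify and the config values are illustrative.

package main

import (
	"context"
	"fmt"
	"time"

	"github.com/grafana/dskit/backoff"
)

// retryNotify shows the retry shape used by notifyReshard above: loop while
// the backoff is Ongoing, break on success, Wait between attempts, and let
// backoff.Err report the final state (nil on success; the context error or a
// retries-exhausted error otherwise).
func retryNotify(ctx context.Context, notify func() error) error {
	b := backoff.New(ctx, backoff.Config{
		MinBackoff: 100 * time.Millisecond,
		MaxBackoff: time.Second,
		MaxRetries: 5,
	})
	for b.Ongoing() {
		if err := notify(); err == nil {
			break
		}
		b.Wait()
	}
	return b.Err()
}

func main() {
	attempts := 0
	err := retryNotify(context.Background(), func() error {
		attempts++
		if attempts < 3 {
			return fmt.Errorf("transient failure %d", attempts)
		}
		return nil
	})
	fmt.Println(attempts, err) // 3 <nil>
}
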
-func (n *node) Stop() error { - n.mut.Lock() - defer n.mut.Unlock() - - if n.exited { - return fmt.Errorf("node already exited") - } - n.exited = true - - level.Info(n.log).Log("msg", "shutting down node") - - // Shut down dependencies. The lifecycler *MUST* be shut down first since n.ring is - // used during the shutdown process to inform other nodes to reshard. - // - // Note that stopping the lifecycler will call performClusterReshard and will block - // until it completes. - var ( - firstError error - deps []services.Service - ) - - if n.lc != nil { - deps = append(deps, n.lc) - } - if n.ring != nil { - deps = append(deps, n.ring) - } - for _, dep := range deps { - err := services.StopAndAwaitTerminated(context.Background(), dep) - if err != nil && firstError == nil { - firstError = err - } - } - - close(n.reload) - level.Info(n.log).Log("msg", "node shut down") - return firstError -} - -// Flush implements ring.FlushTransferer. It's a no-op. -func (n *node) Flush() {} - -// TransferOut implements ring.FlushTransferer. It connects to all other healthy agents and -// tells them to reshard. TransferOut should NOT be called manually unless the mutex is -// held. -func (n *node) TransferOut(ctx context.Context) error { - return n.performClusterReshard(ctx, false) -} - -// Owns checks to see if a key is owned by this node. owns will return -// an error if the ring is empty or if there aren't enough healthy nodes. -func (n *node) Owns(key string) (bool, error) { - n.mut.RLock() - defer n.mut.RUnlock() - - rs, err := n.ring.Get(keyHash(key), ring.Write, nil, nil, nil) - if err != nil { - return false, err - } - for _, r := range rs.Instances { - if r.Addr == n.lc.Addr { - return true, nil - } - } - return false, nil -} - -func keyHash(key string) uint32 { - h := fnv.New32() - _, _ = h.Write([]byte(key)) - return h.Sum32() -} diff --git a/internal/static/metrics/cluster/node_test.go b/internal/static/metrics/cluster/node_test.go deleted file mode 100644 index 15df4ab7fb..0000000000 --- a/internal/static/metrics/cluster/node_test.go +++ /dev/null @@ -1,223 +0,0 @@ -package cluster - -import ( - "context" - "flag" - "fmt" - "math/rand" - "net" - "testing" - "time" - - "github.com/go-kit/log" - "github.com/go-kit/log/level" - "github.com/golang/protobuf/ptypes/empty" - "github.com/grafana/agent/internal/static/agentproto" - "github.com/grafana/agent/internal/util" - "github.com/grafana/dskit/ring" - "github.com/grafana/dskit/services" - "github.com/prometheus/client_golang/prometheus" - "github.com/stretchr/testify/require" - "go.uber.org/atomic" - "google.golang.org/grpc" - "gopkg.in/yaml.v2" -) - -func Test_node_Join(t *testing.T) { - var ( - reg = prometheus.NewRegistry() - logger = util.TestLogger(t) - - localReshard = make(chan struct{}, 2) - remoteReshard = make(chan struct{}, 2) - ) - - local := &agentproto.FuncScrapingServiceServer{ - ReshardFunc: func(c context.Context, rr *agentproto.ReshardRequest) (*empty.Empty, error) { - localReshard <- struct{}{} - return &empty.Empty{}, nil - }, - } - - remote := &agentproto.FuncScrapingServiceServer{ - ReshardFunc: func(c context.Context, rr *agentproto.ReshardRequest) (*empty.Empty, error) { - remoteReshard <- struct{}{} - return &empty.Empty{}, nil - }, - } - startNode(t, remote, logger) - - nodeConfig := DefaultConfig - nodeConfig.Enabled = true - nodeConfig.Lifecycler.LifecyclerConfig = testLifecyclerConfig(t) - - n, err := newNode(reg, logger, nodeConfig, local) - require.NoError(t, err) - t.Cleanup(func() { _ = n.Stop() }) - - 
require.NoError(t, n.WaitJoined(context.Background())) - - waitAll(t, remoteReshard, localReshard) -} - -// waitAll waits for a message on all channels. -func waitAll(t *testing.T, chs ...chan struct{}) { - timeoutCh := time.After(5 * time.Second) - for _, ch := range chs { - select { - case <-timeoutCh: - require.FailNow(t, "timeout exceeded") - case <-ch: - } - } -} - -func Test_node_Leave(t *testing.T) { - var ( - reg = prometheus.NewRegistry() - logger = util.TestLogger(t) - - sendReshard = atomic.NewBool(false) - remoteReshard = make(chan struct{}, 2) - ) - - local := &agentproto.FuncScrapingServiceServer{ - ReshardFunc: func(c context.Context, rr *agentproto.ReshardRequest) (*empty.Empty, error) { - return &empty.Empty{}, nil - }, - } - - remote := &agentproto.FuncScrapingServiceServer{ - ReshardFunc: func(c context.Context, rr *agentproto.ReshardRequest) (*empty.Empty, error) { - if sendReshard.Load() { - remoteReshard <- struct{}{} - } - return &empty.Empty{}, nil - }, - } - startNode(t, remote, logger) - - nodeConfig := DefaultConfig - nodeConfig.Enabled = true - nodeConfig.Lifecycler.LifecyclerConfig = testLifecyclerConfig(t) - - n, err := newNode(reg, logger, nodeConfig, local) - require.NoError(t, err) - require.NoError(t, n.WaitJoined(context.Background())) - - // Update the reshard function to write to remoteReshard on shutdown. - sendReshard.Store(true) - - // Stop the node so it transfers data outward. - require.NoError(t, n.Stop(), "failed to stop the node") - - level.Info(logger).Log("msg", "waiting for remote reshard to occur") - waitAll(t, remoteReshard) -} - -func Test_node_ApplyConfig(t *testing.T) { - var ( - reg = prometheus.NewRegistry() - logger = util.TestLogger(t) - - localReshard = make(chan struct{}, 10) - ) - - local := &agentproto.FuncScrapingServiceServer{ - ReshardFunc: func(c context.Context, rr *agentproto.ReshardRequest) (*empty.Empty, error) { - localReshard <- struct{}{} - return &empty.Empty{}, nil - }, - } - - nodeConfig := DefaultConfig - nodeConfig.Enabled = true - nodeConfig.Lifecycler.LifecyclerConfig = testLifecyclerConfig(t) - - n, err := newNode(reg, logger, nodeConfig, local) - require.NoError(t, err) - t.Cleanup(func() { _ = n.Stop() }) - require.NoError(t, n.WaitJoined(context.Background())) - - // Wait for the initial join to trigger. - waitAll(t, localReshard) - - // An ApplyConfig working correctly should re-join the cluster, which can be - // detected by local resharding applying twice. - nodeConfig.Lifecycler.NumTokens = 1 - require.NoError(t, n.ApplyConfig(nodeConfig), "failed to apply new config") - require.NoError(t, n.WaitJoined(context.Background())) - - waitAll(t, localReshard) -} - -// startNode launches srv as a gRPC server and registers it to the ring. -func startNode(t *testing.T, srv agentproto.ScrapingServiceServer, logger log.Logger) { - t.Helper() - - l, err := net.Listen("tcp", "127.0.0.1:0") - require.NoError(t, err) - - grpcServer := grpc.NewServer() - agentproto.RegisterScrapingServiceServer(grpcServer, srv) - - go func() { - _ = grpcServer.Serve(l) - }() - t.Cleanup(func() { grpcServer.Stop() }) - - lcConfig := testLifecyclerConfig(t) - lcConfig.Addr = l.Addr().(*net.TCPAddr).IP.String() - lcConfig.Port = l.Addr().(*net.TCPAddr).Port - - lc, err := ring.NewLifecycler(lcConfig, ring.NewNoopFlushTransferer(), "agent", "agent", false, logger, nil) - require.NoError(t, err) - - err = services.StartAndAwaitRunning(context.Background(), lc) - require.NoError(t, err) - - // Wait for the new node to be in the ring. 
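
Ownership in node.Owns above comes down to hashing a config name with 32-bit FNV-1 and looking the token up in the ring; because every node hashes keys the same way, they all agree on which instance owns each config. The hash itself, runnable standalone.

package main

import (
	"fmt"
	"hash/fnv"
)

// keyHash is the same 32-bit FNV-1 hashing used by node.Owns above to map a
// config name onto the ring's token space.
func keyHash(key string) uint32 {
	h := fnv.New32()
	_, _ = h.Write([]byte(key))
	return h.Sum32()
}

func main() {
	for _, key := range []string{"instance-1", "instance-2"} {
		fmt.Printf("%s -> token %d\n", key, keyHash(key))
	}
}
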
- joinWaitCtx, joinWaitCancel := context.WithTimeout(context.Background(), 5*time.Second) - defer joinWaitCancel() - err = waitJoined(joinWaitCtx, agentKey, lc.KVStore, lc.ID) - require.NoError(t, err) - - t.Cleanup(func() { - _ = services.StopAndAwaitTerminated(context.Background(), lc) - }) -} - -func testLifecyclerConfig(t *testing.T) ring.LifecyclerConfig { - t.Helper() - - cfgText := util.Untab(fmt.Sprintf(` -ring: - kvstore: - store: inmemory - prefix: tests/%s -final_sleep: 0s -min_ready_duration: 0s - `, t.Name())) - - // Apply default values by registering to a fake flag set. - var lc ring.LifecyclerConfig - lc.RegisterFlagsWithPrefix("", flag.NewFlagSet("", flag.ContinueOnError), log.NewNopLogger()) - - err := yaml.Unmarshal([]byte(cfgText), &lc) - require.NoError(t, err) - - // Assign a random default ID. - var letters = []rune("abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ") - name := make([]rune, 10) - for i := range name { - name[i] = letters[rand.Intn(len(letters))] - } - lc.ID = string(name) - - // Add an invalid default address/port. Tests can override if they expect - // incoming traffic. - lc.Addr = "x.x.x.x" - lc.Port = -1 - - return lc -} diff --git a/internal/static/metrics/cluster/validation.go b/internal/static/metrics/cluster/validation.go deleted file mode 100644 index 6821a8beeb..0000000000 --- a/internal/static/metrics/cluster/validation.go +++ /dev/null @@ -1,150 +0,0 @@ -package cluster - -import ( - "fmt" - - "github.com/grafana/agent/internal/static/metrics/instance" - "github.com/grafana/loki/clients/pkg/promtail/discovery/consulagent" - "github.com/prometheus/common/config" - "github.com/prometheus/prometheus/discovery" - "github.com/prometheus/prometheus/discovery/aws" - "github.com/prometheus/prometheus/discovery/azure" - "github.com/prometheus/prometheus/discovery/consul" - "github.com/prometheus/prometheus/discovery/digitalocean" - "github.com/prometheus/prometheus/discovery/dns" - "github.com/prometheus/prometheus/discovery/eureka" - "github.com/prometheus/prometheus/discovery/file" - "github.com/prometheus/prometheus/discovery/gce" - "github.com/prometheus/prometheus/discovery/hetzner" - "github.com/prometheus/prometheus/discovery/http" - "github.com/prometheus/prometheus/discovery/kubernetes" - "github.com/prometheus/prometheus/discovery/linode" - "github.com/prometheus/prometheus/discovery/marathon" - "github.com/prometheus/prometheus/discovery/moby" - "github.com/prometheus/prometheus/discovery/openstack" - "github.com/prometheus/prometheus/discovery/scaleway" - "github.com/prometheus/prometheus/discovery/triton" - "github.com/prometheus/prometheus/discovery/zookeeper" -) - -func validateNofiles(c *instance.Config) error { - for i, rw := range c.RemoteWrite { - if err := validateHTTPNoFiles(&rw.HTTPClientConfig); err != nil { - return fmt.Errorf("failed to validate remote_write at index %d: %w", i, err) - } - } - - for i, sc := range c.ScrapeConfigs { - if err := validateHTTPNoFiles(&sc.HTTPClientConfig); err != nil { - return fmt.Errorf("failed to validate scrape_config at index %d: %w", i, err) - } - - for j, disc := range sc.ServiceDiscoveryConfigs { - if err := validateDiscoveryNoFiles(disc); err != nil { - return fmt.Errorf("failed to validate service discovery at index %d within scrape_config at index %d: %w", j, i, err) - } - } - } - - return nil -} - -func validateHTTPNoFiles(cfg *config.HTTPClientConfig) error { - checks := []struct { - name string - check func() bool - }{ - {"bearer_token_file", func() bool { return 
cfg.BearerTokenFile != "" }}, - {"password_file", func() bool { return cfg.BasicAuth != nil && cfg.BasicAuth.PasswordFile != "" }}, - {"credentials_file", func() bool { return cfg.Authorization != nil && cfg.Authorization.CredentialsFile != "" }}, - {"ca_file", func() bool { return cfg.TLSConfig.CAFile != "" }}, - {"cert_file", func() bool { return cfg.TLSConfig.CertFile != "" }}, - {"key_file", func() bool { return cfg.TLSConfig.KeyFile != "" }}, - } - for _, check := range checks { - if check.check() { - return fmt.Errorf("%s must be empty unless dangerous_allow_reading_files is set", check.name) - } - } - return nil -} - -func validateDiscoveryNoFiles(disc discovery.Config) error { - switch d := disc.(type) { - case discovery.StaticConfig: - // no-op - case *azure.SDConfig: - // no-op - case *consul.SDConfig: - if err := validateHTTPNoFiles(&config.HTTPClientConfig{TLSConfig: d.HTTPClientConfig.TLSConfig}); err != nil { - return err - } - case *consulagent.SDConfig: - if err := validateHTTPNoFiles(&config.HTTPClientConfig{TLSConfig: d.TLSConfig}); err != nil { - return err - } - case *digitalocean.SDConfig: - if err := validateHTTPNoFiles(&d.HTTPClientConfig); err != nil { - return err - } - case *dns.SDConfig: - // no-op - case *moby.DockerSwarmSDConfig: - if err := validateHTTPNoFiles(&d.HTTPClientConfig); err != nil { - return err - } - case *aws.EC2SDConfig: - // no-op - case *eureka.SDConfig: - if err := validateHTTPNoFiles(&d.HTTPClientConfig); err != nil { - return err - } - case *file.SDConfig: - // no-op - case *gce.SDConfig: - // no-op - case *hetzner.SDConfig: - if err := validateHTTPNoFiles(&d.HTTPClientConfig); err != nil { - return err - } - case *kubernetes.SDConfig: - if err := validateHTTPNoFiles(&d.HTTPClientConfig); err != nil { - return err - } - case *marathon.SDConfig: - if err := validateHTTPNoFiles(&d.HTTPClientConfig); err != nil { - return err - } - if d.AuthTokenFile != "" { - return fmt.Errorf("auth_token_file must be empty unless dangerous_allow_reading_files is set") - } - case *openstack.SDConfig: - if err := validateHTTPNoFiles(&config.HTTPClientConfig{TLSConfig: d.TLSConfig}); err != nil { - return err - } - case *scaleway.SDConfig: - if err := validateHTTPNoFiles(&d.HTTPClientConfig); err != nil { - return err - } - case *triton.SDConfig: - if err := validateHTTPNoFiles(&config.HTTPClientConfig{TLSConfig: d.TLSConfig}); err != nil { - return err - } - case *http.SDConfig: - if err := validateHTTPNoFiles(&d.HTTPClientConfig); err != nil { - return err - } - case *linode.SDConfig: - if err := validateHTTPNoFiles(&d.HTTPClientConfig); err != nil { - return err - } - case *zookeeper.NerveSDConfig: - // no-op - case *zookeeper.ServersetSDConfig: - // no-op - default: - return fmt.Errorf("unknown service discovery %s; rejecting config for safety. 
set dangerous_allow_reading_files to ignore", d.Name()) - } - - return nil -} diff --git a/internal/static/metrics/cluster/validation_test.go b/internal/static/metrics/cluster/validation_test.go deleted file mode 100644 index 180a0bfabb..0000000000 --- a/internal/static/metrics/cluster/validation_test.go +++ /dev/null @@ -1,118 +0,0 @@ -package cluster - -import ( - "fmt" - "strings" - "testing" - - "github.com/grafana/agent/internal/static/metrics/instance" - "github.com/grafana/agent/internal/util" - "github.com/stretchr/testify/require" -) - -func Test_validateNoFiles(t *testing.T) { - tt := []struct { - name string - input string - expect error - }{ - { - name: "valid config", - input: util.Untab(` - scrape_configs: - - job_name: innocent_scrape - static_configs: - - targets: ['127.0.0.1:12345'] - remote_write: - - url: http://localhost:9009/api/prom/push - `), - expect: nil, - }, - { - name: "all SDs", - input: util.Untab(` - scrape_configs: - - job_name: basic_sds - static_configs: - - targets: ['localhost'] - azure_sd_configs: - - subscription_id: fake - tenant_id: fake - client_id: fake - client_secret: fake - consul_sd_configs: - - {} - dns_sd_configs: - - names: ['fake'] - ec2_sd_configs: - - region: fake - eureka_sd_configs: - - server: http://localhost:80/eureka - file_sd_configs: - - files: ['fake.json'] - digitalocean_sd_configs: - - {} - dockerswarm_sd_configs: - - host: localhost - role: nodes - gce_sd_configs: - - project: fake - zone: fake - hetzner_sd_configs: - - role: hcloud - kubernetes_sd_configs: - - role: pod - marathon_sd_configs: - - servers: ['localhost'] - nerve_sd_configs: - - servers: ['localhost'] - paths: ['/'] - openstack_sd_configs: - - role: instance - region: fake - scaleway_sd_configs: - - role: instance - project_id: ffffffff-ffff-ffff-ffff-ffffffffffff - secret_key: ffffffff-ffff-ffff-ffff-ffffffffffff - access_key: SCWXXXXXXXXXXXXXXXXX - serverset_sd_configs: - - servers: ['localhost'] - paths: ['/'] - triton_sd_configs: - - account: fake - dns_suffix: fake - endpoint: fake - `), - expect: nil, - }, - { - name: "invalid http client config", - input: util.Untab(` - scrape_configs: - - job_name: malicious_scrape - static_configs: - - targets: ['badsite.com'] - basic_auth: - username: file_leak - password_file: /etc/password - remote_write: - - url: http://localhost:9009/api/prom/push - `), - expect: fmt.Errorf("failed to validate scrape_config at index 0: password_file must be empty unless dangerous_allow_reading_files is set"), - }, - } - - for _, tc := range tt { - t.Run(tc.name, func(t *testing.T) { - cfg, err := instance.UnmarshalConfig(strings.NewReader(tc.input)) - require.NoError(t, err) - - actual := validateNofiles(cfg) - if tc.expect == nil { - require.NoError(t, actual) - } else { - require.EqualError(t, actual, tc.expect.Error()) - } - }) - } -} diff --git a/internal/static/metrics/http.go b/internal/static/metrics/http.go deleted file mode 100644 index 51e5aa64e6..0000000000 --- a/internal/static/metrics/http.go +++ /dev/null @@ -1,166 +0,0 @@ -package metrics - -import ( - "fmt" - "net/http" - "net/url" - "sort" - "time" - - "github.com/go-kit/log/level" - "github.com/gorilla/mux" - "github.com/grafana/agent/internal/static/metrics/cluster/configapi" - "github.com/prometheus/common/model" - "github.com/prometheus/prometheus/model/labels" - "github.com/prometheus/prometheus/scrape" -) - -// WireAPI adds API routes to the provided mux router. 
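
validateHTTPNoFiles above drives its rejection logic from a table of field-name/predicate pairs, which keeps the error messages uniform and makes a new file-reading field one line to add. A trimmed-down sketch of that table with a stand-in clientConfig type; the real code checks prometheus/common's config.HTTPClientConfig.

package main

import "fmt"

// clientConfig is a stand-in for the HTTP client config checked above.
type clientConfig struct {
	BearerTokenFile string
	PasswordFile    string
}

// checkNoFiles mirrors the checks table in validateHTTPNoFiles above: each
// entry names a field and a predicate, and the first file-reading field that
// is set rejects the config.
func checkNoFiles(cfg clientConfig) error {
	checks := []struct {
		name string
		set  func() bool
	}{
		{"bearer_token_file", func() bool { return cfg.BearerTokenFile != "" }},
		{"password_file", func() bool { return cfg.PasswordFile != "" }},
	}
	for _, c := range checks {
		if c.set() {
			return fmt.Errorf("%s must be empty unless dangerous_allow_reading_files is set", c.name)
		}
	}
	return nil
}

func main() {
	fmt.Println(checkNoFiles(clientConfig{}))                              // <nil>
	fmt.Println(checkNoFiles(clientConfig{PasswordFile: "/etc/password"})) // error
}
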
-func (a *Agent) WireAPI(r *mux.Router) { - a.cluster.WireAPI(r) - - // Backwards compatible endpoints. Use endpoints with `metrics` prefix instead - r.HandleFunc("/agent/api/v1/instances", a.ListInstancesHandler).Methods("GET") - r.HandleFunc("/agent/api/v1/targets", a.ListTargetsHandler).Methods("GET") - - r.HandleFunc("/agent/api/v1/metrics/instances", a.ListInstancesHandler).Methods("GET") - r.HandleFunc("/agent/api/v1/metrics/targets", a.ListTargetsHandler).Methods("GET") - r.HandleFunc("/agent/api/v1/metrics/instance/{instance}/write", a.PushMetricsHandler).Methods("POST") -} - -// ListInstancesHandler writes the set of currently running instances to the http.ResponseWriter. -func (a *Agent) ListInstancesHandler(w http.ResponseWriter, _ *http.Request) { - cfgs := a.mm.ListConfigs() - instanceNames := make([]string, 0, len(cfgs)) - for k := range cfgs { - instanceNames = append(instanceNames, k) - } - sort.Strings(instanceNames) - - err := configapi.WriteResponse(w, http.StatusOK, instanceNames) - if err != nil { - level.Error(a.logger).Log("msg", "failed to write response", "err", err) - } -} - -// ListTargetsHandler retrieves the full set of targets across all instances and shows -// information on them. -func (a *Agent) ListTargetsHandler(w http.ResponseWriter, r *http.Request) { - instances := a.mm.ListInstances() - allTagets := make(map[string]TargetSet, len(instances)) - for instName, inst := range instances { - allTagets[instName] = inst.TargetsActive() - } - ListTargetsHandler(allTagets).ServeHTTP(w, r) -} - -// ListTargetsHandler renders a mapping of instance to target set. -func ListTargetsHandler(targets map[string]TargetSet) http.Handler { - return http.HandlerFunc(func(rw http.ResponseWriter, _ *http.Request) { - resp := ListTargetsResponse{} - - for instance, tset := range targets { - for key, targets := range tset { - for _, tgt := range targets { - var lastError string - if scrapeError := tgt.LastError(); scrapeError != nil { - lastError = scrapeError.Error() - } - - resp = append(resp, TargetInfo{ - InstanceName: instance, - TargetGroup: key, - - Endpoint: tgt.URL().String(), - State: string(tgt.Health()), - DiscoveredLabels: tgt.DiscoveredLabels(), - Labels: tgt.Labels(), - LastScrape: tgt.LastScrape(), - ScrapeDuration: tgt.LastScrapeDuration().Milliseconds(), - ScrapeError: lastError, - }) - } - } - } - - sort.Slice(resp, func(i, j int) bool { - // sort by instance, then target group, then job label, then instance label - var ( - iInstance = resp[i].InstanceName - iTargetGroup = resp[i].TargetGroup - iJobLabel = resp[i].Labels.Get(model.JobLabel) - iInstanceLabel = resp[i].Labels.Get(model.InstanceLabel) - - jInstance = resp[j].InstanceName - jTargetGroup = resp[j].TargetGroup - jJobLabel = resp[j].Labels.Get(model.JobLabel) - jInstanceLabel = resp[j].Labels.Get(model.InstanceLabel) - ) - - switch { - case iInstance != jInstance: - return iInstance < jInstance - case iTargetGroup != jTargetGroup: - return iTargetGroup < jTargetGroup - case iJobLabel != jJobLabel: - return iJobLabel < jJobLabel - default: - return iInstanceLabel < jInstanceLabel - } - }) - - _ = configapi.WriteResponse(rw, http.StatusOK, resp) - }) -} - -// TargetSet is a set of targets for an individual scraper. -type TargetSet map[string][]*scrape.Target - -// ListTargetsResponse is returned by the ListTargetsHandler. -type ListTargetsResponse []TargetInfo - -// TargetInfo describes a specific target. 
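
The sort in ListTargetsHandler above is a standard multi-key comparison: compare by the first field that differs and fall through to the next tie-breaker. Isolated into a sketch with a hypothetical row type in place of TargetInfo.

package main

import (
	"fmt"
	"sort"
)

// row stands in for TargetInfo; sortRows orders rows the way
// ListTargetsHandler above orders its response.
type row struct{ instance, group, job string }

func sortRows(rows []row) {
	sort.Slice(rows, func(i, j int) bool {
		switch {
		case rows[i].instance != rows[j].instance:
			return rows[i].instance < rows[j].instance
		case rows[i].group != rows[j].group:
			return rows[i].group < rows[j].group
		default:
			return rows[i].job < rows[j].job
		}
	})
}

func main() {
	rows := []row{
		{"b", "g1", "job2"},
		{"a", "g2", "job1"},
		{"a", "g1", "job9"},
	}
	sortRows(rows)
	fmt.Println(rows) // [{a g1 job9} {a g2 job1} {b g1 job2}]
}
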
-type TargetInfo struct { - InstanceName string `json:"instance"` - TargetGroup string `json:"target_group"` - - Endpoint string `json:"endpoint"` - State string `json:"state"` - Labels labels.Labels `json:"labels"` - DiscoveredLabels labels.Labels `json:"discovered_labels"` - LastScrape time.Time `json:"last_scrape"` - ScrapeDuration int64 `json:"scrape_duration_ms"` - ScrapeError string `json:"scrape_error"` -} - -// PushMetricsHandler provides a way to POST data directly into -// an instance's WAL. -func (a *Agent) PushMetricsHandler(w http.ResponseWriter, r *http.Request) { - // Get instance name. - instanceName, err := getInstanceName(r) - if err != nil { - http.Error(w, err.Error(), http.StatusBadRequest) - return - } - - // Get the metrics instance and serve the request. - managedInstance, err := a.InstanceManager().GetInstance(instanceName) - if err != nil || managedInstance == nil { - http.Error(w, err.Error(), http.StatusBadRequest) - return - } - - managedInstance.WriteHandler().ServeHTTP(w, r) -} - -// getInstanceName uses gorilla/mux's route variables to extract the -// "instance" variable. If not found, getInstanceName will return an error. -func getInstanceName(r *http.Request) (string, error) { - vars := mux.Vars(r) - name := vars["instance"] - name, err := url.PathUnescape(name) - if err != nil { - return "", fmt.Errorf("could not decode instance name: %w", err) - } - return name, nil -} diff --git a/internal/static/metrics/http_test.go b/internal/static/metrics/http_test.go deleted file mode 100644 index 7f557a5a40..0000000000 --- a/internal/static/metrics/http_test.go +++ /dev/null @@ -1,142 +0,0 @@ -package metrics - -import ( - "fmt" - "net/http" - "net/http/httptest" - "testing" - "time" - - "github.com/go-kit/log" - "github.com/grafana/agent/internal/static/metrics/instance" - "github.com/grafana/agent/internal/util" - "github.com/prometheus/client_golang/prometheus" - "github.com/prometheus/common/model" - "github.com/prometheus/prometheus/model/labels" - "github.com/prometheus/prometheus/scrape" - "github.com/stretchr/testify/require" -) - -func TestAgent_ListInstancesHandler(t *testing.T) { - fact := newFakeInstanceFactory() - a, err := newAgent(prometheus.NewRegistry(), Config{ - WALDir: "/tmp/agent", - }, log.NewNopLogger(), fact.factory) - require.NoError(t, err) - defer a.Stop() - - r := httptest.NewRequest("GET", "/agent/api/v1/metrics/instances", nil) - - t.Run("no instances", func(t *testing.T) { - rr := httptest.NewRecorder() - a.ListInstancesHandler(rr, r) - expect := `{"status":"success","data":[]}` - require.Equal(t, expect, rr.Body.String()) - }) - - t.Run("non-empty", func(t *testing.T) { - require.NoError(t, a.mm.ApplyConfig(makeInstanceConfig("foo"))) - require.NoError(t, a.mm.ApplyConfig(makeInstanceConfig("bar"))) - - expect := `{"status":"success","data":["bar","foo"]}` - util.Eventually(t, func(t require.TestingT) { - rr := httptest.NewRecorder() - a.ListInstancesHandler(rr, r) - require.Equal(t, expect, rr.Body.String()) - }) - }) -} - -func TestAgent_ListTargetsHandler(t *testing.T) { - fact := newFakeInstanceFactory() - a, err := newAgent(prometheus.NewRegistry(), Config{ - WALDir: "/tmp/agent", - }, log.NewNopLogger(), fact.factory) - require.NoError(t, err) - - mockManager := &instance.MockManager{ - ListInstancesFunc: func() map[string]instance.ManagedInstance { return nil }, - ListConfigsFunc: func() map[string]instance.Config { return nil }, - ApplyConfigFunc: func(_ instance.Config) error { return nil }, - DeleteConfigFunc: func(name 
string) error { return nil },
- StopFunc: func() {},
- }
- a.mm, err = instance.NewModalManager(prometheus.NewRegistry(), a.logger, mockManager, instance.ModeDistinct)
- require.NoError(t, err)
-
- r := httptest.NewRequest("GET", "/agent/api/v1/metrics/targets", nil)
-
- t.Run("scrape manager not ready", func(t *testing.T) {
- mockManager.ListInstancesFunc = func() map[string]instance.ManagedInstance {
- return map[string]instance.ManagedInstance{
- "test_instance": &mockInstanceScrape{},
- }
- }
-
- rr := httptest.NewRecorder()
- a.ListTargetsHandler(rr, r)
- expect := `{"status": "success", "data": []}`
- require.JSONEq(t, expect, rr.Body.String())
- require.Equal(t, http.StatusOK, rr.Result().StatusCode)
- })
-
- t.Run("scrape manager targets", func(t *testing.T) {
- tgt := scrape.NewTarget(labels.FromMap(map[string]string{
- model.JobLabel: "job",
- model.InstanceLabel: "instance",
- "foo": "bar",
- model.SchemeLabel: "http",
- model.AddressLabel: "localhost:12345",
- model.MetricsPathLabel: "/metrics",
- }), labels.FromMap(map[string]string{
- "__discovered__": "yes",
- }), nil)
-
- startTime := time.Date(1994, time.January, 12, 0, 0, 0, 0, time.UTC)
- tgt.Report(startTime, time.Minute, fmt.Errorf("something went wrong"))
-
- mockManager.ListInstancesFunc = func() map[string]instance.ManagedInstance {
- return map[string]instance.ManagedInstance{
- "test_instance": &mockInstanceScrape{
- tgts: map[string][]*scrape.Target{
- "group_a": {tgt},
- },
- },
- }
- }
-
- rr := httptest.NewRecorder()
- a.ListTargetsHandler(rr, r)
- expect := `{
- "status": "success",
- "data": [{
- "instance": "test_instance",
- "target_group": "group_a",
- "endpoint": "http://localhost:12345/metrics",
- "state": "down",
- "labels": {
- "foo": "bar",
- "instance": "instance",
- "job": "job"
- },
- "discovered_labels": {
- "__discovered__": "yes"
- },
- "last_scrape": "1994-01-12T00:00:00Z",
- "scrape_duration_ms": 60000,
- "scrape_error":"something went wrong"
- }]
- }`
- require.JSONEq(t, expect, rr.Body.String())
- require.Equal(t, http.StatusOK, rr.Result().StatusCode)
- })
-}
-
-type mockInstanceScrape struct {
- instance.NoOpInstance
- tgts map[string][]*scrape.Target
-}
-
-func (i *mockInstanceScrape) TargetsActive() map[string][]*scrape.Target {
- return i.tgts
-}
diff --git a/internal/static/metrics/instance/configstore/api.go b/internal/static/metrics/instance/configstore/api.go
deleted file mode 100644
index 552b6ba404..0000000000
--- a/internal/static/metrics/instance/configstore/api.go
+++ /dev/null
@@ -1,268 +0,0 @@
-package configstore
-
-import (
- "errors"
- "fmt"
- "io"
- "net/http"
- "net/url"
- "strings"
- "sync"
-
- "github.com/go-kit/log"
- "github.com/go-kit/log/level"
- "github.com/gorilla/mux"
- "github.com/grafana/agent/internal/static/metrics/cluster/configapi"
- "github.com/grafana/agent/internal/static/metrics/instance"
- "github.com/prometheus/client_golang/prometheus"
-)
-
-// API is an HTTP API to interact with a configstore.
-type API struct {
- log log.Logger
- storeMut sync.Mutex
- store Store
- validator Validator
-
- totalCreatedConfigs prometheus.Counter
- totalUpdatedConfigs prometheus.Counter
- totalDeletedConfigs prometheus.Counter
-
- enableGet bool
-}
-
-// Validator validates a config before putting it into the store.
-// Validator is allowed to mutate the config and will only be given a copy.
-type Validator = func(c *instance.Config) error
-
-// NewAPI creates a new API.
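-//
-// A hypothetical wiring (names assumed; the validator here is only a sketch
-// that rejects configs without scrape_configs):
-//
-//   api := NewAPI(logger, store, func(c *instance.Config) error {
-//       if len(c.ScrapeConfigs) == 0 {
-//           return fmt.Errorf("config %q has no scrape_configs", c.Name)
-//       }
-//       return nil
-//   }, true)
-//   api.WireAPI(router)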
-func NewAPI(l log.Logger, store Store, v Validator, enableGet bool) *API {
- return &API{
- log: l,
- store: store,
- validator: v,
-
- totalCreatedConfigs: prometheus.NewCounter(prometheus.CounterOpts{
- Name: "agent_metrics_ha_configs_created_total",
- Help: "Total number of created scraping service configs",
- }),
- totalUpdatedConfigs: prometheus.NewCounter(prometheus.CounterOpts{
- Name: "agent_metrics_ha_configs_updated_total",
- Help: "Total number of updated scraping service configs",
- }),
- totalDeletedConfigs: prometheus.NewCounter(prometheus.CounterOpts{
- Name: "agent_metrics_ha_configs_deleted_total",
- Help: "Total number of deleted scraping service configs",
- }),
- enableGet: enableGet,
- }
-}
-
-// WireAPI injects routes into the provided mux router for the config
-// store API.
-func (api *API) WireAPI(r *mux.Router) {
- // Support URL-encoded config names. The handlers will need to decode the
- // name when reading the path variable.
- r = r.UseEncodedPath()
-
- r.HandleFunc("/agent/api/v1/configs", api.ListConfigurations).Methods("GET")
- getConfigHandler := messageHandlerFunc(http.StatusNotFound, "404 - config endpoint is disabled")
- if api.enableGet {
- getConfigHandler = api.GetConfiguration
- }
- r.HandleFunc("/agent/api/v1/configs/{name}", getConfigHandler).Methods("GET")
- r.HandleFunc("/agent/api/v1/config/{name}", api.PutConfiguration).Methods("PUT", "POST")
- r.HandleFunc("/agent/api/v1/config/{name}", api.DeleteConfiguration).Methods("DELETE")
-}
-
-// Describe implements prometheus.Collector.
-func (api *API) Describe(ch chan<- *prometheus.Desc) {
- ch <- api.totalCreatedConfigs.Desc()
- ch <- api.totalUpdatedConfigs.Desc()
- ch <- api.totalDeletedConfigs.Desc()
-}
-
-// Collect implements prometheus.Collector.
-func (api *API) Collect(mm chan<- prometheus.Metric) {
- mm <- api.totalCreatedConfigs
- mm <- api.totalUpdatedConfigs
- mm <- api.totalDeletedConfigs
-}
-
-// ListConfigurations returns a list of configurations.
-func (api *API) ListConfigurations(rw http.ResponseWriter, r *http.Request) {
- api.storeMut.Lock()
- defer api.storeMut.Unlock()
- if api.store == nil {
- api.writeError(rw, http.StatusNotFound, fmt.Errorf("no config store running"))
- return
- }
-
- keys, err := api.store.List(r.Context())
- if errors.Is(err, ErrNotConnected) {
- api.writeError(rw, http.StatusNotFound, fmt.Errorf("no config store running"))
- return
- } else if err != nil {
- api.writeError(rw, http.StatusInternalServerError, fmt.Errorf("failed to list configs: %w", err))
- return
- }
- api.writeResponse(rw, http.StatusOK, configapi.ListConfigurationsResponse{Configs: keys})
-}
-
-// GetConfiguration gets an individual configuration.
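-//
-// Illustrative request (not part of the original file); config names may be
-// URL-encoded, so the config named "url/encoded" is fetched as:
-//
-//   resp, err := http.Get(srv.URL + "/agent/api/v1/configs/url%2Fencoded")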
-func (api *API) GetConfiguration(rw http.ResponseWriter, r *http.Request) { - api.storeMut.Lock() - defer api.storeMut.Unlock() - if api.store == nil { - api.writeError(rw, http.StatusNotFound, fmt.Errorf("no config store running")) - return - } - - configKey, err := getConfigName(r) - if err != nil { - api.writeError(rw, http.StatusBadRequest, err) - return - } - - cfg, err := api.store.Get(r.Context(), configKey) - switch { - case errors.Is(err, ErrNotConnected): - api.writeError(rw, http.StatusNotFound, err) - case errors.As(err, &NotExistError{}): - api.writeError(rw, http.StatusNotFound, err) - case err != nil: - api.writeError(rw, http.StatusInternalServerError, err) - case err == nil: - bb, err := instance.MarshalConfig(&cfg, true) - if err != nil { - api.writeError(rw, http.StatusInternalServerError, fmt.Errorf("could not marshal config for response: %w", err)) - return - } - api.writeResponse(rw, http.StatusOK, &configapi.GetConfigurationResponse{ - Value: string(bb), - }) - } -} - -// PutConfiguration creates or updates a configuration. -func (api *API) PutConfiguration(rw http.ResponseWriter, r *http.Request) { - api.storeMut.Lock() - defer api.storeMut.Unlock() - if api.store == nil { - api.writeError(rw, http.StatusNotFound, fmt.Errorf("no config store running")) - return - } - - configName, err := getConfigName(r) - if err != nil { - api.writeError(rw, http.StatusBadRequest, err) - return - } - - var config strings.Builder - if _, err := io.Copy(&config, r.Body); err != nil { - api.writeError(rw, http.StatusInternalServerError, err) - return - } - - cfg, err := instance.UnmarshalConfig(strings.NewReader(config.String())) - if err != nil { - api.writeError(rw, http.StatusBadRequest, fmt.Errorf("could not unmarshal config: %w", err)) - return - } - cfg.Name = configName - - if api.validator != nil { - validateCfg, err := instance.UnmarshalConfig(strings.NewReader(config.String())) - if err != nil { - api.writeError(rw, http.StatusBadRequest, fmt.Errorf("could not unmarshal config: %w", err)) - return - } - validateCfg.Name = configName - - if err := api.validator(validateCfg); err != nil { - api.writeError(rw, http.StatusBadRequest, fmt.Errorf("failed to validate config: %w", err)) - return - } - } - - created, err := api.store.Put(r.Context(), *cfg) - switch { - case errors.Is(err, ErrNotConnected): - api.writeError(rw, http.StatusNotFound, err) - case errors.As(err, &NotUniqueError{}): - api.writeError(rw, http.StatusBadRequest, err) - case err != nil: - api.writeError(rw, http.StatusInternalServerError, err) - default: - if created { - api.totalCreatedConfigs.Inc() - api.writeResponse(rw, http.StatusCreated, nil) - } else { - api.totalUpdatedConfigs.Inc() - api.writeResponse(rw, http.StatusOK, nil) - } - } -} - -// DeleteConfiguration deletes a configuration. 
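-//
-// An illustrative call, mirroring the tests further below:
-//
-//   req, _ := http.NewRequest(http.MethodDelete,
-//       srv.URL+"/agent/api/v1/config/deleteme", nil)
-//   resp, _ := http.DefaultClient.Do(req) // 200 on success, 404 if missing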
-func (api *API) DeleteConfiguration(rw http.ResponseWriter, r *http.Request) { - api.storeMut.Lock() - defer api.storeMut.Unlock() - if api.store == nil { - api.writeError(rw, http.StatusNotFound, fmt.Errorf("no config store running")) - return - } - - configKey, err := getConfigName(r) - if err != nil { - api.writeError(rw, http.StatusBadRequest, err) - return - } - - err = api.store.Delete(r.Context(), configKey) - switch { - case errors.Is(err, ErrNotConnected): - api.writeError(rw, http.StatusNotFound, err) - case errors.As(err, &NotExistError{}): - api.writeError(rw, http.StatusNotFound, err) - case err != nil: - api.writeError(rw, http.StatusInternalServerError, err) - default: - api.totalDeletedConfigs.Inc() - api.writeResponse(rw, http.StatusOK, nil) - } -} - -func (api *API) writeError(rw http.ResponseWriter, statusCode int, writeErr error) { - err := configapi.WriteError(rw, statusCode, writeErr) - if err != nil { - level.Error(api.log).Log("msg", "failed to write response", "err", err) - } -} - -func (api *API) writeResponse(rw http.ResponseWriter, statusCode int, v interface{}) { - err := configapi.WriteResponse(rw, statusCode, v) - if err != nil { - level.Error(api.log).Log("msg", "failed to write response", "err", err) - } -} - -// getConfigName uses gorilla/mux's route variables to extract the -// "name" variable. If not found, getConfigName will return an error. -func getConfigName(r *http.Request) (string, error) { - vars := mux.Vars(r) - name := vars["name"] - name, err := url.PathUnescape(name) - if err != nil { - return "", fmt.Errorf("could not decode config name: %w", err) - } - return name, nil -} - -func messageHandlerFunc(statusCode int, msg string) http.HandlerFunc { - return func(rw http.ResponseWriter, r *http.Request) { - rw.WriteHeader(statusCode) - _, _ = rw.Write([]byte(msg)) - } -} diff --git a/internal/static/metrics/instance/configstore/api_test.go b/internal/static/metrics/instance/configstore/api_test.go deleted file mode 100644 index 71bd558810..0000000000 --- a/internal/static/metrics/instance/configstore/api_test.go +++ /dev/null @@ -1,408 +0,0 @@ -package configstore - -import ( - "bytes" - "context" - "encoding/json" - "fmt" - "io" - "net/http" - "net/http/httptest" - "strings" - "testing" - "time" - - "github.com/go-kit/log" - "github.com/gorilla/mux" - "github.com/grafana/agent/internal/static/client" - "github.com/grafana/agent/internal/static/metrics/cluster/configapi" - "github.com/grafana/agent/internal/static/metrics/instance" - "github.com/stretchr/testify/assert" - "github.com/stretchr/testify/require" -) - -func TestAPI_ListConfigurations(t *testing.T) { - s := &Mock{ - ListFunc: func(ctx context.Context) ([]string, error) { - return []string{"a", "b", "c"}, nil - }, - } - - api := NewAPI(log.NewNopLogger(), s, nil, true) - env := newAPITestEnvironment(t, api) - - resp, err := http.Get(env.srv.URL + "/agent/api/v1/configs") - require.NoError(t, err) - require.Equal(t, http.StatusOK, resp.StatusCode) - - expect := `{ - "status": "success", - "data": { - "configs": ["a", "b", "c"] - } - }` - body, err := io.ReadAll(resp.Body) - require.NoError(t, err) - require.JSONEq(t, expect, string(body)) - - t.Run("With Client", func(t *testing.T) { - cli := client.New(env.srv.URL) - apiResp, err := cli.ListConfigs(context.Background()) - require.NoError(t, err) - - expect := &configapi.ListConfigurationsResponse{Configs: []string{"a", "b", "c"}} - require.Equal(t, expect, apiResp) - }) -} - -func TestAPI_GetConfiguration_Invalid(t *testing.T) { - s 
:= &Mock{ - GetFunc: func(ctx context.Context, key string) (instance.Config, error) { - return instance.Config{}, NotExistError{Key: key} - }, - } - - api := NewAPI(log.NewNopLogger(), s, nil, true) - env := newAPITestEnvironment(t, api) - - resp, err := http.Get(env.srv.URL + "/agent/api/v1/configs/does-not-exist") - require.NoError(t, err) - require.Equal(t, http.StatusNotFound, resp.StatusCode) - - expect := `{ - "status": "error", - "data": { - "error": "configuration does-not-exist does not exist" - } - }` - body, err := io.ReadAll(resp.Body) - require.NoError(t, err) - require.JSONEq(t, expect, string(body)) - - t.Run("With Client", func(t *testing.T) { - cli := client.New(env.srv.URL) - _, err := cli.GetConfiguration(context.Background(), "does-not-exist") - require.NotNil(t, err) - require.Equal(t, "configuration does-not-exist does not exist", err.Error()) - }) -} - -func TestAPI_GetConfiguration(t *testing.T) { - s := &Mock{ - GetFunc: func(ctx context.Context, key string) (instance.Config, error) { - return instance.Config{ - Name: key, - HostFilter: true, - RemoteFlushDeadline: 10 * time.Minute, - }, nil - }, - } - - api := NewAPI(log.NewNopLogger(), s, nil, true) - env := newAPITestEnvironment(t, api) - - resp, err := http.Get(env.srv.URL + "/agent/api/v1/configs/exists") - require.NoError(t, err) - require.Equal(t, http.StatusOK, resp.StatusCode) - - expect := `{ - "status": "success", - "data": { - "value": "name: exists\nhost_filter: true\nremote_flush_deadline: 10m0s\n" - } - }` - body, err := io.ReadAll(resp.Body) - require.NoError(t, err) - require.JSONEq(t, expect, string(body)) - - t.Run("With Client", func(t *testing.T) { - cli := client.New(env.srv.URL) - actual, err := cli.GetConfiguration(context.Background(), "exists") - require.NoError(t, err) - - // The client will apply defaults, so we need to start with the DefaultConfig - // as a base here. 
- expect := instance.DefaultConfig
- expect.Name = "exists"
- expect.HostFilter = true
- expect.RemoteFlushDeadline = 10 * time.Minute
- require.Equal(t, &expect, actual)
- })
-}
-
-func TestAPI_GetConfiguration_ScrubSecrets(t *testing.T) {
- rawConfig := `name: exists
-scrape_configs:
-- job_name: local_scrape
- follow_redirects: true
- enable_http2: true
- honor_timestamps: true
- metrics_path: /metrics
- scheme: http
- track_timestamps_staleness: true
- static_configs:
- - targets:
- - 127.0.0.1:12345
- labels:
- cluster: localhost
- basic_auth:
- username: admin
- password: SCRUBME
-remote_write:
-- url: http://localhost:9009/api/prom/push
- remote_timeout: 30s
- name: test-d0f32c
- send_exemplars: true
- basic_auth:
- username: admin
- password: SCRUBME
- queue_config:
- capacity: 500
- max_shards: 1000
- min_shards: 1
- max_samples_per_send: 100
- batch_send_deadline: 5s
- min_backoff: 30ms
- max_backoff: 100ms
- retry_on_http_429: true
- follow_redirects: true
- enable_http2: true
- metadata_config:
- send: true
- send_interval: 1m
- max_samples_per_send: 500
-wal_truncate_frequency: 1m0s
-min_wal_time: 5m0s
-max_wal_time: 4h0m0s
-remote_flush_deadline: 1m0s
-`
- scrubbedConfig := strings.ReplaceAll(rawConfig, "SCRUBME", "<secret>")
-
- s := &Mock{
- GetFunc: func(ctx context.Context, key string) (instance.Config, error) {
- c, err := instance.UnmarshalConfig(strings.NewReader(rawConfig))
- if err != nil {
- return instance.Config{}, err
- }
- return *c, nil
- },
- }
-
- api := NewAPI(log.NewNopLogger(), s, nil, true)
- env := newAPITestEnvironment(t, api)
-
- resp, err := http.Get(env.srv.URL + "/agent/api/v1/configs/exists")
- require.NoError(t, err)
- require.Equal(t, http.StatusOK, resp.StatusCode)
- respBytes, err := io.ReadAll(resp.Body)
- require.NoError(t, err)
-
- var apiResp struct {
- Status string `json:"status"`
- Data struct {
- Value string `json:"value"`
- } `json:"data"`
- }
- err = json.Unmarshal(respBytes, &apiResp)
- require.NoError(t, err)
- require.Equal(t, "success", apiResp.Status)
- require.YAMLEq(t, scrubbedConfig, apiResp.Data.Value)
-
- t.Run("With Client", func(t *testing.T) {
- cli := client.New(env.srv.URL)
- actual, err := cli.GetConfiguration(context.Background(), "exists")
- require.NoError(t, err)
-
- // Marshal the retrieved config _without_ scrubbing. This means
- // that if the secrets weren't scrubbed from GetConfiguration, something
- // bad happened at the API level.
- actualBytes, err := instance.MarshalConfig(actual, false) - require.NoError(t, err) - require.YAMLEq(t, scrubbedConfig, string(actualBytes)) - }) -} - -func TestServer_GetConfiguration_Disabled(t *testing.T) { - api := NewAPI(log.NewNopLogger(), nil, nil, false) - env := newAPITestEnvironment(t, api) - resp, err := http.Get(env.srv.URL + "/agent/api/v1/configs/exists") - require.NoError(t, err) - require.Equal(t, http.StatusNotFound, resp.StatusCode) - body, err := io.ReadAll(resp.Body) - require.NoError(t, err) - require.Equal(t, []byte("404 - config endpoint is disabled"), body) -} - -func TestServer_PutConfiguration(t *testing.T) { - var s Mock - - api := NewAPI(log.NewNopLogger(), &s, nil, true) - env := newAPITestEnvironment(t, api) - - cfg := instance.Config{Name: "newconfig"} - bb, err := instance.MarshalConfig(&cfg, false) - require.NoError(t, err) - - t.Run("Created", func(t *testing.T) { - // Created configs should return http.StatusCreated - s.PutFunc = func(ctx context.Context, c instance.Config) (created bool, err error) { - return true, nil - } - - resp, err := http.Post(env.srv.URL+"/agent/api/v1/config/newconfig", "", bytes.NewReader(bb)) - require.NoError(t, err) - require.Equal(t, http.StatusCreated, resp.StatusCode) - }) - - t.Run("Updated", func(t *testing.T) { - // Updated configs should return http.StatusOK - s.PutFunc = func(ctx context.Context, c instance.Config) (created bool, err error) { - return false, nil - } - - resp, err := http.Post(env.srv.URL+"/agent/api/v1/config/newconfig", "", bytes.NewReader(bb)) - require.NoError(t, err) - require.Equal(t, http.StatusOK, resp.StatusCode) - }) -} - -func TestServer_PutConfiguration_Invalid(t *testing.T) { - var s Mock - - api := NewAPI(log.NewNopLogger(), &s, func(c *instance.Config) error { - return fmt.Errorf("custom validation error") - }, true) - env := newAPITestEnvironment(t, api) - - cfg := instance.Config{Name: "newconfig"} - bb, err := instance.MarshalConfig(&cfg, false) - require.NoError(t, err) - - resp, err := http.Post(env.srv.URL+"/agent/api/v1/config/newconfig", "", bytes.NewReader(bb)) - require.NoError(t, err) - require.Equal(t, http.StatusBadRequest, resp.StatusCode) - - expect := `{ - "status": "error", - "data": { - "error": "failed to validate config: custom validation error" - } - }` - body, err := io.ReadAll(resp.Body) - require.NoError(t, err) - require.JSONEq(t, expect, string(body)) -} - -func TestServer_PutConfiguration_WithClient(t *testing.T) { - var s Mock - api := NewAPI(log.NewNopLogger(), &s, nil, true) - env := newAPITestEnvironment(t, api) - - cfg := instance.DefaultConfig - cfg.Name = "newconfig-withclient" - cfg.HostFilter = true - cfg.RemoteFlushDeadline = 10 * time.Minute - - s.PutFunc = func(ctx context.Context, c instance.Config) (created bool, err error) { - assert.Equal(t, cfg, c) - return true, nil - } - - cli := client.New(env.srv.URL) - err := cli.PutConfiguration(context.Background(), "newconfig-withclient", &cfg) - require.NoError(t, err) -} - -func TestServer_DeleteConfiguration(t *testing.T) { - s := &Mock{ - DeleteFunc: func(ctx context.Context, key string) error { - assert.Equal(t, "deleteme", key) - return nil - }, - } - - api := NewAPI(log.NewNopLogger(), s, nil, true) - env := newAPITestEnvironment(t, api) - - req, err := http.NewRequest(http.MethodDelete, env.srv.URL+"/agent/api/v1/config/deleteme", nil) - require.NoError(t, err) - resp, err := http.DefaultClient.Do(req) - require.NoError(t, err) - require.Equal(t, http.StatusOK, resp.StatusCode) - - t.Run("With 
Client", func(t *testing.T) { - cli := client.New(env.srv.URL) - err := cli.DeleteConfiguration(context.Background(), "deleteme") - require.NoError(t, err) - }) -} - -func TestServer_DeleteConfiguration_Invalid(t *testing.T) { - s := &Mock{ - DeleteFunc: func(ctx context.Context, key string) error { - assert.Equal(t, "deleteme", key) - return NotExistError{Key: key} - }, - } - - api := NewAPI(log.NewNopLogger(), s, nil, true) - env := newAPITestEnvironment(t, api) - - req, err := http.NewRequest(http.MethodDelete, env.srv.URL+"/agent/api/v1/config/deleteme", nil) - require.NoError(t, err) - resp, err := http.DefaultClient.Do(req) - require.NoError(t, err) - require.Equal(t, http.StatusNotFound, resp.StatusCode) - - t.Run("With Client", func(t *testing.T) { - cli := client.New(env.srv.URL) - err := cli.DeleteConfiguration(context.Background(), "deleteme") - require.Error(t, err) - }) -} - -func TestServer_URLEncoded(t *testing.T) { - var s Mock - - api := NewAPI(log.NewNopLogger(), &s, nil, true) - env := newAPITestEnvironment(t, api) - - var cfg instance.Config - bb, err := instance.MarshalConfig(&cfg, false) - require.NoError(t, err) - - s.PutFunc = func(ctx context.Context, c instance.Config) (created bool, err error) { - assert.Equal(t, "url/encoded", c.Name) - return true, nil - } - - resp, err := http.Post(env.srv.URL+"/agent/api/v1/config/url%2Fencoded", "", bytes.NewReader(bb)) - require.NoError(t, err) - require.Equal(t, http.StatusCreated, resp.StatusCode) - - s.GetFunc = func(ctx context.Context, key string) (instance.Config, error) { - assert.Equal(t, "url/encoded", key) - return instance.Config{Name: "url/encoded"}, nil - } - - resp, err = http.Get(env.srv.URL + "/agent/api/v1/configs/url%2Fencoded") - require.NoError(t, err) - require.Equal(t, http.StatusOK, resp.StatusCode) -} - -type apiTestEnvironment struct { - srv *httptest.Server - router *mux.Router -} - -func newAPITestEnvironment(t *testing.T, api *API) apiTestEnvironment { - t.Helper() - - router := mux.NewRouter() - srv := httptest.NewServer(router) - t.Cleanup(srv.Close) - - api.WireAPI(router) - - return apiTestEnvironment{srv: srv, router: router} -} diff --git a/internal/static/metrics/instance/configstore/codec.go b/internal/static/metrics/instance/configstore/codec.go deleted file mode 100644 index 38a837c5be..0000000000 --- a/internal/static/metrics/instance/configstore/codec.go +++ /dev/null @@ -1,65 +0,0 @@ -package configstore - -import ( - "bytes" - "compress/gzip" - "fmt" - "io" - "strings" - - "github.com/grafana/dskit/kv/codec" -) - -// GetCodec returns the codec for encoding and decoding instance.Configs -// in the Remote store. -func GetCodec() codec.Codec { - return &yamlCodec{} -} - -type yamlCodec struct{} - -func (*yamlCodec) Decode(bb []byte) (interface{}, error) { - // Decode is called by kv.Clients with an empty slice when a - // key is deleted. We should stop early here and don't return - // an error so the deletion event propagates to watchers. 
- if len(bb) == 0 { - return nil, nil - } - - r, err := gzip.NewReader(bytes.NewReader(bb)) - if err != nil { - return nil, err - } - - var sb strings.Builder - if _, err := io.Copy(&sb, r); err != nil { - return nil, err - } - return sb.String(), nil -} - -func (*yamlCodec) Encode(v interface{}) ([]byte, error) { - var buf bytes.Buffer - - var cfg string - - switch v := v.(type) { - case string: - cfg = v - default: - panic(fmt.Sprintf("unexpected type %T passed to yamlCodec.Encode", v)) - } - - w := gzip.NewWriter(&buf) - - if _, err := io.Copy(w, strings.NewReader(cfg)); err != nil { - return nil, err - } - - w.Close() - return buf.Bytes(), nil -} - -func (*yamlCodec) CodecID() string { - return "agentConfig/yaml" -} diff --git a/internal/static/metrics/instance/configstore/codec_test.go b/internal/static/metrics/instance/configstore/codec_test.go deleted file mode 100644 index ab717c0bb6..0000000000 --- a/internal/static/metrics/instance/configstore/codec_test.go +++ /dev/null @@ -1,41 +0,0 @@ -package configstore - -import ( - "testing" - - "github.com/stretchr/testify/require" -) - -func TestCodec(t *testing.T) { - exampleConfig := `name: 'test' -host_filter: false -scrape_configs: - - job_name: process-1 - static_configs: - - targets: ['process-1:80'] - labels: - cluster: 'local' - origin: 'agent'` - - c := &yamlCodec{} - bb, err := c.Encode(exampleConfig) - require.NoError(t, err) - - out, err := c.Decode(bb) - require.NoError(t, err) - require.Equal(t, exampleConfig, out) -} - -// TestCodec_Decode_Nil makes sure that if Decode is called with an empty value, -// which may happen when a key is deleted, that no error occurs and instead a -// nil value is returned. -func TestCodec_Decode_Nil(t *testing.T) { - c := &yamlCodec{} - - input := [][]byte{nil, make([]byte, 0)} - for _, bb := range input { - out, err := c.Decode(bb) - require.Nil(t, err) - require.Nil(t, out) - } -} diff --git a/internal/static/metrics/instance/configstore/errors.go b/internal/static/metrics/instance/configstore/errors.go deleted file mode 100644 index 8d668a5ee3..0000000000 --- a/internal/static/metrics/instance/configstore/errors.go +++ /dev/null @@ -1,27 +0,0 @@ -package configstore - -import "fmt" - -// ErrNotConnected is used when a store operation was called but no connection -// to the store was active. -var ErrNotConnected = fmt.Errorf("not connected to store") - -// NotExistError is used when a config doesn't exist. -type NotExistError struct { - Key string -} - -// Error implements error. -func (e NotExistError) Error() string { - return fmt.Sprintf("configuration %s does not exist", e.Key) -} - -// NotUniqueError is used when two scrape jobs have the same name. -type NotUniqueError struct { - ScrapeJob string -} - -// Error implements error. -func (e NotUniqueError) Error() string { - return fmt.Sprintf("found multiple scrape configs in config store with job name %q", e.ScrapeJob) -} diff --git a/internal/static/metrics/instance/configstore/mock.go b/internal/static/metrics/instance/configstore/mock.go deleted file mode 100644 index 5ff303669c..0000000000 --- a/internal/static/metrics/instance/configstore/mock.go +++ /dev/null @@ -1,74 +0,0 @@ -package configstore - -import ( - "context" - - "github.com/grafana/agent/internal/static/metrics/instance" -) - -// Mock is a Mock Store. Useful primarily for testing. 
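-//
-// For example (illustrative), a test can stub only the calls it needs and
-// let every other method panic:
-//
-//   s := &Mock{GetFunc: func(ctx context.Context, key string) (instance.Config, error) {
-//       return instance.Config{Name: key}, nil
-//   }}
-//   cfg, _ := s.Get(context.Background(), "demo") // cfg.Name == "demo"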
-type Mock struct {
- ListFunc func(ctx context.Context) ([]string, error)
- GetFunc func(ctx context.Context, key string) (instance.Config, error)
- PutFunc func(ctx context.Context, c instance.Config) (created bool, err error)
- DeleteFunc func(ctx context.Context, key string) error
- AllFunc func(ctx context.Context, keep func(key string) bool) (<-chan instance.Config, error)
- WatchFunc func() <-chan WatchEvent
- CloseFunc func() error
-}
-
-// List implements Store.
-func (s *Mock) List(ctx context.Context) ([]string, error) {
- if s.ListFunc != nil {
- return s.ListFunc(ctx)
- }
- panic("List not implemented")
-}
-
-// Get implements Store.
-func (s *Mock) Get(ctx context.Context, key string) (instance.Config, error) {
- if s.GetFunc != nil {
- return s.GetFunc(ctx, key)
- }
- panic("Get not implemented")
-}
-
-// Put implements Store.
-func (s *Mock) Put(ctx context.Context, c instance.Config) (created bool, err error) {
- if s.PutFunc != nil {
- return s.PutFunc(ctx, c)
- }
- panic("Put not implemented")
-}
-
-// Delete implements Store.
-func (s *Mock) Delete(ctx context.Context, key string) error {
- if s.DeleteFunc != nil {
- return s.DeleteFunc(ctx, key)
- }
- panic("Delete not implemented")
-}
-
-// All implements Store.
-func (s *Mock) All(ctx context.Context, keep func(key string) bool) (<-chan instance.Config, error) {
- if s.AllFunc != nil {
- return s.AllFunc(ctx, keep)
- }
- panic("All not implemented")
-}
-
-// Watch implements Store.
-func (s *Mock) Watch() <-chan WatchEvent {
- if s.WatchFunc != nil {
- return s.WatchFunc()
- }
- panic("Watch not implemented")
-}
-
-// Close implements Store.
-func (s *Mock) Close() error {
- if s.CloseFunc != nil {
- return s.CloseFunc()
- }
- panic("Close not implemented")
-}
diff --git a/internal/static/metrics/instance/configstore/remote.go b/internal/static/metrics/instance/configstore/remote.go
deleted file mode 100644
index a97df01e25..0000000000
--- a/internal/static/metrics/instance/configstore/remote.go
+++ /dev/null
@@ -1,471 +0,0 @@
-package configstore
-
-import (
- "context"
- "errors"
- "fmt"
- "net/http"
- "strings"
- "sync"
-
- "github.com/grafana/dskit/instrument"
-
- "github.com/hashicorp/go-cleanhttp"
-
- "github.com/hashicorp/consul/api"
-
- "github.com/go-kit/log"
- "github.com/go-kit/log/level"
- "github.com/grafana/agent/internal/static/metrics/instance"
- "github.com/grafana/agent/internal/util"
- "github.com/grafana/dskit/kv"
- "github.com/prometheus/client_golang/prometheus"
- "github.com/prometheus/client_golang/prometheus/promauto"
-)
-
-/***********************************************************************************************************************
-The consul code skips the cortex handler because of a performance issue where a large number of configs overloaded
-consul. See issue https://github.com/grafana/agent/issues/789. The long-term plan is to refactor and extract
-the cortex code so other stores can also benefit from this. @mattdurham
-***********************************************************************************************************************/
-
-var consulRequestDuration = instrument.NewHistogramCollector(promauto.NewHistogramVec(prometheus.HistogramOpts{
- Name: "agent_configstore_consul_request_duration_seconds",
- Help: "Time spent on consul requests when listing configs.",
- Buckets: prometheus.DefBuckets,
-}, []string{"operation", "status_code"}))
-
-// Remote loads instance files from a remote KV store. The KV store
-// can be swapped out in real time.
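-//
-// A hypothetical construction, mirroring the tests further below:
-//
-//   store, err := NewRemote(log.NewNopLogger(), prometheus.NewRegistry(),
-//       kv.Config{Store: "inmemory", Prefix: "configs/"}, true)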
-type Remote struct {
- log log.Logger
- reg *util.Unregisterer
-
- kvMut sync.RWMutex
- kv *agentRemoteClient
- reloadKV chan struct{}
-
- cancelCtx context.Context
- cancelFunc context.CancelFunc
-
- configsMut sync.Mutex
- configsCh chan WatchEvent
-}
-
-// agentRemoteClient is a small wrapper that allows short-circuiting consul
-// while remaining backwards compatible with non-consul kv stores.
-type agentRemoteClient struct {
- kv.Client
- consul *api.Client
- config kv.Config
-}
-
-// NewRemote creates a new Remote store that uses a Key-Value client to store
-// and retrieve configs. If enable is true, the store will be immediately
-// connected to. Otherwise, it can be lazily loaded by enabling later through
-// a call to Remote.ApplyConfig.
-func NewRemote(l log.Logger, reg prometheus.Registerer, cfg kv.Config, enable bool) (*Remote, error) {
- cancelCtx, cancelFunc := context.WithCancel(context.Background())
-
- r := &Remote{
- log: l,
- reg: util.WrapWithUnregisterer(reg),
-
- reloadKV: make(chan struct{}, 1),
-
- cancelCtx: cancelCtx,
- cancelFunc: cancelFunc,
-
- configsCh: make(chan WatchEvent),
- }
- if err := r.ApplyConfig(cfg, enable); err != nil {
- return nil, fmt.Errorf("failed to apply config for config store: %w", err)
- }
-
- go r.run()
- return r, nil
-}
-
-// ApplyConfig applies the config for a kv client.
-func (r *Remote) ApplyConfig(cfg kv.Config, enable bool) error {
- r.kvMut.Lock()
- defer r.kvMut.Unlock()
-
- if r.cancelCtx.Err() != nil {
- return fmt.Errorf("remote store already stopped")
- }
-
- // Unregister all metrics that the previous kv may have registered.
- r.reg.UnregisterAll()
-
- if !enable {
- r.setClient(nil, nil, kv.Config{})
- return nil
- }
-
- cli, err := kv.NewClient(cfg, GetCodec(), kv.RegistererWithKVName(r.reg, "agent_configs"), r.log)
- if err != nil {
- return fmt.Errorf("failed to create kv client: %w", err)
- }
-
- // This is a hack to get a consul client; the client above has it embedded, but it's not exposed.
- var consulClient *api.Client
- if cfg.Store == "consul" {
- consulClient, err = api.NewClient(&api.Config{
- Address: cfg.Consul.Host,
- Token: cfg.Consul.ACLToken.String(),
- Scheme: "http",
- HttpClient: &http.Client{
- Transport: cleanhttp.DefaultPooledTransport(),
- // See https://blog.cloudflare.com/the-complete-guide-to-golang-net-http-timeouts/
- Timeout: cfg.Consul.HTTPClientTimeout,
- },
- })
- if err != nil {
- return err
- }
- }
-
- r.setClient(cli, consulClient, cfg)
- return nil
-}
-
-// setClient sets the active client and notifies run to restart the
-// kv watcher.
-func (r *Remote) setClient(client kv.Client, consulClient *api.Client, config kv.Config) {
- if client == nil && consulClient == nil {
- r.kv = nil
- } else {
- r.kv = &agentRemoteClient{
- Client: client,
- consul: consulClient,
- config: config,
- }
- }
- r.reloadKV <- struct{}{}
-}
-
-func (r *Remote) run() {
- var (
- kvContext context.Context
- kvCancel context.CancelFunc
- )
-
-Outer:
- for {
- select {
- case <-r.cancelCtx.Done():
- break Outer
- case <-r.reloadKV:
- r.kvMut.RLock()
- kv := r.kv
- r.kvMut.RUnlock()
-
- if kvCancel != nil {
- kvCancel()
- }
- kvContext, kvCancel = context.WithCancel(r.cancelCtx)
- go r.watchKV(kvContext, kv)
- }
- }
-
- if kvCancel != nil {
- kvCancel()
- }
-}
-
-func (r *Remote) watchKV(ctx context.Context, client *agentRemoteClient) {
- // Edge case: client was unset, nothing to do here.
- if client == nil { - level.Info(r.log).Log("msg", "not watching the KV, none set") - return - } - - client.WatchPrefix(ctx, "", func(key string, v interface{}) bool { - if ctx.Err() != nil { - return false - } - - r.configsMut.Lock() - defer r.configsMut.Unlock() - - switch { - case v == nil: - r.configsCh <- WatchEvent{Key: key, Config: nil} - default: - cfg, err := instance.UnmarshalConfig(strings.NewReader(v.(string))) - if err != nil { - level.Error(r.log).Log("msg", "could not unmarshal config from store", "name", key, "err", err) - break - } - - r.configsCh <- WatchEvent{Key: key, Config: cfg} - } - - return true - }) -} - -// List returns the list of all configs in the KV store. -func (r *Remote) List(ctx context.Context) ([]string, error) { - r.kvMut.RLock() - defer r.kvMut.RUnlock() - if r.kv == nil { - return nil, ErrNotConnected - } - - return r.kv.List(ctx, "") -} - -// listConsul returns Key Value Pairs instead of []string -func (r *Remote) listConsul(ctx context.Context) (api.KVPairs, error) { - if r.kv == nil { - return nil, ErrNotConnected - } - - var pairs api.KVPairs - options := &api.QueryOptions{ - AllowStale: !r.kv.config.Consul.ConsistentReads, - RequireConsistent: r.kv.config.Consul.ConsistentReads, - } - // This is copied from cortex list so that stats stay the same - err := instrument.CollectedRequest(ctx, "List", consulRequestDuration, instrument.ErrorCode, func(ctx context.Context) error { - var err error - pairs, _, err = r.kv.consul.KV().List(r.kv.config.Prefix, options.WithContext(ctx)) - return err - }) - - if err != nil { - return nil, err - } - // This mirrors the previous behavior of returning a blank array as opposed to nil. - if pairs == nil { - blankPairs := make(api.KVPairs, 0) - return blankPairs, nil - } - for _, kvp := range pairs { - kvp.Key = strings.TrimPrefix(kvp.Key, r.kv.config.Prefix) - } - return pairs, nil -} - -// Get retrieves an individual config from the KV store. -func (r *Remote) Get(ctx context.Context, key string) (instance.Config, error) { - r.kvMut.RLock() - defer r.kvMut.RUnlock() - if r.kv == nil { - return instance.Config{}, ErrNotConnected - } - - v, err := r.kv.Get(ctx, key) - if err != nil { - return instance.Config{}, fmt.Errorf("failed to get config %s: %w", key, err) - } else if v == nil { - return instance.Config{}, NotExistError{Key: key} - } - - cfg, err := instance.UnmarshalConfig(strings.NewReader(v.(string))) - if err != nil { - return instance.Config{}, fmt.Errorf("failed to unmarshal config %s: %w", key, err) - } - return *cfg, nil -} - -// Put adds or updates a config in the KV store. -func (r *Remote) Put(ctx context.Context, c instance.Config) (bool, error) { - // We need to use a write lock here since two Applies can't run concurrently - // (given the current need to perform a store-wide validation.) 
- r.kvMut.Lock() - defer r.kvMut.Unlock() - if r.kv == nil { - return false, ErrNotConnected - } - - bb, err := instance.MarshalConfig(&c, false) - if err != nil { - return false, fmt.Errorf("failed to marshal config: %w", err) - } - - cfgCh, err := r.all(ctx, nil) - if err != nil { - return false, fmt.Errorf("failed to check validity of config: %w", err) - } - if err := checkUnique(cfgCh, &c); err != nil { - return false, fmt.Errorf("failed to check uniqueness of config: %w", err) - } - - var created bool - err = r.kv.CAS(ctx, c.Name, func(in interface{}) (out interface{}, retry bool, err error) { - // The configuration is new if there's no previous value from the CAS - created = (in == nil) - return string(bb), false, nil - }) - if err != nil { - return false, fmt.Errorf("failed to put config: %w", err) - } - return created, nil -} - -// Delete deletes a config from the KV store. It returns NotExistError if -// the config doesn't exist. -func (r *Remote) Delete(ctx context.Context, key string) error { - r.kvMut.RLock() - defer r.kvMut.RUnlock() - if r.kv == nil { - return ErrNotConnected - } - - // Some KV stores don't return an error if something failed to be - // deleted, so we'll try to get it first. This isn't perfect, and - // it may fail, so we'll silently ignore any errors here unless - // we know for sure the config doesn't exist. - v, err := r.kv.Get(ctx, key) - if err != nil { - level.Warn(r.log).Log("msg", "error validating key existence for deletion", "err", err) - } else if v == nil { - return NotExistError{Key: key} - } - - err = r.kv.Delete(ctx, key) - if err != nil { - return fmt.Errorf("error deleting configuration: %w", err) - } - - return nil -} - -// All retrieves the set of all configs in the store. -func (r *Remote) All(ctx context.Context, keep func(key string) bool) (<-chan instance.Config, error) { - r.kvMut.RLock() - defer r.kvMut.RUnlock() - return r.all(ctx, keep) -} - -// all can only be called if the kvMut lock is already held. -func (r *Remote) all(ctx context.Context, keep func(key string) bool) (<-chan instance.Config, error) { - if r.kv == nil { - return nil, ErrNotConnected - } - - // If we are using a consul client then do the short circuit way, this is done so that we receive all the key value pairs - // in one call then, operate on them in memory. Previously we retrieved the list (which stripped the values) - // then ran a goroutine to get each individual value from consul. In situations with an extremely large number of - // configs this overloaded the consul instances. This reduces that to one call, that was being made anyways. - if r.kv.consul != nil { - return r.allConsul(ctx, keep) - } - - return r.allOther(ctx, keep) -} - -// allConsul is ONLY usable when consul is the keystore. This is a performance improvement in using the client directly -// -// instead of the cortex multi store kv interface. That interface returns the list then each value must be retrieved -// individually. 
This returns all the keys and values in one call and works on them in memory -func (r *Remote) allConsul(ctx context.Context, keep func(key string) bool) (<-chan instance.Config, error) { - if r.kv.consul == nil { - level.Error(r.log).Log("err", "allConsul called but consul client nil") - return nil, errors.New("allConsul called but consul client nil") - } - var configs []*instance.Config - c := GetCodec() - - pairs, err := r.listConsul(ctx) - - if err != nil { - return nil, err - } - for _, kvp := range pairs { - if keep != nil && !keep(kvp.Key) { - level.Debug(r.log).Log("msg", "skipping key that was filtered out", "key", kvp.Key) - continue - } - value, err := c.Decode(kvp.Value) - if err != nil { - level.Error(r.log).Log("msg", "failed to decode config from store", "key", kvp.Key, "err", err) - continue - } - if value == nil { - // Config was deleted since we called list, skip it. - level.Debug(r.log).Log("msg", "skipping key that was deleted after list was called", "key", kvp.Key) - continue - } - - cfg, err := instance.UnmarshalConfig(strings.NewReader(value.(string))) - if err != nil { - level.Error(r.log).Log("msg", "failed to unmarshal config from store", "key", kvp.Key, "err", err) - continue - } - configs = append(configs, cfg) - } - ch := make(chan instance.Config, len(configs)) - for _, cfg := range configs { - ch <- *cfg - } - close(ch) - return ch, nil -} - -func (r *Remote) allOther(ctx context.Context, keep func(key string) bool) (<-chan instance.Config, error) { - if r.kv == nil { - return nil, ErrNotConnected - } - - keys, err := r.kv.List(ctx, "") - if err != nil { - return nil, fmt.Errorf("failed to list configs: %w", err) - } - - ch := make(chan instance.Config) - - var wg sync.WaitGroup - wg.Add(len(keys)) - go func() { - wg.Wait() - close(ch) - }() - - for _, key := range keys { - go func(key string) { - defer wg.Done() - - if keep != nil && !keep(key) { - level.Debug(r.log).Log("msg", "skipping key that was filtered out", "key", key) - return - } - - // TODO(rfratto): retries might be useful here - v, err := r.kv.Get(ctx, key) - if err != nil { - level.Error(r.log).Log("msg", "failed to get config with key", "key", key, "err", err) - return - } else if v == nil { - // Config was deleted since we called list, skip it. - level.Debug(r.log).Log("msg", "skipping key that was deleted after list was called", "key", key) - return - } - - cfg, err := instance.UnmarshalConfig(strings.NewReader(v.(string))) - if err != nil { - level.Error(r.log).Log("msg", "failed to unmarshal config from store", "key", key, "err", err) - return - } - ch <- *cfg - }(key) - } - - return ch, nil -} - -// Watch watches the Store for changes. -func (r *Remote) Watch() <-chan WatchEvent { - return r.configsCh -} - -// Close closes the Remote store. 
-func (r *Remote) Close() error { - r.kvMut.Lock() - defer r.kvMut.Unlock() - r.cancelFunc() - return nil -} diff --git a/internal/static/metrics/instance/configstore/remote_test.go b/internal/static/metrics/instance/configstore/remote_test.go deleted file mode 100644 index 682438dd27..0000000000 --- a/internal/static/metrics/instance/configstore/remote_test.go +++ /dev/null @@ -1,271 +0,0 @@ -package configstore - -import ( - "context" - "fmt" - "sort" - "strings" - "testing" - "time" - - "github.com/go-kit/log" - "github.com/grafana/agent/internal/static/metrics/instance" - "github.com/grafana/agent/internal/util" - "github.com/grafana/dskit/kv" - "github.com/prometheus/client_golang/prometheus" - "github.com/stretchr/testify/require" -) - -func TestRemote_List(t *testing.T) { - remote, err := NewRemote(log.NewNopLogger(), prometheus.NewRegistry(), kv.Config{ - Store: "inmemory", - Prefix: "configs/", - }, true) - require.NoError(t, err) - t.Cleanup(func() { - err := remote.Close() - require.NoError(t, err) - }) - - cfgs := []string{"a", "b", "c"} - for _, cfg := range cfgs { - err := remote.kv.CAS(context.Background(), cfg, func(in interface{}) (out interface{}, retry bool, err error) { - return fmt.Sprintf("name: %s", cfg), false, nil - }) - require.NoError(t, err) - } - - list, err := remote.List(context.Background()) - require.NoError(t, err) - sort.Strings(list) - require.Equal(t, cfgs, list) -} - -func TestRemote_Get(t *testing.T) { - remote, err := NewRemote(log.NewNopLogger(), prometheus.NewRegistry(), kv.Config{ - Store: "inmemory", - Prefix: "configs/", - }, true) - require.NoError(t, err) - t.Cleanup(func() { - err := remote.Close() - require.NoError(t, err) - }) - - err = remote.kv.CAS(context.Background(), "someconfig", func(in interface{}) (out interface{}, retry bool, err error) { - return "name: someconfig", false, nil - }) - require.NoError(t, err) - - cfg, err := remote.Get(context.Background(), "someconfig") - require.NoError(t, err) - - expect := instance.DefaultConfig - expect.Name = "someconfig" - require.Equal(t, expect, cfg) -} - -func TestRemote_Put(t *testing.T) { - remote, err := NewRemote(log.NewNopLogger(), prometheus.NewRegistry(), kv.Config{ - Store: "inmemory", - Prefix: "configs/", - }, true) - require.NoError(t, err) - t.Cleanup(func() { - err := remote.Close() - require.NoError(t, err) - }) - - cfg := instance.DefaultConfig - cfg.Name = "newconfig" - - created, err := remote.Put(context.Background(), cfg) - require.NoError(t, err) - require.True(t, created) - - actual, err := remote.Get(context.Background(), "newconfig") - require.NoError(t, err) - require.Equal(t, cfg, actual) - - t.Run("Updating", func(t *testing.T) { - cfg := instance.DefaultConfig - cfg.Name = "newconfig" - cfg.HostFilter = true - - created, err := remote.Put(context.Background(), cfg) - require.NoError(t, err) - require.False(t, created) - }) -} - -func TestRemote_Put_NonUnique(t *testing.T) { - var ( - conflictingA = util.Untab(` -name: conflicting-a -scrape_configs: -- job_name: foobar - `) - conflictingB = util.Untab(` -name: conflicting-b -scrape_configs: -- job_name: fizzbuzz -- job_name: foobar - `) - ) - - conflictingACfg, err := instance.UnmarshalConfig(strings.NewReader(conflictingA)) - require.NoError(t, err) - - conflictingBCfg, err := instance.UnmarshalConfig(strings.NewReader(conflictingB)) - require.NoError(t, err) - - remote, err := NewRemote(log.NewNopLogger(), prometheus.NewRegistry(), kv.Config{ - Store: "inmemory", - Prefix: "configs/", - }, true) - 
require.NoError(t, err) - t.Cleanup(func() { - err := remote.Close() - require.NoError(t, err) - }) - - created, err := remote.Put(context.Background(), *conflictingACfg) - require.NoError(t, err) - require.True(t, created) - - _, err = remote.Put(context.Background(), *conflictingBCfg) - require.EqualError(t, err, fmt.Sprintf("failed to check uniqueness of config: found multiple scrape configs in config store with job name %q", "foobar")) -} - -func TestRemote_Delete(t *testing.T) { - remote, err := NewRemote(log.NewNopLogger(), prometheus.NewRegistry(), kv.Config{ - Store: "inmemory", - Prefix: "configs/", - }, true) - require.NoError(t, err) - t.Cleanup(func() { - err := remote.Close() - require.NoError(t, err) - }) - - var cfg instance.Config - cfg.Name = "deleteme" - - created, err := remote.Put(context.Background(), cfg) - require.NoError(t, err) - require.True(t, created) - - err = remote.Delete(context.Background(), "deleteme") - require.NoError(t, err) - - _, err = remote.Get(context.Background(), "deleteme") - require.EqualError(t, err, "configuration deleteme does not exist") - - err = remote.Delete(context.Background(), "deleteme") - require.EqualError(t, err, "configuration deleteme does not exist") -} - -func TestRemote_All(t *testing.T) { - remote, err := NewRemote(log.NewNopLogger(), prometheus.NewRegistry(), kv.Config{ - Store: "inmemory", - Prefix: "all-configs/", - }, true) - require.NoError(t, err) - t.Cleanup(func() { - err := remote.Close() - require.NoError(t, err) - }) - - cfgs := []string{"a", "b", "c"} - for _, cfg := range cfgs { - err := remote.kv.CAS(context.Background(), cfg, func(in interface{}) (out interface{}, retry bool, err error) { - return fmt.Sprintf("name: %s", cfg), false, nil - }) - require.NoError(t, err) - } - - configCh, err := remote.All(context.Background(), nil) - require.NoError(t, err) - - var gotConfigs []string - for gotConfig := range configCh { - gotConfigs = append(gotConfigs, gotConfig.Name) - } - sort.Strings(gotConfigs) - - require.Equal(t, cfgs, gotConfigs) -} - -func TestRemote_Watch(t *testing.T) { - remote, err := NewRemote(log.NewNopLogger(), prometheus.NewRegistry(), kv.Config{ - Store: "inmemory", - Prefix: "watch-configs/", - }, true) - require.NoError(t, err) - t.Cleanup(func() { - err := remote.Close() - require.NoError(t, err) - }) - - _, err = remote.Put(context.Background(), instance.Config{Name: "watch"}) - require.NoError(t, err) - - select { - case cfg := <-remote.Watch(): - require.Equal(t, "watch", cfg.Key) - require.NotNil(t, cfg.Config) - require.Equal(t, "watch", cfg.Config.Name) - case <-time.After(3 * time.Second): - require.FailNow(t, "failed to watch for config") - } - - // Make sure Watch gets other updates. 
- _, err = remote.Put(context.Background(), instance.Config{Name: "watch2"}) - require.NoError(t, err) - - select { - case cfg := <-remote.Watch(): - require.Equal(t, "watch2", cfg.Key) - require.NotNil(t, cfg.Config) - require.Equal(t, "watch2", cfg.Config.Name) - case <-time.After(3 * time.Second): - require.FailNow(t, "failed to watch for config") - } -} - -func TestRemote_ApplyConfig(t *testing.T) { - remote, err := NewRemote(log.NewNopLogger(), prometheus.NewRegistry(), kv.Config{ - Store: "inmemory", - Prefix: "test-applyconfig/", - }, true) - require.NoError(t, err) - t.Cleanup(func() { - err := remote.Close() - require.NoError(t, err) - }) - - err = remote.ApplyConfig(kv.Config{ - Store: "inmemory", - Prefix: "test-applyconfig2/", - }, true) - require.NoError(t, err, "failed to apply a new config") - - err = remote.ApplyConfig(kv.Config{ - Store: "inmemory", - Prefix: "test-applyconfig2/", - }, true) - require.NoError(t, err, "failed to re-apply the current config") - - // Make sure watch still works - _, err = remote.Put(context.Background(), instance.Config{Name: "watch"}) - require.NoError(t, err) - - select { - case cfg := <-remote.Watch(): - require.Equal(t, "watch", cfg.Key) - require.NotNil(t, cfg.Config) - require.Equal(t, "watch", cfg.Config.Name) - case <-time.After(3 * time.Second): - require.FailNow(t, "failed to watch for config") - } -} diff --git a/internal/static/metrics/instance/configstore/store.go b/internal/static/metrics/instance/configstore/store.go deleted file mode 100644 index 799bafc882..0000000000 --- a/internal/static/metrics/instance/configstore/store.go +++ /dev/null @@ -1,49 +0,0 @@ -// Package configstore abstracts the concepts of where instance files get -// retrieved. -package configstore - -import ( - "context" - - "github.com/grafana/agent/internal/static/metrics/instance" -) - -// Store is some interface to retrieving instance configurations. -type Store interface { - // List gets the list of config names. - List(ctx context.Context) ([]string, error) - - // Get gets an individual config by name. - Get(ctx context.Context, key string) (instance.Config, error) - - // Put applies a new instance Config to the store. - // If the config already exists, created will be false to indicate an - // update. - Put(ctx context.Context, c instance.Config) (created bool, err error) - - // Delete deletes a config from the store. - Delete(ctx context.Context, key string) error - - // All retrieves the entire list of instance configs currently - // in the store. A filtering "keep" function can be provided to ignore some - // configs, which can significantly speed up the operation in some cases. - All(ctx context.Context, keep func(key string) bool) (<-chan instance.Config, error) - - // Watch watches for changed instance Configs. - // All callers of Watch receive the same Channel. - // - // It is not guaranteed that Watch will emit all store events, and Watch - // should only be used for best-effort quick convergence with the remote - // store. Watch should always be paired with polling All. - Watch() <-chan WatchEvent - - // Close closes the store. - Close() error -} - -// WatchEvent is returned by Watch. The Key is the name of the config that was -// added, updated, or deleted. If the Config was deleted, Config will be nil. 
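-//
-// A consumer sketch (illustrative, not part of the original file):
-//
-//   for ev := range store.Watch() {
-//       if ev.Config == nil {
-//           // the config named ev.Key was deleted
-//       } else {
-//           // apply the created/updated ev.Config
-//       }
-//   }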
-type WatchEvent struct {
- Key string
- Config *instance.Config
-}
diff --git a/internal/static/metrics/instance/configstore/unique.go b/internal/static/metrics/instance/configstore/unique.go
deleted file mode 100644
index 203b77ba45..0000000000
--- a/internal/static/metrics/instance/configstore/unique.go
+++ /dev/null
@@ -1,35 +0,0 @@
-package configstore
-
-import (
- "github.com/grafana/agent/internal/static/metrics/instance"
-)
-
-// checkUnique validates that cfg is unique from all, ensuring that no two
-// configs share a job_name.
-func checkUnique(all <-chan instance.Config, cfg *instance.Config) error {
- defer func() {
- // Drain the channel, which is necessary if we're returning an error.
- for range all {
- }
- }()
-
- newJobNames := make(map[string]struct{}, len(cfg.ScrapeConfigs))
- for _, sc := range cfg.ScrapeConfigs {
- newJobNames[sc.JobName] = struct{}{}
- }
-
- for otherConfig := range all {
- // If the other config is the one we're validating, skip it.
- if otherConfig.Name == cfg.Name {
- continue
- }
-
- for _, otherScrape := range otherConfig.ScrapeConfigs {
- if _, exist := newJobNames[otherScrape.JobName]; exist {
- return NotUniqueError{ScrapeJob: otherScrape.JobName}
- }
- }
- }
-
- return nil
-}
diff --git a/internal/static/metrics/instance/errors.go b/internal/static/metrics/instance/errors.go
deleted file mode 100644
index e025abf5bb..0000000000
--- a/internal/static/metrics/instance/errors.go
+++ /dev/null
@@ -1,44 +0,0 @@
-package instance
-
-import "fmt"
-
-// ErrInvalidUpdate is returned whenever Update is called against an instance
-// but an invalid field is changed between configs. If ErrInvalidUpdate is
-// returned, the instance must be fully stopped and replaced with a new one
-// with the new config.
-type ErrInvalidUpdate struct {
- Inner error
-}
-
-// Error implements the error interface.
-func (e ErrInvalidUpdate) Error() string { return e.Inner.Error() }
-
-// Is returns true if err is an ErrInvalidUpdate.
-func (e ErrInvalidUpdate) Is(err error) bool {
- switch err.(type) {
- case ErrInvalidUpdate, *ErrInvalidUpdate:
- return true
- default:
- return false
- }
-}
-
-// As will set the err object to ErrInvalidUpdate provided err
-// is a pointer to ErrInvalidUpdate.
-func (e ErrInvalidUpdate) As(err interface{}) bool {
- switch v := err.(type) {
- case *ErrInvalidUpdate:
- *v = e
- default:
- return false
- }
- return true
-}
-
-// errImmutableField is the error describing a field that cannot be changed. It
-// is wrapped inside an ErrInvalidUpdate.
-type errImmutableField struct{ Field string }
-
-func (e errImmutableField) Error() string {
- return fmt.Sprintf("%s cannot be changed dynamically", e.Field)
-}
diff --git a/internal/static/metrics/instance/group_manager.go b/internal/static/metrics/instance/group_manager.go
deleted file mode 100644
index 072675d92f..0000000000
--- a/internal/static/metrics/instance/group_manager.go
+++ /dev/null
@@ -1,358 +0,0 @@
-package instance
-
-import (
- "crypto/md5"
- "encoding/hex"
- "fmt"
- "sort"
- "sync"
-
- "github.com/prometheus/prometheus/config"
-)
-
-// A GroupManager wraps around another Manager and groups all incoming Configs
-// into a smaller set of configs, causing fewer managed instances to be spawned.
-//
-// Configs are grouped by all settings for a Config *except* scrape configs.
-// Any difference found in any flag will cause a Config to be placed in another
-// group.
One exception to this rule is that remote_writes are compared -// unordered, but the sets of remote_writes should otherwise be identical. -// -// GroupManagers drastically improve the performance of the Agent when a -// significant number of instances are spawned, as the overhead of each -// instance having its own service discovery, WAL, and remote_write can be -// significant. -// -// The config names of instances within the group will be represented by -// that group's hash of settings. -type GroupManager struct { - inner Manager - - mtx sync.Mutex - - // groups is a map of group name to the grouped configs. - groups map[string]groupedConfigs - - // groupLookup is a map of config name to group name. - groupLookup map[string]string -} - -// groupedConfigs holds a set of grouped configs, keyed by the config name. -// They are stored in a map rather than a slice to make overriding an existing -// config within the group less error-prone. -type groupedConfigs map[string]Config - -// Copy returns a shallow copy of the groupedConfigs. -func (g groupedConfigs) Copy() groupedConfigs { - res := make(groupedConfigs, len(g)) - for k, v := range g { - res[k] = v - } - return res -} - -// NewGroupManager creates a new GroupManager for combining instances of the -// same "group." -func NewGroupManager(inner Manager) *GroupManager { - return &GroupManager{ - inner: inner, - groups: make(map[string]groupedConfigs), - groupLookup: make(map[string]string), - } -} - -// GetInstance gets the underlying grouped instance for a given name. -func (m *GroupManager) GetInstance(name string) (ManagedInstance, error) { - m.mtx.Lock() - defer m.mtx.Unlock() - - group, ok := m.groupLookup[name] - if !ok { - return nil, fmt.Errorf("instance %s does not exist", name) - } - - inst, err := m.inner.GetInstance(group) - if err != nil { - return nil, fmt.Errorf("failed to get instance for %s: %w", name, err) - } - return inst, nil -} - -// ListInstances returns all currently grouped managed instances. The key -// will be the group's hash of shared settings. -func (m *GroupManager) ListInstances() map[string]ManagedInstance { - return m.inner.ListInstances() -} - -// ListConfigs returns the UNGROUPED instance configs with their original -// settings. To see the grouped instances, call ListInstances instead. -func (m *GroupManager) ListConfigs() map[string]Config { - m.mtx.Lock() - defer m.mtx.Unlock() - - cfgs := make(map[string]Config) - for _, groupedConfigs := range m.groups { - for _, cfg := range groupedConfigs { - cfgs[cfg.Name] = cfg - } - } - return cfgs -} - -// ApplyConfig will determine the group of the Config before applying it to -// the group. If no group exists, one will be created. If a group already -// exists, the group will have its settings merged with the Config and -// will be updated. -func (m *GroupManager) ApplyConfig(c Config) error { - m.mtx.Lock() - defer m.mtx.Unlock() - return m.applyConfig(c) -} - -func (m *GroupManager) applyConfig(c Config) (err error) { - groupName, err := hashConfig(c) - if err != nil { - return fmt.Errorf("failed to get group name for config %s: %w", c.Name, err) - } - - grouped := m.groups[groupName] - if grouped == nil { - grouped = make(groupedConfigs) - } else { - grouped = grouped.Copy() - } - - // Add the config to the group. If the config already exists within this - // group, it'll be overwritten. 
- grouped[c.Name] = c - mergedConfig, err := groupConfigs(groupName, grouped) - if err != nil { - err = fmt.Errorf("failed to group configs for %s: %w", c.Name, err) - return - } - - // If this config already exists in another group, we have to delete it. - // If we can't delete it from the old group, we also can't apply it. - if oldGroup, ok := m.groupLookup[c.Name]; ok && oldGroup != groupName { - // There's a few cases here where if something fails, it's safer to crash - // out and restart the Agent from scratch than it would be to continue as - // normal. The panics here are for truly exceptional cases, otherwise if - // something is recoverable, we'll return an error like normal. - - // If we can't find the old config, something got messed up when applying - // the config. But it also means that we're not going to be able to restore - // the config if something fails. Preemptively we should panic, since the - // internal state has gotten messed up and can't be fixed. - oldConfig, ok := m.groups[oldGroup][c.Name] - if !ok { - panic("failed to properly move config to new group. THIS IS A BUG!") - } - - err = m.deleteConfig(c.Name) - if err != nil { - err = fmt.Errorf("cannot apply config %s because deleting it from the old group failed: %w", c.Name, err) - return - } - - // Now that the config is deleted, we need to restore it in case applying - // the new one happens to fail. - defer func() { - if err == nil { - return - } - - // If restoring a config fails, we've left the Agent in a really bad - // state: the new config can't be applied and the old config can't be - // brought back. Just crash and let the Agent start fresh. - // - // Restoring the config _shouldn't_ fail here since applies only fail - // if the config is invalid. Since the config was running before, it - // should already be valid. If it does happen to fail, though, the - // internal state is left corrupted since we've completely lost a - // config. - restoreError := m.applyConfig(oldConfig) - if restoreError != nil { - panic(fmt.Sprintf("failed to properly restore config. THIS IS A BUG! error: %s", restoreError)) - } - }() - } - - err = m.inner.ApplyConfig(mergedConfig) - if err != nil { - err = fmt.Errorf("failed to apply grouped configs for config %s: %w", c.Name, err) - return - } - - // If the inner apply succeeded, we can update our group and the lookup. - m.groups[groupName] = grouped - m.groupLookup[c.Name] = groupName - return -} - -// DeleteConfig will remove a Config from its associated group. If there are -// no more Configs within that group after this Config is deleted, the managed -// instance will be stopped. Otherwise, the managed instance will be updated -// with the new grouped Config that doesn't include the removed one. -func (m *GroupManager) DeleteConfig(name string) error { - m.mtx.Lock() - defer m.mtx.Unlock() - return m.deleteConfig(name) -} - -func (m *GroupManager) deleteConfig(name string) error { - groupName, ok := m.groupLookup[name] - if !ok { - return fmt.Errorf("config does not exist") - } - - // Grab a copy of the stored group and delete our entry. We can - // persist it after we successfully remove the config. - group := m.groups[groupName].Copy() - delete(group, name) - - if len(group) == 0 { - // We deleted the last remaining config in that group; we can delete it in - // its entirety now. 
- if err := m.inner.DeleteConfig(groupName); err != nil { - return fmt.Errorf("failed to delete empty group %s after removing config %s: %w", groupName, name, err) - } - } else { - // We deleted the config but there's still more in the group; apply the new - // group that holds the remainder of the configs (minus the one we just - // deleted). - mergedConfig, err := groupConfigs(groupName, group) - if err != nil { - return fmt.Errorf("failed to regroup configs without %s: %w", name, err) - } - - err = m.inner.ApplyConfig(mergedConfig) - if err != nil { - return fmt.Errorf("failed to apply new group without %s: %w", name, err) - } - } - - // Update the stored group and remove the entry from the lookup table. - if len(group) == 0 { - delete(m.groups, groupName) - } else { - m.groups[groupName] = group - } - - delete(m.groupLookup, name) - return nil -} - -// Stop stops the Manager and all of its managed instances. -func (m *GroupManager) Stop() { - m.mtx.Lock() - defer m.mtx.Unlock() - - m.inner.Stop() - m.groupLookup = make(map[string]string) - m.groups = make(map[string]groupedConfigs) -} - -// hashConfig determines the hash of a Config used for grouping. It ignores -// the name and scrape_configs and also orders remote_writes by name prior to -// hashing. -func hashConfig(c Config) (string, error) { - // We need a deep copy since we're going to mutate the remote_write - // pointers. - groupable, err := c.Clone() - if err != nil { - return "", err - } - - // Ignore name and scrape configs when hashing - groupable.Name = "" - groupable.ScrapeConfigs = nil - - // Assign names to remote_write configs if they're not present already. - // This is also done in AssignDefaults but is duplicated here for the sake - // of simplifying responsibility of GroupManager. - for _, cfg := range groupable.RemoteWrite { - if cfg != nil { - // We don't care if the names are different, just that the other settings - // are the same. Blank out the name here before hashing the remote - // write config. - cfg.Name = "" - - hash, err := getHash(cfg) - if err != nil { - return "", err - } - cfg.Name = hash[:6] - } - } - - // Now sort remote_writes by name and nil-ness. - sort.Slice(groupable.RemoteWrite, func(i, j int) bool { - switch { - case groupable.RemoteWrite[i] == nil: - return true - case groupable.RemoteWrite[j] == nil: - return false - default: - return groupable.RemoteWrite[i].Name < groupable.RemoteWrite[j].Name - } - }) - - bb, err := MarshalConfig(&groupable, false) - if err != nil { - return "", err - } - hash := md5.Sum(bb) - return hex.EncodeToString(hash[:]), nil -} - -// groupConfig creates a grouped Config where all fields are copied from -// the first config except for scrape_configs, which are appended together. -func groupConfigs(groupName string, grouped groupedConfigs) (Config, error) { - if len(grouped) == 0 { - return Config{}, fmt.Errorf("no configs") - } - - // Move the map into a slice and sort it by name so this function - // consistently does the same thing. - cfgs := make([]Config, 0, len(grouped)) - for _, cfg := range grouped { - cfgs = append(cfgs, cfg) - } - sort.Slice(cfgs, func(i, j int) bool { return cfgs[i].Name < cfgs[j].Name }) - - combined, err := cfgs[0].Clone() - if err != nil { - return Config{}, err - } - combined.Name = groupName - combined.ScrapeConfigs = []*config.ScrapeConfig{} - - // Assign all remote_write configs in the group a consistent set of remote_names. 
- // If the grouped configs are coming from the scraping service, defaults will have - // been applied and the remote names will be prefixed with the old instance config name. - for _, rwc := range combined.RemoteWrite { - // Blank out the existing name before getting the hash so it doesn't take into - // account any existing name. - rwc.Name = "" - - hash, err := getHash(rwc) - if err != nil { - return Config{}, err - } - - rwc.Name = groupName[:6] + "-" + hash[:6] - } - - // Combine all the scrape configs. It's possible that two different ungrouped - // configs had a matching job name, but this will be detected and rejected - // (as it should be) when the underlying Manager eventually validates the - // combined config. - // - // TODO(rfratto): should we prepend job names with the name of the original - // config? (e.g., job_name = "config_name/job_name"). - for _, cfg := range cfgs { - combined.ScrapeConfigs = append(combined.ScrapeConfigs, cfg.ScrapeConfigs...) - } - - return combined, nil -} diff --git a/internal/static/metrics/instance/group_manager_test.go b/internal/static/metrics/instance/group_manager_test.go deleted file mode 100644 index 48b87236f4..0000000000 --- a/internal/static/metrics/instance/group_manager_test.go +++ /dev/null @@ -1,446 +0,0 @@ -package instance - -import ( - "fmt" - "strings" - "testing" - - "github.com/stretchr/testify/require" -) - -func TestGroupManager_ListInstances_Configs(t *testing.T) { - gm := NewGroupManager(newFakeManager()) - - // Create two configs in the same group and one in another - // group. - configs := []string{ - ` -name: configA -scrape_configs: [] -remote_write: []`, - ` -name: configB -scrape_configs: [] -remote_write: []`, - ` -name: configC -scrape_configs: [] -remote_write: -- url: http://localhost:9090`, - } - - for _, cfg := range configs { - c := testUnmarshalConfig(t, cfg) - err := gm.ApplyConfig(c) - require.NoError(t, err) - } - - // ListInstances should return our grouped instances - insts := gm.ListInstances() - require.Equal(t, 2, len(insts)) - - // ...but ListConfigs should return the ungrouped configs. - confs := gm.ListConfigs() - require.Equal(t, 3, len(confs)) - require.Containsf(t, confs, "configA", "configA not in confs") - require.Containsf(t, confs, "configB", "configB not in confs") - require.Containsf(t, confs, "configC", "configC not in confs") -} - -func testUnmarshalConfig(t *testing.T, cfg string) Config { - c, err := UnmarshalConfig(strings.NewReader(cfg)) - require.NoError(t, err) - return *c -} - -func TestGroupManager_ApplyConfig(t *testing.T) { - t.Run("combining configs", func(t *testing.T) { - inner := newFakeManager() - gm := NewGroupManager(inner) - err := gm.ApplyConfig(testUnmarshalConfig(t, ` -name: configA -scrape_configs: [] -remote_write: [] -`)) - require.NoError(t, err) - - err = gm.ApplyConfig(testUnmarshalConfig(t, ` -name: configB -scrape_configs: -- job_name: test_job - static_configs: - - targets: [127.0.0.1:12345] -remote_write: [] -`)) - require.NoError(t, err) - - require.Equal(t, 1, len(gm.groups)) - require.Equal(t, 2, len(gm.groupLookup)) - - // Check the underlying grouped config and make sure it was updated. 
-		expect := testUnmarshalConfig(t, fmt.Sprintf(`
-name: %s
-scrape_configs:
-- job_name: test_job
-  static_configs:
-  - targets: [127.0.0.1:12345]
-remote_write: []
-`, gm.groupLookup["configA"]))
-
-		innerConfigs := inner.ListConfigs()
-		require.Equal(t, 1, len(innerConfigs))
-		require.Equal(t, expect, innerConfigs[gm.groupLookup["configA"]])
-	})
-
-	t.Run("updating existing config within group", func(t *testing.T) {
-		inner := newFakeManager()
-		gm := NewGroupManager(inner)
-		err := gm.ApplyConfig(testUnmarshalConfig(t, `
-name: configA
-scrape_configs: []
-remote_write: []
-`))
-		require.NoError(t, err)
-		require.Equal(t, 1, len(gm.groups))
-		require.Equal(t, 1, len(gm.groupLookup))
-
-		err = gm.ApplyConfig(testUnmarshalConfig(t, `
-name: configA
-scrape_configs:
-- job_name: test_job
-  static_configs:
-  - targets: [127.0.0.1:12345]
-remote_write: []
-`))
-		require.NoError(t, err)
-		require.Equal(t, 1, len(gm.groups))
-		require.Equal(t, 1, len(gm.groupLookup))
-
-		// Check the underlying grouped config and make sure it was updated.
-		expect := testUnmarshalConfig(t, fmt.Sprintf(`
-name: %s
-scrape_configs:
-- job_name: test_job
-  static_configs:
-  - targets: [127.0.0.1:12345]
-remote_write: []
-`, gm.groupLookup["configA"]))
-		actual := inner.ListConfigs()[gm.groupLookup["configA"]]
-		require.Equal(t, expect, actual)
-	})
-
-	t.Run("updating existing config to new group", func(t *testing.T) {
-		inner := newFakeManager()
-		gm := NewGroupManager(inner)
-		err := gm.ApplyConfig(testUnmarshalConfig(t, `
-name: configA
-scrape_configs: []
-remote_write: []
-`))
-		require.NoError(t, err)
-		require.Equal(t, 1, len(gm.groups))
-		require.Equal(t, 1, len(gm.groupLookup))
-		oldGroup := gm.groupLookup["configA"]
-
-		// Reapply the config but give it a setting change that would
-		// force it into a new group. We should still have only one
-		// group and only one entry in the group lookup table.
-		err = gm.ApplyConfig(testUnmarshalConfig(t, `
-name: configA
-host_filter: true
-scrape_configs: []
-remote_write: []
-`))
-		require.NoError(t, err)
-		require.Equal(t, 1, len(gm.groups))
-		require.Equal(t, 1, len(gm.groupLookup))
-		newGroup := gm.groupLookup["configA"]
-
-		// Check the underlying grouped config and make sure it was updated.
-		expect := testUnmarshalConfig(t, fmt.Sprintf(`
-name: %s
-host_filter: true
-scrape_configs: []
-remote_write: []
-`, gm.groupLookup["configA"]))
-		actual := inner.ListConfigs()[newGroup]
-		require.Equal(t, expect, actual)
-
-		// The old underlying group should be gone.
-		require.NotContains(t, inner.ListConfigs(), oldGroup)
-		require.Equal(t, 1, len(inner.ListConfigs()))
-	})
-}
-
-func TestGroupManager_ApplyConfig_RemoteWriteName(t *testing.T) {
-	inner := newFakeManager()
-	gm := NewGroupManager(inner)
-	err := gm.ApplyConfig(testUnmarshalConfig(t, `
-name: configA
-scrape_configs: []
-remote_write:
-- name: rw-cfg-a
-  url: http://localhost:9009/api/prom/push
-`))
-	require.NoError(t, err)
-
-	require.Equal(t, 1, len(gm.groups))
-	require.Equal(t, 1, len(gm.groupLookup))
-
-	// Check the underlying grouped config and make sure the group_name
-	// didn't get copied from the remote_name of A.
- innerConfigs := inner.ListConfigs() - require.Equal(t, 1, len(innerConfigs)) - - cfg := innerConfigs[gm.groupLookup["configA"]] - require.NotEqual(t, "rw-cfg-a", cfg.RemoteWrite[0].Name) -} - -func TestGroupManager_DeleteConfig(t *testing.T) { - t.Run("partial delete", func(t *testing.T) { - inner := newFakeManager() - gm := NewGroupManager(inner) - - // Apply two configs in the same group and then delete one. The group - // should still be active with the one config inside of it. - err := gm.ApplyConfig(testUnmarshalConfig(t, ` -name: configA -scrape_configs: -- job_name: test_job - static_configs: - - targets: [127.0.0.1:12345] -remote_write: [] -`)) - require.NoError(t, err) - - err = gm.ApplyConfig(testUnmarshalConfig(t, ` -name: configB -scrape_configs: -- job_name: test_job2 - static_configs: - - targets: [127.0.0.1:12345] -remote_write: [] -`)) - require.NoError(t, err) - - err = gm.DeleteConfig("configA") - require.NoError(t, err) - - expect := testUnmarshalConfig(t, fmt.Sprintf(` -name: %s -scrape_configs: -- job_name: test_job2 - static_configs: - - targets: [127.0.0.1:12345] -remote_write: []`, gm.groupLookup["configB"])) - actual := inner.ListConfigs()[gm.groupLookup["configB"]] - require.Equal(t, expect, actual) - require.Equal(t, 1, len(gm.groups)) - require.Equal(t, 1, len(gm.groupLookup)) - }) - - t.Run("full delete", func(t *testing.T) { - inner := newFakeManager() - gm := NewGroupManager(inner) - - // Apply a single config but delete the entire group. - err := gm.ApplyConfig(testUnmarshalConfig(t, ` -name: configA -scrape_configs: -- job_name: test_job - static_configs: - - targets: [127.0.0.1:12345] -remote_write: [] -`)) - require.NoError(t, err) - - err = gm.DeleteConfig("configA") - require.NoError(t, err) - require.Equal(t, 0, len(inner.ListConfigs())) - require.Equal(t, 0, len(inner.ListInstances())) - require.Equal(t, 0, len(gm.groups)) - require.Equal(t, 0, len(gm.groupLookup)) - }) -} - -func newFakeManager() Manager { - instances := make(map[string]ManagedInstance) - configs := make(map[string]Config) - - return &MockManager{ - ListInstancesFunc: func() map[string]ManagedInstance { - return instances - }, - ListConfigsFunc: func() map[string]Config { - return configs - }, - ApplyConfigFunc: func(c Config) error { - instances[c.Name] = &mockInstance{} - configs[c.Name] = c - return nil - }, - DeleteConfigFunc: func(name string) error { - delete(instances, name) - delete(configs, name) - return nil - }, - StopFunc: func() {}, - } -} - -func Test_hashConfig(t *testing.T) { - t.Run("name and scrape configs are ignored", func(t *testing.T) { - configAText := ` -name: configA -scrape_configs: [] -remote_write: []` - - configBText := ` -name: configB -scrape_configs: -- job_name: test_job - static_configs: - - targets: [127.0.0.1:12345] -remote_write: []` - - hashA, hashB := getHashesFromConfigs(t, configAText, configBText) - require.Equal(t, hashA, hashB) - }) - - t.Run("remote_writes are unordered", func(t *testing.T) { - configAText := ` -name: configA -scrape_configs: [] -remote_write: -- url: http://localhost:9009/api/prom/push1 -- url: http://localhost:9009/api/prom/push2` - - configBText := ` -name: configB -scrape_configs: [] -remote_write: -- url: http://localhost:9009/api/prom/push2 -- url: http://localhost:9009/api/prom/push1` - - hashA, hashB := getHashesFromConfigs(t, configAText, configBText) - require.Equal(t, hashA, hashB) - }) - - t.Run("remote_writes must match", func(t *testing.T) { - configAText := ` -name: configA -scrape_configs: [] 
-remote_write: -- url: http://localhost:9009/api/prom/push1 -- url: http://localhost:9009/api/prom/push2` - - configBText := ` -name: configB -scrape_configs: [] -remote_write: -- url: http://localhost:9009/api/prom/push1 -- url: http://localhost:9009/api/prom/push1` - - hashA, hashB := getHashesFromConfigs(t, configAText, configBText) - require.NotEqual(t, hashA, hashB) - }) - - t.Run("other fields must match", func(t *testing.T) { - configAText := ` -name: configA -host_filter: true -scrape_configs: [] -remote_write: []` - - configBText := ` -name: configB -host_filter: false -scrape_configs: [] -remote_write: []` - - hashA, hashB := getHashesFromConfigs(t, configAText, configBText) - require.NotEqual(t, hashA, hashB) - }) -} - -func getHashesFromConfigs(t *testing.T, configAText, configBText string) (string, string) { - configA := testUnmarshalConfig(t, configAText) - configB := testUnmarshalConfig(t, configBText) - - hashA, err := hashConfig(configA) - require.NoError(t, err) - - hashB, err := hashConfig(configB) - require.NoError(t, err) - - return hashA, hashB -} - -func Test_groupConfigs(t *testing.T) { - configAText := ` -name: configA -scrape_configs: -- job_name: test_job - static_configs: - - targets: [127.0.0.1:12345] -remote_write: -- url: http://localhost:9009/api/prom/push1 -- url: http://localhost:9009/api/prom/push2` - - configBText := ` -name: configB -scrape_configs: -- job_name: test_job2 - static_configs: - - targets: [127.0.0.1:12345] -remote_write: -- url: http://localhost:9009/api/prom/push2 -- url: http://localhost:9009/api/prom/push1` - - configA := testUnmarshalConfig(t, configAText) - configB := testUnmarshalConfig(t, configBText) - - groupName, err := hashConfig(configA) - require.NoError(t, err) - - expectText := fmt.Sprintf(` -name: %s -scrape_configs: -- job_name: test_job - static_configs: - - targets: [127.0.0.1:12345] -- job_name: test_job2 - static_configs: - - targets: [127.0.0.1:12345] -remote_write: -- url: http://localhost:9009/api/prom/push1 -- url: http://localhost:9009/api/prom/push2`, groupName) - - expect, err := UnmarshalConfig(strings.NewReader(expectText)) - require.NoError(t, err) - - // Generate expected remote_write names - for _, rwConfig := range expect.RemoteWrite { - hash, err := getHash(rwConfig) - require.NoError(t, err) - rwConfig.Name = groupName[:6] + "-" + hash[:6] - } - - group := groupedConfigs{ - "configA": configA, - "configB": configB, - } - actual, err := groupConfigs(groupName, group) - require.NoError(t, err) - require.Equal(t, *expect, actual) - - // Consistency check: groupedConfigs is a map and we want to always have - // groupConfigs return the same thing regardless of how the map - // is iterated over. Run through groupConfigs a bunch of times and - // make sure it always returns the same thing. 
- for i := 0; i < 100; i++ { - actual, err = groupConfigs(groupName, group) - require.NoError(t, err) - require.Equal(t, *expect, actual) - } -} diff --git a/internal/static/metrics/instance/host_filter.go b/internal/static/metrics/instance/host_filter.go deleted file mode 100644 index 2328f6feff..0000000000 --- a/internal/static/metrics/instance/host_filter.go +++ /dev/null @@ -1,238 +0,0 @@ -package instance - -import ( - "context" - "fmt" - "net" - "sync" - - "github.com/prometheus/common/model" - "github.com/prometheus/prometheus/config" - "github.com/prometheus/prometheus/discovery/kubernetes" - "github.com/prometheus/prometheus/discovery/targetgroup" - "github.com/prometheus/prometheus/model/labels" - "github.com/prometheus/prometheus/model/relabel" -) - -// HostFilterLabelMatchers are the set of labels that will be used to match -// against an incoming target. -var HostFilterLabelMatchers = []string{ - // Consul - "__meta_consul_node", - - // Dockerswarm - "__meta_dockerswarm_node_id", - "__meta_dockerswarm_node_hostname", - "__meta_dockerswarm_node_address", - - // Kubernetes node labels. Labels for `role: service` are omitted as - // service targets have labels merged with discovered pods. - "__meta_kubernetes_pod_node_name", - "__meta_kubernetes_node_name", - - // Generic (applied by host_filter_relabel_configs) - "__host__", -} - -// DiscoveredGroups is a set of groups found via service discovery. -type DiscoveredGroups = map[string][]*targetgroup.Group - -// GroupChannel is a channel that provides discovered target groups. -type GroupChannel = <-chan DiscoveredGroups - -// HostFilter acts as a MITM between the discovery manager and the -// scrape manager, filtering out discovered targets that are not -// running on the same node as the agent itself. -type HostFilter struct { - ctx context.Context - cancel context.CancelFunc - - host string - - inputCh GroupChannel - outputCh chan map[string][]*targetgroup.Group - - relabelMut sync.Mutex - relabels []*relabel.Config -} - -// NewHostFilter creates a new HostFilter. -func NewHostFilter(host string, relabels []*relabel.Config) *HostFilter { - ctx, cancel := context.WithCancel(context.Background()) - f := &HostFilter{ - ctx: ctx, - cancel: cancel, - - host: host, - relabels: relabels, - - outputCh: make(chan map[string][]*targetgroup.Group), - } - return f -} - -// PatchSD patches services discoveries to optimize performance for host -// filtering. The discovered targets will be pruned to as close to the set -// that HostFilter will output as possible. -func (f *HostFilter) PatchSD(scrapes []*config.ScrapeConfig) { - for _, sc := range scrapes { - for _, d := range sc.ServiceDiscoveryConfigs { - switch d := d.(type) { - case *kubernetes.SDConfig: - if d.Role == kubernetes.RolePod { - d.Selectors = []kubernetes.SelectorConfig{{ - Role: kubernetes.RolePod, - Field: fmt.Sprintf("spec.nodeName=%s", f.host), - }} - } - } - } - } -} - -// SetRelabels updates the relabeling rules used by the HostFilter. -func (f *HostFilter) SetRelabels(relabels []*relabel.Config) { - f.relabelMut.Lock() - defer f.relabelMut.Unlock() - f.relabels = relabels -} - -// Run starts the HostFilter. It only exits when the HostFilter is stopped. -// Run will continually read from syncCh and filter groups discovered down to -// targets that are colocated on the same node as the one the HostFilter is -// running in. 
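The filtering itself happens in FilterGroups below; a rough sketch of its semantics (illustrative only, built on the deleted API in this hunk; "node", "myhost", and the addresses are made-up values):

	groups := DiscoveredGroups{
		"node": {{
			Targets: []model.LabelSet{
				{model.AddressLabel: "myhost:9100"},    // kept: address host matches
				{model.AddressLabel: "otherhost:9100"}, // dropped: different host
				{model.AddressLabel: "127.0.0.1:9100"}, // kept: localhost is never filtered
			},
		}},
	}
	filtered := FilterGroups(groups, "myhost", nil)
	// filtered["node"][0].Targets now holds only the two matching targets.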
-func (f *HostFilter) Run(syncCh GroupChannel) { - f.inputCh = syncCh - - for { - select { - case <-f.ctx.Done(): - return - case data := <-f.inputCh: - f.relabelMut.Lock() - relabels := f.relabels - f.relabelMut.Unlock() - - f.outputCh <- FilterGroups(data, f.host, relabels) - } - } -} - -// Stop stops the host filter from processing more target updates. -func (f *HostFilter) Stop() { - f.cancel() -} - -// SyncCh returns a read only channel used by all the clients to receive -// target updates. -func (f *HostFilter) SyncCh() GroupChannel { - return f.outputCh -} - -// FilterGroups takes a set of DiscoveredGroups as input and filters out -// any Target that is not running on the host machine provided by host. -// -// This is done by looking at HostFilterLabelMatchers and __address__. -// -// If the discovered address is localhost or 127.0.0.1, the group is never -// filtered out. -func FilterGroups(in DiscoveredGroups, host string, configs []*relabel.Config) DiscoveredGroups { - out := make(DiscoveredGroups, len(in)) - - for name, groups := range in { - groupList := make([]*targetgroup.Group, 0, len(groups)) - - for _, group := range groups { - newGroup := &targetgroup.Group{ - Targets: make([]model.LabelSet, 0, len(group.Targets)), - Labels: group.Labels, - Source: group.Source, - } - - for _, target := range group.Targets { - allLabels := mergeSets(target, group.Labels) - processedLabels, _ := relabel.Process(toLabelSlice(allLabels), configs...) - - if !shouldFilterTarget(processedLabels, host) { - newGroup.Targets = append(newGroup.Targets, target) - } - } - - groupList = append(groupList, newGroup) - } - - out[name] = groupList - } - - return out -} - -// shouldFilterTarget returns true when the target labels (combined with the set of common -// labels) should be filtered out by FilterGroups. -func shouldFilterTarget(lbls labels.Labels, host string) bool { - shouldFilterTargetByLabelValue := func(labelValue string) bool { - if addr, _, err := net.SplitHostPort(labelValue); err == nil { - labelValue = addr - } - - // Special case: always allow localhost/127.0.0.1 - if labelValue == "localhost" || labelValue == "127.0.0.1" { - return false - } - - return labelValue != host - } - - lset := labels.New(lbls...) - addressLabel := lset.Get(model.AddressLabel) - if addressLabel == "" { - // No address label. This is invalid and will generate an error by the scrape - // manager, so we'll pass it on for now. - return false - } - - // If the __address__ label matches, we can quit early. - if !shouldFilterTargetByLabelValue(addressLabel) { - return false - } - - // Fall back to checking metalabels as long as their values are nonempty. - for _, check := range HostFilterLabelMatchers { - // If any of the checked labels match for not being filtered out, we can - // return before checking any of the other matchers. - if addr := lset.Get(check); addr != "" && !shouldFilterTargetByLabelValue(addr) { - return false - } - } - - // Nothing matches, filter it out. - return true -} - -// mergeSets merges the sets of labels together. Earlier sets take priority for label names. 
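A quick illustration of that priority rule (sketch; the label values are made up):

	target := model.LabelSet{"__host__": "from-target"}
	common := model.LabelSet{"__host__": "from-group", "env": "prod"}
	merged := mergeSets(target, common)
	// merged["__host__"] == "from-target" (the earlier set wins)
	// merged["env"] == "prod"             (filled in from the later set)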
-func mergeSets(sets ...model.LabelSet) model.LabelSet { - sz := 0 - for _, set := range sets { - sz += len(set) - } - result := make(model.LabelSet, sz) - - for _, set := range sets { - for labelName, labelValue := range set { - if _, exist := result[labelName]; exist { - continue - } - result[labelName] = labelValue - } - } - - return result -} - -func toLabelSlice(set model.LabelSet) labels.Labels { - slice := make(labels.Labels, 0, len(set)) - for name, value := range set { - slice = append(slice, labels.Label{Name: string(name), Value: string(value)}) - } - return slice -} diff --git a/internal/static/metrics/instance/host_filter_test.go b/internal/static/metrics/instance/host_filter_test.go deleted file mode 100644 index 8eca3a3f51..0000000000 --- a/internal/static/metrics/instance/host_filter_test.go +++ /dev/null @@ -1,201 +0,0 @@ -package instance - -import ( - "testing" - - "github.com/grafana/agent/internal/util" - "github.com/prometheus/common/model" - "github.com/prometheus/prometheus/config" - "github.com/prometheus/prometheus/discovery/targetgroup" - "github.com/prometheus/prometheus/model/relabel" - "github.com/stretchr/testify/require" - "gopkg.in/yaml.v3" -) - -func makeGroup(labels []model.LabelSet) *targetgroup.Group { - return &targetgroup.Group{ - Targets: labels, - Labels: model.LabelSet{}, - } -} - -func TestFilterGroups(t *testing.T) { - tt := []struct { - name string - labelHost string - inputHost string - shouldRemove bool - }{ - { - name: "complete match", - labelHost: "myhost", - inputHost: "myhost", - shouldRemove: false, - }, - { - name: "mismatch", - labelHost: "notmyhost", - inputHost: "myhost", - shouldRemove: true, - }, - { - name: "match with port", - labelHost: "myhost:12345", - inputHost: "myhost", - shouldRemove: false, - }, - { - name: "mismatch with port", - labelHost: "notmyhost:12345", - inputHost: "myhost", - shouldRemove: true, - }, - } - - // Sets of labels we want to test against. - labels := []model.LabelName{ - model.AddressLabel, - model.LabelName("__meta_consul_node"), - model.LabelName("__meta_dockerswarm_node_id"), - model.LabelName("__meta_dockerswarm_node_hostname"), - model.LabelName("__meta_dockerswarm_node_address"), - model.LabelName("__meta_kubernetes_pod_node_name"), - model.LabelName("__meta_kubernetes_node_name"), - model.LabelName("__host__"), - } - - for _, tc := range tt { - t.Run(tc.name, func(t *testing.T) { - for _, label := range labels { - t.Run(string(label), func(t *testing.T) { - lset := model.LabelSet{ - label: model.LabelValue(tc.labelHost), - } - - // Special case: if label is not model.AddressLabel, we need to give - // it a fake value. model.AddressLabel is always expected to be present and - // is considered an error if it isn't. 
- if label != model.AddressLabel { - lset[model.AddressLabel] = "fake" - } - - group := makeGroup([]model.LabelSet{lset}) - - groups := DiscoveredGroups{"test": []*targetgroup.Group{group}} - result := FilterGroups(groups, tc.inputHost, nil) - - require.NotNil(t, result["test"]) - if tc.shouldRemove { - require.NotEqual(t, len(result["test"][0].Targets), len(groups["test"][0].Targets)) - } else { - require.Equal(t, len(result["test"][0].Targets), len(groups["test"][0].Targets)) - } - }) - } - }) - } -} - -func TestFilterGroups_Relabel(t *testing.T) { - tt := []struct { - name string - labelHost string - inputHost string - shouldRemove bool - }{ - { - name: "complete match", - labelHost: "myhost", - inputHost: "myhost", - shouldRemove: false, - }, - { - name: "mismatch", - labelHost: "notmyhost", - inputHost: "myhost", - shouldRemove: true, - }, - { - name: "match with port", - labelHost: "myhost:12345", - inputHost: "myhost", - shouldRemove: false, - }, - { - name: "mismatch with port", - labelHost: "notmyhost:12345", - inputHost: "myhost", - shouldRemove: true, - }, - } - - relabelConfig := []*relabel.Config{{ - SourceLabels: model.LabelNames{"__internal_label"}, - Action: relabel.Replace, - Separator: ";", - Regex: relabel.MustNewRegexp("(.*)"), - Replacement: "$1", - TargetLabel: "__host__", - }} - - for _, tc := range tt { - t.Run(tc.name, func(t *testing.T) { - lset := model.LabelSet{ - model.AddressLabel: "fake_target", - "__internal_label": model.LabelValue(tc.labelHost), - } - - group := makeGroup([]model.LabelSet{lset}) - - groups := DiscoveredGroups{"test": []*targetgroup.Group{group}} - result := FilterGroups(groups, tc.inputHost, relabelConfig) - - require.NotNil(t, result["test"]) - if tc.shouldRemove { - require.NotEqual(t, len(result["test"][0].Targets), len(groups["test"][0].Targets)) - } else { - require.Equal(t, len(result["test"][0].Targets), len(groups["test"][0].Targets)) - } - }) - } -} - -func TestHostFilter_PatchSD(t *testing.T) { - rawInput := util.Untab(` -- job_name: default - kubernetes_sd_configs: - - role: service - - role: pod`) - - expect := util.Untab(` -- job_name: default - honor_timestamps: true - metrics_path: /metrics - scheme: http - track_timestamps_staleness: false - follow_redirects: true - enable_http2: true - kubernetes_sd_configs: - - role: service - kubeconfig_file: "" - follow_redirects: true - enable_http2: true - - role: pod - follow_redirects: true - enable_http2: true - kubeconfig_file: "" - selectors: - - role: pod - field: spec.nodeName=myhost - `) - - var input []*config.ScrapeConfig - err := yaml.Unmarshal([]byte(rawInput), &input) - require.NoError(t, err) - - NewHostFilter("myhost", nil).PatchSD(input) - - output, err := yaml.Marshal(input) - require.NoError(t, err) - require.YAMLEq(t, expect, string(output)) -} diff --git a/internal/static/metrics/instance/instance.go b/internal/static/metrics/instance/instance.go index a0de217627..db8e22109c 100644 --- a/internal/static/metrics/instance/instance.go +++ b/internal/static/metrics/instance/instance.go @@ -2,37 +2,18 @@ package instance import ( - "bytes" - "context" "crypto/md5" "encoding/hex" "encoding/json" "errors" "fmt" - "math" - "net/http" - "os" - "path/filepath" - "sync" "time" - "github.com/go-kit/log" - "github.com/go-kit/log/level" - "github.com/grafana/agent/internal/agentseed" - "github.com/grafana/agent/internal/static/metrics/wal" "github.com/grafana/agent/internal/useragent" - "github.com/grafana/agent/internal/util" - "github.com/oklog/run" - 
"github.com/prometheus/client_golang/prometheus" - config_util "github.com/prometheus/common/config" "github.com/prometheus/prometheus/config" - "github.com/prometheus/prometheus/discovery" "github.com/prometheus/prometheus/model/relabel" - "github.com/prometheus/prometheus/model/timestamp" "github.com/prometheus/prometheus/scrape" - "github.com/prometheus/prometheus/storage" "github.com/prometheus/prometheus/storage/remote" - "go.uber.org/atomic" "gopkg.in/yaml.v2" ) @@ -195,578 +176,6 @@ func (c *Config) ApplyDefaults(global GlobalConfig) error { return nil } -// Clone makes a deep copy of the config along with global settings. -func (c *Config) Clone() (Config, error) { - bb, err := MarshalConfig(c, false) - if err != nil { - return Config{}, err - } - cp, err := UnmarshalConfig(bytes.NewReader(bb)) - if err != nil { - return Config{}, err - } - cp.global = c.global - - // Some tests will trip up on this; the marshal/unmarshal cycle might set - // an empty slice to nil. Set it back to an empty slice if we detect this - // happening. - if cp.ScrapeConfigs == nil && c.ScrapeConfigs != nil { - cp.ScrapeConfigs = []*config.ScrapeConfig{} - } - if cp.RemoteWrite == nil && c.RemoteWrite != nil { - cp.RemoteWrite = []*config.RemoteWriteConfig{} - } - - return *cp, nil -} - -type walStorageFactory func(reg prometheus.Registerer) (walStorage, error) - -// Instance is an individual metrics collector and remote_writer. -type Instance struct { - // All fields in the following block may be accessed and modified by - // concurrently running goroutines. - // - // Note that all Prometheus components listed here may be nil at any - // given time; methods reading them should take care to do nil checks. - mut sync.Mutex - cfg Config - wal walStorage - discovery *discoveryService - readyScrapeManager *readyScrapeManager - remoteStore *remote.Storage - storage storage.Storage - - // ready is set to true after the initialization process finishes - ready atomic.Bool - - hostFilter *HostFilter - - logger log.Logger - - reg prometheus.Registerer - newWal walStorageFactory - writeHandler http.Handler -} - -// New creates a new Instance with a directory for storing the WAL. The instance -// will not start until Run is called on the instance. -func New(reg prometheus.Registerer, cfg Config, walDir string, logger log.Logger) (*Instance, error) { - logger = log.With(logger, "instance", cfg.Name) - - instWALDir := filepath.Join(walDir, cfg.Name) - - newWal := func(reg prometheus.Registerer) (walStorage, error) { - return wal.NewStorage(logger, reg, instWALDir) - } - - return newInstance(cfg, reg, logger, newWal) -} - -func newInstance(cfg Config, reg prometheus.Registerer, logger log.Logger, newWal walStorageFactory) (*Instance, error) { - hostname, err := Hostname() - if err != nil { - return nil, fmt.Errorf("failed to get hostname: %w", err) - } - - i := &Instance{ - cfg: cfg, - logger: logger, - hostFilter: NewHostFilter(hostname, cfg.HostFilterRelabelConfigs), - - reg: reg, - newWal: newWal, - - readyScrapeManager: &readyScrapeManager{}, - } - - return i, nil -} - -// Run starts the instance, initializing Prometheus components, and will -// continue to run until an error happens during execution or the provided -// context is cancelled. -// -// Run may be re-called after exiting, as components will be reinitialized each -// time Run is called. 
-func (i *Instance) Run(ctx context.Context) error {
-	// i.cfg may change at any point in the middle of this method but not in a way
-	// that affects any of the code below; rather than grabbing a mutex every time
-	// we want to read the config, we'll simplify the access and just grab a copy
-	// now.
-	i.mut.Lock()
-	cfg := i.cfg
-	i.mut.Unlock()
-
-	level.Debug(i.logger).Log("msg", "initializing instance", "name", cfg.Name)
-
-	// trackingReg wraps the registerer for the instance to make sure that if Run
-	// exits, any metrics Prometheus registers are removed and can be
-	// re-registered if Run is called again.
-	trackingReg := util.WrapWithUnregisterer(i.reg)
-	defer trackingReg.UnregisterAll()
-
-	if err := i.initialize(ctx, trackingReg, &cfg); err != nil {
-		level.Error(i.logger).Log("msg", "failed to initialize instance", "err", err)
-		return fmt.Errorf("failed to initialize instance: %w", err)
-	}
-
-	// The actors defined here are listed in the order we want them to shut down.
-	// Primarily, we want to ensure that the following shutdown order is
-	// maintained:
-	//    1. The scrape manager stops
-	//    2. WAL storage is closed
-	//    3. Remote write storage is closed
-	// This is done to allow the instance to write stale markers for all active
-	// series.
-	rg := runGroupWithContext(ctx)
-
-	{
-		// Target Discovery
-		rg.Add(i.discovery.Run, i.discovery.Stop)
-	}
-	{
-		// Truncation loop
-		ctx, contextCancel := context.WithCancel(context.Background())
-		defer contextCancel()
-		rg.Add(
-			func() error {
-				i.truncateLoop(ctx, i.wal, &cfg)
-				level.Info(i.logger).Log("msg", "truncation loop stopped")
-				return nil
-			},
-			func(err error) {
-				level.Info(i.logger).Log("msg", "stopping truncation loop...")
-				contextCancel()
-			},
-		)
-	}
-	{
-		sm, err := i.readyScrapeManager.Get()
-		if err != nil {
-			level.Error(i.logger).Log("msg", "failed to get scrape manager")
-			return err
-		}
-
-		// Scrape manager
-		rg.Add(
-			func() error {
-				err := sm.Run(i.discovery.SyncCh())
-				level.Info(i.logger).Log("msg", "scrape manager stopped")
-				return err
-			},
-			func(err error) {
-				// The scrape manager is closed first to allow us to write staleness
-				// markers without receiving new samples from scraping in the meantime.
-				level.Info(i.logger).Log("msg", "stopping scrape manager...")
-				sm.Stop()
-
-				// On a graceful shutdown, write staleness markers. If something went
-				// wrong, then the instance will be relaunched.
-				if err == nil && cfg.WriteStaleOnShutdown {
-					level.Info(i.logger).Log("msg", "writing staleness markers...")
-					err := i.wal.WriteStalenessMarkers(i.getRemoteWriteTimestamp)
-					if err != nil {
-						level.Error(i.logger).Log("msg", "error writing staleness markers", "err", err)
-					}
-				}
-
-				// Closing the storage closes both the WAL storage and remote write
-				// storage.
-				level.Info(i.logger).Log("msg", "closing storage...")
-				if err := i.storage.Close(); err != nil {
-					level.Error(i.logger).Log("msg", "error stopping storage", "err", err)
-				}
-			},
-		)
-	}
-
-	level.Debug(i.logger).Log("msg", "running instance", "name", cfg.Name)
-	i.ready.Store(true)
-	err := rg.Run()
-	if err != nil {
-		level.Error(i.logger).Log("msg", "agent instance stopped with error", "err", err)
-	}
-	return err
-}
-
-// initialize sets up the various Prometheus components with their initial
-// settings. initialize will be called each time the Instance is run. Prometheus
-// components cannot be reused after they are stopped so we need to recreate them
-// each run.
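The central wiring step is a storage fanout, so appended samples land in the WAL and are simultaneously queued for remote_write; condensed from the calls in the method below (a sketch, error handling elided):

	wal, _ := i.newWal(reg)
	remoteStore := remote.NewStorage(log.With(i.logger, "component", "remote"), reg,
		wal.StartTime, wal.Directory(), cfg.RemoteFlushDeadline, i.readyScrapeManager)
	store := storage.NewFanout(i.logger, wal, remoteStore)
	// store is then handed to the scrape manager as its storage.Appendable.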
-func (i *Instance) initialize(ctx context.Context, reg prometheus.Registerer, cfg *Config) error { - i.mut.Lock() - defer i.mut.Unlock() - - if cfg.HostFilter { - i.hostFilter.PatchSD(cfg.ScrapeConfigs) - } - - var err error - - i.wal, err = i.newWal(reg) - if err != nil { - return fmt.Errorf("error creating WAL: %w", err) - } - - i.writeHandler = remote.NewWriteHandler(i.logger, reg, i.wal) - - i.discovery, err = i.newDiscoveryManager(ctx, cfg) - if err != nil { - return fmt.Errorf("error creating discovery manager: %w", err) - } - - i.readyScrapeManager = &readyScrapeManager{} - - // Set up the remote storage - remoteLogger := log.With(i.logger, "component", "remote") - i.remoteStore = remote.NewStorage(remoteLogger, reg, i.wal.StartTime, i.wal.Directory(), cfg.RemoteFlushDeadline, i.readyScrapeManager) - uid := agentseed.Get().UID - for _, rw := range cfg.RemoteWrite { - if rw.Headers == nil { - rw.Headers = map[string]string{} - } - rw.Headers[agentseed.HeaderName] = uid - } - err = i.remoteStore.ApplyConfig(&config.Config{ - GlobalConfig: cfg.global.Prometheus, - RemoteWriteConfigs: cfg.RemoteWrite, - }) - if err != nil { - return fmt.Errorf("failed applying config to remote storage: %w", err) - } - - i.storage = storage.NewFanout(i.logger, i.wal, i.remoteStore) - - opts := &scrape.Options{ - ExtraMetrics: cfg.global.ExtraMetrics, - HTTPClientOptions: []config_util.HTTPClientOption{}, - } - - if cfg.global.DisableKeepAlives { - opts.HTTPClientOptions = append(opts.HTTPClientOptions, config_util.WithKeepAlivesDisabled()) - } - if cfg.global.IdleConnTimeout > 0 { - opts.HTTPClientOptions = append(opts.HTTPClientOptions, config_util.WithIdleConnTimeout(cfg.global.IdleConnTimeout)) - } - scrapeManager := newScrapeManager(opts, log.With(i.logger, "component", "scrape manager"), i.storage) - err = scrapeManager.ApplyConfig(&config.Config{ - GlobalConfig: cfg.global.Prometheus, - ScrapeConfigs: cfg.ScrapeConfigs, - }) - if err != nil { - return fmt.Errorf("failed applying config to scrape manager: %w", err) - } - - i.readyScrapeManager.Set(scrapeManager) - - return nil -} - -// Ready returns true if the Instance has been initialized and is ready -// to start scraping and delivering metrics. -func (i *Instance) Ready() bool { - return i.ready.Load() -} - -// Update accepts a new Config for the Instance and will dynamically update any -// running Prometheus components with the new values from Config. Update will -// return an ErrInvalidUpdate if the Update could not be applied. -func (i *Instance) Update(c Config) (err error) { - i.mut.Lock() - defer i.mut.Unlock() - - // It's only (currently) valid to update scrape_configs and remote_write, so - // if any other field has changed here, return the error. - switch { - // This first case should never happen in practice but it's included here for - // completion’s sake. - case i.cfg.Name != c.Name: - err = errImmutableField{Field: "name"} - case i.cfg.HostFilter != c.HostFilter: - err = errImmutableField{Field: "host_filter"} - case i.cfg.WALTruncateFrequency != c.WALTruncateFrequency: - err = errImmutableField{Field: "wal_truncate_frequency"} - case i.cfg.RemoteFlushDeadline != c.RemoteFlushDeadline: - err = errImmutableField{Field: "remote_flush_deadline"} - case i.cfg.WriteStaleOnShutdown != c.WriteStaleOnShutdown: - err = errImmutableField{Field: "write_stale_on_shutdown"} - } - if err != nil { - return ErrInvalidUpdate{Inner: err} - } - - // Check to see if the components exist yet. 
- if i.discovery == nil || i.remoteStore == nil || i.readyScrapeManager == nil { - return ErrInvalidUpdate{ - Inner: fmt.Errorf("cannot dynamically update because instance is not running"), - } - } - - // NOTE(rfratto): Prometheus applies configs in a specific order to ensure - // flow from service discovery down to the WAL continues working properly. - // - // Keep the following order below: - // - // 1. Local config - // 2. Remote Store - // 3. Scrape Manager - // 4. Discovery Manager - - originalConfig := i.cfg - defer func() { - if err != nil { - i.cfg = originalConfig - } - }() - i.cfg = c - - i.hostFilter.SetRelabels(c.HostFilterRelabelConfigs) - if c.HostFilter { - // N.B.: only call PatchSD if HostFilter is enabled since it - // mutates what targets will be discovered. - i.hostFilter.PatchSD(c.ScrapeConfigs) - } - - err = i.remoteStore.ApplyConfig(&config.Config{ - GlobalConfig: c.global.Prometheus, - RemoteWriteConfigs: c.RemoteWrite, - }) - if err != nil { - return fmt.Errorf("error applying new remote_write configs: %w", err) - } - - sm, err := i.readyScrapeManager.Get() - if err != nil { - return fmt.Errorf("couldn't get scrape manager to apply new scrape configs: %w", err) - } - err = sm.ApplyConfig(&config.Config{ - GlobalConfig: c.global.Prometheus, - ScrapeConfigs: c.ScrapeConfigs, - }) - if err != nil { - return fmt.Errorf("error applying updated configs to scrape manager: %w", err) - } - - sdConfigs := map[string]discovery.Configs{} - for _, v := range c.ScrapeConfigs { - sdConfigs[v.JobName] = v.ServiceDiscoveryConfigs - } - err = i.discovery.Manager.ApplyConfig(sdConfigs) - if err != nil { - return fmt.Errorf("failed applying configs to discovery manager: %w", err) - } - - return nil -} - -// TargetsActive returns the set of active targets from the scrape manager. Returns nil -// if the scrape manager is not ready yet. -func (i *Instance) TargetsActive() map[string][]*scrape.Target { - i.mut.Lock() - defer i.mut.Unlock() - - if i.readyScrapeManager == nil { - return nil - } - - mgr, err := i.readyScrapeManager.Get() - if err == ErrNotReady { - return nil - } else if err != nil { - level.Error(i.logger).Log("msg", "failed to get scrape manager when collecting active targets", "err", err) - return nil - } - return mgr.TargetsActive() -} - -// StorageDirectory returns the directory where this Instance is writing series -// and samples to for the WAL. -func (i *Instance) StorageDirectory() string { - return i.wal.Directory() -} - -// WriteHandler returns an HTTP handler for pushing metrics directly into the -// instance's WAL. -func (i *Instance) WriteHandler() http.Handler { - return i.writeHandler -} - -// Appender returns a storage.Appender from the instance's WAL -func (i *Instance) Appender(ctx context.Context) storage.Appender { - return i.wal.Appender(ctx) -} - -type discoveryService struct { - Manager *discovery.Manager - - RunFunc func() error - StopFunc func(err error) - SyncChFunc func() GroupChannel -} - -func (s *discoveryService) Run() error { return s.RunFunc() } -func (s *discoveryService) Stop(err error) { s.StopFunc(err) } -func (s *discoveryService) SyncCh() GroupChannel { return s.SyncChFunc() } - -// newDiscoveryManager returns an implementation of a runnable service -// that outputs discovered targets to a channel. The implementation -// uses the Prometheus Discovery Manager. Targets will be filtered -// if the instance is configured to perform host filtering. 
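In other words, the wrapper is consumed like this (hypothetical caller sketch; error handling elided):

	ds, _ := i.newDiscoveryManager(ctx, cfg)
	go func() { _ = ds.Run() }()
	for groups := range ds.SyncCh() {
		// groups is a DiscoveredGroups snapshot; when cfg.HostFilter is set
		// it has already been narrowed to targets on this node.
		_ = groups
	}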
-func (i *Instance) newDiscoveryManager(ctx context.Context, cfg *Config) (*discoveryService, error) { - ctx, cancel := context.WithCancel(ctx) - - logger := log.With(i.logger, "component", "discovery manager") - manager := discovery.NewManager(ctx, logger, discovery.Name("scrape")) - - // TODO(rfratto): refactor this to a function? - // TODO(rfratto): ensure job name name is unique - c := map[string]discovery.Configs{} - for _, v := range cfg.ScrapeConfigs { - c[v.JobName] = v.ServiceDiscoveryConfigs - } - err := manager.ApplyConfig(c) - if err != nil { - cancel() - level.Error(i.logger).Log("msg", "failed applying config to discovery manager", "err", err) - return nil, fmt.Errorf("failed applying config to discovery manager: %w", err) - } - - rg := runGroupWithContext(ctx) - - // Run the manager - rg.Add(func() error { - err := manager.Run() - level.Info(i.logger).Log("msg", "discovery manager stopped") - return err - }, func(err error) { - level.Info(i.logger).Log("msg", "stopping discovery manager...") - cancel() - }) - - syncChFunc := manager.SyncCh - - // If host filtering is enabled, run it and use its channel for discovered - // targets. - if cfg.HostFilter { - rg.Add(func() error { - i.hostFilter.Run(manager.SyncCh()) - level.Info(i.logger).Log("msg", "host filterer stopped") - return nil - }, func(_ error) { - level.Info(i.logger).Log("msg", "stopping host filterer...") - i.hostFilter.Stop() - }) - - syncChFunc = i.hostFilter.SyncCh - } - - return &discoveryService{ - Manager: manager, - - RunFunc: rg.Run, - StopFunc: rg.Stop, - SyncChFunc: syncChFunc, - }, nil -} - -func (i *Instance) truncateLoop(ctx context.Context, wal walStorage, cfg *Config) { - // Track the last timestamp we truncated for to prevent segments from getting - // deleted until at least some new data has been sent. - var lastTs int64 = math.MinInt64 - - for { - select { - case <-ctx.Done(): - return - case <-time.After(cfg.WALTruncateFrequency): - // The timestamp ts is used to determine which series are not receiving - // samples and may be deleted from the WAL. Their most recent append - // timestamp is compared to ts, and if that timestamp is older than ts, - // they are considered inactive and may be deleted. - // - // Subtracting a duration from ts will delay when it will be considered - // inactive and scheduled for deletion. - ts := i.getRemoteWriteTimestamp() - i.cfg.MinWALTime.Milliseconds() - if ts < 0 { - ts = 0 - } - - // Network issues can prevent the result of getRemoteWriteTimestamp from - // changing. We don't want data in the WAL to grow forever, so we set a cap - // on the maximum age data can be. If our ts is older than this cutoff point, - // we'll shift it forward to start deleting very stale data. - if maxTS := timestamp.FromTime(time.Now().Add(-i.cfg.MaxWALTime)); ts < maxTS { - ts = maxTS - } - - if ts == lastTs { - level.Debug(i.logger).Log("msg", "not truncating the WAL, remote_write timestamp is unchanged", "ts", ts) - continue - } - lastTs = ts - - level.Debug(i.logger).Log("msg", "truncating the WAL", "ts", ts) - err := wal.Truncate(ts) - if err != nil { - // The only issue here is larger disk usage and a greater replay time, - // so we'll only log this as a warning. - level.Warn(i.logger).Log("msg", "could not truncate WAL", "err", err) - } - } - } -} - -// getRemoteWriteTimestamp looks up the last successful remote write timestamp. -// This is passed to wal.Storage for its truncation. If no remote write sections -// are configured, getRemoteWriteTimestamp returns the current time. 
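truncateLoop above turns this timestamp into a truncation cutoff; a worked sketch of that arithmetic (min_wal_time=5m and max_wal_time=4h are made-up values):

	lastSent := i.getRemoteWriteTimestamp()           // e.g. everything sent up to 12:00:00
	ts := lastSent - (5 * time.Minute).Milliseconds() // series idle since 11:55:00 may go
	if maxTS := timestamp.FromTime(time.Now().Add(-4 * time.Hour)); ts < maxTS {
		ts = maxTS // remote_write stuck for over 4h: age out very stale data anyway
	}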
-func (i *Instance) getRemoteWriteTimestamp() int64 {
-	i.mut.Lock()
-	defer i.mut.Unlock()
-
-	if len(i.cfg.RemoteWrite) == 0 {
-		return timestamp.FromTime(time.Now())
-	}
-
-	if i.remoteStore == nil {
-		// Instance still being initialized; start at 0.
-		return 0
-	}
-	return i.remoteStore.LowestSentTimestamp()
-}
-
-// walStorage is an interface satisfied by wal.Storage, and created for testing.
-type walStorage interface {
-	// walStorage implements Queryable/ChunkQueryable for compatibility, but is unused.
-	storage.Queryable
-	storage.ChunkQueryable
-
-	Directory() string
-
-	StartTime() (int64, error)
-	WriteStalenessMarkers(remoteTsFunc func() int64) error
-	Appender(context.Context) storage.Appender
-	Truncate(mint int64) error
-
-	Close() error
-}
-
-// Hostname retrieves the hostname identifying the machine the process is
-// running on. It will return the value of $HOSTNAME, if defined, and fall
-// back to Go's os.Hostname.
-func Hostname() (string, error) {
-	hostname := os.Getenv("HOSTNAME")
-	if hostname != "" {
-		return hostname, nil
-	}
-
-	hostname, err := os.Hostname()
-	if err != nil {
-		return "", fmt.Errorf("failed to get hostname: %w", err)
-	}
-	return hostname, nil
-}
-
 func getHash(data interface{}) (string, error) {
 	bytes, err := json.Marshal(data)
 	if err != nil {
@@ -775,73 +184,3 @@ func getHash(data interface{}) (string, error) {
 	hash := md5.Sum(bytes)
 	return hex.EncodeToString(hash[:]), nil
 }
-
-var managerMtx sync.Mutex
-
-func newScrapeManager(o *scrape.Options, logger log.Logger, app storage.Appendable) *scrape.Manager {
-	// scrape.NewManager modifies a global variable in Prometheus. To avoid a
-	// data race of modifying that global, we lock a mutex here briefly.
-	managerMtx.Lock()
-	defer managerMtx.Unlock()
-	return scrape.NewManager(o, logger, app)
-}
-
-type runGroupContext struct {
-	cancel context.CancelFunc
-
-	g *run.Group
-}
-
-// runGroupWithContext creates a new run.Group that will be stopped if the
-// context gets canceled in addition to the normal behavior of stopping
-// when any of the actors stop.
-func runGroupWithContext(ctx context.Context) *runGroupContext {
-	ctx, cancel := context.WithCancel(ctx)
-
-	var g run.Group
-	g.Add(func() error {
-		<-ctx.Done()
-		return nil
-	}, func(_ error) {
-		cancel()
-	})
-
-	return &runGroupContext{cancel: cancel, g: &g}
-}
-
-func (rg *runGroupContext) Add(execute func() error, interrupt func(error)) {
-	rg.g.Add(execute, interrupt)
-}
-
-func (rg *runGroupContext) Run() error   { return rg.g.Run() }
-func (rg *runGroupContext) Stop(_ error) { rg.cancel() }
-
-// ErrNotReady is returned when the scrape manager is used but has not been
-// initialized yet.
-var ErrNotReady = errors.New("Scrape manager not ready")
-
-// readyScrapeManager allows a scrape manager to be retrieved, even if it's set
-// at a later point in time.
-type readyScrapeManager struct {
-	mtx sync.RWMutex
-	m   *scrape.Manager
-}
-
-// Set the scrape manager.
-func (rm *readyScrapeManager) Set(m *scrape.Manager) {
-	rm.mtx.Lock()
-	defer rm.mtx.Unlock()
-
-	rm.m = m
-}
-
-// Get the scrape manager. If it is not ready, return an error.
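Set and Get form a small two-phase handoff; usage is along these lines (sketch; opts, logger, and app are assumed to be in scope):

	rm := &readyScrapeManager{}
	if _, err := rm.Get(); err == ErrNotReady {
		// Not constructed yet; callers retry later.
	}
	rm.Set(newScrapeManager(opts, logger, app))
	sm, _ := rm.Get() // now returns the manager
	_ = sm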
-func (rm *readyScrapeManager) Get() (*scrape.Manager, error) { - rm.mtx.RLock() - defer rm.mtx.RUnlock() - - if rm.m != nil { - return rm.m, nil - } - - return nil, ErrNotReady -} diff --git a/internal/static/metrics/instance/instance_integration_test.go b/internal/static/metrics/instance/instance_integration_test.go deleted file mode 100644 index 71fc2ed8b5..0000000000 --- a/internal/static/metrics/instance/instance_integration_test.go +++ /dev/null @@ -1,281 +0,0 @@ -package instance - -import ( - "context" - "fmt" - "net" - "net/http" - "os" - "strings" - "sync" - "testing" - "time" - - "github.com/go-kit/log" - "github.com/gorilla/mux" - "github.com/grafana/agent/internal/util" - "github.com/grafana/dskit/backoff" - "github.com/prometheus/client_golang/prometheus" - "github.com/prometheus/client_golang/prometheus/promhttp" - "github.com/stretchr/testify/require" - "go.uber.org/atomic" -) - -var slowBackoff = backoff.Config{ - MinBackoff: 1 * time.Second, - MaxBackoff: 1 * time.Minute, - MaxRetries: 10, -} - -// TestInstance_Update performs a full integration test by doing the following: -// -// 1. Launching an HTTP server which can be scraped and also mocks the remote_write -// endpoint. -// 2. Creating an instance config with no scrape_configs or remote_write configs. -// 3. Updates the instance with a scrape_config and remote_write. -// 4. Validates that after 15 seconds, the scrape endpoint and remote_write -// endpoint has been called. -func TestInstance_Update(t *testing.T) { - logger := log.NewLogfmtLogger(log.NewSyncWriter(os.Stderr)) - - walDir := t.TempDir() - - var ( - scraped = atomic.NewBool(false) - pushed = atomic.NewBool(false) - ) - - r := mux.NewRouter() - r.HandleFunc("/metrics", func(w http.ResponseWriter, r *http.Request) { - scraped.Store(true) - promhttp.Handler().ServeHTTP(w, r) - }) - r.HandleFunc("/push", func(w http.ResponseWriter, r *http.Request) { - pushed.Store(true) - // We don't particularly care what was pushed to us, so we'll ignore - // everything here; we just want to make sure the endpoint was invoked. - }) - - // Start a server for exposing the router. - l, err := net.Listen("tcp", "127.0.0.1:0") - require.NoError(t, err) - defer l.Close() - go func() { - _ = http.Serve(l, r) - }() - - // Create a new instance where it's not scraping or writing anything by default. - initialConfig := loadConfig(t, ` -name: integration_test -scrape_configs: [] -remote_write: [] -`) - inst, err := New(prometheus.NewRegistry(), initialConfig, walDir, logger) - require.NoError(t, err) - - instCtx, cancel := context.WithCancel(context.Background()) - var wg sync.WaitGroup - defer func() { - cancel() - wg.Wait() - }() - - wg.Add(1) - go func() { - defer wg.Done() - err := inst.Run(instCtx) - require.NoError(t, err) - }() - - // Update the config with a single scrape_config and remote_write. - newConfig := loadConfig(t, fmt.Sprintf(` -name: integration_test -scrape_configs: - - job_name: test_scrape - scrape_interval: 5s - static_configs: - - targets: ['%[1]s'] -remote_write: - - url: http://%[1]s/push -`, l.Addr())) - - // Wait for the instance to be ready before updating. 
- util.EventuallyWithBackoff(t, func(t require.TestingT) { - require.True(t, inst.Ready()) - }, slowBackoff) - - // Wait for the instance to update (it might not be ready yet and would - // return an error until everything is initialized), and then wait again for - // the configs to apply and set the scraped and pushed atomic variables, - // indicating that the Prometheus components successfully updated. - util.EventuallyWithBackoff(t, func(t require.TestingT) { - err := inst.Update(newConfig) - if err != nil { - logger.Log("msg", "failed to update instance", "err", err) - } - require.NoError(t, err) - }, slowBackoff) - - util.EventuallyWithBackoff(t, func(t require.TestingT) { - require.True(t, scraped.Load() && pushed.Load()) - }, slowBackoff) -} - -func TestInstance_Update_Failed(t *testing.T) { - logger := log.NewLogfmtLogger(log.NewSyncWriter(os.Stderr)) - - walDir := t.TempDir() - - r := mux.NewRouter() - r.HandleFunc("/metrics", func(w http.ResponseWriter, r *http.Request) { - promhttp.Handler().ServeHTTP(w, r) - }) - r.HandleFunc("/push", func(w http.ResponseWriter, r *http.Request) {}) - - // Start a server for exposing the router. - l, err := net.Listen("tcp", "127.0.0.1:0") - require.NoError(t, err) - defer l.Close() - go func() { - _ = http.Serve(l, r) - }() - - // Create a new instance where it's not scraping or writing anything by default. - initialConfig := loadConfig(t, ` -name: integration_test -scrape_configs: [] -remote_write: [] -`) - inst, err := New(prometheus.NewRegistry(), initialConfig, walDir, logger) - require.NoError(t, err) - - instCtx, cancel := context.WithCancel(context.Background()) - var wg sync.WaitGroup - defer func() { - cancel() - wg.Wait() - }() - - wg.Add(1) - go func() { - defer wg.Done() - err := inst.Run(instCtx) - require.NoError(t, err) - }() - - // Create a new config to use for updating - newConfig := loadConfig(t, fmt.Sprintf(` -name: integration_test -scrape_configs: - - job_name: test_scrape - scrape_interval: 5s - static_configs: - - targets: ['%[1]s'] -remote_write: - - url: http://%[1]s/push -`, l.Addr())) - - // Make sure the instance can successfully update first - util.Eventually(t, func(t require.TestingT) { - err := inst.Update(newConfig) - if err != nil { - logger.Log("msg", "failed to update instance", "err", err) - } - require.NoError(t, err) - }) - - // Now force an update back to the original config to fail - inst.readyScrapeManager.Set(nil) - require.NotNil(t, inst.Update(initialConfig), "update should have failed") - require.Equal(t, newConfig, inst.cfg, "config did not roll back") -} - -// TestInstance_Update_InvalidChanges runs an instance with a blank initial -// config and performs various unacceptable updates that should return an -// error. -func TestInstance_Update_InvalidChanges(t *testing.T) { - logger := log.NewLogfmtLogger(log.NewSyncWriter(os.Stderr)) - - walDir := t.TempDir() - - // Create a new instance where it's not scraping or writing anything by default. - initialConfig := loadConfig(t, ` -name: integration_test -scrape_configs: [] -remote_write: [] -`) - inst, err := New(prometheus.NewRegistry(), initialConfig, walDir, logger) - require.NoError(t, err) - - instCtx, cancel := context.WithCancel(context.Background()) - var wg sync.WaitGroup - defer func() { - cancel() - wg.Wait() - }() - - wg.Add(1) - go func() { - defer wg.Done() - err := inst.Run(instCtx) - require.NoError(t, err) - }() - - // Do a no-op update that succeeds to ensure that the instance is running. 
- util.Eventually(t, func(t require.TestingT) { - err := inst.Update(initialConfig) - if err != nil { - logger.Log("msg", "failed to update instance", "err", err) - } - require.NoError(t, err) - }) - - tt := []struct { - name string - mut func(c *Config) - expect string - }{ - { - name: "name changed", - mut: func(c *Config) { c.Name = "changed name" }, - expect: "name cannot be changed dynamically", - }, - { - name: "host_filter changed", - mut: func(c *Config) { c.HostFilter = true }, - expect: "host_filter cannot be changed dynamically", - }, - { - name: "wal_truncate_frequency changed", - mut: func(c *Config) { c.WALTruncateFrequency *= 2 }, - expect: "wal_truncate_frequency cannot be changed dynamically", - }, - { - name: "remote_flush_deadline changed", - mut: func(c *Config) { c.RemoteFlushDeadline *= 2 }, - expect: "remote_flush_deadline cannot be changed dynamically", - }, - { - name: "write_stale_on_shutdown changed", - mut: func(c *Config) { c.WriteStaleOnShutdown = true }, - expect: "write_stale_on_shutdown cannot be changed dynamically", - }, - } - - for _, tc := range tt { - t.Run(tc.name, func(t *testing.T) { - mutatedConfig := initialConfig - tc.mut(&mutatedConfig) - - err := inst.Update(mutatedConfig) - require.EqualError(t, err, tc.expect) - }) - } -} - -func loadConfig(t *testing.T, s string) Config { - cfg, err := UnmarshalConfig(strings.NewReader(s)) - require.NoError(t, err) - require.NoError(t, cfg.ApplyDefaults(DefaultGlobalConfig)) - return *cfg -} diff --git a/internal/static/metrics/instance/instance_test.go b/internal/static/metrics/instance/instance_test.go index 0f97aecac2..33e6551f03 100644 --- a/internal/static/metrics/instance/instance_test.go +++ b/internal/static/metrics/instance/instance_test.go @@ -1,28 +1,13 @@ package instance import ( - "context" "fmt" - "net/http/httptest" - "os" - "path" "strings" - "sync" "testing" - "time" - "github.com/go-kit/log" - "github.com/grafana/agent/internal/util" - "github.com/prometheus/client_golang/prometheus" - "github.com/prometheus/client_golang/prometheus/promhttp" "github.com/prometheus/common/model" "github.com/prometheus/prometheus/config" "github.com/prometheus/prometheus/discovery" - "github.com/prometheus/prometheus/model/exemplar" - "github.com/prometheus/prometheus/model/histogram" - "github.com/prometheus/prometheus/model/labels" - "github.com/prometheus/prometheus/model/metadata" - "github.com/prometheus/prometheus/storage" "github.com/stretchr/testify/require" ) @@ -184,238 +169,3 @@ remote_write: require.NoError(t, cfg.ApplyDefaults(DefaultGlobalConfig)) require.NotEmpty(t, cfg.RemoteWrite[0].Name) } - -func TestInstance_Path(t *testing.T) { - scrapeAddr, closeSrv := getTestServer(t) - defer closeSrv() - - walDir := t.TempDir() - - globalConfig := getTestGlobalConfig(t) - - cfg := getTestConfig(t, &globalConfig, scrapeAddr) - cfg.WALTruncateFrequency = time.Hour - cfg.RemoteFlushDeadline = time.Hour - - logger := log.NewLogfmtLogger(log.NewSyncWriter(os.Stderr)) - inst, err := New(prometheus.NewRegistry(), cfg, walDir, logger) - require.NoError(t, err) - runInstance(t, inst) - - // / path should exist for WAL - util.Eventually(t, func(t require.TestingT) { - _, err := os.Stat(path.Join(walDir, "test")) - require.NoError(t, err) - }) -} - -// TestInstance tests that discovery and scraping are working by using a mock -// instance of the WAL storage and testing that samples get written to it. -// This test touches most of Instance and is enough for a basic integration test. 
-func TestInstance(t *testing.T) { - scrapeAddr, closeSrv := getTestServer(t) - defer closeSrv() - - walDir := t.TempDir() - - globalConfig := getTestGlobalConfig(t) - cfg := getTestConfig(t, &globalConfig, scrapeAddr) - cfg.WALTruncateFrequency = time.Hour - cfg.RemoteFlushDeadline = time.Hour - - mockStorage := mockWalStorage{ - series: make(map[storage.SeriesRef]int), - directory: walDir, - } - newWal := func(_ prometheus.Registerer) (walStorage, error) { return &mockStorage, nil } - - logger := log.NewLogfmtLogger(log.NewSyncWriter(os.Stderr)) - inst, err := newInstance(cfg, nil, logger, newWal) - require.NoError(t, err) - runInstance(t, inst) - - // Wait until mockWalStorage has had a series added to it. - util.EventuallyWithBackoff(t, func(t require.TestingT) { - mockStorage.mut.Lock() - defer mockStorage.mut.Unlock() - require.True(t, len(mockStorage.series) > 0) - }, slowBackoff) -} - -// TestInstance_Recreate ensures that creating an instance with the same name twice -// does not cause any duplicate metrics registration that leads to a panic. -func TestInstance_Recreate(t *testing.T) { - scrapeAddr, closeSrv := getTestServer(t) - defer closeSrv() - - walDir := t.TempDir() - - globalConfig := getTestGlobalConfig(t) - - cfg := getTestConfig(t, &globalConfig, scrapeAddr) - cfg.Name = "recreate_test" - cfg.WALTruncateFrequency = time.Hour - cfg.RemoteFlushDeadline = time.Hour - - logger := log.NewLogfmtLogger(log.NewSyncWriter(os.Stderr)) - currentReg := prometheus.NewRegistry() - inst, err := New(currentReg, cfg, walDir, logger) - require.NoError(t, err) - - ctx, cancel := context.WithCancel(context.Background()) - exited := make(chan bool) - go func() { - err := inst.Run(ctx) - close(exited) - - if err != nil { - require.Equal(t, context.Canceled, err) - } - }() - - time.Sleep(1 * time.Second) - cancel() - <-exited - - // Recreate the instance, no panic should happen. 
- require.NotPanics(t, func() { - inst, err := New(currentReg, cfg, walDir, logger) - require.NoError(t, err) - runInstance(t, inst) - - time.Sleep(1 * time.Second) - }) -} - -func getTestServer(t *testing.T) (addr string, closeFunc func()) { - t.Helper() - - reg := prometheus.NewRegistry() - - testCounter := prometheus.NewCounter(prometheus.CounterOpts{ - Name: "test_metric_total", - }) - testCounter.Inc() - reg.MustRegister(testCounter) - - handler := promhttp.HandlerFor(reg, promhttp.HandlerOpts{}) - httpSrv := httptest.NewServer(handler) - return httpSrv.Listener.Addr().String(), httpSrv.Close -} - -func getTestGlobalConfig(t *testing.T) GlobalConfig { - t.Helper() - - return GlobalConfig{ - Prometheus: config.GlobalConfig{ - ScrapeInterval: model.Duration(time.Millisecond * 50), - ScrapeTimeout: model.Duration(time.Millisecond * 25), - EvaluationInterval: model.Duration(time.Hour), - }, - } -} - -func getTestConfig(t *testing.T, global *GlobalConfig, scrapeAddr string) Config { - t.Helper() - - scrapeCfg := config.DefaultScrapeConfig - scrapeCfg.JobName = "test" - scrapeCfg.ScrapeInterval = global.Prometheus.ScrapeInterval - scrapeCfg.ScrapeTimeout = global.Prometheus.ScrapeTimeout - scrapeCfg.ServiceDiscoveryConfigs = discovery.Configs{ - discovery.StaticConfig{{ - Targets: []model.LabelSet{{ - model.AddressLabel: model.LabelValue(scrapeAddr), - }}, - Labels: model.LabelSet{}, - }}, - } - - cfg := DefaultConfig - cfg.Name = "test" - cfg.ScrapeConfigs = []*config.ScrapeConfig{&scrapeCfg} - cfg.global = *global - - return cfg -} - -type mockWalStorage struct { - storage.Queryable - storage.ChunkQueryable - - directory string - mut sync.Mutex - series map[storage.SeriesRef]int -} - -func (s *mockWalStorage) Directory() string { return s.directory } -func (s *mockWalStorage) StartTime() (int64, error) { return 0, nil } -func (s *mockWalStorage) WriteStalenessMarkers(f func() int64) error { return nil } -func (s *mockWalStorage) Close() error { return nil } -func (s *mockWalStorage) Truncate(mint int64) error { return nil } - -func (s *mockWalStorage) Appender(context.Context) storage.Appender { - return &mockAppender{s: s} -} - -type mockAppender struct { - s *mockWalStorage -} - -func (a *mockAppender) Append(ref storage.SeriesRef, l labels.Labels, t int64, v float64) (storage.SeriesRef, error) { - if ref == 0 { - return a.Add(l, t, v) - } - return ref, a.AddFast(ref, t, v) -} - -// Add adds a new series and sets its written count to 1. -func (a *mockAppender) Add(l labels.Labels, t int64, v float64) (storage.SeriesRef, error) { - a.s.mut.Lock() - defer a.s.mut.Unlock() - - hash := l.Hash() - a.s.series[storage.SeriesRef(hash)] = 1 - return storage.SeriesRef(hash), nil -} - -// AddFast increments the number of writes to an existing series. 
-func (a *mockAppender) AddFast(ref storage.SeriesRef, t int64, v float64) error { - a.s.mut.Lock() - defer a.s.mut.Unlock() - _, ok := a.s.series[ref] - if !ok { - return storage.ErrNotFound - } - - a.s.series[ref]++ - return nil -} - -func (a *mockAppender) AppendExemplar(ref storage.SeriesRef, l labels.Labels, e exemplar.Exemplar) (storage.SeriesRef, error) { - return 0, nil -} - -func (a *mockAppender) UpdateMetadata(ref storage.SeriesRef, l labels.Labels, m metadata.Metadata) (storage.SeriesRef, error) { - return 0, nil -} - -func (a *mockAppender) AppendHistogram(ref storage.SeriesRef, l labels.Labels, t int64, h *histogram.Histogram, fh *histogram.FloatHistogram) (storage.SeriesRef, error) { - return 0, nil -} - -func (a *mockAppender) Commit() error { - return nil -} - -func (a *mockAppender) Rollback() error { - return nil -} - -func runInstance(t *testing.T, i *Instance) { - ctx, cancel := context.WithCancel(context.Background()) - t.Cleanup(func() { cancel() }) - go require.NotPanics(t, func() { - _ = i.Run(ctx) - }) -} diff --git a/internal/static/metrics/instance/manager.go b/internal/static/metrics/instance/manager.go deleted file mode 100644 index 6bb90324fd..0000000000 --- a/internal/static/metrics/instance/manager.go +++ /dev/null @@ -1,379 +0,0 @@ -package instance - -import ( - "context" - "errors" - "fmt" - "net/http" - "sync" - "time" - - "github.com/go-kit/log" - "github.com/go-kit/log/level" - "github.com/prometheus/client_golang/prometheus" - "github.com/prometheus/client_golang/prometheus/promauto" - "github.com/prometheus/prometheus/scrape" - "github.com/prometheus/prometheus/storage" -) - -var ( - instanceAbnormalExits = promauto.NewCounterVec(prometheus.CounterOpts{ - Name: "agent_metrics_instance_abnormal_exits_total", - Help: "Total number of times a Prometheus instance exited unexpectedly, causing it to be restarted.", - }, []string{"instance_name"}) - - currentActiveInstances = promauto.NewGauge(prometheus.GaugeOpts{ - Name: "agent_metrics_active_instances", - Help: "Current number of active instances being used by the agent.", - }) - - // DefaultBasicManagerConfig is the default config for the BasicManager. - DefaultBasicManagerConfig = BasicManagerConfig{ - InstanceRestartBackoff: 5 * time.Second, - } -) - -// Manager represents a set of methods for manipulating running instances at -// runtime. -type Manager interface { - // GetInstance retrieves a ManagedInstance by name. - GetInstance(name string) (ManagedInstance, error) - - // ListInstances returns all currently managed instances running - // within the Manager. The key will be the instance name from their config. - ListInstances() map[string]ManagedInstance - - // ListConfigs returns the config objects associated with a managed - // instance. The key will be the Name field from Config. - ListConfigs() map[string]Config - - // ApplyConfig creates a new Config or updates an existing Config if - // one with Config.Name already exists. - ApplyConfig(Config) error - - // DeleteConfig deletes a given managed instance based on its Config.Name. - DeleteConfig(name string) error - - // Stop stops the Manager and all managed instances. - Stop() -} - -// ManagedInstance is implemented by Instance. It is defined as an interface -// for the sake of testing from Manager implementations. 
-type ManagedInstance interface {
- Run(ctx context.Context) error
- Ready() bool
- Update(c Config) error
- TargetsActive() map[string][]*scrape.Target
- StorageDirectory() string
- Appender(ctx context.Context) storage.Appender
- WriteHandler() http.Handler
-}
-
-// BasicManagerConfig controls the operations of a BasicManager.
-type BasicManagerConfig struct {
- InstanceRestartBackoff time.Duration
-}
-
-// BasicManager implements the Manager interface. BasicManager will directly
-// launch instances and perform no extra processing.
-//
-// Other implementations of Manager usually wrap a BasicManager.
-type BasicManager struct {
- cfgMut sync.Mutex
- cfg BasicManagerConfig
- logger log.Logger
-
- // Take care when locking mut: if you hold onto a lock of mut while calling
- // Stop on a process, you will deadlock.
- mut sync.Mutex
- processes map[string]*managedProcess
-
- launch Factory
-}
-
-// managedProcess represents a goroutine running a ManagedInstance. cancel
-// requests that the goroutine should shut down. done will be closed after the
-// goroutine exits.
-type managedProcess struct {
- cfg Config
- inst ManagedInstance
- cancel context.CancelFunc
- done chan bool
-}
-
-func (p managedProcess) Stop() {
- p.cancel()
- <-p.done
-}
-
-// Factory should return an unstarted instance given some config.
-type Factory func(c Config) (ManagedInstance, error)
-
-// NewBasicManager creates a new BasicManager. The launch function will be
-// invoked any time a new Config is applied.
-//
-// The lifecycle of any ManagedInstance returned by the launch function will
-// be handled by the BasicManager. Instances will be automatically restarted
-// if stopped, updated if the config changes, or removed when the Config is
-// deleted.
-func NewBasicManager(cfg BasicManagerConfig, logger log.Logger, launch Factory) *BasicManager {
- return &BasicManager{
- cfg: cfg,
- logger: logger,
- processes: make(map[string]*managedProcess),
- launch: launch,
- }
-}
-
-// UpdateManagerConfig updates the BasicManagerConfig.
-func (m *BasicManager) UpdateManagerConfig(c BasicManagerConfig) {
- m.cfgMut.Lock()
- defer m.cfgMut.Unlock()
- m.cfg = c
-}
-
-// GetInstance returns the given instance by name.
-func (m *BasicManager) GetInstance(name string) (ManagedInstance, error) {
- m.mut.Lock()
- defer m.mut.Unlock()
-
- process, ok := m.processes[name]
- if !ok {
- return nil, fmt.Errorf("instance %s does not exist", name)
- }
- return process.inst, nil
-}
-
-// ListInstances returns the current active instances managed by BasicManager.
-func (m *BasicManager) ListInstances() map[string]ManagedInstance {
- m.mut.Lock()
- defer m.mut.Unlock()
-
- res := make(map[string]ManagedInstance, len(m.processes))
- for name, process := range m.processes {
- if process == nil {
- continue
- }
- res[name] = process.inst
- }
- return res
-}
-
-// ListConfigs lists the current active configs managed by BasicManager.
-func (m *BasicManager) ListConfigs() map[string]Config {
- m.mut.Lock()
- defer m.mut.Unlock()
-
- res := make(map[string]Config, len(m.processes))
- for name, process := range m.processes {
- res[name] = process.cfg
- }
- return res
-}
-
-// ApplyConfig takes a Config and either starts a new managed instance or
-// updates an existing managed instance. The value for Name in c is used to
-// uniquely identify the Config and determine whether the Config has an
-// existing associated managed instance.
-func (m *BasicManager) ApplyConfig(c Config) error {
- m.mut.Lock()
- defer m.mut.Unlock()
-
- // If the config already exists, we need to update it.
- proc, ok := m.processes[c.Name]
- if ok {
- err := proc.inst.Update(c)
-
- // If the instance could not be dynamically updated, we need to force the
- // update by restarting it. If it failed for another reason, something
- // serious went wrong and we'll completely give up without stopping the
- // existing job.
- if errors.Is(err, ErrInvalidUpdate{}) {
- level.Info(m.logger).Log("msg", "could not dynamically update instance, will manually restart", "instance", c.Name, "reason", err)
-
- // NOTE: we don't return here; we fall through to spawn the new instance.
- proc.Stop()
- } else if err != nil {
- return fmt.Errorf("failed to update instance %s: %w", c.Name, err)
- } else {
- level.Info(m.logger).Log("msg", "dynamically updated instance", "instance", c.Name)
-
- proc.cfg = c
- return nil
- }
- }
-
- // Spawn a new process for the new config.
- err := m.spawnProcess(c)
- if err != nil {
- return err
- }
-
- currentActiveInstances.Inc()
- return nil
-}
-
-func (m *BasicManager) spawnProcess(c Config) error {
- inst, err := m.launch(c)
- if err != nil {
- return err
- }
-
- ctx, cancel := context.WithCancel(context.Background())
- done := make(chan bool)
-
- proc := &managedProcess{
- cancel: cancel,
- done: done,
- cfg: c,
- inst: inst,
- }
- m.processes[c.Name] = proc
-
- go func() {
- m.runProcess(ctx, c.Name, inst)
- close(done)
-
- // Now that the process has stopped, we can remove it from our managed
- // list.
- //
- // However, it's possible that a new Config may have been applied and
- // overwritten the initial value in our map. We only want to delete the
- // process from the map if it hasn't changed from what we initially
- // set it to.
- //
- // We only use the instance for comparing (which will never change) because
- // the instance may have dynamically been given a new config since this
- // goroutine started.
- m.mut.Lock()
- if storedProc, exist := m.processes[c.Name]; exist && storedProc.inst == inst {
- delete(m.processes, c.Name)
- }
- m.mut.Unlock()
-
- currentActiveInstances.Dec()
- }()
-
- return nil
-}
-
-// runProcess runs an instance and keeps it alive until it is explicitly stopped
-// by cancelling the context.
-func (m *BasicManager) runProcess(ctx context.Context, name string, inst ManagedInstance) {
- for {
- err := inst.Run(ctx)
- if err != nil && err != context.Canceled {
- backoff := m.instanceRestartBackoff()
-
- instanceAbnormalExits.WithLabelValues(name).Inc()
- level.Error(m.logger).Log("msg", "instance stopped abnormally, restarting after backoff period", "err", err, "backoff", backoff, "instance", name)
- time.Sleep(backoff)
- } else {
- level.Info(m.logger).Log("msg", "stopped instance", "instance", name)
- break
- }
- }
-}
-
-func (m *BasicManager) instanceRestartBackoff() time.Duration {
- m.cfgMut.Lock()
- defer m.cfgMut.Unlock()
- return m.cfg.InstanceRestartBackoff
-}
-
-// DeleteConfig removes a managed instance by its config name. Returns an error
-// if there is no such managed instance with the given name.
-func (m *BasicManager) DeleteConfig(name string) error {
- m.mut.Lock()
- proc, ok := m.processes[name]
- if !ok {
- m.mut.Unlock()
- return errors.New("config does not exist")
- }
- m.mut.Unlock()
-
- // spawnProcess is responsible for removing the process from the map after it
- // stops so we don't need to delete anything from m.processes here.
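Taken together, ApplyConfig, spawnProcess, and runProcess give BasicManager its restart-and-update loop. A minimal sketch of driving it from within this package, reusing the NoOpInstance and DefaultBasicManagerConfig identifiers deleted elsewhere in this diff; the config name is illustrative:

```go
package instance

import (
	"os"

	"github.com/go-kit/log"
)

// exampleBasicManager is illustrative only: it drives a BasicManager with a
// no-op Factory.
func exampleBasicManager() error {
	logger := log.NewLogfmtLogger(log.NewSyncWriter(os.Stderr))

	// The Factory returns an unstarted instance; BasicManager owns its
	// lifecycle (restart on abnormal exit, update, delete) from here on.
	m := NewBasicManager(DefaultBasicManagerConfig, logger, func(c Config) (ManagedInstance, error) {
		return NoOpInstance{}, nil
	})
	defer m.Stop()

	// The first ApplyConfig for a Name spawns a process; reapplying the same
	// Name attempts a dynamic Update before falling back to a restart.
	if err := m.ApplyConfig(Config{Name: "primary"}); err != nil {
		return err
	}
	return m.ApplyConfig(Config{Name: "primary"})
}
```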
- proc.Stop() - return nil -} - -// Stop stops the BasicManager and stops all active processes for configs. -func (m *BasicManager) Stop() { - var wg sync.WaitGroup - - // We don't need to change m.processes here; processes remove themselves - // from the map (in spawnProcess). - m.mut.Lock() - wg.Add(len(m.processes)) - for _, proc := range m.processes { - go func(proc *managedProcess) { - proc.Stop() - wg.Done() - }(proc) - } - m.mut.Unlock() - - wg.Wait() -} - -// MockManager exposes methods of the Manager interface as struct fields. -// Useful for tests. -type MockManager struct { - GetInstanceFunc func(name string) (ManagedInstance, error) - ListInstancesFunc func() map[string]ManagedInstance - ListConfigsFunc func() map[string]Config - ApplyConfigFunc func(Config) error - DeleteConfigFunc func(name string) error - StopFunc func() -} - -// GetInstance implements Manager. -func (m MockManager) GetInstance(name string) (ManagedInstance, error) { - if m.GetInstanceFunc != nil { - return m.GetInstanceFunc(name) - } - panic("GetInstanceFunc not implemented") -} - -// ListInstances implements Manager. -func (m MockManager) ListInstances() map[string]ManagedInstance { - if m.ListInstancesFunc != nil { - return m.ListInstancesFunc() - } - panic("ListInstancesFunc not implemented") -} - -// ListConfigs implements Manager. -func (m MockManager) ListConfigs() map[string]Config { - if m.ListConfigsFunc != nil { - return m.ListConfigsFunc() - } - panic("ListConfigsFunc not implemented") -} - -// ApplyConfig implements Manager. -func (m MockManager) ApplyConfig(c Config) error { - if m.ApplyConfigFunc != nil { - return m.ApplyConfigFunc(c) - } - panic("ApplyConfigFunc not implemented") -} - -// DeleteConfig implements Manager. -func (m MockManager) DeleteConfig(name string) error { - if m.DeleteConfigFunc != nil { - return m.DeleteConfigFunc(name) - } - panic("DeleteConfigFunc not implemented") -} - -// Stop implements Manager. 
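MockManager's function fields let a test stub only the Manager calls it actually exercises. A short hypothetical helper showing the pattern; everything here is illustrative:

```go
package instance

// exampleMockManager is illustrative only: it records applied configs without
// running real instances. Any nil func panics, making unexpected calls obvious.
func exampleMockManager() []Config {
	var applied []Config
	m := MockManager{
		ApplyConfigFunc: func(c Config) error {
			applied = append(applied, c)
			return nil
		},
		StopFunc: func() {},
	}

	_ = m.ApplyConfig(Config{Name: "test"}) // recorded in applied
	m.Stop()                                // fine: StopFunc is set
	return applied
}
```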
-func (m MockManager) Stop() { - if m.StopFunc != nil { - m.StopFunc() - return - } - panic("StopFunc not implemented") -} diff --git a/internal/static/metrics/instance/manager_test.go b/internal/static/metrics/instance/manager_test.go deleted file mode 100644 index 6afed26732..0000000000 --- a/internal/static/metrics/instance/manager_test.go +++ /dev/null @@ -1,158 +0,0 @@ -package instance - -import ( - "context" - "fmt" - "net/http" - "os" - "testing" - - "github.com/go-kit/log" - "github.com/prometheus/prometheus/scrape" - "github.com/prometheus/prometheus/storage" - "github.com/stretchr/testify/require" -) - -func TestBasicManager_ApplyConfig(t *testing.T) { - logger := log.NewLogfmtLogger(log.NewSyncWriter(os.Stderr)) - - baseMock := mockInstance{ - RunFunc: func(ctx context.Context) error { - logger.Log("msg", "starting an instance") - <-ctx.Done() - return nil - }, - UpdateFunc: func(c Config) error { - return nil - }, - TargetsActiveFunc: func() map[string][]*scrape.Target { - return nil - }, - } - - t.Run("dynamic update successful", func(t *testing.T) { - spawnedCount := 0 - spawner := func(c Config) (ManagedInstance, error) { - spawnedCount++ - - newMock := baseMock - return &newMock, nil - } - - cm := NewBasicManager(DefaultBasicManagerConfig, logger, spawner) - - for i := 0; i < 10; i++ { - err := cm.ApplyConfig(Config{Name: "test"}) - require.NoError(t, err) - } - - require.Equal(t, 1, spawnedCount) - }) - - t.Run("dynamic update unsuccessful", func(t *testing.T) { - spawnedCount := 0 - spawner := func(c Config) (ManagedInstance, error) { - spawnedCount++ - - newMock := baseMock - newMock.UpdateFunc = func(c Config) error { - return ErrInvalidUpdate{ - Inner: fmt.Errorf("cannot dynamically update for testing reasons"), - } - } - return &newMock, nil - } - - cm := NewBasicManager(DefaultBasicManagerConfig, logger, spawner) - - for i := 0; i < 10; i++ { - err := cm.ApplyConfig(Config{Name: "test"}) - require.NoError(t, err) - } - - require.Equal(t, 10, spawnedCount) - }) - - t.Run("dynamic update errored", func(t *testing.T) { - spawnedCount := 0 - spawner := func(c Config) (ManagedInstance, error) { - spawnedCount++ - - newMock := baseMock - newMock.UpdateFunc = func(c Config) error { - return fmt.Errorf("something really bad happened") - } - return &newMock, nil - } - - cm := NewBasicManager(DefaultBasicManagerConfig, logger, spawner) - - // Creation should succeed - err := cm.ApplyConfig(Config{Name: "test"}) - require.NoError(t, err) - - // ...but the update should fail - err = cm.ApplyConfig(Config{Name: "test"}) - require.Error(t, err, "something really bad happened") - require.Equal(t, 1, spawnedCount) - }) -} - -type mockInstance struct { - RunFunc func(ctx context.Context) error - ReadyFunc func() bool - UpdateFunc func(c Config) error - TargetsActiveFunc func() map[string][]*scrape.Target - StorageDirectoryFunc func() string - AppenderFunc func() storage.Appender - WriteHandlerFunc func() http.Handler -} - -func (m mockInstance) Run(ctx context.Context) error { - if m.RunFunc != nil { - return m.RunFunc(ctx) - } - panic("RunFunc not provided") -} - -func (m mockInstance) Ready() bool { - if m.ReadyFunc != nil { - return m.ReadyFunc() - } - panic("ReadyFunc not provided") -} - -func (m mockInstance) Update(c Config) error { - if m.UpdateFunc != nil { - return m.UpdateFunc(c) - } - panic("UpdateFunc not provided") -} - -func (m mockInstance) TargetsActive() map[string][]*scrape.Target { - if m.TargetsActiveFunc != nil { - return m.TargetsActiveFunc() - } - 
panic("TargetsActiveFunc not provided") -} - -func (m mockInstance) StorageDirectory() string { - if m.StorageDirectoryFunc != nil { - return m.StorageDirectoryFunc() - } - panic("StorageDirectoryFunc not provided") -} - -func (m mockInstance) WriteHandler() http.Handler { - if m.WriteHandlerFunc != nil { - return m.WriteHandlerFunc() - } - panic("GetWriteHandlerFunc not provided") -} - -func (m mockInstance) Appender(_ context.Context) storage.Appender { - if m.AppenderFunc != nil { - return m.AppenderFunc() - } - panic("AppenderFunc not provided") -} diff --git a/internal/static/metrics/instance/modal_manager.go b/internal/static/metrics/instance/modal_manager.go index 18abb4f1ed..7308b351aa 100644 --- a/internal/static/metrics/instance/modal_manager.go +++ b/internal/static/metrics/instance/modal_manager.go @@ -2,12 +2,6 @@ package instance import ( "fmt" - "sync" - - "github.com/go-kit/log" - "github.com/go-kit/log/level" - "github.com/prometheus/client_golang/prometheus" - "github.com/prometheus/client_golang/prometheus/promauto" ) // Mode controls how instances are created. @@ -42,175 +36,3 @@ func (m *Mode) UnmarshalYAML(unmarshal func(interface{}) error) error { return fmt.Errorf("unsupported instance_mode '%s'. supported values 'shared', 'distinct'", plain) } } - -// ModalManager runs instances by either grouping them or running them fully -// separately. -type ModalManager struct { - mut sync.RWMutex - mode Mode - configs map[string]Config - - changedConfigs *prometheus.CounterVec - currentActiveConfigs prometheus.Gauge - - log log.Logger - - // The ModalManager wraps around a "final" Manager that is intended to - // launch and manage instances based on Configs. This is specified here by the - // "wrapped" Manager. - // - // However, there may be another manager performing formations on the configs - // before they are passed through to wrapped. This is specified by the "active" - // Manager. - // - // If no transformations on Configs are needed, active will be identical to - // wrapped. - wrapped, active Manager -} - -// NewModalManager creates a new ModalManager. -func NewModalManager(reg prometheus.Registerer, l log.Logger, next Manager, mode Mode) (*ModalManager, error) { - changedConfigs := promauto.With(reg).NewCounterVec(prometheus.CounterOpts{ - Name: "agent_metrics_configs_changed_total", - Help: "Total number of dynamically updated configs", - }, []string{"event"}) - currentActiveConfigs := promauto.With(reg).NewGauge(prometheus.GaugeOpts{ - Name: "agent_metrics_active_configs", - Help: "Current number of active configs being used by the agent.", - }) - - mm := ModalManager{ - wrapped: next, - log: l, - changedConfigs: changedConfigs, - currentActiveConfigs: currentActiveConfigs, - configs: make(map[string]Config), - } - if err := mm.SetMode(mode); err != nil { - return nil, err - } - return &mm, nil -} - -// SetMode updates the mode ModalManager is running in. Changing the mode is -// an expensive operation; all underlying configs must be stopped and then -// reapplied. -func (m *ModalManager) SetMode(newMode Mode) error { - if newMode == "" { - newMode = DefaultMode - } - - m.mut.Lock() - defer m.mut.Unlock() - - var ( - prevMode = m.mode - prevActive = m.active - ) - - if prevMode == newMode { - return nil - } - - // Set the active Manager based on the new mode. "distinct" means no transformations - // need to be applied and we can use the wrapped Manager directly. Otherwise, we need - // to create a new Manager to apply transformations. 
- switch newMode { - case ModeDistinct: - m.active = m.wrapped - case ModeShared: - m.active = NewGroupManager(m.wrapped) - default: - panic("unknown mode " + m.mode) - } - m.mode = newMode - - // Remove all configs from the previous active Manager. - if prevActive != nil { - prevActive.Stop() - } - - // Re-apply configs to the new active Manager. - var firstError error - for name, cfg := range m.configs { - err := m.active.ApplyConfig(cfg) - if err != nil { - level.Error(m.log).Log("msg", "failed to apply config when changing modes", "name", name, "prev_mode", prevMode, "new_mode", newMode, "err", err) - } - if firstError == nil && err != nil { - firstError = err - } - } - - return firstError -} - -// GetInstance implements Manager. -func (m *ModalManager) GetInstance(name string) (ManagedInstance, error) { - m.mut.RLock() - defer m.mut.RUnlock() - return m.active.GetInstance(name) -} - -// ListInstances implements Manager. -func (m *ModalManager) ListInstances() map[string]ManagedInstance { - m.mut.RLock() - defer m.mut.RUnlock() - return m.active.ListInstances() -} - -// ListConfigs implements Manager. -func (m *ModalManager) ListConfigs() map[string]Config { - m.mut.RLock() - defer m.mut.RUnlock() - return m.active.ListConfigs() -} - -// ApplyConfig implements Manager. -func (m *ModalManager) ApplyConfig(c Config) error { - m.mut.Lock() - defer m.mut.Unlock() - - if err := m.active.ApplyConfig(c); err != nil { - return err - } - - if _, existingConfig := m.configs[c.Name]; !existingConfig { - m.currentActiveConfigs.Inc() - m.changedConfigs.WithLabelValues("created").Inc() - } else { - m.changedConfigs.WithLabelValues("updated").Inc() - } - - m.configs[c.Name] = c - - return nil -} - -// DeleteConfig implements Manager. -func (m *ModalManager) DeleteConfig(name string) error { - m.mut.Lock() - defer m.mut.Unlock() - - if err := m.active.DeleteConfig(name); err != nil { - return err - } - - if _, existingConfig := m.configs[name]; existingConfig { - m.currentActiveConfigs.Dec() - delete(m.configs, name) - } - - m.changedConfigs.WithLabelValues("deleted").Inc() - return nil -} - -// Stop implements Manager. -func (m *ModalManager) Stop() { - m.mut.Lock() - defer m.mut.Unlock() - - m.active.Stop() - m.currentActiveConfigs.Set(0) - m.configs = make(map[string]Config) -} diff --git a/internal/static/metrics/instance/noop.go b/internal/static/metrics/instance/noop.go deleted file mode 100644 index f9f86b8713..0000000000 --- a/internal/static/metrics/instance/noop.go +++ /dev/null @@ -1,49 +0,0 @@ -package instance - -import ( - "context" - "net/http" - - "github.com/prometheus/prometheus/scrape" - "github.com/prometheus/prometheus/storage" -) - -// NoOpInstance implements the Instance interface in pkg/prom -// but does not do anything. Useful for tests. -type NoOpInstance struct{} - -// Run implements Instance. -func (NoOpInstance) Run(ctx context.Context) error { - <-ctx.Done() - return nil -} - -// Ready implements Instance. -func (NoOpInstance) Ready() bool { - return true -} - -// Update implements Instance. -func (NoOpInstance) Update(_ Config) error { - return nil -} - -// TargetsActive implements Instance. -func (NoOpInstance) TargetsActive() map[string][]*scrape.Target { - return nil -} - -// StorageDirectory implements Instance. -func (NoOpInstance) StorageDirectory() string { - return "" -} - -// WriteHandler implements Instance. 
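A sketch of how the two modes compose, from within this package. The logger and launch Factory are assumed to be supplied by the caller, and the registry is illustrative:

```go
package instance

import (
	"github.com/go-kit/log"
	"github.com/prometheus/client_golang/prometheus"
)

// exampleModalManager is illustrative only: it wraps a BasicManager in a
// ModalManager and switches modes.
func exampleModalManager(logger log.Logger, launch Factory) error {
	base := NewBasicManager(DefaultBasicManagerConfig, logger, launch)

	// ModeShared groups configs behind a GroupManager; ModeDistinct hands
	// them straight to the wrapped Manager.
	mm, err := NewModalManager(prometheus.NewRegistry(), logger, base, ModeShared)
	if err != nil {
		return err
	}
	if err := mm.ApplyConfig(Config{Name: "example"}); err != nil {
		return err
	}

	// Deliberately expensive: every config is stopped and reapplied against
	// the new active Manager.
	return mm.SetMode(ModeDistinct)
}
```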
-func (NoOpInstance) WriteHandler() http.Handler { - return nil -} - -// Appender implements Instance -func (NoOpInstance) Appender(_ context.Context) storage.Appender { - return nil -} diff --git a/internal/static/server/logger.go b/internal/static/server/logger.go deleted file mode 100644 index 0068775ac2..0000000000 --- a/internal/static/server/logger.go +++ /dev/null @@ -1,118 +0,0 @@ -package server - -import ( - "sync" - - "github.com/go-kit/log" - util_log "github.com/grafana/agent/internal/util/log" - dskit "github.com/grafana/dskit/log" -) - -// Logger implements Go Kit's log.Logger interface. It supports being -// dynamically updated at runtime. -type Logger struct { - // mut protects against race conditions accessing l, which can be modified - // and accessed concurrently if ApplyConfig and Log are called at the same - // time. - mut sync.RWMutex - l log.Logger - - // HookLogger is used to temporarily hijack logs for support bundles. - HookLogger HookLogger - - // makeLogger will default to defaultLogger. It's a struct - // member to make testing work properly. - makeLogger func(*Config) (log.Logger, error) -} - -// HookLogger is used to temporarily redirect -type HookLogger struct { - mut sync.RWMutex - enabled bool - logger log.Logger -} - -// NewLogger creates a new Logger. -func NewLogger(cfg *Config) *Logger { - return newLogger(cfg, defaultLogger) -} - -// NewLoggerFromLevel creates a new logger from logging.Level and logging.Format. -func NewLoggerFromLevel(lvl dskit.Level, fmt string) *Logger { - logger, err := makeDefaultLogger(lvl, fmt) - if err != nil { - panic(err) - } - return &Logger{ - l: logger, - } -} - -func newLogger(cfg *Config, ctor func(*Config) (log.Logger, error)) *Logger { - l := Logger{makeLogger: ctor} - if err := l.ApplyConfig(cfg); err != nil { - panic(err) - } - return &l -} - -// ApplyConfig applies configuration changes to the logger. -func (l *Logger) ApplyConfig(cfg *Config) error { - l.mut.Lock() - defer l.mut.Unlock() - - newLogger, err := l.makeLogger(cfg) - if err != nil { - return err - } - - l.l = newLogger - return nil -} - -func defaultLogger(cfg *Config) (log.Logger, error) { - return makeDefaultLogger(cfg.LogLevel.Level, cfg.LogFormat) -} - -func makeDefaultLogger(lvl dskit.Level, fmt string) (log.Logger, error) { - var l log.Logger - - l, err := util_log.NewPrometheusLogger(lvl, fmt) - if err != nil { - return nil, err - } - - // There are two wrappers on the log so skip two extra stacks vs default - return log.With(l, "caller", log.Caller(5)), nil -} - -// Log logs a log line. -func (l *Logger) Log(kvps ...interface{}) error { - l.mut.RLock() - defer l.mut.RUnlock() - err := l.HookLogger.Log(kvps...) - if err != nil { - return err - } - return l.l.Log(kvps...) -} - -// Log implements log.Logger. -func (hl *HookLogger) Log(kvps ...interface{}) error { - hl.mut.RLock() - defer hl.mut.RUnlock() - if hl.enabled { - return hl.logger.Log(kvps...) - } - return nil -} - -// Set where HookedLogger should tee logs to. -// If a nil logger is passed, the HookedLogger is disabled. 
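For instance, the Logger/HookLogger pair above supports briefly teeing log lines to a second sink while a support bundle is collected. A minimal sketch using the API as deleted here; the buffer is illustrative:

```go
package server

import (
	"bytes"

	"github.com/go-kit/log"
)

// exampleSupportBundleCapture is illustrative only: it tees log lines into a
// buffer and then disables the hook again.
func exampleSupportBundleCapture() {
	cfg := DefaultConfig()
	l := NewLogger(&cfg)

	var buf bytes.Buffer
	l.HookLogger.Set(log.NewLogfmtLogger(log.NewSyncWriter(&buf)))

	_ = l.Log("msg", "captured by both the hook and the real logger")

	l.HookLogger.Set(nil) // nil disables the hook; buf now holds the copy
}
```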
-func (hl *HookLogger) Set(l log.Logger) { - hl.mut.Lock() - defer hl.mut.Unlock() - - hl.enabled = l != nil - hl.logger = l -} diff --git a/internal/static/server/logger_test.go b/internal/static/server/logger_test.go deleted file mode 100644 index 083933e495..0000000000 --- a/internal/static/server/logger_test.go +++ /dev/null @@ -1,58 +0,0 @@ -package server - -import ( - "bytes" - "testing" - - "github.com/go-kit/log" - "github.com/go-kit/log/level" - "github.com/stretchr/testify/require" - "gopkg.in/yaml.v2" -) - -func TestLogger_DefaultParameters(t *testing.T) { - makeLogger := func(cfg *Config) (log.Logger, error) { - var l log.Logger - require.Equal(t, "info", cfg.LogLevel.String()) - require.Equal(t, "logfmt", cfg.LogFormat) - return l, nil - } - defaultCfg := DefaultConfig() - newLogger(&defaultCfg, makeLogger).makeLogger(&defaultCfg) -} - -func TestLogger_ApplyConfig(t *testing.T) { - var buf bytes.Buffer - makeLogger := func(cfg *Config) (log.Logger, error) { - l := log.NewLogfmtLogger(log.NewSyncWriter(&buf)) - if cfg.LogFormat == "json" { - l = log.NewJSONLogger(log.NewSyncWriter(&buf)) - } - l = level.NewFilter(l, cfg.LogLevel.Level.Option) - return l, nil - } - - var cfg Config - cfgText := `log_level: error` - - err := yaml.Unmarshal([]byte(cfgText), &cfg) - require.NoError(t, err) - - l := newLogger(&cfg, makeLogger) - level.Debug(l).Log("msg", "this should not appear") - - cfgText = ` -log_level: debug -log_format: json` - err = yaml.Unmarshal([]byte(cfgText), &cfg) - require.NoError(t, err) - - err = l.ApplyConfig(&cfg) - require.NoError(t, err) - - level.Debug(l).Log("msg", "this should appear") - require.JSONEq(t, `{ - "level":"debug", - "msg":"this should appear" - }`, buf.String()) -} diff --git a/internal/static/server/logger_windows.go b/internal/static/server/logger_windows.go deleted file mode 100644 index c84bd0888c..0000000000 --- a/internal/static/server/logger_windows.go +++ /dev/null @@ -1,110 +0,0 @@ -package server - -import ( - "runtime" - "strings" - - "github.com/go-kit/log/level" - - "github.com/go-kit/log" - el "golang.org/x/sys/windows/svc/eventlog" -) - -// Default name for the Grafana Agent under Windows -const ServiceName = "Grafana Agent" - -// NewWindowsEventLogger creates a new logger that writes to the event log -func NewWindowsEventLogger(cfg *Config) *Logger { - return newLogger(cfg, makeWindowsEventLogger) -} - -func makeWindowsEventLogger(cfg *Config) (log.Logger, error) { - // Set up the log in windows events - err := el.InstallAsEventCreate(ServiceName, el.Error|el.Info|el.Warning) - - // Agent should expect an error of 'already exists' if the Event Log sink has already previously been installed - if err != nil && !strings.Contains(err.Error(), "already exists") { - return nil, err - } - il, err := el.Open(ServiceName) - if err != nil { - return nil, err - } - - // Ensure the logger gets closed when the GC runs. It's valid to have more than one win logger open concurrently. 
- runtime.SetFinalizer(il, func(l *el.Log) { - l.Close() - }) - - // These are set up to be writers for each Windows log level - // Set up this way so we can utilize all the benefits of logformatter - infoLogger := newWinLogWrapper(cfg.LogFormat, func(p []byte) error { - return il.Info(1, string(p)) - }) - warningLogger := newWinLogWrapper(cfg.LogFormat, func(p []byte) error { - return il.Warning(1, string(p)) - }) - - errorLogger := newWinLogWrapper(cfg.LogFormat, func(p []byte) error { - return il.Error(1, string(p)) - }) - - wl := &winLogger{ - errorLogger: errorLogger, - infoLogger: infoLogger, - warningLogger: warningLogger, - } - return level.NewFilter(wl, cfg.LogLevel.Level.Option), nil -} - -// Looks through the key value pairs in the log for level and extract the value -func getLevel(keyvals ...interface{}) level.Value { - for i := 0; i < len(keyvals); i++ { - if vo, ok := keyvals[i].(level.Value); ok { - return vo - } - } - return nil -} - -func newWinLogWrapper(format string, write func(p []byte) error) log.Logger { - infoWriter := &winLogWriter{writer: write} - infoLogger := log.NewLogfmtLogger(infoWriter) - if format == "json" { - infoLogger = log.NewJSONLogger(infoWriter) - } - return infoLogger -} - -type winLogger struct { - errorLogger log.Logger - infoLogger log.Logger - warningLogger log.Logger -} - -func (w *winLogger) Log(keyvals ...interface{}) error { - lvl := getLevel(keyvals...) - // 3 different loggers are used so that agent can utilize the formatting features of go-kit logging - // if agent did not use this then the windows logger uses different function calls for different levels - // this is paired with the fact that the io.Writer interface only gives a byte array. - switch lvl { - case level.DebugValue(): - return w.infoLogger.Log(keyvals...) - case level.InfoValue(): - return w.infoLogger.Log(keyvals...) - case level.WarnValue(): - return w.warningLogger.Log(keyvals...) - case level.ErrorValue(): - return w.errorLogger.Log(keyvals...) - default: - return w.infoLogger.Log(keyvals...) - } -} - -type winLogWriter struct { - writer func(p []byte) error -} - -func (i *winLogWriter) Write(p []byte) (n int, err error) { - return len(p), i.writer(p) -} diff --git a/internal/static/server/server.go b/internal/static/server/server.go index 26f6210003..adae8c6a30 100644 --- a/internal/static/server/server.go +++ b/internal/static/server/server.go @@ -6,438 +6,9 @@ package server import ( "context" - "errors" - "fmt" "net" - "net/http" - _ "net/http/pprof" // anonymous import to get the pprof handler registered - "sync" - - "github.com/go-kit/log" - "github.com/go-kit/log/level" - "github.com/gorilla/mux" - "github.com/grafana/ckit/memconn" - "github.com/grafana/dskit/middleware" - _ "github.com/grafana/pyroscope-go/godeltaprof/http/pprof" // anonymous import to get the godeltaprof handler registered - grpc_middleware "github.com/grpc-ecosystem/go-grpc-middleware" - "github.com/hashicorp/go-multierror" - "github.com/oklog/run" - otgrpc "github.com/opentracing-contrib/go-grpc" - "github.com/opentracing/opentracing-go" - "github.com/prometheus/client_golang/prometheus" - "github.com/prometheus/client_golang/prometheus/promhttp" - "golang.org/x/net/netutil" - "google.golang.org/grpc" - "google.golang.org/grpc/keepalive" ) // DialContextFunc is a function matching the signature of // net.Dialer.DialContext. type DialContextFunc func(ctx context.Context, network string, addr string) (net.Conn, error) - -// Server wraps an HTTP and gRPC server with some common initialization. 
-// -// Unless instrumentation is disabled in the Servers config, Prometheus metrics -// will be automatically generated for the server. -type Server struct { - flagsMut sync.Mutex - flags Flags - - // Listeners for in-memory connections. These never use TLS. - httpMemListener *memconn.Listener - grpcMemListener *memconn.Listener - - // Listeners to use for connections. These will use TLS when TLS is enabled. - httpListener net.Listener - grpcListener net.Listener - - updateHTTPTLS func(TLSConfig) error - updateGRPCTLS func(TLSConfig) error - - HTTP *mux.Router - HTTPServer *http.Server - GRPC *grpc.Server - - // DialContext creates a connection to the given network/address. If address - // matches the Server's internal HTTP or gRPC address, an internal in-memory - // connection will be opened. - DialContext DialContextFunc -} - -type metrics struct { - tcpConnections *prometheus.GaugeVec - tcpConnectionsLimit *prometheus.GaugeVec - requestDuration *prometheus.HistogramVec - receivedMessageSize *prometheus.HistogramVec - sentMessageSize *prometheus.HistogramVec - inflightRequests *prometheus.GaugeVec -} - -func newMetrics(r prometheus.Registerer) (*metrics, error) { - var m metrics - - // Create metrics for the server - m.tcpConnections = prometheus.NewGaugeVec(prometheus.GaugeOpts{ - Name: "agent_tcp_connections", - Help: "Current number of accepted TCP connections.", - }, []string{"protocol"}) - m.tcpConnectionsLimit = prometheus.NewGaugeVec(prometheus.GaugeOpts{ - Name: "agent_tcp_connections_limit", - Help: "The maximum number of TCP connections that can be accepted (0 = unlimited)", - }, []string{"protocol"}) - m.requestDuration = prometheus.NewHistogramVec(prometheus.HistogramOpts{ - Name: "agent_request_duration_seconds", - Help: "Time in seconds spent serving HTTP requests.", - }, []string{"method", "route", "status_code", "ws"}) - m.receivedMessageSize = prometheus.NewHistogramVec(prometheus.HistogramOpts{ - Name: "agent_request_message_bytes", - Help: "Size (in bytes) of messages received in the request.", - Buckets: middleware.BodySizeBuckets, - }, []string{"method", "route"}) - m.sentMessageSize = prometheus.NewHistogramVec(prometheus.HistogramOpts{ - Name: "agent_response_message_bytes", - Help: "Size (in bytes) of messages sent in response.", - Buckets: middleware.BodySizeBuckets, - }, []string{"method", "route"}) - m.inflightRequests = prometheus.NewGaugeVec(prometheus.GaugeOpts{ - Name: "agent_inflight_requests", - Help: "Current number of inflight requests.", - }, []string{"method", "route"}) - - if r != nil { - // Register all of our metrics - cc := []prometheus.Collector{ - m.tcpConnections, m.tcpConnectionsLimit, m.requestDuration, m.receivedMessageSize, - m.sentMessageSize, m.inflightRequests, - } - for _, c := range cc { - if err := r.Register(c); err != nil { - return nil, fmt.Errorf("failed registering server metrics: %w", err) - } - } - } - return &m, nil -} - -// New creates a new Server with the given config. -// -// r is used to register Server-specific metrics. If r is nil, no metrics will -// be registered. -// -// g is used for collecting metrics from the instrumentation handlers, when -// enabled. If g is nil, a /metrics endpoint will not be registered. 
-func New(l log.Logger, r prometheus.Registerer, g prometheus.Gatherer, cfg Config, flags Flags) (srv *Server, err error) { - if l == nil { - l = log.NewNopLogger() - } - - switch { - case flags.HTTP.InMemoryAddr == "": - return nil, fmt.Errorf("in memory HTTP address must be configured") - case flags.GRPC.InMemoryAddr == "": - return nil, fmt.Errorf("in memory gRPC address must be configured") - case flags.HTTP.InMemoryAddr == flags.GRPC.InMemoryAddr: - return nil, fmt.Errorf("in memory HTTP and gRPC address must be different") - } - - m, err := newMetrics(r) - if err != nil { - return nil, err - } - - // Create listeners first so we can fail early if the port is in use. - httpListener, err := newHTTPListener(&flags.HTTP, m) - if err != nil { - return nil, err - } - defer func() { - if err != nil { - _ = httpListener.Close() - } - }() - grpcListener, err := newGRPCListener(&flags.GRPC, m) - if err != nil { - return nil, err - } - defer func() { - if err != nil { - _ = httpListener.Close() - } - }() - - // Configure TLS - var ( - updateHTTPTLS func(TLSConfig) error - updateGRPCTLS func(TLSConfig) error - ) - if flags.HTTP.UseTLS { - httpTLSListener, err := newTLSListener(httpListener, cfg.HTTP.TLSConfig, l) - if err != nil { - return nil, fmt.Errorf("generating HTTP TLS config: %w", err) - } - httpListener = httpTLSListener - updateHTTPTLS = httpTLSListener.ApplyConfig - } - if flags.GRPC.UseTLS { - grpcTLSListener, err := newTLSListener(grpcListener, cfg.GRPC.TLSConfig, l) - if err != nil { - return nil, fmt.Errorf("generating GRPC TLS config: %w", err) - } - grpcListener = grpcTLSListener - updateGRPCTLS = grpcTLSListener.ApplyConfig - } - - level.Info(l).Log( - "msg", "server listening on addresses", - "http", httpListener.Addr(), "grpc", grpcListener.Addr(), - "http_tls_enabled", flags.HTTP.UseTLS, "grpc_tls_enabled", flags.GRPC.UseTLS, - ) - - // Build servers - grpcServer := newGRPCServer(l, &flags.GRPC, m) - httpServer, router, err := newHTTPServer(l, g, &flags, m) - if err != nil { - return nil, err - } - - // Build in-memory listeners and dial function - var ( - httpMemListener = memconn.NewListener(nil) - grpcMemListener = memconn.NewListener(nil) - ) - dialFunc := func(ctx context.Context, network string, address string) (net.Conn, error) { - switch address { - case flags.HTTP.InMemoryAddr: - return httpMemListener.DialContext(ctx) - case flags.GRPC.InMemoryAddr: - return grpcMemListener.DialContext(ctx) - default: - return (&net.Dialer{}).DialContext(ctx, network, address) - } - } - - return &Server{ - flags: flags, - httpListener: httpListener, - grpcListener: grpcListener, - httpMemListener: httpMemListener, - grpcMemListener: grpcMemListener, - - updateHTTPTLS: updateHTTPTLS, - updateGRPCTLS: updateGRPCTLS, - - HTTP: router, - HTTPServer: httpServer, - GRPC: grpcServer, - DialContext: dialFunc, - }, nil -} - -func newHTTPListener(opts *HTTPFlags, m *metrics) (net.Listener, error) { - httpAddress := opts.ListenAddress - if httpAddress == "" { - return nil, fmt.Errorf("http address not set") - } - httpListener, err := net.Listen(opts.ListenNetwork, httpAddress) - if err != nil { - return nil, fmt.Errorf("creating HTTP listener: %w", err) - } - httpListener = middleware.CountingListener(httpListener, m.tcpConnections.WithLabelValues("http")) - - m.tcpConnectionsLimit.WithLabelValues("http").Set(float64(opts.ConnLimit)) - if opts.ConnLimit > 0 { - httpListener = netutil.LimitListener(httpListener, opts.ConnLimit) - } - return httpListener, nil -} - -func newGRPCListener(opts 
*GRPCFlags, m *metrics) (net.Listener, error) { - grpcAddress := opts.ListenAddress - if grpcAddress == "" { - return nil, fmt.Errorf("gRPC address not set") - } - grpcListener, err := net.Listen(opts.ListenNetwork, grpcAddress) - if err != nil { - return nil, fmt.Errorf("creating gRPC listener: %w", err) - } - grpcListener = middleware.CountingListener(grpcListener, m.tcpConnections.WithLabelValues("grpc")) - - m.tcpConnectionsLimit.WithLabelValues("grpc").Set(float64(opts.ConnLimit)) - if opts.ConnLimit > 0 { - grpcListener = netutil.LimitListener(grpcListener, opts.ConnLimit) - } - return grpcListener, nil -} - -func newGRPCServer(l log.Logger, opts *GRPCFlags, m *metrics) *grpc.Server { - serverLog := middleware.GRPCServerLog{ - WithRequest: true, - Log: l, - } - grpcOptions := []grpc.ServerOption{ - grpc.UnaryInterceptor(grpc_middleware.ChainUnaryServer( - serverLog.UnaryServerInterceptor, - otgrpc.OpenTracingServerInterceptor(opentracing.GlobalTracer()), - middleware.UnaryServerInstrumentInterceptor(m.requestDuration), - )), - grpc.StreamInterceptor(grpc_middleware.ChainStreamServer( - serverLog.StreamServerInterceptor, - otgrpc.OpenTracingStreamServerInterceptor(opentracing.GlobalTracer()), - middleware.StreamServerInstrumentInterceptor(m.requestDuration), - )), - grpc.KeepaliveParams(keepalive.ServerParameters{ - MaxConnectionIdle: opts.MaxConnectionIdle, - MaxConnectionAge: opts.MaxConnectionAge, - MaxConnectionAgeGrace: opts.MaxConnectionAgeGrace, - Time: opts.KeepaliveTime, - Timeout: opts.KeepaliveTimeout, - }), - grpc.KeepaliveEnforcementPolicy(keepalive.EnforcementPolicy{ - MinTime: opts.MinTimeBetweenPings, - PermitWithoutStream: opts.PingWithoutStreamAllowed, - }), - grpc.MaxRecvMsgSize(opts.MaxRecvMsgSize), - grpc.MaxSendMsgSize(opts.MaxSendMsgSize), - grpc.MaxConcurrentStreams(uint32(opts.MaxConcurrentStreams)), - grpc.StatsHandler(middleware.NewStatsHandler(m.receivedMessageSize, m.sentMessageSize, m.inflightRequests)), - } - - return grpc.NewServer(grpcOptions...) -} - -func newHTTPServer(l log.Logger, g prometheus.Gatherer, opts *Flags, m *metrics) (*http.Server, *mux.Router, error) { - router := mux.NewRouter() - if opts.RegisterInstrumentation && g != nil { - router.Handle("/metrics", promhttp.HandlerFor(g, promhttp.HandlerOpts{ - EnableOpenMetrics: true, - })) - router.PathPrefix("/debug/pprof").Handler(http.DefaultServeMux) - } - - var sourceIPs *middleware.SourceIPExtractor - if opts.LogSourceIPs { - var err error - sourceIPs, err = middleware.NewSourceIPs(opts.LogSourceIPsHeader, opts.LogSourceIPsRegex) - if err != nil { - return nil, nil, fmt.Errorf("error setting up source IP extraction: %v", err) - } - } - - httpMiddleware := []middleware.Interface{ - middleware.Tracer{ - RouteMatcher: router, - SourceIPs: sourceIPs, - }, - middleware.Log{ - Log: l, - SourceIPs: sourceIPs, - }, - middleware.Instrument{ - RouteMatcher: router, - Duration: m.requestDuration, - RequestBodySize: m.receivedMessageSize, - ResponseBodySize: m.sentMessageSize, - InflightRequests: m.inflightRequests, - }, - } - - httpServer := &http.Server{ - ReadTimeout: opts.HTTP.ReadTimeout, - WriteTimeout: opts.HTTP.WriteTimeout, - IdleTimeout: opts.HTTP.IdleTimeout, - Handler: middleware.Merge(httpMiddleware...).Wrap(router), - } - - return httpServer, router, nil -} - -// HTTPAddress returns the HTTP net.Addr of this Server. -func (s *Server) HTTPAddress() net.Addr { return s.httpListener.Addr() } - -// GRPCAddress returns the GRPC net.Addr of this Server. 
-func (s *Server) GRPCAddress() net.Addr { return s.grpcListener.Addr() }
-
-// ApplyConfig applies changes to the Server block.
-func (s *Server) ApplyConfig(cfg Config) error {
- s.flagsMut.Lock()
- defer s.flagsMut.Unlock()
-
- // N.B. LogLevel/LogFormat support dynamic updating but are never used in
- // *Server, so they're ignored here.
-
- if s.updateHTTPTLS != nil {
- if err := s.updateHTTPTLS(cfg.HTTP.TLSConfig); err != nil {
- return fmt.Errorf("updating HTTP TLS settings: %w", err)
- }
- }
- if s.updateGRPCTLS != nil {
- if err := s.updateGRPCTLS(cfg.GRPC.TLSConfig); err != nil {
- return fmt.Errorf("updating gRPC TLS settings: %w", err)
- }
- }
-
- return nil
-}
-
-// Run the server until an error is received or the given context is canceled.
-// Run may not be re-called after it exits.
-func (s *Server) Run(ctx context.Context) error {
- ctx, cancel := context.WithCancel(ctx)
- defer cancel()
-
- var g run.Group
-
- g.Add(func() error {
- <-ctx.Done()
- return nil
- }, func(_ error) {
- cancel()
- })
-
- httpListeners := []net.Listener{
- s.httpListener,
- s.httpMemListener,
- }
- for i := range httpListeners {
- listener := httpListeners[i]
- g.Add(func() error {
- err := s.HTTPServer.Serve(listener)
- if errors.Is(err, http.ErrServerClosed) {
- err = nil
- }
- return err
- }, func(_ error) {
- ctx, cancel := context.WithTimeout(context.Background(), s.flags.GracefulShutdownTimeout)
- defer cancel()
- _ = s.HTTPServer.Shutdown(ctx)
- })
- }
-
- grpcListeners := []net.Listener{
- s.grpcListener,
- s.grpcMemListener,
- }
- for i := range grpcListeners {
- listener := grpcListeners[i]
- g.Add(func() error {
- err := s.GRPC.Serve(listener)
- if errors.Is(err, grpc.ErrServerStopped) {
- err = nil
- }
- return err
- }, func(_ error) {
- s.GRPC.GracefulStop()
- })
- }
-
- return g.Run()
-}
-
-// Close forcibly closes the server's listeners.
-func (s *Server) Close() error { - errs := multierror.Append( - s.httpListener.Close(), - s.grpcListener.Close(), - ) - return errs.ErrorOrNil() -} diff --git a/internal/static/server/server_test.go b/internal/static/server/server_test.go deleted file mode 100644 index effa46b72e..0000000000 --- a/internal/static/server/server_test.go +++ /dev/null @@ -1,193 +0,0 @@ -package server - -import ( - "context" - "crypto/tls" - "fmt" - "net" - "net/http" - "testing" - - "github.com/go-kit/log" - "github.com/stretchr/testify/require" - "google.golang.org/grpc" - "google.golang.org/grpc/credentials" - "google.golang.org/grpc/credentials/insecure" - "google.golang.org/grpc/health" - "google.golang.org/grpc/health/grpc_health_v1" -) - -const anyLocalhost = "127.0.0.1:0" - -func TestServer(t *testing.T) { - cfg := newTestConfig() - flags := newTestFlags() - srv := runExampleServer(t, cfg, flags) - - // Validate HTTP - resp, err := http.Get(fmt.Sprintf("http://%s/testing", srv.HTTPAddress())) - require.NoError(t, err) - require.Equal(t, http.StatusOK, resp.StatusCode) - _ = resp.Body.Close() - - // Validate gRPC - creds := grpc.WithTransportCredentials(insecure.NewCredentials()) - cc, err := grpc.Dial(srv.GRPCAddress().String(), creds) - require.NoError(t, err) - _, err = grpc_health_v1.NewHealthClient(cc).Check(context.Background(), &grpc_health_v1.HealthCheckRequest{}) - require.NoError(t, err) -} - -func TestServer_InMemory(t *testing.T) { - cfg := newTestConfig() - flags := newTestFlags() - srv := runExampleServer(t, cfg, flags) - - // Validate HTTP - var httpClient http.Client - httpClient.Transport = &http.Transport{DialContext: srv.DialContext} - resp, err := httpClient.Get(fmt.Sprintf("http://%s/testing", flags.HTTP.InMemoryAddr)) - require.NoError(t, err) - require.Equal(t, http.StatusOK, resp.StatusCode) - _ = resp.Body.Close() - - // Validate gRPC - grpcDialer := grpc.WithContextDialer(func(ctx context.Context, s string) (net.Conn, error) { - return srv.DialContext(ctx, "", s) - }) - cc, err := grpc.Dial(flags.GRPC.InMemoryAddr, grpc.WithTransportCredentials(insecure.NewCredentials()), grpcDialer) - require.NoError(t, err) - _, err = grpc_health_v1.NewHealthClient(cc).Check(context.Background(), &grpc_health_v1.HealthCheckRequest{}) - require.NoError(t, err) -} - -func newTestConfig() Config { - cfg := DefaultConfig() - return cfg -} - -func newTestFlags() Flags { - flags := DefaultFlags - flags.HTTP.ListenAddress = anyLocalhost - flags.GRPC.ListenAddress = anyLocalhost - return flags -} - -func runExampleServer(t *testing.T, cfg Config, flags Flags) *Server { - t.Helper() - - srv, err := New(log.NewNopLogger(), nil, nil, cfg, flags) - require.NoError(t, err) - - // Set up some expected services for us to test against. - srv.HTTP.HandleFunc("/testing", func(w http.ResponseWriter, _ *http.Request) { - w.WriteHeader(http.StatusOK) - }) - grpc_health_v1.RegisterHealthServer(srv.GRPC, health.NewServer()) - - // Run our server. 
- ctx, cancel := context.WithCancel(context.Background()) - t.Cleanup(cancel) - go func() { - require.NoError(t, srv.Run(ctx)) - }() - - return srv -} - -func TestServer_TLS(t *testing.T) { - cfg := newTestConfig() - flags := newTestFlags() - - flags.HTTP.UseTLS = true - flags.GRPC.UseTLS = true - - tlsConfig := TLSConfig{ - TLSCertPath: "testdata/example-cert.pem", - TLSKeyPath: "testdata/example-key.pem", - } - cfg.HTTP.TLSConfig = tlsConfig - cfg.GRPC.TLSConfig = tlsConfig - - srv := runExampleServer(t, cfg, flags) - - // Validate HTTPS - cli := http.Client{ - Transport: &http.Transport{ - TLSClientConfig: &tls.Config{InsecureSkipVerify: true}, - }, - } - resp, err := cli.Get(fmt.Sprintf("https://%s/testing", srv.HTTPAddress())) - require.NoError(t, err) - require.Equal(t, http.StatusOK, resp.StatusCode) - _ = resp.Body.Close() - - // Validate gRPC TLS - creds := credentials.NewTLS(&tls.Config{InsecureSkipVerify: true}) - cc, err := grpc.Dial(srv.GRPCAddress().String(), grpc.WithTransportCredentials(creds)) - require.NoError(t, err) - _, err = grpc_health_v1.NewHealthClient(cc).Check(context.Background(), &grpc_health_v1.HealthCheckRequest{}) - require.NoError(t, err) -} - -// TestRunReturnsError validates that Run exits with an error when the -// HTTP/GRPC servers stop unexpectedly. -func TestRunReturnsError(t *testing.T) { - cfg := newTestConfig() - flags := newTestFlags() - - t.Run("http", func(t *testing.T) { - srv, err := New(nil, nil, nil, cfg, flags) - require.NoError(t, err) - - ctx, cancel := context.WithCancel(context.Background()) - defer cancel() - - errChan := make(chan error, 1) - go func() { - errChan <- srv.Run(ctx) - }() - - require.NoError(t, srv.httpListener.Close()) - require.NotNil(t, <-errChan) - }) - - t.Run("grpc", func(t *testing.T) { - srv, err := New(nil, nil, nil, cfg, flags) - require.NoError(t, err) - - ctx, cancel := context.WithCancel(context.Background()) - defer cancel() - - errChan := make(chan error, 1) - go func() { - errChan <- srv.Run(ctx) - }() - - require.NoError(t, srv.grpcListener.Close()) - require.NotNil(t, <-errChan) - }) -} - -func TestServer_ApplyConfig(t *testing.T) { - t.Run("no changes", func(t *testing.T) { - cfg := newTestConfig() - flags := newTestFlags() - - srv, err := New(nil, nil, nil, cfg, flags) - require.NoError(t, err) - - require.NoError(t, srv.ApplyConfig(cfg)) - }) - - t.Run("valid changes", func(t *testing.T) { - cfg := newTestConfig() - flags := newTestFlags() - - srv, err := New(nil, nil, nil, cfg, flags) - require.NoError(t, err) - - cfg.LogLevel.Set("debug") - require.NoError(t, srv.ApplyConfig(cfg)) - }) -} diff --git a/internal/static/server/signal_context.go b/internal/static/server/signal_context.go deleted file mode 100644 index 21ac6376ce..0000000000 --- a/internal/static/server/signal_context.go +++ /dev/null @@ -1,41 +0,0 @@ -package server - -import ( - "context" - - "github.com/go-kit/log" - "github.com/grafana/dskit/signals" - "go.uber.org/atomic" -) - -var signalContexts atomic.Int64 - -// SignalContext wraps a ctx which will be canceled if an interrupt is -// received. -// -// It is invalid to have two simultaneous SignalContexts per binary. 
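Typical usage pairs SignalContext with Server.Run from earlier in this diff. A minimal sketch, assuming the server and logger are already constructed:

```go
package server

import (
	"context"

	"github.com/go-kit/log"
	"github.com/go-kit/log/level"
)

// runUntilSignal is illustrative only: it cancels ctx on an interrupt so Run
// shuts the HTTP and gRPC servers down gracefully.
func runUntilSignal(srv *Server, logger log.Logger) {
	ctx, cancel := SignalContext(context.Background(), logger)
	defer cancel()

	if err := srv.Run(ctx); err != nil {
		level.Error(logger).Log("msg", "server exited with error", "err", err)
	}
}
```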
-func SignalContext(ctx context.Context, l log.Logger) (context.Context, context.CancelFunc) { - if !signalContexts.CompareAndSwap(0, 1) { - panic("bug: multiple SignalContexts found") - } - - if l == nil { - l = log.NewNopLogger() - } - - ctx, cancel := context.WithCancel(ctx) - - handler := signals.NewHandler(l) - go func() { - handler.Loop() - signalContexts.Store(0) - cancel() - }() - go func() { - <-ctx.Done() - handler.Stop() - signalContexts.Store(0) - }() - - return ctx, cancel -} diff --git a/internal/static/server/tls.go b/internal/static/server/tls.go index 0e78edac58..5ab671f820 100644 --- a/internal/static/server/tls.go +++ b/internal/static/server/tls.go @@ -2,15 +2,9 @@ package server import ( "crypto/tls" - "crypto/x509" "errors" "fmt" - "net" - "os" - "sync" "time" - - "github.com/go-kit/log" ) // TLSConfig holds dynamic configuration options for TLS. @@ -142,152 +136,6 @@ func (tv *TLSVersion) MarshalYAML() (interface{}, error) { return fmt.Sprintf("%v", tv), nil } -// tlsListener is a net.Listener for establishing TLS connections. tlsListener -// supports dynamically updating the TLS settings used to establish -// connections. -type tlsListener struct { - mut sync.RWMutex - cfg TLSConfig - tlsConfig *tls.Config - log log.Logger - - innerListener net.Listener - - windowsCertHandler *WinCertStoreHandler -} - -// newTLSListener creates and configures a new tlsListener. -func newTLSListener(inner net.Listener, c TLSConfig, log log.Logger) (*tlsListener, error) { - tl := &tlsListener{ - innerListener: inner, - log: log, - } - return tl, tl.ApplyConfig(c) -} - -// Accept implements net.Listener and returns the next connection. Connections -func (l *tlsListener) Accept() (net.Conn, error) { - nc, err := l.innerListener.Accept() - if err != nil { - return nc, err - } - - l.mut.RLock() - defer l.mut.RUnlock() - return tls.Server(nc, l.tlsConfig), nil -} - -// Close implements net.Listener and closes the tlsListener, preventing any new -// connections from being formed. Existing connections will be kept alive. -func (l *tlsListener) Close() error { - if l.windowsCertHandler != nil { - l.windowsCertHandler.Stop() - } - return l.innerListener.Close() -} - -// Addr implements net.Listener and returns the listener's network address. -func (l *tlsListener) Addr() net.Addr { - return l.innerListener.Addr() -} - -// ApplyConfig updates the tlsListener with new settings for creating TLS -// connections. -// -// Existing TLS connections will be kept alive after updating the TLS settings. -// New connections cannot be established while ApplyConfig is running. -func (l *tlsListener) ApplyConfig(c TLSConfig) error { - l.mut.Lock() - defer l.mut.Unlock() - if c.WindowsCertificateFilter != nil { - return l.applyWindowsCertificateStore(c) - } - return l.applyNormalTLS(c) -} - -func (l *tlsListener) applyNormalTLS(c TLSConfig) error { - if l.windowsCertHandler != nil { - panic("windows certificate handler is set this should never happen") - } - // Convert our TLSConfig into a new *tls.Config. - // - // While *tls.Config supports callbacks and doesn't need to be fully - // replaced, some of our dynamic settings from TLSConfig can't be dynamically - // updated (e.g., ciphers, min/max version, etc.). - // - // To make life easier on ourselves we just replace the whole thing with a new TLS listener. 
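A sketch of the constraint the comment above describes, using a hypothetical loadCurrentCert helper: fields like MinVersion are fixed once the *tls.Config is built, while GetCertificate is re-evaluated on every handshake, which is why the listener swaps in a whole new config instead of mutating the old one:

	cfg := &tls.Config{
		MinVersion: tls.VersionTLS12, // static: changing it means building a new config
		GetCertificate: func(*tls.ClientHelloInfo) (*tls.Certificate, error) {
			return loadCurrentCert() // dynamic: called for every new handshake
		},
	}
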
- - // Make sure that the certificates exist - if c.TLSCertPath == "" { - return fmt.Errorf("missing certificate file") - } - if c.TLSKeyPath == "" { - return fmt.Errorf("missing key file") - } - _, err := tls.LoadX509KeyPair(c.TLSCertPath, c.TLSKeyPath) - if err != nil { - return fmt.Errorf("failed to load key pair: %w", err) - } - - newConfig := &tls.Config{ - MinVersion: (uint16)(c.MinVersion), - MaxVersion: (uint16)(c.MaxVersion), - PreferServerCipherSuites: c.PreferServerCipherSuites, - - GetCertificate: l.getCertificate, - } - - var cf []uint16 - for _, c := range c.CipherSuites { - cf = append(cf, (uint16)(c)) - } - if len(cf) > 0 { - newConfig.CipherSuites = cf - } - - var cp []tls.CurveID - for _, c := range c.CurvePreferences { - cp = append(cp, (tls.CurveID)(c)) - } - if len(cp) > 0 { - newConfig.CurvePreferences = cp - } - - if c.ClientCAs != "" { - clientCAPool := x509.NewCertPool() - clientCAFile, err := os.ReadFile(c.ClientCAs) - if err != nil { - return err - } - clientCAPool.AppendCertsFromPEM(clientCAFile) - newConfig.ClientCAs = clientCAPool - } - - clientAuth, err := GetClientAuthFromString(c.ClientAuth) - if err != nil { - return err - } - newConfig.ClientAuth = clientAuth - if c.ClientCAs != "" && newConfig.ClientAuth == tls.NoClientCert { - return fmt.Errorf("Client CAs have been configured without a ClientAuth policy") - } - - l.tlsConfig = newConfig - l.cfg = c - return nil -} - -func (l *tlsListener) getCertificate(*tls.ClientHelloInfo) (*tls.Certificate, error) { - l.mut.RLock() - defer l.mut.RUnlock() - - cert, err := tls.LoadX509KeyPair(l.cfg.TLSCertPath, l.cfg.TLSKeyPath) - if err != nil { - return nil, fmt.Errorf("failed to load key pair: %w", err) - } - return &cert, nil -} - func GetClientAuthFromString(clientAuth string) (tls.ClientAuthType, error) { switch clientAuth { case "RequestClientCert": diff --git a/internal/static/server/tls_certstore_stub.go b/internal/static/server/tls_certstore_stub.go index 6b6dd8430b..ba1defe3d1 100644 --- a/internal/static/server/tls_certstore_stub.go +++ b/internal/static/server/tls_certstore_stub.go @@ -2,12 +2,6 @@ package server -import "fmt" - -func (l *tlsListener) applyWindowsCertificateStore(_ TLSConfig) error { - return fmt.Errorf("cannot use Windows certificate store on non-Windows platforms") -} - type WinCertStoreHandler struct { } diff --git a/internal/static/server/tls_certstore_windows.go b/internal/static/server/tls_certstore_windows.go index c80406114b..fca1965748 100644 --- a/internal/static/server/tls_certstore_windows.go +++ b/internal/static/server/tls_certstore_windows.go @@ -59,66 +59,6 @@ func NewWinCertStoreHandler(cfg WindowsCertificateFilter, clientAuth tls.ClientA return cn, nil } -func (l *tlsListener) applyWindowsCertificateStore(c TLSConfig) error { - - // Restrict normal TLS options when using windows certificate store - if c.TLSCertPath != "" { - return fmt.Errorf("at most one of cert_file and windows_certificate_filter can be configured") - } - if c.TLSKeyPath != "" { - return fmt.Errorf("at most one of cert_key and windows_certificate_filter can be configured") - } - if c.WindowsCertificateFilter.Server == nil { - return fmt.Errorf("windows certificate filter requires a server block defined") - } - - var subjectRegEx *regexp.Regexp - var err error - if c.WindowsCertificateFilter.Client != nil && c.WindowsCertificateFilter.Client.SubjectRegEx != "" { - subjectRegEx, err = regexp.Compile(c.WindowsCertificateFilter.Client.SubjectRegEx) - if err != nil { - return fmt.Errorf("error compiling 
subject common name regular expression: %w", err) - } - } - - // If there is an existing windows certhandler stop it. - if l.windowsCertHandler != nil { - l.windowsCertHandler.Stop() - } - - cn := &WinCertStoreHandler{ - cfg: *c.WindowsCertificateFilter, - subjectRegEx: subjectRegEx, - log: l.log, - shutdown: make(chan struct{}), - } - - err = cn.refreshCerts() - if err != nil { - return err - } - - config := &tls.Config{ - VerifyPeerCertificate: cn.VerifyPeer, - GetCertificate: cn.CertificateHandler, - MaxVersion: uint16(c.MaxVersion), - MinVersion: uint16(c.MinVersion), - } - - ca, err := GetClientAuthFromString(c.ClientAuth) - if err != nil { - return err - } - config.ClientAuth = ca - cn.clientAuth = ca - // Kick off the refresh handler - go cn.startUpdateTimer() - l.windowsCertHandler = cn - l.tlsConfig = config - l.cfg = c - return nil -} - // Run runs the filter refresh. Stop should be called when done. func (c *WinCertStoreHandler) Run() { go c.startUpdateTimer() diff --git a/internal/static/server/tls_test.go b/internal/static/server/tls_test.go deleted file mode 100644 index de9a2402c0..0000000000 --- a/internal/static/server/tls_test.go +++ /dev/null @@ -1,68 +0,0 @@ -package server - -import ( - "crypto/tls" - "fmt" - "io" - "log" - "net" - "net/http" - "net/url" - "testing" - - kitlog "github.com/go-kit/log" - "github.com/stretchr/testify/require" -) - -func Test_tlsListener(t *testing.T) { - rawLis, err := net.Listen("tcp", "127.0.0.1:0") - require.NoError(t, err) - - tlsConfig := TLSConfig{ - TLSCertPath: "testdata/example-cert.pem", - TLSKeyPath: "testdata/example-key.pem", - ClientAuth: "NoClientCert", - } - tlsLis, err := newTLSListener(rawLis, tlsConfig, kitlog.NewNopLogger()) - require.NoError(t, err) - - httpSrv := &http.Server{ - ErrorLog: log.New(io.Discard, "", 0), - } - go func() { - _ = httpSrv.Serve(tlsLis) - }() - defer func() { - httpSrv.Close() - }() - - httpTransport := &http.Transport{ - TLSClientConfig: &tls.Config{ - InsecureSkipVerify: true, - }, - } - cli := http.Client{Transport: httpTransport} - - resp, err := cli.Get(fmt.Sprintf("https://%s", tlsLis.Addr())) - if err == nil { - resp.Body.Close() - } - require.NoError(t, err) - - // Update our TLSConfig to require a client cert. - tlsConfig.ClientAuth = "RequireAndVerifyClientCert" - require.NoError(t, tlsLis.ApplyConfig(tlsConfig)) - - // Close our idle connections so our next request forces a new dial. - httpTransport.CloseIdleConnections() - - // Create a second connection which should now fail because we don't supply a - resp, err = cli.Get(fmt.Sprintf("https://%s", tlsLis.Addr())) - if err == nil { - resp.Body.Close() - } - - var urlError *url.Error - require.ErrorAs(t, err, &urlError) - require.Contains(t, urlError.Err.Error(), "tls:") -} diff --git a/internal/static/supportbundle/supportbundle.go b/internal/static/supportbundle/supportbundle.go deleted file mode 100644 index 3963c2a9cc..0000000000 --- a/internal/static/supportbundle/supportbundle.go +++ /dev/null @@ -1,235 +0,0 @@ -package supportbundle - -import ( - "archive/zip" - "bytes" - "context" - "fmt" - "io" - "net/http" - "path/filepath" - "runtime" - "runtime/pprof" - "strings" - "sync" - "time" - - "github.com/grafana/agent/internal/build" - "github.com/grafana/agent/internal/static/server" - "github.com/mackerelio/go-osstat/uptime" - "gopkg.in/yaml.v3" -) - -// Bundle collects all the data that is exposed as a support bundle. 
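A sketch of how the Bundle type that follows is produced and served, assuming an HTTP handler with enabledFeatures, cfgBytes, srvAddress, dialContext, and logsBuf already in scope (the /-/support path is illustrative, not taken from this patch):

	http.HandleFunc("/-/support", func(w http.ResponseWriter, r *http.Request) {
		b, err := supportbundle.Export(r.Context(), enabledFeatures, cfgBytes, srvAddress, dialContext)
		if err != nil {
			http.Error(w, err.Error(), http.StatusInternalServerError)
			return
		}
		_ = supportbundle.Serve(w, b, logsBuf) // streams the bundle back as a zip
	})
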
-type Bundle struct { - meta []byte - config []byte - agentMetrics []byte - agentMetricsInstances []byte - agentMetricsTargets []byte - agentLogsInstances []byte - agentLogsTargets []byte - heapBuf *bytes.Buffer - goroutineBuf *bytes.Buffer - blockBuf *bytes.Buffer - mutexBuf *bytes.Buffer - cpuBuf *bytes.Buffer -} - -// Metadata contains general runtime information about the current Agent. -type Metadata struct { - BuildVersion string `yaml:"build_version"` - OS string `yaml:"os"` - Architecture string `yaml:"architecture"` - Uptime float64 `yaml:"uptime"` - Payload map[string]interface{} `yaml:"payload"` -} - -// Used to enforce single-flight requests to Export -var mut sync.Mutex - -// Export gathers the information required for the support bundle. -func Export(ctx context.Context, enabledFeatures []string, cfg []byte, srvAddress string, dialContext server.DialContextFunc) (*Bundle, error) { - mut.Lock() - defer mut.Unlock() - // The block profiler is disabled by default. Temporarily enable recording - // of all blocking events. Also, temporarily record all mutex contentions, - // and defer restoring of earlier mutex profiling fraction. - runtime.SetBlockProfileRate(1) - old := runtime.SetMutexProfileFraction(1) - defer func() { - runtime.SetBlockProfileRate(0) - runtime.SetMutexProfileFraction(old) - }() - - // Gather runtime metadata. - ut, err := uptime.Get() - if err != nil { - return nil, err - } - m := Metadata{ - BuildVersion: build.Version, - OS: runtime.GOOS, - Architecture: runtime.GOARCH, - Uptime: ut.Seconds(), - Payload: map[string]interface{}{"enabled-features": enabledFeatures}, - } - meta, err := yaml.Marshal(m) - if err != nil { - return nil, fmt.Errorf("failed to marshal support bundle metadata: %s", err) - } - - var httpClient http.Client - httpClient.Transport = &http.Transport{DialContext: dialContext} - // Gather Agent's own metrics. - resp, err := httpClient.Get("http://" + srvAddress + "/metrics") - if err != nil { - return nil, fmt.Errorf("failed to get internal Agent metrics: %s", err) - } - agentMetrics, err := io.ReadAll(resp.Body) - if err != nil { - return nil, fmt.Errorf("failed to read internal Agent metrics: %s", err) - } - - // Collect the Agent metrics instances and target statuses. - resp, err = httpClient.Get("http://" + srvAddress + "/agent/api/v1/metrics/instances") - if err != nil { - return nil, fmt.Errorf("failed to get internal Agent metrics: %s", err) - } - agentMetricsInstances, err := io.ReadAll(resp.Body) - if err != nil { - return nil, fmt.Errorf("failed to read internal Agent metrics: %s", err) - } - resp, err = httpClient.Get("http://" + srvAddress + "/agent/api/v1/metrics/targets") - if err != nil { - return nil, fmt.Errorf("failed to get Agent metrics targets: %s", err) - } - agentMetricsTargets, err := io.ReadAll(resp.Body) - if err != nil { - return nil, fmt.Errorf("failed to read Agent metrics targets: %s", err) - } - - // Collect the Agent's logs instances and target statuses. 
- resp, err = httpClient.Get("http://" + srvAddress + "/agent/api/v1/logs/instances") - if err != nil { - return nil, fmt.Errorf("failed to get Agent logs instances: %s", err) - } - agentLogsInstances, err := io.ReadAll(resp.Body) - if err != nil { - return nil, fmt.Errorf("failed to read Agent logs instances: %s", err) - } - - resp, err = httpClient.Get("http://" + srvAddress + "/agent/api/v1/logs/targets") - if err != nil { - return nil, fmt.Errorf("failed to get Agent logs targets: %s", err) - } - agentLogsTargets, err := io.ReadAll(resp.Body) - if err != nil { - return nil, fmt.Errorf("failed to read Agent logs targets: %s", err) - } - - // Export pprof data. - var ( - cpuBuf bytes.Buffer - heapBuf bytes.Buffer - goroutineBuf bytes.Buffer - blockBuf bytes.Buffer - mutexBuf bytes.Buffer - ) - err = pprof.StartCPUProfile(&cpuBuf) - if err != nil { - return nil, err - } - deadline, _ := ctx.Deadline() - // Sleep for the remaining of the context deadline, but leave some time for - // the rest of the bundle to be exported successfully. - time.Sleep(time.Until(deadline) - 200*time.Millisecond) - pprof.StopCPUProfile() - - p := pprof.Lookup("heap") - if err := p.WriteTo(&heapBuf, 0); err != nil { - return nil, err - } - p = pprof.Lookup("goroutine") - if err := p.WriteTo(&goroutineBuf, 0); err != nil { - return nil, err - } - p = pprof.Lookup("block") - if err := p.WriteTo(&blockBuf, 0); err != nil { - return nil, err - } - p = pprof.Lookup("mutex") - if err := p.WriteTo(&mutexBuf, 0); err != nil { - return nil, err - } - - // Finally, bundle everything up to be served, either as a zip from - // memory, or exported to a directory. - bundle := &Bundle{ - meta: meta, - config: cfg, - agentMetrics: agentMetrics, - agentMetricsInstances: agentMetricsInstances, - agentMetricsTargets: agentMetricsTargets, - agentLogsInstances: agentLogsInstances, - agentLogsTargets: agentLogsTargets, - heapBuf: &heapBuf, - goroutineBuf: &goroutineBuf, - blockBuf: &blockBuf, - mutexBuf: &mutexBuf, - cpuBuf: &cpuBuf, - } - - return bundle, nil -} - -// Serve the collected data and logs as a zip file over the given -// http.ResponseWriter. -func Serve(rw http.ResponseWriter, b *Bundle, logsBuf *bytes.Buffer) error { - zw := zip.NewWriter(rw) - rw.Header().Set("Content-Type", "application/zip") - rw.Header().Set("Content-Disposition", "attachment; filename=\"agent-support-bundle.zip\"") - - zipStructure := map[string][]byte{ - "agent-metadata.yaml": b.meta, - "agent-config.yaml": b.config, - "agent-metrics.txt": b.agentMetrics, - "agent-metrics-instances.json": b.agentMetricsInstances, - "agent-metrics-targets.json": b.agentMetricsTargets, - "agent-logs-instances.json": b.agentLogsInstances, - "agent-logs-targets.json": b.agentLogsTargets, - "agent-logs.txt": logsBuf.Bytes(), - "pprof/cpu.pprof": b.cpuBuf.Bytes(), - "pprof/heap.pprof": b.heapBuf.Bytes(), - "pprof/goroutine.pprof": b.goroutineBuf.Bytes(), - "pprof/mutex.pprof": b.mutexBuf.Bytes(), - "pprof/block.pprof": b.blockBuf.Bytes(), - } - - for fn, b := range zipStructure { - if b != nil { - path := append([]string{"agent-support-bundle"}, strings.Split(fn, "/")...) 
- if err := writeByteSlice(zw, b, path...); err != nil { - return err - } - } - } - - err := zw.Close() - if err != nil { - return fmt.Errorf("failed to flush the zip writer: %v", err) - } - return nil -} - -func writeByteSlice(zw *zip.Writer, b []byte, fn ...string) error { - f, err := zw.Create(filepath.Join(fn...)) - if err != nil { - return err - } - _, err = f.Write(b) - if err != nil { - return err - } - return nil -} diff --git a/internal/static/traces/automaticloggingprocessor/automaticloggingprocessor.go b/internal/static/traces/automaticloggingprocessor/automaticloggingprocessor.go index c347e5cbb5..0624e1a54d 100644 --- a/internal/static/traces/automaticloggingprocessor/automaticloggingprocessor.go +++ b/internal/static/traces/automaticloggingprocessor/automaticloggingprocessor.go @@ -4,25 +4,14 @@ import ( "context" "errors" "fmt" - "strconv" "time" "github.com/go-kit/log" - "github.com/go-kit/log/level" - "github.com/go-logfmt/logfmt" - "github.com/grafana/agent/internal/static/logs" - "github.com/grafana/agent/internal/static/traces/contextkeys" - "github.com/grafana/agent/internal/util" util_log "github.com/grafana/agent/internal/util/log" - "github.com/grafana/loki/clients/pkg/promtail/api" - "github.com/grafana/loki/pkg/logproto" - "github.com/prometheus/common/model" "go.opentelemetry.io/collector/component" "go.opentelemetry.io/collector/consumer" - "go.opentelemetry.io/collector/pdata/pcommon" "go.opentelemetry.io/collector/pdata/ptrace" "go.opentelemetry.io/collector/processor" - semconv "go.opentelemetry.io/collector/semconv/v1.6.1" "go.uber.org/atomic" ) @@ -35,19 +24,14 @@ const ( defaultTraceIDKey = "tid" defaultTimeout = time.Millisecond - - typeSpan = "span" - typeRoot = "root" - typeProcess = "process" ) type automaticLoggingProcessor struct { nextConsumer consumer.Traces - cfg *AutomaticLoggingConfig - logToStdout bool - logsInstance *logs.Instance - done atomic.Bool + cfg *AutomaticLoggingConfig + logToStdout bool + done atomic.Bool labels map[string]struct{} @@ -105,77 +89,7 @@ func newTraceProcessor(nextConsumer consumer.Traces, cfg *AutomaticLoggingConfig } func (p *automaticLoggingProcessor) ConsumeTraces(ctx context.Context, td ptrace.Traces) error { - rsLen := td.ResourceSpans().Len() - for i := 0; i < rsLen; i++ { - rs := td.ResourceSpans().At(i) - ssLen := rs.ScopeSpans().Len() - - var svc string - svcAtt, ok := rs.Resource().Attributes().Get(semconv.AttributeServiceName) - if ok { - svc = svcAtt.Str() - } - - for j := 0; j < ssLen; j++ { - ss := rs.ScopeSpans().At(j) - spanLen := ss.Spans().Len() - - lastTraceID := "" - for k := 0; k < spanLen; k++ { - span := ss.Spans().At(k) - traceID := span.TraceID().String() - - if p.cfg.Spans { - keyValues := append(p.spanKeyVals(span), p.processKeyVals(rs.Resource(), svc)...) - p.exportToLogsInstance(typeSpan, traceID, p.spanLabels(keyValues), keyValues...) - } - - if p.cfg.Roots && span.ParentSpanID().IsEmpty() { - keyValues := append(p.spanKeyVals(span), p.processKeyVals(rs.Resource(), svc)...) - p.exportToLogsInstance(typeRoot, traceID, p.spanLabels(keyValues), keyValues...) - } - - if p.cfg.Processes && lastTraceID != traceID { - lastTraceID = traceID - keyValues := p.processKeyVals(rs.Resource(), svc) - p.exportToLogsInstance(typeProcess, traceID, p.spanLabels(keyValues), keyValues...) 
-			}
-		}
-	}
-
-	return p.nextConsumer.ConsumeTraces(ctx, td)
-}
-
-func (p *automaticLoggingProcessor) spanLabels(keyValues []interface{}) model.LabelSet {
-	if len(keyValues) == 0 {
-		return model.LabelSet{}
-	}
-	ls := make(map[model.LabelName]model.LabelValue, len(keyValues)/2)
-	var (
-		k, v string
-		ok   bool
-	)
-	for i := 0; i < len(keyValues); i += 2 {
-		if k, ok = keyValues[i].(string); !ok {
-			// Should never happen, all keys are strings
-			level.Error(p.logger).Log("msg", "error casting label key to string", "key", keyValues[i])
-			continue
-		}
-		// Try to cast value to string
-		if v, ok = keyValues[i+1].(string); !ok {
-			// If it's not a string, format it to its string representation
-			v = fmt.Sprintf("%v", keyValues[i+1])
-		}
-		if _, ok := p.labels[k]; ok {
-			// Loki does not accept "." as a valid character for labels
-			// Dots . are replaced by underscores _
-			k = util.SanitizeLabelName(k)
-
-			ls[model.LabelName(k)] = model.LabelValue(v)
-		}
-	}
-	return ls
+	return nil
 }
 
 func (p *automaticLoggingProcessor) Capabilities() consumer.Capabilities {
@@ -184,16 +98,8 @@ func (p *automaticLoggingProcessor) Capabilities() consumer.Capabilities {
 
 // Start is invoked during service startup.
 func (p *automaticLoggingProcessor) Start(ctx context.Context, _ component.Host) error {
-	if !p.logToStdout {
-		logs, ok := ctx.Value(contextkeys.Logs).(*logs.Logs)
-		if !ok {
-			return fmt.Errorf("key does not contain a logs instance")
-		}
-		p.logsInstance = logs.Instance(p.cfg.LogsName)
-		if p.logsInstance == nil {
-			return fmt.Errorf("logs instance %s not found", p.cfg.LogsName)
-		}
-	}
+	// NOTE(rfratto): automaticloggingprocessor only exists for config conversions,
+	// so we don't need any logic here.
 	return nil
 }
 
@@ -204,109 +110,6 @@ func (p *automaticLoggingProcessor) Shutdown(context.Context) error {
 	return nil
 }
 
-func (p *automaticLoggingProcessor) processKeyVals(resource pcommon.Resource, svc string) []interface{} {
-	atts := make([]interface{}, 0, 2) // 2 for service name
-	rsAtts := resource.Attributes()
-
-	// Add an attribute with the service name
-	atts = append(atts, p.cfg.Overrides.ServiceKey)
-	atts = append(atts, svc)
-
-	for _, name := range p.cfg.ProcessAttributes {
-		att, ok := rsAtts.Get(name)
-		if ok {
-			// name/key val pairs
-			atts = append(atts, name)
-			atts = append(atts, attributeValue(att))
-		}
-	}
-
-	return atts
-}
-
-func (p *automaticLoggingProcessor) spanKeyVals(span ptrace.Span) []interface{} {
-	atts := make([]interface{}, 0, 8) // 8 for name, duration, service name and status
-
-	atts = append(atts, p.cfg.Overrides.SpanNameKey)
-	atts = append(atts, span.Name())
-
-	atts = append(atts, p.cfg.Overrides.DurationKey)
-	atts = append(atts, spanDuration(span))
-
-	// Skip STATUS_CODE_UNSET to be less spammy
-	if span.Status().Code() != ptrace.StatusCodeUnset {
-		atts = append(atts, p.cfg.Overrides.StatusKey)
-		atts = append(atts, span.Status().Code())
-	}
-
-	for _, name := range p.cfg.SpanAttributes {
-		att, ok := span.Attributes().Get(name)
-		if ok {
-			atts = append(atts, name)
-			atts = append(atts, attributeValue(att))
-		}
-	}
-
-	return atts
-}
-
-func (p *automaticLoggingProcessor) exportToLogsInstance(kind string, traceID string, labels model.LabelSet, keyvals ...interface{}) {
-	if p.done.Load() {
-		return
-	}
-
-	keyvals = append(keyvals, []interface{}{p.cfg.Overrides.TraceIDKey, traceID}...)
-	line, err := logfmt.MarshalKeyvals(keyvals...)
- if err != nil { - level.Warn(p.logger).Log("msg", "unable to marshal keyvals", "err", err) - return - } - - // if we're logging to stdout, log and bail - if p.logToStdout { - level.Info(p.logger).Log(keyvals...) - return - } - - // Add logs instance label - labels[model.LabelName(p.cfg.Overrides.LogsTag)] = model.LabelValue(kind) - - sent := p.logsInstance.SendEntry(api.Entry{ - Labels: labels, - Entry: logproto.Entry{ - Timestamp: time.Now(), - Line: string(line), - }, - }, p.cfg.Timeout) - - if !sent { - level.Warn(p.logger).Log("msg", "failed to autolog to logs pipeline", "kind", kind, "traceid", traceID) - } -} - -func spanDuration(span ptrace.Span) string { - dur := int64(span.EndTimestamp() - span.StartTimestamp()) - return strconv.FormatInt(dur, 10) + "ns" -} - -func attributeValue(att pcommon.Value) interface{} { - switch att.Type() { - case pcommon.ValueTypeStr: - return att.Str() - case pcommon.ValueTypeInt: - return att.Int() - case pcommon.ValueTypeDouble: - return att.Double() - case pcommon.ValueTypeBool: - return att.Bool() - case pcommon.ValueTypeMap: - return att.Map() - case pcommon.ValueTypeSlice: - return att.Slice() - } - return nil -} - func override(cfgValue string, defaultValue string) string { if cfgValue == "" { return defaultValue diff --git a/internal/static/traces/automaticloggingprocessor/automaticloggingprocessor_test.go b/internal/static/traces/automaticloggingprocessor/automaticloggingprocessor_test.go index 310a09c0ad..b02b7ba7f9 100644 --- a/internal/static/traces/automaticloggingprocessor/automaticloggingprocessor_test.go +++ b/internal/static/traces/automaticloggingprocessor/automaticloggingprocessor_test.go @@ -3,174 +3,14 @@ package automaticloggingprocessor import ( "context" "testing" - "time" "github.com/grafana/agent/internal/static/logs" "github.com/grafana/agent/internal/util" - "github.com/prometheus/common/model" - "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" "go.opentelemetry.io/collector/component/componenttest" - "go.opentelemetry.io/collector/pdata/pcommon" - "go.opentelemetry.io/collector/pdata/ptrace" "gopkg.in/yaml.v3" ) -func TestSpanKeyVals(t *testing.T) { - tests := []struct { - spanName string - spanAttrs map[string]interface{} - spanStart time.Time - spanEnd time.Time - cfg AutomaticLoggingConfig - expected []interface{} - }{ - { - expected: []interface{}{ - "span", "", - "dur", "0ns", - "status", ptrace.StatusCode(1), - }, - }, - { - spanName: "test", - expected: []interface{}{ - "span", "test", - "dur", "0ns", - "status", ptrace.StatusCode(1), - }, - }, - { - expected: []interface{}{ - "span", "", - "dur", "0ns", - "status", ptrace.StatusCode(1), - }, - }, - { - spanStart: time.Unix(0, 0), - spanEnd: time.Unix(0, 10), - expected: []interface{}{ - "span", "", - "dur", "10ns", - "status", ptrace.StatusCode(1), - }, - }, - { - spanStart: time.Unix(0, 10), - spanEnd: time.Unix(0, 100), - expected: []interface{}{ - "span", "", - "dur", "90ns", - "status", ptrace.StatusCode(1), - }, - }, - { - spanAttrs: map[string]interface{}{ - "xstr": "test", - }, - expected: []interface{}{ - "span", "", - "dur", "0ns", - "status", ptrace.StatusCode(1), - }, - }, - { - spanAttrs: map[string]interface{}{ - "xstr": "test", - }, - cfg: AutomaticLoggingConfig{ - SpanAttributes: []string{"xstr"}, - }, - expected: []interface{}{ - "span", "", - "dur", "0ns", - "status", ptrace.StatusCode(1), - "xstr", "test", - }, - }, - { - cfg: AutomaticLoggingConfig{ - Overrides: OverrideConfig{ - SpanNameKey: "a", - DurationKey: "c", - 
StatusKey: "d", - }, - }, - expected: []interface{}{ - "a", "", - "c", "0ns", - "d", ptrace.StatusCode(1), - }, - }, - } - - for _, tc := range tests { - tc.cfg.Backend = BackendStdout - tc.cfg.Spans = true - p, err := newTraceProcessor(&automaticLoggingProcessor{}, &tc.cfg) - require.NoError(t, err) - - span := ptrace.NewSpan() - span.SetName(tc.spanName) - span.Attributes().FromRaw(tc.spanAttrs) - span.SetStartTimestamp(pcommon.NewTimestampFromTime(tc.spanStart)) - span.SetEndTimestamp(pcommon.NewTimestampFromTime(tc.spanEnd)) - span.Status().SetCode(ptrace.StatusCodeOk) - - actual := p.(*automaticLoggingProcessor).spanKeyVals(span) - assert.Equal(t, tc.expected, actual) - } -} - -func TestProcessKeyVals(t *testing.T) { - tests := []struct { - processAttrs map[string]interface{} - svc string - cfg AutomaticLoggingConfig - expected []interface{} - }{ - { - expected: []interface{}{ - "svc", "", - }, - }, - { - processAttrs: map[string]interface{}{ - "xstr": "test", - }, - expected: []interface{}{ - "svc", "", - }, - }, - { - processAttrs: map[string]interface{}{ - "xstr": "test", - }, - cfg: AutomaticLoggingConfig{ - ProcessAttributes: []string{"xstr"}, - }, - expected: []interface{}{ - "svc", "", - "xstr", "test", - }, - }, - } - - for _, tc := range tests { - tc.cfg.Backend = BackendStdout - tc.cfg.Spans = true - p, err := newTraceProcessor(&automaticLoggingProcessor{}, &tc.cfg) - require.NoError(t, err) - - process := pcommon.NewResource() - process.Attributes().FromRaw(tc.processAttrs) - - actual := p.(*automaticLoggingProcessor).processKeyVals(process, tc.svc) - assert.Equal(t, tc.expected, actual) - } -} - func TestBadConfigs(t *testing.T) { tests := []struct { cfg *AutomaticLoggingConfig @@ -276,81 +116,3 @@ func TestLokiNameMigration(t *testing.T) { require.NoError(t, err) require.YAMLEq(t, expect, string(bb)) } - -func TestLabels(t *testing.T) { - tests := []struct { - name string - labels []string - keyValues []interface{} - expectedLabels model.LabelSet - }{ - { - name: "happy case", - labels: []string{"loki", "svc"}, - keyValues: []interface{}{"loki", "loki", "svc", "gateway", "duration", "1s"}, - expectedLabels: map[model.LabelName]model.LabelValue{ - "loki": "loki", - "svc": "gateway", - }, - }, - { - name: "happy case with dots", - labels: []string{"loki", "service.name"}, - keyValues: []interface{}{"loki", "loki", "service.name", "gateway", "duration", "1s"}, - expectedLabels: map[model.LabelName]model.LabelValue{ - "loki": "loki", - "service_name": "gateway", - }, - }, - { - name: "no labels", - labels: []string{}, - keyValues: []interface{}{"loki", "loki", "svc", "gateway", "duration", "1s"}, - expectedLabels: map[model.LabelName]model.LabelValue{}, - }, - { - name: "label not present in keyValues", - labels: []string{"loki", "svc"}, - keyValues: []interface{}{"loki", "loki", "duration", "1s"}, - expectedLabels: map[model.LabelName]model.LabelValue{ - "loki": "loki", - }, - }, - { - name: "label value is not type string", - labels: []string{"loki"}, - keyValues: []interface{}{"loki", 42, "duration", "1s"}, - expectedLabels: map[model.LabelName]model.LabelValue{ - "loki": "42", - }, - }, - { - name: "stringifies value if possible", - labels: []string{"status"}, - keyValues: []interface{}{"status", ptrace.StatusCode(1)}, - expectedLabels: map[model.LabelName]model.LabelValue{ - "status": model.LabelValue(ptrace.StatusCode(1).String()), - }, - }, - { - name: "no keyValues", - labels: []string{"status"}, - keyValues: []interface{}{}, - expectedLabels: 
map[model.LabelName]model.LabelValue{}, - }, - } - - for _, tc := range tests { - t.Run(tc.name, func(t *testing.T) { - cfg := &AutomaticLoggingConfig{ - Spans: true, - Labels: tc.labels, - } - p, err := newTraceProcessor(&automaticLoggingProcessor{}, cfg) - require.NoError(t, err) - - ls := p.(*automaticLoggingProcessor).spanLabels(tc.keyValues) - assert.Equal(t, tc.expectedLabels, ls) - }) - } -} diff --git a/internal/static/traces/instance.go b/internal/static/traces/instance.go deleted file mode 100644 index 0c2e3fcb19..0000000000 --- a/internal/static/traces/instance.go +++ /dev/null @@ -1,194 +0,0 @@ -package traces - -import ( - "context" - "fmt" - "sync" - "time" - - "go.opentelemetry.io/collector/component" - "go.opentelemetry.io/collector/connector" - otelexporter "go.opentelemetry.io/collector/exporter" - "go.opentelemetry.io/collector/extension" - "go.opentelemetry.io/collector/otelcol" - "go.opentelemetry.io/collector/processor" - "go.opentelemetry.io/collector/receiver" - "go.opentelemetry.io/collector/service" - "go.uber.org/zap" - "go.uber.org/zap/zapcore" - - "github.com/grafana/agent/internal/build" - "github.com/grafana/agent/internal/static/logs" - "github.com/grafana/agent/internal/static/metrics/instance" - "github.com/grafana/agent/internal/static/traces/automaticloggingprocessor" - "github.com/grafana/agent/internal/static/traces/contextkeys" - "github.com/grafana/agent/internal/static/traces/servicegraphprocessor" - "github.com/grafana/agent/internal/static/traces/traceutils" - "github.com/grafana/agent/internal/util" - prom_client "github.com/prometheus/client_golang/prometheus" - "go.opentelemetry.io/otel/trace/noop" -) - -// Instance wraps the OpenTelemetry collector to enable tracing pipelines -type Instance struct { - mut sync.Mutex - cfg InstanceConfig - logger *zap.Logger - - factories otelcol.Factories - service *service.Service -} - -// NewInstance creates and starts an instance of tracing pipelines. -func NewInstance(logsSubsystem *logs.Logs, reg prom_client.Registerer, cfg InstanceConfig, logger *zap.Logger, promInstanceManager instance.Manager) (*Instance, error) { - instance := &Instance{} - instance.logger = logger - - if err := instance.ApplyConfig(logsSubsystem, promInstanceManager, reg, cfg); err != nil { - return nil, err - } - return instance, nil -} - -// ApplyConfig updates the configuration of the Instance. 
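A minimal reload sketch for the method below, assuming inst and the other arguments are already in scope; per the body that follows, a config that is YAML-equal to the current one is a no-op, and the old pipeline is stopped before the new one is built:

	if err := inst.ApplyConfig(logsSubsystem, promInstanceManager, reg, newCfg); err != nil {
		// The previous pipeline has already been shut down at this point,
		// so on error the caller must treat the instance as stopped.
	}
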
-func (i *Instance) ApplyConfig(logsSubsystem *logs.Logs, promInstanceManager instance.Manager, reg prom_client.Registerer, cfg InstanceConfig) error { - i.mut.Lock() - defer i.mut.Unlock() - - if util.CompareYAML(cfg, i.cfg) { - // No config change - return nil - } - i.cfg = cfg - - // Shut down any existing pipeline - i.stop() - - err := i.buildAndStartPipeline(context.Background(), cfg, logsSubsystem, promInstanceManager, reg) - if err != nil { - return fmt.Errorf("failed to create pipeline: %w", err) - } - - return nil -} - -// Stop stops the OpenTelemetry collector subsystem -func (i *Instance) Stop() { - i.mut.Lock() - defer i.mut.Unlock() - - i.stop() -} - -func (i *Instance) stop() { - shutdownCtx, cancel := context.WithTimeout(context.Background(), 30*time.Second) - defer cancel() - - if i.service != nil { - err := i.service.Shutdown(shutdownCtx) - if err != nil { - i.logger.Error("failed to stop Otel service", zap.Error(err)) - } - } -} - -func (i *Instance) buildAndStartPipeline(ctx context.Context, cfg InstanceConfig, logs *logs.Logs, instManager instance.Manager, reg prom_client.Registerer) error { - // create component factories - otelConfig, err := cfg.OtelConfig() - if err != nil { - return fmt.Errorf("failed to load otelConfig from agent traces config: %w", err) - } - for _, rw := range cfg.RemoteWrite { - if rw.InsecureSkipVerify { - i.logger.Warn("Configuring TLS with insecure_skip_verify. Use tls_config.insecure_skip_verify instead") - } - if rw.TLSConfig != nil && rw.TLSConfig.ServerName != "" { - i.logger.Warn("Configuring unsupported tls_config.server_name") - } - } - - if cfg.SpanMetrics != nil && len(cfg.SpanMetrics.MetricsInstance) != 0 { - ctx = context.WithValue(ctx, contextkeys.Metrics, instManager) - } - - if cfg.LoadBalancing == nil && (cfg.TailSampling != nil || cfg.ServiceGraphs != nil) { - i.logger.Warn("Configuring tail_sampling and/or service_graphs without load_balancing." + - "Load balancing via trace ID is required for those features to work properly in multi agent deployments") - } - - if cfg.LoadBalancing == nil && cfg.SpanMetrics != nil { - i.logger.Warn("Configuring spanmetrics without load_balancing." 
+ - "Load balancing via service name is required for spanmetrics to work properly in multi agent deployments") - } - - if cfg.AutomaticLogging != nil && cfg.AutomaticLogging.Backend != automaticloggingprocessor.BackendStdout { - ctx = context.WithValue(ctx, contextkeys.Logs, logs) - } - - factories, err := tracingFactories() - if err != nil { - return fmt.Errorf("failed to load tracing factories: %w", err) - } - i.factories = factories - - appinfo := component.BuildInfo{ - Command: "agent", - Description: "agent", - Version: build.Version, - } - - err = util.SetupStaticModeOtelFeatureGates() - if err != nil { - return err - } - - promExporter, err := traceutils.PrometheusExporter(reg) - if err != nil { - return fmt.Errorf("error creating otel prometheus exporter: %w", err) - } - - i.service, err = service.New(ctx, service.Settings{ - BuildInfo: appinfo, - Receivers: receiver.NewBuilder(otelConfig.Receivers, i.factories.Receivers), - Processors: processor.NewBuilder(otelConfig.Processors, i.factories.Processors), - Exporters: otelexporter.NewBuilder(otelConfig.Exporters, i.factories.Exporters), - Connectors: connector.NewBuilder(otelConfig.Connectors, i.factories.Connectors), - Extensions: extension.NewBuilder(otelConfig.Extensions, i.factories.Extensions), - OtelMetricViews: servicegraphprocessor.OtelMetricViews(), - OtelMetricReader: promExporter, - DisableProcessMetrics: true, - UseExternalMetricsServer: true, - TracerProvider: noop.NewTracerProvider(), - //TODO: Plug in an AsyncErrorChannel to shut down the Agent in case of a fatal event - LoggingOptions: []zap.Option{ - zap.WrapCore(func(zapcore.Core) zapcore.Core { - return i.logger.Core() - }), - }, - }, otelConfig.Service) - if err != nil { - return fmt.Errorf("failed to create Otel service: %w", err) - } - - err = i.service.Start(ctx) - if err != nil { - return fmt.Errorf("failed to start Otel service: %w", err) - } - - return err -} - -// ReportFatalError implements component.Host -func (i *Instance) ReportFatalError(err error) { - i.logger.Error("fatal error reported", zap.Error(err)) -} - -// GetFactory implements component.Host -func (i *Instance) GetFactory(kind component.Kind, componentType component.Type) component.Factory { - switch kind { - case component.KindReceiver: - return i.factories.Receivers[componentType] - default: - return nil - } -} diff --git a/internal/static/traces/remotewriteexporter/exporter.go b/internal/static/traces/remotewriteexporter/exporter.go index 5f99af577a..cec6e77d56 100644 --- a/internal/static/traces/remotewriteexporter/exporter.go +++ b/internal/static/traces/remotewriteexporter/exporter.go @@ -2,115 +2,28 @@ package remotewriteexporter import ( "context" - "fmt" - "strconv" - "strings" - "sync" - "time" - "github.com/go-kit/log" - "github.com/go-kit/log/level" - "github.com/grafana/agent/internal/static/metrics/instance" - "github.com/grafana/agent/internal/static/traces/contextkeys" - util "github.com/grafana/agent/internal/util/log" - "github.com/prometheus/prometheus/model/labels" "go.opentelemetry.io/collector/component" "go.opentelemetry.io/collector/consumer" "go.opentelemetry.io/collector/exporter" - "go.opentelemetry.io/collector/pdata/pcommon" "go.opentelemetry.io/collector/pdata/pmetric" ) -const ( - nameLabelKey = "__name__" - sumSuffix = "sum" - countSuffix = "count" - bucketSuffix = "bucket" - leStr = "le" - infBucket = "+Inf" - noSuffix = "" -) - -type datapoint struct { - ts int64 - v float64 - l labels.Labels -} - -type remoteWriteExporter struct { - mtx sync.Mutex - - close 
chan struct{} - closed chan struct{} - - manager instance.Manager - promInstance string - - constLabels labels.Labels - namespace string - - seriesMap map[uint64]*datapoint - staleTime int64 - lastFlush int64 - loopInterval time.Duration - - logger log.Logger -} +type remoteWriteExporter struct{} func newRemoteWriteExporter(cfg *Config) (exporter.Metrics, error) { - logger := log.With(util.Logger, "component", "traces remote write exporter") - - ls := make(labels.Labels, 0, len(cfg.ConstLabels)) - - for name, value := range cfg.ConstLabels { - ls = append(ls, labels.Label{Name: name, Value: value}) - } - - staleTime := (15 * time.Minute).Milliseconds() - if cfg.StaleTime > 0 { - staleTime = cfg.StaleTime.Milliseconds() - } - - loopInterval := time.Second - if cfg.LoopInterval > 0 { - loopInterval = cfg.LoopInterval - } - - return &remoteWriteExporter{ - mtx: sync.Mutex{}, - close: make(chan struct{}), - closed: make(chan struct{}), - constLabels: ls, - namespace: cfg.Namespace, - promInstance: cfg.PromInstance, - seriesMap: make(map[uint64]*datapoint), - staleTime: staleTime, - loopInterval: loopInterval, - logger: logger, - }, nil + // NOTE(rfratto): remotewriteexporter has been kept for config conversions, + // but is never used, so the implementation of the component has been + // removed. + return &remoteWriteExporter{}, nil } func (e *remoteWriteExporter) Start(ctx context.Context, _ component.Host) error { - manager, ok := ctx.Value(contextkeys.Metrics).(instance.Manager) - if !ok || manager == nil { - return fmt.Errorf("key does not contain a InstanceManager instance") - } - e.manager = manager - - go e.appenderLoop() - return nil } func (e *remoteWriteExporter) Shutdown(ctx context.Context) error { - close(e.close) - - select { - case <-e.closed: - return nil - case <-ctx.Done(): - return ctx.Err() - } + return nil } func (e *remoteWriteExporter) Capabilities() consumer.Capabilities { @@ -118,202 +31,5 @@ func (e *remoteWriteExporter) Capabilities() consumer.Capabilities { } func (e *remoteWriteExporter) ConsumeMetrics(ctx context.Context, md pmetric.Metrics) error { - select { - case <-e.closed: - return nil - default: - } - - resourceMetrics := md.ResourceMetrics() - for i := 0; i < resourceMetrics.Len(); i++ { - resourceMetric := resourceMetrics.At(i) - scopeMetricsSlice := resourceMetric.ScopeMetrics() - for j := 0; j < scopeMetricsSlice.Len(); j++ { - metricSlice := scopeMetricsSlice.At(j).Metrics() - for k := 0; k < metricSlice.Len(); k++ { - switch metric := metricSlice.At(k); metric.Type() { - case pmetric.MetricTypeGauge: - dataPoints := metric.Sum().DataPoints() - if err := e.handleNumberDataPoints(metric.Name(), dataPoints); err != nil { - return err - } - case pmetric.MetricTypeSum: - if metric.Sum().AggregationTemporality() != pmetric.AggregationTemporalityCumulative { - continue // Only cumulative metrics are supported - } - dataPoints := metric.Sum().DataPoints() - if err := e.handleNumberDataPoints(metric.Name(), dataPoints); err != nil { - return err - } - case pmetric.MetricTypeHistogram: - if metric.Histogram().AggregationTemporality() != pmetric.AggregationTemporalityCumulative { - continue // Only cumulative metrics are supported - } - dataPoints := metric.Histogram().DataPoints() - e.handleHistogramDataPoints(metric.Name(), dataPoints) - case pmetric.MetricTypeSummary: - return fmt.Errorf("unsupported metric data type %s", metric.Type()) - default: - return fmt.Errorf("unsupported metric data type %s", metric.Type()) - } - } - } - } - - return nil -} - -func 
(e *remoteWriteExporter) handleNumberDataPoints(name string, dataPoints pmetric.NumberDataPointSlice) error { - for ix := 0; ix < dataPoints.Len(); ix++ { - dataPoint := dataPoints.At(ix) - lbls := e.createLabelSet(name, noSuffix, dataPoint.Attributes(), labels.Labels{}) - if err := e.appendNumberDataPoint(dataPoint, lbls); err != nil { - return fmt.Errorf("failed to process datapoints %s", err) - } - } - return nil -} - -func (e *remoteWriteExporter) appendNumberDataPoint(dataPoint pmetric.NumberDataPoint, labels labels.Labels) error { - var val float64 - switch dataPoint.ValueType() { - case pmetric.NumberDataPointValueTypeDouble: - val = dataPoint.DoubleValue() - case pmetric.NumberDataPointValueTypeInt: - val = float64(dataPoint.IntValue()) - default: - return fmt.Errorf("unknown data point type: %s", dataPoint.ValueType()) - } - ts := e.timestamp() - - e.appendDatapointForSeries(labels, ts, val) - return nil } - -func (e *remoteWriteExporter) handleHistogramDataPoints(name string, dataPoints pmetric.HistogramDataPointSlice) { - for ix := 0; ix < dataPoints.Len(); ix++ { - dataPoint := dataPoints.At(ix) - ts := e.timestamp() - - // Append sum value - sumLabels := e.createLabelSet(name, sumSuffix, dataPoint.Attributes(), labels.Labels{}) - e.appendDatapointForSeries(sumLabels, ts, dataPoint.Sum()) - - // Append count value - countLabels := e.createLabelSet(name, countSuffix, dataPoint.Attributes(), labels.Labels{}) - e.appendDatapointForSeries(countLabels, ts, float64(dataPoint.Count())) - - var cumulativeCount uint64 - for ix := 0; ix < dataPoint.ExplicitBounds().Len(); ix++ { - eb := dataPoint.ExplicitBounds().At(ix) - - if ix >= dataPoint.BucketCounts().Len() { - break - } - cumulativeCount += dataPoint.BucketCounts().At(ix) - boundStr := strconv.FormatFloat(eb, 'f', -1, 64) - bucketLabels := e.createLabelSet(name, bucketSuffix, dataPoint.Attributes(), labels.Labels{{Name: leStr, Value: boundStr}}) - e.appendDatapointForSeries(bucketLabels, ts, float64(cumulativeCount)) - } - - // add le=+Inf bucket - cumulativeCount += dataPoint.BucketCounts().At(dataPoint.BucketCounts().Len() - 1) - infBucketLabels := e.createLabelSet(name, bucketSuffix, dataPoint.Attributes(), labels.Labels{{Name: leStr, Value: infBucket}}) - e.appendDatapointForSeries(infBucketLabels, ts, float64(cumulativeCount)) - } -} - -func (e *remoteWriteExporter) appendDatapointForSeries(l labels.Labels, ts int64, v float64) { - e.mtx.Lock() - defer e.mtx.Unlock() - - series := l.Hash() - if lastDatapoint, ok := e.seriesMap[series]; ok { - if lastDatapoint.ts >= ts { - return - } - lastDatapoint.ts = ts - lastDatapoint.v = v - return - } - - e.seriesMap[series] = &datapoint{l: l, ts: ts, v: v} -} - -func (e *remoteWriteExporter) appenderLoop() { - t := time.NewTicker(e.loopInterval) - - for { - select { - case <-t.C: - e.mtx.Lock() - inst, err := e.manager.GetInstance(e.promInstance) - if err != nil { - level.Error(e.logger).Log("msg", "failed to get prom instance", "err", err) - continue - } - appender := inst.Appender(context.Background()) - - now := time.Now().UnixMilli() - for _, dp := range e.seriesMap { - // If the datapoint hasn't been updated since the last loop, don't append it - if dp.ts < e.lastFlush { - // If the datapoint is older than now - staleTime, it is stale and gets removed. 
- if now-dp.ts > e.staleTime { - delete(e.seriesMap, dp.l.Hash()) - } - continue - } - - if _, err := appender.Append(0, dp.l, dp.ts, dp.v); err != nil { - level.Error(e.logger).Log("msg", "failed to append datapoint", "err", err) - } - } - - if err := appender.Commit(); err != nil { - level.Error(e.logger).Log("msg", "failed to commit appender", "err", err) - } - - e.lastFlush = now - e.mtx.Unlock() - - case <-e.close: - close(e.closed) - return - } - } -} - -func (e *remoteWriteExporter) createLabelSet(name, suffix string, labelMap pcommon.Map, customLabels labels.Labels) labels.Labels { - ls := make(labels.Labels, 0, labelMap.Len()+1+len(e.constLabels)+len(customLabels)) - // Labels from spanmetrics processor - labelMap.Range(func(k string, v pcommon.Value) bool { - ls = append(ls, labels.Label{ - Name: strings.Replace(k, ".", "_", -1), - Value: v.Str(), - }) - return true - }) - // Metric name label - ls = append(ls, labels.Label{ - Name: nameLabelKey, - Value: metricName(e.namespace, name, suffix), - }) - // Const labels - ls = append(ls, e.constLabels...) - // Custom labels - ls = append(ls, customLabels...) - return ls -} - -func (e *remoteWriteExporter) timestamp() int64 { - return time.Now().UnixMilli() -} - -func metricName(namespace, metric, suffix string) string { - if len(suffix) != 0 { - return fmt.Sprintf("%s_%s_%s", namespace, metric, suffix) - } - return fmt.Sprintf("%s_%s", namespace, metric) -} diff --git a/internal/static/traces/remotewriteexporter/exporter_test.go b/internal/static/traces/remotewriteexporter/exporter_test.go deleted file mode 100644 index 63025ec120..0000000000 --- a/internal/static/traces/remotewriteexporter/exporter_test.go +++ /dev/null @@ -1,183 +0,0 @@ -package remotewriteexporter - -import ( - "context" - "testing" - "time" - - "github.com/grafana/agent/internal/static/metrics/instance" - "github.com/grafana/agent/internal/static/traces/contextkeys" - "github.com/prometheus/prometheus/model/exemplar" - "github.com/prometheus/prometheus/model/histogram" - "github.com/prometheus/prometheus/model/labels" - "github.com/prometheus/prometheus/model/metadata" - "github.com/prometheus/prometheus/storage" - "github.com/stretchr/testify/require" - "go.opentelemetry.io/collector/pdata/pcommon" - "go.opentelemetry.io/collector/pdata/pmetric" -) - -const ( - callsMetric = "traces_spanmetrics_calls_total" - sumMetric = "traces_spanmetrics_latency_sum" - countMetric = "traces_spanmetrics_latency_count" - bucketMetric = "traces_spanmetrics_latency_bucket" -) - -func TestRemoteWriteExporter_ConsumeMetrics(t *testing.T) { - var ( - countValue uint64 = 20 - sumValue float64 = 100 - bucketCounts = []uint64{1, 2, 3, 4, 5, 6} - explicitBounds = []float64{1, 2.5, 5, 7.5, 10} - ts = time.Date(2020, 1, 2, 3, 4, 5, 6, time.UTC) - ) - - cfg := Config{ - ConstLabels: nil, - Namespace: "traces", - PromInstance: "traces", - } - exp, err := newRemoteWriteExporter(&cfg) - require.NoError(t, err) - - manager := &mockManager{} - ctx := context.WithValue(context.Background(), contextkeys.Metrics, manager) - require.NoError(t, exp.Start(ctx, nil)) - - metrics := pmetric.NewMetrics() - ilm := metrics.ResourceMetrics().AppendEmpty().ScopeMetrics().AppendEmpty() - ilm.Scope().SetName("spanmetrics") - - // Append sum metric - sm := ilm.Metrics().AppendEmpty() - sm.SetEmptySum() - sm.SetName("spanmetrics_calls_total") - sm.Sum().SetAggregationTemporality(pmetric.AggregationTemporalityCumulative) - - sdp := sm.Sum().DataPoints().AppendEmpty() - 
sdp.SetTimestamp(pcommon.NewTimestampFromTime(ts.UTC())) - sdp.SetDoubleValue(sumValue) - - // Append histogram - hm := ilm.Metrics().AppendEmpty() - hm.SetEmptyHistogram() - hm.SetName("spanmetrics_latency") - hm.Histogram().SetAggregationTemporality(pmetric.AggregationTemporalityCumulative) - - hdp := hm.Histogram().DataPoints().AppendEmpty() - hdp.SetTimestamp(pcommon.NewTimestampFromTime(ts.UTC())) - hdp.BucketCounts().FromRaw(bucketCounts) - hdp.ExplicitBounds().FromRaw(explicitBounds) - hdp.SetCount(countValue) - hdp.SetSum(sumValue) - - err = exp.ConsumeMetrics(context.TODO(), metrics) - require.NoError(t, err) - - time.Sleep(5 * time.Second) - - require.NoError(t, exp.Shutdown(context.TODO())) - - // Verify calls - calls := manager.instance.GetAppended(callsMetric) - require.Equal(t, len(calls), 1) - require.Equal(t, calls[0].v, sumValue) - require.Equal(t, calls[0].l, labels.Labels{{Name: nameLabelKey, Value: "traces_spanmetrics_calls_total"}}) - - // Verify _sum - sum := manager.instance.GetAppended(sumMetric) - require.Equal(t, len(sum), 1) - require.Equal(t, sum[0].v, sumValue) - require.Equal(t, sum[0].l, labels.Labels{{Name: nameLabelKey, Value: "traces_spanmetrics_latency_" + sumSuffix}}) - - // Check _count - count := manager.instance.GetAppended(countMetric) - require.Equal(t, len(count), 1) - require.Equal(t, count[0].v, float64(countValue)) - require.Equal(t, count[0].l, labels.Labels{{Name: nameLabelKey, Value: "traces_spanmetrics_latency_" + countSuffix}}) - - // Check _bucket - buckets := manager.instance.GetAppended(bucketMetric) - require.Equal(t, len(buckets), len(bucketCounts)) -} - -type mockManager struct { - instance *mockInstance -} - -func (m *mockManager) GetInstance(string) (instance.ManagedInstance, error) { - if m.instance == nil { - m.instance = &mockInstance{} - } - return m.instance, nil -} - -func (m *mockManager) ListInstances() map[string]instance.ManagedInstance { return nil } - -func (m *mockManager) ListConfigs() map[string]instance.Config { return nil } - -func (m *mockManager) ApplyConfig(_ instance.Config) error { return nil } - -func (m *mockManager) DeleteConfig(_ string) error { return nil } - -func (m *mockManager) Stop() {} - -type mockInstance struct { - instance.NoOpInstance - appender *mockAppender -} - -func (m *mockInstance) Appender(_ context.Context) storage.Appender { - if m.appender == nil { - m.appender = &mockAppender{} - } - return m.appender -} - -func (m *mockInstance) GetAppended(n string) []metric { - return m.appender.GetAppended(n) -} - -type metric struct { - l labels.Labels - t int64 - v float64 -} - -type mockAppender struct { - appendedMetrics []metric -} - -var _ storage.Appender = (*mockAppender)(nil) - -func (a *mockAppender) GetAppended(n string) []metric { - var ms []metric - for _, m := range a.appendedMetrics { - if n == m.l.Get(nameLabelKey) { - ms = append(ms, m) - } - } - return ms -} - -func (a *mockAppender) Append(_ storage.SeriesRef, l labels.Labels, t int64, v float64) (storage.SeriesRef, error) { - a.appendedMetrics = append(a.appendedMetrics, metric{l: l, t: t, v: v}) - return 0, nil -} - -func (a *mockAppender) Commit() error { return nil } - -func (a *mockAppender) Rollback() error { return nil } - -func (a *mockAppender) AppendExemplar(_ storage.SeriesRef, _ labels.Labels, _ exemplar.Exemplar) (storage.SeriesRef, error) { - return 0, nil -} - -func (a *mockAppender) UpdateMetadata(_ storage.SeriesRef, _ labels.Labels, _ metadata.Metadata) (storage.SeriesRef, error) { - return 0, nil -} - -func (a 
*mockAppender) AppendHistogram(_ storage.SeriesRef, _ labels.Labels, _ int64, _ *histogram.Histogram, _ *histogram.FloatHistogram) (storage.SeriesRef, error) { - return 0, nil -} diff --git a/internal/static/traces/traces.go b/internal/static/traces/traces.go deleted file mode 100644 index 3226e8084d..0000000000 --- a/internal/static/traces/traces.go +++ /dev/null @@ -1,111 +0,0 @@ -package traces - -import ( - "fmt" - "sync" - - "github.com/go-kit/log" - "github.com/grafana/agent/internal/static/logs" - "github.com/grafana/agent/internal/static/metrics/instance" - "github.com/grafana/agent/internal/util/zapadapter" - prom_client "github.com/prometheus/client_golang/prometheus" - "go.uber.org/zap" -) - -// Traces wraps the OpenTelemetry collector to enable tracing pipelines -type Traces struct { - mut sync.Mutex - instances map[string]*Instance - - logger *zap.Logger - reg prom_client.Registerer - - promInstanceManager instance.Manager -} - -// New creates and starts trace collection. -func New(logsSubsystem *logs.Logs, promInstanceManager instance.Manager, reg prom_client.Registerer, cfg Config, l log.Logger) (*Traces, error) { - traces := &Traces{ - instances: make(map[string]*Instance), - logger: newLogger(l), - reg: reg, - promInstanceManager: promInstanceManager, - } - if err := traces.ApplyConfig(logsSubsystem, promInstanceManager, cfg); err != nil { - return nil, err - } - return traces, nil -} - -// Instance is used to retrieve a named Traces instance -func (t *Traces) Instance(name string) *Instance { - t.mut.Lock() - defer t.mut.Unlock() - - return t.instances[name] -} - -// ApplyConfig updates Traces with a new Config. -func (t *Traces) ApplyConfig(logsSubsystem *logs.Logs, promInstanceManager instance.Manager, cfg Config) error { - t.mut.Lock() - defer t.mut.Unlock() - - newInstances := make(map[string]*Instance, len(cfg.Configs)) - - for _, c := range cfg.Configs { - var ( - instReg = prom_client.WrapRegistererWith(prom_client.Labels{"traces_config": c.Name}, t.reg) - ) - - // If an old instance exists, update it and move it to the new map. - if old, ok := t.instances[c.Name]; ok { - err := old.ApplyConfig(logsSubsystem, promInstanceManager, instReg, c) - if err != nil { - return err - } - - newInstances[c.Name] = old - continue - } - - var ( - instLogger = t.logger.With(zap.String("traces_config", c.Name)) - ) - - inst, err := NewInstance(logsSubsystem, instReg, c, instLogger, t.promInstanceManager) - if err != nil { - return fmt.Errorf("failed to create tracing instance %s: %w", c.Name, err) - } - newInstances[c.Name] = inst - } - - // Any instance in l.instances that isn't in newInstances has been removed - // from the config. Stop them before replacing the map. 
- for key, i := range t.instances { - if _, exist := newInstances[key]; exist { - continue - } - i.Stop() - } - t.instances = newInstances - - return nil -} - -// Stop stops the OpenTelemetry collector subsystem -func (t *Traces) Stop() { - t.mut.Lock() - defer t.mut.Unlock() - - for _, i := range t.instances { - i.Stop() - } -} - -func newLogger(l log.Logger) *zap.Logger { - logger := zapadapter.New(l) - logger = logger.With(zap.String("component", "traces")) - logger.Info("Traces Logger Initialized") - - return logger -} diff --git a/internal/static/traces/traces_test.go b/internal/static/traces/traces_test.go deleted file mode 100644 index 5fc3fa4d88..0000000000 --- a/internal/static/traces/traces_test.go +++ /dev/null @@ -1,193 +0,0 @@ -package traces - -import ( - "fmt" - "strings" - "testing" - "time" - - "github.com/grafana/agent/internal/static/server" - "github.com/grafana/agent/internal/static/traces/traceutils" - "github.com/grafana/agent/internal/util" - "github.com/grafana/dskit/log" - "github.com/opentracing/opentracing-go" - "github.com/prometheus/client_golang/prometheus" - "github.com/stretchr/testify/require" - jaegercfg "github.com/uber/jaeger-client-go/config" - "go.opentelemetry.io/collector/pdata/ptrace" - "gopkg.in/yaml.v2" -) - -func TestTraces(t *testing.T) { - tracesCh := make(chan ptrace.Traces) - tracesAddr := traceutils.NewTestServer(t, func(t ptrace.Traces) { - tracesCh <- t - }) - - tracesCfgText := util.Untab(fmt.Sprintf(` -configs: -- name: default - receivers: - jaeger: - protocols: - thrift_compact: - remote_write: - - endpoint: %s - insecure: true - batch: - timeout: 100ms - send_batch_size: 1 - `, tracesAddr)) - - var cfg Config - dec := yaml.NewDecoder(strings.NewReader(tracesCfgText)) - dec.SetStrict(true) - err := dec.Decode(&cfg) - require.NoError(t, err) - - var loggingLevel log.Level - require.NoError(t, loggingLevel.Set("debug")) - - traces, err := New(nil, nil, prometheus.NewRegistry(), cfg, &server.HookLogger{}) - require.NoError(t, err) - t.Cleanup(traces.Stop) - - tr := testJaegerTracer(t) - span := tr.StartSpan("test-span") - span.Finish() - - select { - case <-time.After(30 * time.Second): - require.Fail(t, "failed to receive a span after 30 seconds") - case tr := <-tracesCh: - require.Equal(t, 1, tr.SpanCount()) - // Nothing to do, send succeeded. 
- } -} - -func TestTraceWithSpanmetricsConfig(t *testing.T) { - tracesCfgText := util.Untab(` -configs: -- name: test - receivers: - zipkin: - endpoint: 0.0.0.0:9999 - remote_write: - - endpoint: 0.0.0.0:5555 - insecure: false - tls_config: - insecure_skip_verify: true - spanmetrics: - handler_endpoint: 0.0.0.0:9090 - const_labels: - key1: "value1" - key2: "value2" - `) - - var cfg Config - dec := yaml.NewDecoder(strings.NewReader(tracesCfgText)) - dec.SetStrict(true) - err := dec.Decode(&cfg) - require.NoError(t, err) - - var loggingLevel log.Level - require.NoError(t, loggingLevel.Set("debug")) - - traces, err := New(nil, nil, prometheus.NewRegistry(), cfg, &server.HookLogger{}) - require.NoError(t, err) - t.Cleanup(traces.Stop) -} - -func TestTrace_ApplyConfig(t *testing.T) { - tracesCh := make(chan ptrace.Traces) - tracesAddr := traceutils.NewTestServer(t, func(t ptrace.Traces) { - tracesCh <- t - }) - - tracesCfgText := util.Untab(` -configs: -- name: default - receivers: - jaeger: - protocols: - thrift_compact: - remote_write: - - endpoint: 127.0.0.1:80 # deliberately the wrong endpoint - insecure: true - batch: - timeout: 100ms - send_batch_size: 1 - service_graphs: - enabled: true -`) - - var cfg Config - dec := yaml.NewDecoder(strings.NewReader(tracesCfgText)) - dec.SetStrict(true) - err := dec.Decode(&cfg) - require.NoError(t, err) - - traces, err := New(nil, nil, prometheus.NewRegistry(), cfg, &server.HookLogger{}) - require.NoError(t, err) - t.Cleanup(traces.Stop) - - // Fix the config and apply it before sending spans. - tracesCfgText = util.Untab(fmt.Sprintf(` -configs: -- name: default - receivers: - jaeger: - protocols: - thrift_compact: - remote_write: - - endpoint: %s - insecure: true - batch: - timeout: 100ms - send_batch_size: 1 - `, tracesAddr)) - - var fixedConfig Config - dec = yaml.NewDecoder(strings.NewReader(tracesCfgText)) - dec.SetStrict(true) - err = dec.Decode(&fixedConfig) - require.NoError(t, err) - - err = traces.ApplyConfig(nil, nil, fixedConfig) - require.NoError(t, err) - - tr := testJaegerTracer(t) - span := tr.StartSpan("test-span") - span.Finish() - - select { - case <-time.After(30 * time.Second): - require.Fail(t, "failed to receive a span after 30 seconds") - case tr := <-tracesCh: - require.Equal(t, 1, tr.SpanCount()) - // Nothing to do, send succeeded. 
- } -} - -func testJaegerTracer(t *testing.T) opentracing.Tracer { - t.Helper() - - jaegerConfig := jaegercfg.Configuration{ - ServiceName: "TestTraces", - Sampler: &jaegercfg.SamplerConfig{ - Type: "const", - Param: 1, - }, - Reporter: &jaegercfg.ReporterConfig{ - LocalAgentHostPort: "127.0.0.1:6831", - LogSpans: true, - }, - } - tr, closer, err := jaegerConfig.NewTracer() - require.NoError(t, err) - t.Cleanup(func() { - require.NoError(t, closer.Close()) - }) - - return tr -} diff --git a/internal/util/log/log.go b/internal/util/log/log.go index 9983946e61..8cd0948d57 100644 --- a/internal/util/log/log.go +++ b/internal/util/log/log.go @@ -7,123 +7,9 @@ package log import ( - "fmt" - "os" - "github.com/go-kit/log" - "github.com/go-kit/log/level" - dskit "github.com/grafana/dskit/log" - "github.com/grafana/dskit/server" - "github.com/prometheus/client_golang/prometheus" ) var ( Logger = log.NewNopLogger() - - logMessages = prometheus.NewCounterVec(prometheus.CounterOpts{ - Name: "log_messages_total", - Help: "Total number of log messages.", - }, []string{"level"}) - - supportedLevels = []level.Value{ - level.DebugValue(), - level.InfoValue(), - level.WarnValue(), - level.ErrorValue(), - } ) - -func init() { - prometheus.MustRegister(logMessages) -} - -// InitLogger initialises the global gokit logger (util_log.Logger) and overrides the -// default logger for the server. -func InitLogger(cfg *server.Config) { - l, err := NewPrometheusLogger(cfg.LogLevel, cfg.LogFormat) - if err != nil { - panic(err) - } - - // when use util_log.Logger, skip 3 stack frames. - Logger = log.With(l, "caller", log.Caller(3)) - - // cfg.Log wraps log function, skip 4 stack frames to get caller information. - // this works in go 1.12, but doesn't work in versions earlier. - // it will always shows the wrapper function generated by compiler - // marked in old versions. - cfg.Log = log.With(l, "caller", log.Caller(4)) -} - -// PrometheusLogger exposes Prometheus counters for each of go-kit's log levels. -type PrometheusLogger struct { - logger log.Logger -} - -// NewPrometheusLogger creates a new instance of PrometheusLogger which exposes -// Prometheus counters for various log levels. -func NewPrometheusLogger(l dskit.Level, format string) (log.Logger, error) { - logger := log.NewLogfmtLogger(log.NewSyncWriter(os.Stderr)) - if format == "json" { - logger = log.NewJSONLogger(log.NewSyncWriter(os.Stderr)) - } - logger = level.NewFilter(logger, LevelFilter(l.String())) - - // Initialise counters for all supported levels: - for _, level := range supportedLevels { - logMessages.WithLabelValues(level.String()) - } - - logger = &PrometheusLogger{ - logger: logger, - } - - // return a Logger without caller information, shouldn't use directly - logger = log.With(logger, "ts", log.DefaultTimestampUTC) - return logger, nil -} - -// Log increments the appropriate Prometheus counter depending on the log level. -func (pl *PrometheusLogger) Log(kv ...interface{}) error { - pl.logger.Log(kv...) 
- l := "unknown" - for i := 1; i < len(kv); i += 2 { - if v, ok := kv[i].(level.Value); ok { - l = v.String() - break - } - } - logMessages.WithLabelValues(l).Inc() - return nil -} - -// CheckFatal prints an error and exits with error code 1 if err is non-nil -func CheckFatal(location string, err error) { - if err != nil { - logger := level.Error(Logger) - if location != "" { - logger = log.With(logger, "msg", "error "+location) - } - // %+v gets the stack trace from errors using github.com/pkg/errors - logger.Log("err", fmt.Sprintf("%+v", err)) - os.Exit(1) - } -} - -// TODO(dannyk): remove once weaveworks/common updates to go-kit/log -// -// -> we can then revert to using Level.Gokit -func LevelFilter(l string) level.Option { - switch l { - case "debug": - return level.AllowDebug() - case "info": - return level.AllowInfo() - case "warn": - return level.AllowWarn() - case "error": - return level.AllowError() - default: - return level.AllowAll() - } -} diff --git a/internal/util/otel_feature_gate.go b/internal/util/otel_feature_gate.go index d2f4797668..643f1e4773 100644 --- a/internal/util/otel_feature_gate.go +++ b/internal/util/otel_feature_gate.go @@ -7,43 +7,7 @@ import ( _ "go.opentelemetry.io/collector/obsreport" ) -// Enables a set of feature gates in Otel's Global Feature Gate Registry. -func EnableOtelFeatureGates(fgNames ...string) error { - fgReg := featuregate.GlobalRegistry() - - for _, fg := range fgNames { - err := fgReg.Set(fg, true) - if err != nil { - return fmt.Errorf("error setting Otel feature gate: %w", err) - } - } - - return nil -} - var ( - // useOtelForInternalMetrics is required so that the Collector service configures Collector components using the Otel SDK - // instead of OpenCensus. If this is not specified, then the OtelMetricViews and OtelMetricReader parameters which we - // pass to service.New() below will not be taken into account. This would mean that metrics from custom components such as - // the one in pkg/traces/servicegraphprocessor would not work. - // - // disableHighCardinalityMetrics is required so that we don't include labels containing ports and IP addresses in gRPC metrics. - // Example metric with high cardinality... - // rpc_server_duration_bucket{net_sock_peer_addr="127.0.0.1",net_sock_peer_port="59947",rpc_grpc_status_code="0",rpc_method="Export",rpc_service="opentelemetry.proto.collector.trace.v1.TraceService",rpc_system="grpc",traces_config="default",le="7500"} 294 - // ... the same metric when disableHighCardinalityMetrics is switched on looks like this: - // rpc_server_duration_bucket{rpc_grpc_status_code="0",rpc_method="Export",rpc_service="opentelemetry.proto.collector.trace.v1.TraceService",rpc_system="grpc",traces_config="default",le="7500"} 32 - // For more context: - // https://opentelemetry.io/docs/specs/otel/metrics/semantic_conventions/rpc-metrics/ - // https://github.com/open-telemetry/opentelemetry-go-contrib/pull/2700 - // https://github.com/open-telemetry/opentelemetry-collector/pull/6788/files - // - // TODO: Remove "telemetry.useOtelForInternalMetrics" when Collector components - // use OpenTelemetry metrics by default. - staticModeOtelFeatureGates = []string{ - "telemetry.useOtelForInternalMetrics", - "telemetry.disableHighCardinalityMetrics", - } - // Enable the "telemetry.useOtelForInternalMetrics" Collector feature gate. // Currently, Collector components uses OpenCensus metrics by default. 
// Those metrics cannot be integrated with Agent Flow, @@ -56,12 +20,21 @@ var ( } ) -// Enables a set of feature gates which should always be enabled for Static mode. -func SetupStaticModeOtelFeatureGates() error { - return EnableOtelFeatureGates(staticModeOtelFeatureGates...) -} - // Enables a set of feature gates which should always be enabled for Flow mode. func SetupFlowModeOtelFeatureGates() error { return EnableOtelFeatureGates(flowModeOtelFeatureGates...) } + +// Enables a set of feature gates in Otel's Global Feature Gate Registry. +func EnableOtelFeatureGates(fgNames ...string) error { + fgReg := featuregate.GlobalRegistry() + + for _, fg := range fgNames { + err := fgReg.Set(fg, true) + if err != nil { + return fmt.Errorf("error setting Otel feature gate: %w", err) + } + } + + return nil +} diff --git a/internal/util/otel_feature_gate_test.go b/internal/util/otel_feature_gate_test.go index d4b49ea92c..e3809de8cb 100644 --- a/internal/util/otel_feature_gate_test.go +++ b/internal/util/otel_feature_gate_test.go @@ -15,9 +15,6 @@ func Test_FeatureGates(t *testing.T) { fgSet := make(map[string]struct{}) - for _, fg := range staticModeOtelFeatureGates { - fgSet[fg] = struct{}{} - } for _, fg := range flowModeOtelFeatureGates { fgSet[fg] = struct{}{} } @@ -34,7 +31,6 @@ func Test_FeatureGates(t *testing.T) { require.Falsef(t, g.IsEnabled(), "feature gate %s is enabled - should it be removed from the Agent?", g.ID()) }) - require.NoError(t, SetupStaticModeOtelFeatureGates()) require.NoError(t, SetupFlowModeOtelFeatureGates()) reg.VisitAll(func(g *featuregate.Gate) { diff --git a/internal/util/sanitize.go b/internal/util/sanitize.go deleted file mode 100644 index f47595b3aa..0000000000 --- a/internal/util/sanitize.go +++ /dev/null @@ -1,10 +0,0 @@ -package util - -import "regexp" - -var invalidLabelCharRE = regexp.MustCompile(`[^a-zA-Z0-9_]`) - -// SanitizeLabelName sanitizes a label name for Prometheus. -func SanitizeLabelName(name string) string { - return invalidLabelCharRE.ReplaceAllString(name, "_") -} diff --git a/internal/util/structwalk/structwalk.go b/internal/util/structwalk/structwalk.go deleted file mode 100644 index a1cce56948..0000000000 --- a/internal/util/structwalk/structwalk.go +++ /dev/null @@ -1,77 +0,0 @@ -// Package structwalk allows you to "walk" the hierarchy of a struct. It is -// very similar to github.com/mitchellh/reflectwalk but allows you to change -// the visitor mid-walk. -package structwalk - -import ( - "reflect" - - "github.com/mitchellh/reflectwalk" -) - -// Walk traverses the hierarchy of o in depth-first order. It starts by calling -// v.Visit(o). If the visitor w returned by v.Visit(o) is not nil, Walk is -// invoked recursively with visitor w for each of the structs inside of o, -// followed by a call to w.Visit(nil). -// -// o must be non-nil. -func Walk(v Visitor, o interface{}) { - sw := structWalker{v: v} - _ = reflectwalk.Walk(o, &sw) -} - -// Visitor will have its Visit method invoked for each struct value encountered -// by Walk. If w returned from Visit is non-nil, Walk will then visit each child -// of value with w. The final call after visiting all children will be to -// w.Visit(nil). -type Visitor interface { - Visit(value interface{}) (w Visitor) -} - -type structWalker struct { - cur interface{} - v Visitor -} - -// Struct invoke the Visitor for v and its children. -func (sw *structWalker) Struct(v reflect.Value) error { - // structWalker will walk absolutely all fields, even unexported fields or - // types. 
We can only interface exported fields, so we need to abort early - // for anything that's not supported. - if !v.CanInterface() { - return nil - } - - // Get the interface to the value. reflectwalk will fully derefernce all - // structs, so if it's possible for us to get address it into a pointer, - // we will use that for visiting. - var ( - rawValue = v.Interface() - ptrValue = rawValue - ) - if v.Kind() != reflect.Ptr && v.CanAddr() { - ptrValue = v.Addr().Interface() - } - - // Struct will recursively call reflectwalk.Walk with a new walker, which - // means that sw.Struct will be called twice for the same value. We want - // to ignore calls to Struct with the same value so we don't recurse - // infinitely. - if sw.cur != nil && reflect.DeepEqual(rawValue, sw.cur) { - return nil - } - - // Visit our struct and create a new walker with the returned Visitor. - w := sw.v.Visit(ptrValue) - if w == nil { - return reflectwalk.SkipEntry - } - _ = reflectwalk.Walk(rawValue, &structWalker{cur: rawValue, v: w}) - w.Visit(nil) - - return reflectwalk.SkipEntry -} - -func (sw *structWalker) StructField(reflect.StructField, reflect.Value) error { - return nil -} diff --git a/internal/util/structwalk/structwalk_test.go b/internal/util/structwalk/structwalk_test.go deleted file mode 100644 index 44d1263f22..0000000000 --- a/internal/util/structwalk/structwalk_test.go +++ /dev/null @@ -1,63 +0,0 @@ -package structwalk - -import ( - "testing" - - "github.com/stretchr/testify/require" -) - -type LevelA struct { - Field1 bool - Field2 string - Field3 int - Nested LevelB -} - -type LevelB struct { - Level1 bool - Level2 string - Field3 int - Nested LevelC -} - -type LevelC struct { - Level1 bool - Level2 string - Field3 int -} - -func TestWalk(t *testing.T) { - var ( - iteration int - fv FuncVisitor - ) - fv = func(val interface{}) Visitor { - iteration++ - - // After visiting all 3 structs, should receive a w.Visit(nil) for each level - if iteration >= 4 { - require.Nil(t, val) - return nil - } - - switch iteration { - case 1: - require.IsType(t, LevelA{}, val) - case 2: - require.IsType(t, LevelB{}, val) - case 3: - require.IsType(t, LevelC{}, val) - default: - require.FailNow(t, "unexpected iteration") - } - - return fv - } - - var val LevelA - Walk(fv, val) -} - -type FuncVisitor func(v interface{}) Visitor - -func (fv FuncVisitor) Visit(v interface{}) Visitor { return fv(v) } diff --git a/internal/util/subset/subset.go b/internal/util/subset/subset.go deleted file mode 100644 index 6f6561b2ed..0000000000 --- a/internal/util/subset/subset.go +++ /dev/null @@ -1,120 +0,0 @@ -// Package subset implements functions to check if one value is a subset of -// another. -package subset - -import ( - "fmt" - "reflect" - - "gopkg.in/yaml.v2" -) - -// Assert checks whether target is a subset of source. source and target must -// be the same type. target is a subset of source when: -// -// - If target and source are slices or arrays, then target must have the same -// number of elements as source. Each element in target must be a subset of -// the corresponding element from source. -// -// - If target and source are maps, each key in source must exist in target. -// The value for each element in target must be a subset of the corresponding -// element from source. -// -// - Otherwise, target and source must be deeply equal. -// -// An instance of Error will be returned when target is not a subset of source. 
-// -// Subset checking is primarily useful when doing things like YAML assertions, -// where you only want to ensure that a subset of YAML is defined as expected. -func Assert(source, target interface{}) error { - return assert(reflect.ValueOf(source), reflect.ValueOf(target)) -} - -func assert(source, target reflect.Value) error { - // Deference interface/pointers for direct comparison - for canElem(source) { - source = source.Elem() - } - for canElem(target) { - target = target.Elem() - } - - if source.Type() != target.Type() { - return &Error{Message: fmt.Sprintf("type mismatch: %T != %T", source.Interface(), target.Interface())} - } - - switch source.Kind() { - case reflect.Slice, reflect.Array: - if source.Len() != target.Len() { - return &Error{Message: fmt.Sprintf("length mismatch: %d != %d", source.Len(), target.Len())} - } - for i := 0; i < source.Len(); i++ { - if err := assert(source.Index(i), target.Index(i)); err != nil { - return &Error{ - Message: fmt.Sprintf("element %d", i), - Inner: err, - } - } - } - return nil - - case reflect.Map: - iter := source.MapRange() - for iter.Next() { - var ( - sourceElement = iter.Value() - targetElement = target.MapIndex(iter.Key()) - ) - if !targetElement.IsValid() { - return &Error{Message: fmt.Sprintf("missing key %v", iter.Key().Interface())} - } - if err := assert(sourceElement, targetElement); err != nil { - return &Error{ - Message: fmt.Sprintf("%v", iter.Key().Interface()), - Inner: err, - } - } - } - return nil - - default: - if !reflect.DeepEqual(source.Interface(), target.Interface()) { - return &Error{Message: fmt.Sprintf("%v != %v", source, target)} - } - return nil - } -} - -func canElem(v reflect.Value) bool { - return v.Kind() == reflect.Interface || v.Kind() == reflect.Ptr -} - -// Error is a subset assertion error. -type Error struct { - Message string // Message of the error - Inner error // Optional inner error -} - -// Error implements error. -func (e *Error) Error() string { - if e.Inner == nil { - return e.Message - } - return fmt.Sprintf("%s: %s", e.Message, e.Inner) -} - -// Unwrap returns the inner error, if set. -func (e *Error) Unwrap() error { return e.Inner } - -// YAMLAssert is like Assert but accepts YAML bytes as input. 
-func YAMLAssert(source, target []byte) error { - var sourceValue interface{} - if err := yaml.Unmarshal(source, &sourceValue); err != nil { - return err - } - var targetValue interface{} - if err := yaml.Unmarshal(target, &targetValue); err != nil { - return err - } - return Assert(sourceValue, targetValue) -} diff --git a/internal/util/subset/subset_test.go b/internal/util/subset/subset_test.go deleted file mode 100644 index a44441dd26..0000000000 --- a/internal/util/subset/subset_test.go +++ /dev/null @@ -1,92 +0,0 @@ -package subset - -import ( - "testing" - - "github.com/stretchr/testify/require" -) - -func TestAssert(t *testing.T) { - tt := []struct { - name string - source, target string - expect string - }{ - // Plain values - { - name: "values match", - source: `true`, - target: `true`, - expect: "", - }, - { - name: "values mismatch", - source: `true`, - target: `false`, - expect: "true != false", - }, - { - name: "type mismatch", - source: `true`, - target: `5`, - expect: "type mismatch: bool != int", - }, - - // Arrays - { - name: "arrays match", - source: `[1, 2, 3]`, - target: `[1, 2, 3]`, - expect: "", - }, - { - name: "arrays mismatch", - source: `[1, 2, 3]`, - target: `[1, 2, 4]`, - expect: "element 2: 3 != 4", - }, - { - name: "array element type mismatch", - source: `[1, 2, 3]`, - target: `[1, 2, true]`, - expect: "element 2: type mismatch: int != bool", - }, - - // Maps - { - name: "maps match", - source: `{"hello": "world"}`, - target: `{"hello": "world"}`, - expect: "", - }, - { - name: "maps mismatch", - source: `{"hello": "world", "year": 2000}`, - target: `{"hello": "world", "year": 2001}`, - expect: "year: 2000 != 2001", - }, - { - name: "maps subset", - source: `{"hello": "world"}`, - target: `{"hello": "world", "year": 2001}`, - expect: "", - }, - { - name: "maps type mismatch", - source: `{"hello": "world", "year": 2000}`, - target: `{"hello": "world", "year": "yes"}`, - expect: "year: type mismatch: int != string", - }, - } - - for _, tc := range tt { - t.Run(tc.name, func(t *testing.T) { - err := YAMLAssert([]byte(tc.source), []byte(tc.target)) - if tc.expect == "" { - require.NoError(t, err) - } else { - require.EqualError(t, err, tc.expect) - } - }) - } -} diff --git a/internal/util/unregisterer.go b/internal/util/unregisterer.go deleted file mode 100644 index 822132b017..0000000000 --- a/internal/util/unregisterer.go +++ /dev/null @@ -1,63 +0,0 @@ -package util - -import "github.com/prometheus/client_golang/prometheus" - -// Unregisterer is a Prometheus Registerer that can unregister all collectors -// passed to it. -type Unregisterer struct { - wrap prometheus.Registerer - cs map[prometheus.Collector]struct{} -} - -// WrapWithUnregisterer wraps a prometheus Registerer with capabilities to -// unregister all collectors. -func WrapWithUnregisterer(reg prometheus.Registerer) *Unregisterer { - return &Unregisterer{ - wrap: reg, - cs: make(map[prometheus.Collector]struct{}), - } -} - -// Register implements prometheus.Registerer. -func (u *Unregisterer) Register(c prometheus.Collector) error { - if u.wrap == nil { - return nil - } - - err := u.wrap.Register(c) - if err != nil { - return err - } - u.cs[c] = struct{}{} - return nil -} - -// MustRegister implements prometheus.Registerer. -func (u *Unregisterer) MustRegister(cs ...prometheus.Collector) { - for _, c := range cs { - if err := u.Register(c); err != nil { - panic(err) - } - } -} - -// Unregister implements prometheus.Registerer. 
-func (u *Unregisterer) Unregister(c prometheus.Collector) bool { - if u.wrap != nil && u.wrap.Unregister(c) { - delete(u.cs, c) - return true - } - return false -} - -// UnregisterAll unregisters all collectors that were registered through the -// Registerer. -func (u *Unregisterer) UnregisterAll() bool { - success := true - for c := range u.cs { - if !u.Unregister(c) { - success = false - } - } - return success -} From 8d4de4ce24acd7c6f5e99eace29a9d52a6d4f8a4 Mon Sep 17 00:00:00 2001 From: Robert Fratto Date: Thu, 21 Mar 2024 14:14:12 -0400 Subject: [PATCH 033/136] tools/make: fix dependencies for package targets (#56) --- tools/make/packaging.mk | 12 ++++++------ 1 file changed, 6 insertions(+), 6 deletions(-) diff --git a/tools/make/packaging.mk b/tools/make/packaging.mk index 41133a3f18..0c45f94eb1 100644 --- a/tools/make/packaging.mk +++ b/tools/make/packaging.mk @@ -150,7 +150,7 @@ define generate_agent_fpm = --config-files $(AGENT_ENVIRONMENT_FILE_$(1)) \ --rpm-rpmbuild-define "_build_id_links none" \ --package $(4) \ - dist.temp/grafana-agent-linux-$(3)=/usr/bin/grafana-agent \ + dist/grafana-agent-linux-$(3)=/usr/bin/grafana-agent \ packaging/grafana-agent-/grafana-agent.river=/etc/grafana-agent.river \ packaging/grafana-agent-/environment-file=$(AGENT_ENVIRONMENT_FILE_$(1)) \ packaging/grafana-agent-/$(1)/grafana-agent.service=/usr/lib/systemd/system/grafana-agent.service @@ -167,7 +167,7 @@ dist-agent-packages: dist-agent-packages-amd64 \ dist-agent-packages-s390x .PHONY: dist-agent-packages-amd64 -dist-agent-packages-amd64: dist.temp/grafana-agent-linux-amd64 +dist-agent-packages-amd64: dist/grafana-agent-linux-amd64 ifeq ($(USE_CONTAINER),1) $(RERUN_IN_CONTAINER) else @@ -176,7 +176,7 @@ else endif .PHONY: dist-agent-packages-arm64 -dist-agent-packages-arm64: dist.temp/grafana-agent-linux-arm64 +dist-agent-packages-arm64: dist/grafana-agent-linux-arm64 ifeq ($(USE_CONTAINER),1) $(RERUN_IN_CONTAINER) else @@ -185,7 +185,7 @@ else endif .PHONY: dist-agent-packages-ppc64le -dist-agent-packages-ppc64le: dist.temp/grafana-agent-linux-ppc64le +dist-agent-packages-ppc64le: dist/grafana-agent-linux-ppc64le ifeq ($(USE_CONTAINER),1) $(RERUN_IN_CONTAINER) else @@ -194,7 +194,7 @@ else endif .PHONY: dist-agent-packages-s390x -dist-agent-packages-s390x: dist.temp/grafana-agent-linux-s390x +dist-agent-packages-s390x: dist/grafana-agent-linux-s390x ifeq ($(USE_CONTAINER),1) $(RERUN_IN_CONTAINER) else @@ -207,7 +207,7 @@ endif # .PHONY: dist-agent-installer -dist-agent-installer: dist.temp/grafana-agent-windows-amd64.exe dist.temp/grafana-agent-service-windows-amd64.exe +dist-agent-installer: dist/grafana-agent-windows-amd64.exe dist.temp/grafana-agent-service-windows-amd64.exe ifeq ($(USE_CONTAINER),1) $(RERUN_IN_CONTAINER) else From efb7a3ce25a8671349b76e3de198e8a9cc323be2 Mon Sep 17 00:00:00 2001 From: Robert Fratto Date: Fri, 22 Mar 2024 08:54:44 -0400 Subject: [PATCH 034/136] Makefile: add linting and testing for submodules (#59) --- Makefile | 6 ++++-- 1 file changed, 4 insertions(+), 2 deletions(-) diff --git a/Makefile b/Makefile index f844732d9a..b369f6bec9 100644 --- a/Makefile +++ b/Makefile @@ -135,15 +135,17 @@ endif .PHONY: lint lint: agentlint - golangci-lint run -v --timeout=10m + find . -name go.mod -execdir golangci-lint run -v --timeout=10m \; $(AGENTLINT_BINARY) ./... .PHONY: test # We have to run test twice: once for all packages with -race and then once -# more without -race for packages that have known race detection issues. 
+# more without -race for packages that have known race detection issues. The +# final command runs tests for all other submodules. test: $(GO_ENV) go test $(GO_FLAGS) -race $(shell go list ./... | grep -v /integration-tests/) $(GO_ENV) go test $(GO_FLAGS) ./internal/static/integrations/node_exporter ./internal/static/logs ./internal/component/otelcol/processor/tail_sampling ./internal/component/loki/source/file ./internal/component/loki/source/docker + $(GO_ENV) find . -name go.mod -not -path "./go.mod" -execdir go test -race ./... \; test-packages: docker pull $(BUILD_IMAGE) From df75c28bd5629dbf834bc81f723da5311b8c5867 Mon Sep 17 00:00:00 2001 From: Robert Fratto Date: Fri, 22 Mar 2024 13:19:37 -0400 Subject: [PATCH 035/136] ci: fix backport job (#60) Use common secret GITHUB_TOKEN for backport jobs. This means that backports will be done by the github-actions bot instead of grafanabot, but the backport job will work properly. I verified this works by looking at grafana/tempo, which uses GITHUB_TOKEN instead of a grafanabot API token. --- .github/workflows/backport.yml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/.github/workflows/backport.yml b/.github/workflows/backport.yml index fbf9ddd1f1..aaa838f2ac 100644 --- a/.github/workflows/backport.yml +++ b/.github/workflows/backport.yml @@ -22,6 +22,6 @@ jobs: - name: Run backport uses: ./actions/backport with: - token: ${{secrets.GH_BOT_ACCESS_TOKEN}} + token: ${{secrets.GITHUB_TOKEN}} labelsToAdd: "backport" title: "[{{base}}] {{originalTitle}}" From adda0653470a90f7f2c8611af62466e7987ee829 Mon Sep 17 00:00:00 2001 From: Robert Fratto Date: Fri, 22 Mar 2024 15:43:43 -0400 Subject: [PATCH 036/136] flowmode: only report usagestats for builtin components (#62) --- internal/component/component_provider.go | 27 ++++++++++++++++++++++++ internal/flow/flow_components.go | 9 ++++++++ internal/flowmode/cmd_run.go | 3 +++ 3 files changed, 39 insertions(+) diff --git a/internal/component/component_provider.go b/internal/component/component_provider.go index 630961d8f6..82a60782df 100644 --- a/internal/component/component_provider.go +++ b/internal/component/component_provider.go @@ -3,6 +3,7 @@ package component import ( "encoding/json" "errors" + "fmt" "strings" "time" @@ -73,6 +74,30 @@ func (id ID) String() string { return id.ModuleID + "/" + id.LocalID } +// Type denotes a type of component. +type Type int + +const ( + // TypeInvalid is an invalid value for Type. + TypeInvalid Type = iota + TypeBuiltin // TypeBuiltin represents builtin components. + TypeCustom // TypeCustom represents custom components defined using `declare`. +) + +// String returns a string representation of the component type. +func (t Type) String() string { + switch t { + case TypeInvalid: + return "" + case TypeBuiltin: + return "builtin" + case TypeCustom: + return "custom" + default: + return fmt.Sprintf("Type(%d)", t) + } +} + // Info ia detailed information about a component. type Info struct { // Component is the instance of the component. Component may be nil if a @@ -80,6 +105,8 @@ type Info struct { // evaluated yet. Component Component + Type Type // Type of the component. + // ModuleIDs includes the list of current module IDs that the component is // running. Module IDs are always globally unique. 
// diff --git a/internal/flow/flow_components.go b/internal/flow/flow_components.go index bdbe87a2ab..d23b1da6b5 100644 --- a/internal/flow/flow_components.go +++ b/internal/flow/flow_components.go @@ -108,6 +108,7 @@ func (f *Flow) getComponentDetail(cn controller.ComponentNode, graph *dag.Graph, LocalID: cn.NodeID(), }, Label: cn.Label(), + Type: componentType(cn), References: references, ReferencedBy: referencedBy, @@ -129,3 +130,11 @@ func (f *Flow) getComponentDetail(cn controller.ComponentNode, graph *dag.Graph, } return componentInfo } + +func componentType(cn controller.ComponentNode) component.Type { + if _, ok := cn.(*controller.BuiltinComponentNode); ok { + return component.TypeBuiltin + } + + return component.TypeCustom +} diff --git a/internal/flowmode/cmd_run.go b/internal/flowmode/cmd_run.go index aebc751dc2..3530853d87 100644 --- a/internal/flowmode/cmd_run.go +++ b/internal/flowmode/cmd_run.go @@ -387,6 +387,9 @@ func getEnabledComponentsFunc(f *flow.Flow) func() map[string]interface{} { components := component.GetAllComponents(f, component.InfoOptions{}) componentNames := map[string]struct{}{} for _, c := range components { + if c.Type != component.TypeBuiltin { + continue + } componentNames[c.ComponentName] = struct{}{} } return map[string]interface{}{"enabled-components": maps.Keys(componentNames)} From b36ee4bd50820eabc708756b243b802f8e0e3adf Mon Sep 17 00:00:00 2001 From: Clayton Cornell <131809008+clayton-cornell@users.noreply.github.com> Date: Fri, 22 Mar 2024 13:10:29 -0700 Subject: [PATCH 037/136] Docs/set docs hidden (#61) * Set docs to be hidden * Add hidden flag to other top level topics --- docs/sources/_index.md | 19 +++---------------- docs/sources/_index.md.t | 19 +++---------------- docs/sources/about.md | 3 +++ docs/sources/data-collection.md | 3 +++ docs/sources/release-notes.md | 3 +++ docs/sources/stability.md | 3 +++ 6 files changed, 18 insertions(+), 32 deletions(-) diff --git a/docs/sources/_index.md b/docs/sources/_index.md index e13b013f8f..1d1b838505 100644 --- a/docs/sources/_index.md +++ b/docs/sources/_index.md @@ -8,6 +8,9 @@ cascade: OTEL_VERSION: v0.87.0 PRODUCT_NAME: Grafana Alloy PRODUCT_ROOT_NAME: Alloy + _build: + list: false + noindex: true --- # {{% param "PRODUCT_NAME" %}} @@ -32,22 +35,6 @@ It is designed to be flexible, performant, and compatible with multiple ecosyste * **Powerful**: Write programmable pipelines with ease, and debug them using a [built-in UI][UI]. * **Batteries included**: Integrate with systems like MySQL, Kubernetes, and Apache to get telemetry that's immediately useful. - - ## Supported platforms * Linux diff --git a/docs/sources/_index.md.t b/docs/sources/_index.md.t index a751d5bbed..475bbabef2 100644 --- a/docs/sources/_index.md.t +++ b/docs/sources/_index.md.t @@ -8,6 +8,9 @@ cascade: OTEL_VERSION: v0.87.0 PRODUCT_NAME: Grafana Alloy PRODUCT_ROOT_NAME: Alloy + _build: + list: false + noindex: true --- # {{% param "PRODUCT_NAME" %}} @@ -32,22 +35,6 @@ It is designed to be flexible, performant, and compatible with multiple ecosyste * **Powerful**: Write programmable pipelines with ease, and debug them using a [built-in UI][UI]. * **Batteries included**: Integrate with systems like MySQL, Kubernetes, and Apache to get telemetry that's immediately useful. 
- - ## Supported platforms * Linux diff --git a/docs/sources/about.md b/docs/sources/about.md index a2dcb00b71..b4252131e5 100644 --- a/docs/sources/about.md +++ b/docs/sources/about.md @@ -4,6 +4,9 @@ description: Grafana Alloy is a flexible, performant, vendor-neutral, telemetry menuTitle: Introduction title: Introduction to Grafana Alloy weight: 10 +_build: + list: false +noindex: true --- # Introduction to {{% param "PRODUCT_NAME" %}} diff --git a/docs/sources/data-collection.md b/docs/sources/data-collection.md index 21d2655b00..f0cf01dfe6 100644 --- a/docs/sources/data-collection.md +++ b/docs/sources/data-collection.md @@ -4,6 +4,9 @@ description: Grafana Alloy data collection menuTitle: Data collection title: Grafana Alloy data collection weight: 900 +_build: + list: false +noindex: true --- # {{% param "PRODUCT_NAME" %}} Data collection diff --git a/docs/sources/release-notes.md b/docs/sources/release-notes.md index 2d282a0235..400a56eb38 100644 --- a/docs/sources/release-notes.md +++ b/docs/sources/release-notes.md @@ -4,6 +4,9 @@ description: Release notes for Grafana Alloy menuTitle: Release notes title: Release notes for Grafana Alloy weight: 999 +_build: + list: false +noindex: true --- # Release notes for {{% param "PRODUCT_NAME" %}} diff --git a/docs/sources/stability.md b/docs/sources/stability.md index 2cdb1d0087..29bab56324 100644 --- a/docs/sources/stability.md +++ b/docs/sources/stability.md @@ -3,6 +3,9 @@ canonical: https://grafana.com/docs/alloy/latest/stability/ description: Grafana Alloy features fall into one of three stability categories, experimental, beta, or stable title: Stability weight: 600 +_build: + list: false +noindex: true --- # Stability From 77fba860d63b9efa7133a5162eafd1cb1439c680 Mon Sep 17 00:00:00 2001 From: Robert Fratto Date: Fri, 22 Mar 2024 18:43:39 -0400 Subject: [PATCH 038/136] build: temporarily remove boringcrypto (#63) Temporarily remove boringcrypto builds as we need time to figure out what boringcrypto support looks like in Alloy and how we deliver release assets for boringcrypto builds. 
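For reference, a boringcrypto build is an otherwise ordinary `go build`
invocation run with the relevant Go experiment enabled. The Makefile and
packaging targets deleted below reduce to roughly the following commands
(a sketch assembled from the removed `agent-boringcrypto` and
`agent-windows-boringcrypto` targets; the exact flags and output paths
are illustrative only, not a supported interface going forward):

    # Linux binary linked against the BoringCrypto module.
    GOEXPERIMENT=boringcrypto GOOS=linux GOARCH=amd64 \
      go build -tags "netgo builtinassets promtail_journal_enabled" \
      -o build/grafana-agent-boringcrypto ./cmd/grafana-agent

    # Windows used the CNG-based experiment plus an extra build tag.
    GOEXPERIMENT=cngcrypto GOOS=windows GOARCH=amd64 \
      go build -tags cngcrypto \
      -o build/grafana-agent-windows-boringcrypto.exe ./cmd/grafana-agent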
--- .drone/drone.yml | 53 +---------------------- .drone/pipelines/crosscompile.jsonnet | 22 +--------- .drone/pipelines/publish.jsonnet | 5 +-- Makefile | 62 ++++++++------------------- tools/ci/docker-containers | 20 +-------- tools/make/packaging.mk | 24 +---------- 6 files changed, 25 insertions(+), 161 deletions(-) diff --git a/.drone/drone.yml b/.drone/drone.yml index 59fc5df03c..a8a40627e9 100644 --- a/.drone/drone.yml +++ b/.drone/drone.yml @@ -252,57 +252,6 @@ trigger: type: docker --- kind: pipeline -name: Build agent-boringcrypto (Linux amd64 boringcrypto) -platform: - arch: amd64 - os: linux -steps: -- commands: - - make generate-ui - - GO_TAGS="builtinassets promtail_journal_enabled" GOOS=linux GOARCH=amd64 GOARM= - GOEXPERIMENT=boringcrypto make agent-boringcrypto - image: grafana/agent-build-image:0.40.2 - name: Build -trigger: - event: - - pull_request -type: docker ---- -kind: pipeline -name: Build agent-boringcrypto (Linux arm64 boringcrypto) -platform: - arch: amd64 - os: linux -steps: -- commands: - - make generate-ui - - GO_TAGS="builtinassets promtail_journal_enabled" GOOS=linux GOARCH=arm64 GOARM= - GOEXPERIMENT=boringcrypto make agent-boringcrypto - image: grafana/agent-build-image:0.40.2 - name: Build -trigger: - event: - - pull_request -type: docker ---- -kind: pipeline -name: Build agent-windows-boringcrypto (Windows amd64) -platform: - arch: amd64 - os: linux -steps: -- commands: - - make generate-ui - - GO_TAGS="builtinassets" GOOS=windows GOARCH=amd64 GOARM= GOEXPERIMENT=cngcrypto - make agent-windows-boringcrypto - image: grafana/agent-build-image:0.40.2-boringcrypto - name: Build -trigger: - event: - - pull_request -type: docker ---- -kind: pipeline name: Publish development Linux agent container platform: arch: amd64 @@ -450,6 +399,6 @@ kind: secret name: updater_private_key --- kind: signature -hmac: eb4c87d4abc880513c7c2977c46910fa96041461aa2edea16a7970f5c145dd01 +hmac: 2088d828b5aeec4f38008f932b1402a03745e941ba06c6c4d72a96d42dfb7f01 ... 
diff --git a/.drone/pipelines/crosscompile.jsonnet b/.drone/pipelines/crosscompile.jsonnet index 58c28575ad..b516ffcb79 100644 --- a/.drone/pipelines/crosscompile.jsonnet +++ b/.drone/pipelines/crosscompile.jsonnet @@ -32,24 +32,6 @@ local targets = [ 'agent', ]; -local targets_boringcrypto = [ - 'agent-boringcrypto', -]; -local targets_boringcrypto_windows = [ - 'agent-windows-boringcrypto', -]; - - -local os_arch_types_boringcrypto = [ - // Linux boringcrypto - { name: 'Linux amd64 boringcrypto', os: 'linux', arch: 'amd64', experiment: 'boringcrypto' }, - { name: 'Linux arm64 boringcrypto', os: 'linux', arch: 'arm64', experiment: 'boringcrypto' }, -]; -local windows_os_arch_types_boringcrypto = [ - // Windows boringcrypto - { name: 'Windows amd64', os: 'windows', arch: 'amd64', experiment: 'cngcrypto' }, -]; - local build_environments(targets, tuples, image) = std.flatMap(function(target) ( std.map(function(platform) ( pipelines.linux('Build %s (%s)' % [target, platform.name]) { @@ -81,6 +63,4 @@ local build_environments(targets, tuples, image) = std.flatMap(function(target) ), tuples) ), targets); -build_environments(targets, os_arch_tuples, build_image.linux) + -build_environments(targets_boringcrypto, os_arch_types_boringcrypto, build_image.linux) + -build_environments(targets_boringcrypto_windows, windows_os_arch_types_boringcrypto, build_image.boringcrypto) +build_environments(targets, os_arch_tuples, build_image.linux) diff --git a/.drone/pipelines/publish.jsonnet b/.drone/pipelines/publish.jsonnet index 7501017ac3..e81d4386de 100644 --- a/.drone/pipelines/publish.jsonnet +++ b/.drone/pipelines/publish.jsonnet @@ -6,8 +6,7 @@ local ghTokenFilename = '/drone/src/gh-token.txt'; // job_names gets the list of job names for use in depends_on. local job_names = function(jobs) std.map(function(job) job.name, jobs); -local linux_containers = ['agent', 'agent-boringcrypto']; -local dev_linux_containers = ['agent']; // TODO(rfratto): add boringcrypto after figuring out what to do with it +local linux_containers = ['agent']; local linux_containers_dev_jobs = std.map(function(container) ( pipelines.linux('Publish development Linux %s container' % container) { @@ -59,7 +58,7 @@ local linux_containers_dev_jobs = std.map(function(container) ( host: { path: '/var/run/docker.sock' }, }], } -), dev_linux_containers); +), linux_containers); local linux_containers_jobs = std.map(function(container) ( diff --git a/Makefile b/Makefile index b369f6bec9..5fd875edc9 100644 --- a/Makefile +++ b/Makefile @@ -23,15 +23,12 @@ ## ## binaries Compiles all binaries. ## agent Compiles cmd/grafana-agent to $(AGENT_BINARY) -## agent-boringcrypto Compiles cmd/grafana-agent with GOEXPERIMENT=boringcrypto to $(AGENT_BORINGCRYPTO_BINARY) -## agent-windows-boringcrypto Compiles cmd/grafana-agent to $(AGENT_BORINGCRYPTO_BINARY) ## agent-service Compiles cmd/grafana-agent-service to $(SERVICE_BINARY) ## ## Targets for building Docker images: ## ## images Builds all Docker images. ## agent-image Builds agent Docker image. -## agent-boringcrypto-image Builds agent Docker image with boringcrypto. 
## ## Targets for packaging: ## @@ -60,30 +57,25 @@ ## ## Environment variables: ## -## USE_CONTAINER Set to 1 to enable proxying commands to build container -## AGENT_IMAGE Image name:tag built by `make agent-image` -## BUILD_IMAGE Image name:tag used by USE_CONTAINER=1 -## AGENT_BINARY Output path of `make agent` (default build/grafana-agent) -## AGENT_BORINGCRYPTO_BINARY Output path of `make agent-boringcrypto` (default build/grafana-agent-boringcrypto) -## AGENT_BORINGCRYPTO_WINDOWS_BINARY Output path of `make agent-windows-boringcrypto` (default build/grafana-agent-windows-boringcrypto.exe) -## SERVICE_BINARY Output path of `make agent-service` (default build/grafana-agent-service) -## GOOS Override OS to build binaries for -## GOARCH Override target architecture to build binaries for -## GOARM Override ARM version (6 or 7) when GOARCH=arm -## CGO_ENABLED Set to 0 to disable Cgo for binaries. -## RELEASE_BUILD Set to 1 to build release binaries. -## VERSION Version to inject into built binaries. -## GO_TAGS Extra tags to use when building. -## DOCKER_PLATFORM Overrides platform to build Docker images for (defaults to host platform). -## GOEXPERIMENT Used to enable features, most likely boringcrypto via GOEXPERIMENT=boringcrypto. +## USE_CONTAINER Set to 1 to enable proxying commands to build container +## AGENT_IMAGE Image name:tag built by `make agent-image` +## BUILD_IMAGE Image name:tag used by USE_CONTAINER=1 +## AGENT_BINARY Output path of `make agent` (default build/grafana-agent) +## SERVICE_BINARY Output path of `make agent-service` (default build/grafana-agent-service) +## GOOS Override OS to build binaries for +## GOARCH Override target architecture to build binaries for +## GOARM Override ARM version (6 or 7) when GOARCH=arm +## CGO_ENABLED Set to 0 to disable Cgo for binaries. +## RELEASE_BUILD Set to 1 to build release binaries. +## VERSION Version to inject into built binaries. +## GO_TAGS Extra tags to use when building. +## DOCKER_PLATFORM Overrides platform to build Docker images for (defaults to host platform). +## GOEXPERIMENT Used to enable Go features behind feature flags. 
include tools/make/*.mk AGENT_IMAGE ?= grafana/agent:latest -AGENT_BORINGCRYPTO_IMAGE ?= grafana/agent-boringcrypto:latest AGENT_BINARY ?= build/grafana-agent -AGENT_BORINGCRYPTO_BINARY ?= build/grafana-agent-boringcrypto -AGENT_BORINGCRYPTO_WINDOWS_BINARY ?= build/grafana-agent-windows-boringcrypto.exe SERVICE_BINARY ?= build/grafana-agent-service AGENTLINT_BINARY ?= build/agentlint GOOS ?= $(shell go env GOOS) @@ -98,7 +90,7 @@ GOEXPERIMENT ?= $(shell go env GOEXPERIMENT) PROPAGATE_VARS := \ AGENT_IMAGE \ BUILD_IMAGE GOOS GOARCH GOARM CGO_ENABLED RELEASE_BUILD \ - AGENT_BINARY AGENT_BORINGCRYPTO_BINARY \ + AGENT_BINARY \ VERSION GO_TAGS GOEXPERIMENT # @@ -159,8 +151,8 @@ integration-test: # Targets for building binaries # -.PHONY: binaries agent agent-boringcrypto -binaries: agent agent-boringcrypto +.PHONY: binaries agent +binaries: agent agent: ifeq ($(USE_CONTAINER),1) @@ -169,20 +161,6 @@ else $(GO_ENV) go build $(GO_FLAGS) -o $(AGENT_BINARY) ./cmd/grafana-agent endif -agent-boringcrypto: -ifeq ($(USE_CONTAINER),1) - $(RERUN_IN_CONTAINER) -else - GOEXPERIMENT=boringcrypto $(GO_ENV) go build $(GO_FLAGS) -o $(AGENT_BORINGCRYPTO_BINARY) ./cmd/grafana-agent -endif - -agent-windows-boringcrypto: -ifeq ($(USE_CONTAINER),1) - $(RERUN_IN_CONTAINER) -else - GOEXPERIMENT=cngcrypto $(GO_ENV) go build $(GO_FLAGS) -tags cngcrypto -o $(AGENT_BORINGCRYPTO_WINDOWS_BINARY) ./cmd/grafana-agent -endif - # agent-service is not included in binaries since it's Windows-only. agent-service: ifeq ($(USE_CONTAINER),1) @@ -208,13 +186,11 @@ ifneq ($(DOCKER_PLATFORM),) DOCKER_FLAGS += --platform=$(DOCKER_PLATFORM) endif -.PHONY: images agent-image agent-boringcrypto-image -images: agent-image agent-boringcrypto-image +.PHONY: images agent-image +images: agent-image agent-image: DOCKER_BUILDKIT=1 docker build $(DOCKER_FLAGS) -t $(AGENT_IMAGE) -f cmd/grafana-agent/Dockerfile . -agent-boringcrypto-image: - DOCKER_BUILDKIT=1 docker build $(DOCKER_FLAGS) --build-arg GOEXPERIMENT=boringcrypto -t $(AGENT_BORINGCRYPTO_IMAGE) -f cmd/grafana-agent/Dockerfile . # # Targets for generating assets diff --git a/tools/ci/docker-containers b/tools/ci/docker-containers index 44c14cb4c6..a712dba8cc 100755 --- a/tools/ci/docker-containers +++ b/tools/ci/docker-containers @@ -8,12 +8,9 @@ set -euxo pipefail RELEASE_AGENT_IMAGE=grafana/agent -RELEASE_AGENTBORINGCRYPTO_IMAGE=grafana/agent-boringcrypto DEVELOPMENT_AGENT_IMAGE=us-docker.pkg.dev/grafanalabs-dev/docker-alloy-dev/alloy -DEVELOPMENT_AGENTBORINGCRYPTO_IMAGE=us-docker.pkg.dev/grafanalabs-dev/docker-alloy-dev/alloy-boringcrypto DEFAULT_AGENT_IMAGE=${RELEASE_AGENT_IMAGE} -DEFAULT_AGENTBORINGCRYPTO_IMAGE=${RELEASE_AGENTBORINGCRYPTO_IMAGE} # Environment variables used throughout this script. These must be set # otherwise bash will fail with an "unbound variable" error because of the `set @@ -27,11 +24,9 @@ export DEVELOPMENT=${DEVELOPMENT:-} if [ -n "$DEVELOPMENT" ]; then DEFAULT_AGENT_IMAGE=${DEVELOPMENT_AGENT_IMAGE} - DEFAULT_AGENTBORINGCRYPTO_IMAGE=${DEVELOPMENT_AGENTBORINGCRYPTO_IMAGE} fi export AGENT_IMAGE=${DEFAULT_AGENT_IMAGE} -export AGENT_BORINGCRYPTO_IMAGE=${DEFAULT_AGENTBORINGCRYPTO_IMAGE} # We need to determine what version to assign to built binaries. If containers # are being built from a Drone tag trigger, we force the version to come from the @@ -65,7 +60,6 @@ fi # Build all of our images. 
export BUILD_PLATFORMS=linux/amd64,linux/arm64,linux/ppc64le,linux/s390x -export BUILD_PLATFORMS_BORINGCRYPTO=linux/amd64,linux/arm64 case "$TARGET_CONTAINER" in agent) @@ -79,20 +73,8 @@ case "$TARGET_CONTAINER" in . ;; - agent-boringcrypto) - docker buildx build --push \ - --platform $BUILD_PLATFORMS_BORINGCRYPTO \ - --build-arg RELEASE_BUILD=1 \ - --build-arg VERSION="$VERSION" \ - --build-arg GOEXPERIMENT=boringcrypto \ - -t "$AGENT_BORINGCRYPTO_IMAGE:$TAG_VERSION" \ - -t "$AGENT_BORINGCRYPTO_IMAGE:$BRANCH_TAG" \ - -f cmd/grafana-agent/Dockerfile \ - . - ;; - *) - echo "Usage: $0 agent|agent-boringcrypto" + echo "Usage: $0 agent" exit 1 ;; esac diff --git a/tools/make/packaging.mk b/tools/make/packaging.mk index 0c45f94eb1..bd372f8165 100644 --- a/tools/make/packaging.mk +++ b/tools/make/packaging.mk @@ -27,9 +27,7 @@ dist-agent-binaries: dist/grafana-agent-linux-amd64 \ dist/grafana-agent-darwin-amd64 \ dist/grafana-agent-darwin-arm64 \ dist/grafana-agent-windows-amd64.exe \ - dist/grafana-agent-windows-boringcrypto-amd64.exe \ - dist/grafana-agent-freebsd-amd64 \ - dist/grafana-agent-linux-arm64-boringcrypto + dist/grafana-agent-freebsd-amd64 dist/grafana-agent-linux-amd64: GO_TAGS += netgo builtinassets promtail_journal_enabled dist/grafana-agent-linux-amd64: GOOS := linux @@ -83,32 +81,12 @@ dist/grafana-agent-windows-amd64.exe: generate-ui # # TODO(rfratto): add netgo back to Windows builds if a version of Go is # released which natively supports resolving DNS short names on Windows. -dist/grafana-agent-windows-boringcrypto-amd64.exe: GO_TAGS += builtinassets -dist/grafana-agent-windows-boringcrypto-amd64.exe: GOOS := windows -dist/grafana-agent-windows-boringcrypto-amd64.exe: GOARCH := amd64 -dist/grafana-agent-windows-boringcrypto-amd64.exe: generate-ui - $(PACKAGING_VARS) AGENT_BINARY=$@ "$(MAKE)" -f $(PARENT_MAKEFILE) agent - dist/grafana-agent-freebsd-amd64: GO_TAGS += netgo builtinassets dist/grafana-agent-freebsd-amd64: GOOS := freebsd dist/grafana-agent-freebsd-amd64: GOARCH := amd64 dist/grafana-agent-freebsd-amd64: generate-ui $(PACKAGING_VARS) AGENT_BINARY=$@ "$(MAKE)" -f $(PARENT_MAKEFILE) agent -dist/grafana-agent-linux-amd64-boringcrypto: GO_TAGS += netgo builtinassets promtail_journal_enabled -dist/grafana-agent-linux-amd64-boringcrypto: GOOS := linux -dist/grafana-agent-linux-amd64-boringcrypto: GOARCH := amd64 -dist/grafana-agent-linux-amd64-boringcrypto: GOEXPERIMENT := boringcrypto -dist/grafana-agent-linux-amd64-boringcrypto: generate-ui - $(PACKAGING_VARS) AGENT_BINARY=$@ "$(MAKE)" -f $(PARENT_MAKEFILE) agent - -dist/grafana-agent-linux-arm64-boringcrypto: GO_TAGS += netgo builtinassets promtail_journal_enabled -dist/grafana-agent-linux-arm64-boringcrypto: GOOS := linux -dist/grafana-agent-linux-arm64-boringcrypto: GOARCH := arm64 -dist/grafana-agent-linux-arm64-boringcrypto: GOEXPERIMENT := boringcrypto -dist/grafana-agent-linux-arm64-boringcrypto: generate-ui - $(PACKAGING_VARS) AGENT_BINARY=$@ "$(MAKE)" -f $(PARENT_MAKEFILE) agent - # # agent-service release binaries. # From df91a38b299d97151c06d29880c7d09e8035f293 Mon Sep 17 00:00:00 2001 From: Robert Fratto Date: Mon, 25 Mar 2024 13:50:42 -0400 Subject: [PATCH 039/136] misc: rename binaries to Alloy (#65) This also removes the `grafana-` prefix where present, so that `grafana-agent` becomes `alloy`. The `grafana-` prefix was originally added due to the ambiguity of the term "agent," but as Alloy is a much more unique identity the prefix is no longer needed and is removed for the sake of brevity. 
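In day-to-day use the rename surfaces as new Make targets, image names,
and artifact paths. A rough before/after sketch, with names taken from
the changes below (shown only to illustrate the pattern):

    # Before this patch:
    make agent agent-image               # build/grafana-agent, grafana/agent:latest
    make dist/grafana-agent-linux-amd64

    # After this patch:
    make alloy alloy-image               # renamed binary and grafana/alloy image
    make dist/alloy-linux-amd64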
--- .drone/drone.yml | 56 ++--- .drone/pipelines/check_containers.jsonnet | 4 +- .drone/pipelines/crosscompile.jsonnet | 2 +- .drone/pipelines/publish.jsonnet | 10 +- .drone/pipelines/test_packages.jsonnet | 2 +- .github/workflows/check_docs.yml | 4 +- .../publish-documentation-next.yml.disabled | 8 +- ...blish-documentation-versioned.yml.disabled | 8 +- .github/workflows/trivy.yml | 4 +- .github/workflows/update-make-docs.yml | 2 +- Makefile | 54 ++--- build-image/Dockerfile | 1 - .../config_windows.go | 20 +- cmd/alloy-service/doc.go | 3 + .../logger_windows.go | 0 .../main_windows.go | 8 +- .../service.go | 0 .../service_test.go | 0 .../testdata/example_service.go | 0 cmd/{grafana-agent => alloy}/Dockerfile | 32 +-- .../Dockerfile.windows | 18 +- .../example-config.river | 0 cmd/{grafana-agent => alloy}/main.go | 0 cmd/grafana-agent-service/doc.go | 3 - docs/developer/contributing.md | 12 +- internal/cmd/integration-tests/README.md | 6 +- internal/cmd/integration-tests/main.go | 4 +- internal/cmd/integration-tests/utils.go | 24 +- .../agent_linux_packages_test.go | 10 +- .../grafana-agent.river => alloy/alloy.river} | 6 +- packaging/alloy/deb/alloy.service | 20 ++ packaging/alloy/deb/control/postinst | 41 ++++ .../deb/control/prerm | 4 +- packaging/alloy/environment-file | 16 ++ packaging/alloy/rpm/alloy.service | 20 ++ packaging/alloy/rpm/control/postinst | 45 ++++ packaging/alloy/rpm/control/prerm | 20 ++ .../{grafana-agent => alloy}/rpm/gpg-sign.sh | 0 .../windows/config.river | 0 .../windows/install_script.nsis | 28 +-- .../{grafana-agent => alloy}/windows/logo.ico | Bin .../windows/macros.nsis | 0 packaging/grafana-agent/deb/control/postinst | 41 ---- .../grafana-agent/deb/grafana-agent.service | 20 -- packaging/grafana-agent/environment-file | 16 -- packaging/grafana-agent/rpm/control/postinst | 49 ---- packaging/grafana-agent/rpm/control/prerm | 20 -- .../grafana-agent/rpm/grafana-agent.service | 20 -- tools/ci/docker-containers | 20 +- tools/ci/docker-containers-windows | 42 ++-- tools/make/build-container.mk | 4 +- tools/make/packaging.mk | 214 +++++++++--------- 52 files changed, 460 insertions(+), 481 deletions(-) rename cmd/{grafana-agent-service => alloy-service}/config_windows.go (70%) create mode 100644 cmd/alloy-service/doc.go rename cmd/{grafana-agent-service => alloy-service}/logger_windows.go (100%) rename cmd/{grafana-agent-service => alloy-service}/main_windows.go (92%) rename cmd/{grafana-agent-service => alloy-service}/service.go (100%) rename cmd/{grafana-agent-service => alloy-service}/service_test.go (100%) rename cmd/{grafana-agent-service => alloy-service}/testdata/example_service.go (100%) rename cmd/{grafana-agent => alloy}/Dockerfile (62%) rename cmd/{grafana-agent => alloy}/Dockerfile.windows (57%) rename cmd/{grafana-agent => alloy}/example-config.river (100%) rename cmd/{grafana-agent => alloy}/main.go (100%) delete mode 100644 cmd/grafana-agent-service/doc.go rename packaging/{grafana-agent/grafana-agent.river => alloy/alloy.river} (87%) create mode 100644 packaging/alloy/deb/alloy.service create mode 100644 packaging/alloy/deb/control/postinst rename packaging/{grafana-agent => alloy}/deb/control/prerm (50%) create mode 100644 packaging/alloy/environment-file create mode 100644 packaging/alloy/rpm/alloy.service create mode 100644 packaging/alloy/rpm/control/postinst create mode 100644 packaging/alloy/rpm/control/prerm rename packaging/{grafana-agent => alloy}/rpm/gpg-sign.sh (100%) rename packaging/{grafana-agent => alloy}/windows/config.river (100%) 
rename packaging/{grafana-agent => alloy}/windows/install_script.nsis (88%) rename packaging/{grafana-agent => alloy}/windows/logo.ico (100%) rename packaging/{grafana-agent => alloy}/windows/macros.nsis (100%) delete mode 100644 packaging/grafana-agent/deb/control/postinst delete mode 100644 packaging/grafana-agent/deb/grafana-agent.service delete mode 100644 packaging/grafana-agent/environment-file delete mode 100644 packaging/grafana-agent/rpm/control/postinst delete mode 100644 packaging/grafana-agent/rpm/control/prerm delete mode 100644 packaging/grafana-agent/rpm/grafana-agent.service diff --git a/.drone/drone.yml b/.drone/drone.yml index a8a40627e9..ce1dd8a68c 100644 --- a/.drone/drone.yml +++ b/.drone/drone.yml @@ -69,13 +69,13 @@ trigger: type: docker --- kind: pipeline -name: Check Linux container (grafana/agent) +name: Check Linux container (grafana/alloy) platform: arch: amd64 os: linux steps: - commands: - - make agent-image + - make alloy-image image: grafana/agent-build-image:0.40.2 name: Build container volumes: @@ -83,7 +83,7 @@ steps: path: /var/run/docker.sock trigger: paths: - - cmd/grafana-agent/Dockerfile + - cmd/alloy/Dockerfile - tools/ci/docker-containers ref: - refs/heads/main @@ -94,14 +94,14 @@ volumes: name: docker --- kind: pipeline -name: Check Windows container (grafana/agent) +name: Check Windows container (grafana/alloy) platform: arch: amd64 os: windows version: "1809" steps: - commands: - - '& "C:/Program Files/git/bin/bash.exe" ./tools/ci/docker-containers-windows agent' + - '& "C:/Program Files/git/bin/bash.exe" ./tools/ci/docker-containers-windows alloy' image: grafana/agent-build-image:0.40.2-windows name: Build container volumes: @@ -109,7 +109,7 @@ steps: path: //./pipe/docker_engine/ trigger: paths: - - cmd/grafana-agent/Dockerfile.windows + - cmd/alloy/Dockerfile.windows - tools/ci/docker-containers-windows ref: - refs/heads/main @@ -120,7 +120,7 @@ volumes: name: docker --- kind: pipeline -name: Build agent (Linux amd64) +name: Build alloy (Linux amd64) platform: arch: amd64 os: linux @@ -128,7 +128,7 @@ steps: - commands: - make generate-ui - GO_TAGS="builtinassets promtail_journal_enabled" GOOS=linux GOARCH=amd64 GOARM= - make agent + make alloy image: grafana/agent-build-image:0.40.2 name: Build trigger: @@ -137,7 +137,7 @@ trigger: type: docker --- kind: pipeline -name: Build agent (Linux arm64) +name: Build alloy (Linux arm64) platform: arch: amd64 os: linux @@ -145,7 +145,7 @@ steps: - commands: - make generate-ui - GO_TAGS="builtinassets promtail_journal_enabled" GOOS=linux GOARCH=arm64 GOARM= - make agent + make alloy image: grafana/agent-build-image:0.40.2 name: Build trigger: @@ -154,7 +154,7 @@ trigger: type: docker --- kind: pipeline -name: Build agent (Linux ppc64le) +name: Build alloy (Linux ppc64le) platform: arch: amd64 os: linux @@ -162,7 +162,7 @@ steps: - commands: - make generate-ui - GO_TAGS="builtinassets promtail_journal_enabled" GOOS=linux GOARCH=ppc64le GOARM= - make agent + make alloy image: grafana/agent-build-image:0.40.2 name: Build trigger: @@ -171,7 +171,7 @@ trigger: type: docker --- kind: pipeline -name: Build agent (Linux s390x) +name: Build alloy (Linux s390x) platform: arch: amd64 os: linux @@ -179,7 +179,7 @@ steps: - commands: - make generate-ui - GO_TAGS="builtinassets promtail_journal_enabled" GOOS=linux GOARCH=s390x GOARM= - make agent + make alloy image: grafana/agent-build-image:0.40.2 name: Build trigger: @@ -188,14 +188,14 @@ trigger: type: docker --- kind: pipeline -name: Build agent (macOS Intel) 
+name: Build alloy (macOS Intel) platform: arch: amd64 os: linux steps: - commands: - make generate-ui - - GO_TAGS="builtinassets" GOOS=darwin GOARCH=amd64 GOARM= make agent + - GO_TAGS="builtinassets" GOOS=darwin GOARCH=amd64 GOARM= make alloy image: grafana/agent-build-image:0.40.2 name: Build trigger: @@ -204,14 +204,14 @@ trigger: type: docker --- kind: pipeline -name: Build agent (macOS Apple Silicon) +name: Build alloy (macOS Apple Silicon) platform: arch: amd64 os: linux steps: - commands: - make generate-ui - - GO_TAGS="builtinassets" GOOS=darwin GOARCH=arm64 GOARM= make agent + - GO_TAGS="builtinassets" GOOS=darwin GOARCH=arm64 GOARM= make alloy image: grafana/agent-build-image:0.40.2 name: Build trigger: @@ -220,14 +220,14 @@ trigger: type: docker --- kind: pipeline -name: Build agent (Windows amd64) +name: Build alloy (Windows amd64) platform: arch: amd64 os: linux steps: - commands: - make generate-ui - - GO_TAGS="builtinassets" GOOS=windows GOARCH=amd64 GOARM= make agent + - GO_TAGS="builtinassets" GOOS=windows GOARCH=amd64 GOARM= make alloy image: grafana/agent-build-image:0.40.2 name: Build trigger: @@ -236,14 +236,14 @@ trigger: type: docker --- kind: pipeline -name: Build agent (FreeBSD amd64) +name: Build alloy (FreeBSD amd64) platform: arch: amd64 os: linux steps: - commands: - make generate-ui - - GO_TAGS="builtinassets" GOOS=freebsd GOARCH=amd64 GOARM= make agent + - GO_TAGS="builtinassets" GOOS=freebsd GOARCH=amd64 GOARM= make alloy image: grafana/agent-build-image:0.40.2 name: Build trigger: @@ -252,7 +252,7 @@ trigger: type: docker --- kind: pipeline -name: Publish development Linux agent container +name: Publish development Linux alloy container platform: arch: amd64 os: linux @@ -269,10 +269,10 @@ steps: - mkdir -p $HOME/.docker - printenv GCR_CREDS > $HOME/.docker/config.json - docker login -u $DOCKER_LOGIN -p $DOCKER_PASSWORD - - docker buildx create --name multiarch-agent-agent-${DRONE_COMMIT_SHA} --driver + - docker buildx create --name multiarch-alloy-alloy-${DRONE_COMMIT_SHA} --driver docker-container --use - - DEVELOPMENT=1 ./tools/ci/docker-containers agent - - docker buildx rm multiarch-agent-agent-${DRONE_COMMIT_SHA} + - DEVELOPMENT=1 ./tools/ci/docker-containers alloy + - docker buildx rm multiarch-alloy-alloy-${DRONE_COMMIT_SHA} environment: DOCKER_LOGIN: from_secret: docker_login @@ -301,7 +301,7 @@ platform: os: linux steps: - commands: - - DOCKER_OPTS="" make dist/grafana-agent-linux-amd64 + - DOCKER_OPTS="" make dist/alloy-linux-amd64 - DOCKER_OPTS="" make test-packages image: grafana/agent-build-image:0.40.2 name: Test Linux system packages @@ -399,6 +399,6 @@ kind: secret name: updater_private_key --- kind: signature -hmac: 2088d828b5aeec4f38008f932b1402a03745e941ba06c6c4d72a96d42dfb7f01 +hmac: ba4497becf94a0f6f8dead2d99f8636683fbfba81eb869723c0a71ce4f7dcc09 ... 
diff --git a/.drone/pipelines/check_containers.jsonnet b/.drone/pipelines/check_containers.jsonnet index 28f4362310..5c5f2e5284 100644 --- a/.drone/pipelines/check_containers.jsonnet +++ b/.drone/pipelines/check_containers.jsonnet @@ -2,11 +2,11 @@ local build_image = import '../util/build_image.jsonnet'; local pipelines = import '../util/pipelines.jsonnet'; local linux_containers = [ - { name: 'grafana/agent', make: 'make agent-image', path: 'cmd/grafana-agent/Dockerfile' }, + { name: 'grafana/alloy', make: 'make alloy-image', path: 'cmd/alloy/Dockerfile' }, ]; local windows_containers = [ - { name: 'grafana/agent', argument: 'agent', path: 'cmd/grafana-agent/Dockerfile.windows' }, + { name: 'grafana/alloy', argument: 'alloy', path: 'cmd/alloy/Dockerfile.windows' }, ]; ( diff --git a/.drone/pipelines/crosscompile.jsonnet b/.drone/pipelines/crosscompile.jsonnet index b516ffcb79..cb00270e5a 100644 --- a/.drone/pipelines/crosscompile.jsonnet +++ b/.drone/pipelines/crosscompile.jsonnet @@ -29,7 +29,7 @@ local os_arch_tuples = [ local targets = [ - 'agent', + 'alloy', ]; local build_environments(targets, tuples, image) = std.flatMap(function(target) ( diff --git a/.drone/pipelines/publish.jsonnet b/.drone/pipelines/publish.jsonnet index e81d4386de..e78b634813 100644 --- a/.drone/pipelines/publish.jsonnet +++ b/.drone/pipelines/publish.jsonnet @@ -6,7 +6,7 @@ local ghTokenFilename = '/drone/src/gh-token.txt'; // job_names gets the list of job names for use in depends_on. local job_names = function(jobs) std.map(function(job) job.name, jobs); -local linux_containers = ['agent']; +local linux_containers = ['alloy']; local linux_containers_dev_jobs = std.map(function(container) ( pipelines.linux('Publish development Linux %s container' % container) { @@ -46,11 +46,11 @@ local linux_containers_dev_jobs = std.map(function(container) ( 'docker login -u $DOCKER_LOGIN -p $DOCKER_PASSWORD', // Create a buildx worker for our cross platform builds. - 'docker buildx create --name multiarch-agent-%s-${DRONE_COMMIT_SHA} --driver docker-container --use' % container, + 'docker buildx create --name multiarch-alloy-%s-${DRONE_COMMIT_SHA} --driver docker-container --use' % container, 'DEVELOPMENT=1 ./tools/ci/docker-containers %s' % container, - 'docker buildx rm multiarch-agent-%s-${DRONE_COMMIT_SHA}' % container, + 'docker buildx rm multiarch-alloy-%s-${DRONE_COMMIT_SHA}' % container, ], }], volumes: [{ @@ -100,11 +100,11 @@ local linux_containers_jobs = std.map(function(container) ( 'docker login -u $DOCKER_LOGIN -p $DOCKER_PASSWORD', // Create a buildx worker for our cross platform builds. 
- 'docker buildx create --name multiarch-agent-%s-${DRONE_COMMIT_SHA} --driver docker-container --use' % container, + 'docker buildx create --name multiarch-alloy-%s-${DRONE_COMMIT_SHA} --driver docker-container --use' % container, './tools/ci/docker-containers %s' % container, - 'docker buildx rm multiarch-agent-%s-${DRONE_COMMIT_SHA}' % container, + 'docker buildx rm multiarch-alloy-%s-${DRONE_COMMIT_SHA}' % container, ], }], volumes: [{ diff --git a/.drone/pipelines/test_packages.jsonnet b/.drone/pipelines/test_packages.jsonnet index b1b7d20715..42fca63679 100644 --- a/.drone/pipelines/test_packages.jsonnet +++ b/.drone/pipelines/test_packages.jsonnet @@ -18,7 +18,7 @@ local pipelines = import '../util/pipelines.jsonnet'; path: '/var/run/docker.sock', }], commands: [ - 'DOCKER_OPTS="" make dist/grafana-agent-linux-amd64', + 'DOCKER_OPTS="" make dist/alloy-linux-amd64', 'DOCKER_OPTS="" make test-packages', ], }], diff --git a/.github/workflows/check_docs.yml b/.github/workflows/check_docs.yml index 8cb45a3280..0ca919e1bd 100644 --- a/.github/workflows/check_docs.yml +++ b/.github/workflows/check_docs.yml @@ -9,7 +9,7 @@ jobs: - name: "Build technical documentation" run: > docker run - --volume "${PWD}/docs/sources:/hugo/content/docs/agent/latest" + --volume "${PWD}/docs/sources:/hugo/content/docs/alloy/latest" --env HUGO_REFLINKSERRORLEVEL=ERROR --rm grafana/docs-base:latest - /bin/bash -c 'echo -e "---\\nredirectURL: /docs/agent/latest/\\ntype: redirect\\nversioned: true\\n---\\n" > /hugo/content/docs/agent/_index.md && make hugo' + /bin/bash -c 'echo -e "---\\nredirectURL: /docs/alloy/latest/\\ntype: redirect\\nversioned: true\\n---\\n" > /hugo/content/docs/alloy/_index.md && make hugo' diff --git a/.github/workflows/publish-documentation-next.yml.disabled b/.github/workflows/publish-documentation-next.yml.disabled index 18146cb44e..bda9606b88 100644 --- a/.github/workflows/publish-documentation-next.yml.disabled +++ b/.github/workflows/publish-documentation-next.yml.disabled @@ -17,17 +17,17 @@ jobs: - name: "Build technical documentation" run: > docker run - --volume "${PWD}/docs/sources:/hugo/content/docs/agent/latest" + --volume "${PWD}/docs/sources:/hugo/content/docs/alloy/latest" --env HUGO_REFLINKSERRORLEVEL=ERROR --rm grafana/docs-base:latest - /bin/bash -c 'echo -e "---\\nredirectURL: /docs/agent/latest/\\ntype: redirect\\nversioned: true\\n---\\n" > /hugo/content/docs/agent/_index.md && make hugo' + /bin/bash -c 'echo -e "---\\nredirectURL: /docs/alloy/latest/\\ntype: redirect\\nversioned: true\\n---\\n" > /hugo/content/docs/alloy/_index.md && make hugo' sync: runs-on: ubuntu-latest needs: test steps: - - name: "Checkout Agent repo" + - name: "Checkout Alloy repo" uses: actions/checkout@v4 - name: "Clone website-sync Action" @@ -50,7 +50,7 @@ jobs: # The IT helpdesk can update the organization secret. 
github_pat: "grafanabot:${{ secrets.PUBLISH_TO_WEBSITE_TOKEN }}" source_folder: docs/sources - target_folder: 'content/docs/agent/next' + target_folder: 'content/docs/alloy/next' allow_no_changes: true - shell: bash run: | diff --git a/.github/workflows/publish-documentation-versioned.yml.disabled b/.github/workflows/publish-documentation-versioned.yml.disabled index 3339bf59b2..728d54ef06 100644 --- a/.github/workflows/publish-documentation-versioned.yml.disabled +++ b/.github/workflows/publish-documentation-versioned.yml.disabled @@ -19,17 +19,17 @@ jobs: - name: "Build technical documentation" run: > docker run - --volume "${PWD}/docs/sources:/hugo/content/docs/agent/latest" + --volume "${PWD}/docs/sources:/hugo/content/docs/alloy/latest" --env HUGO_REFLINKSERRORLEVEL=ERROR --rm grafana/docs-base:latest - /bin/bash -c 'echo -e "---\\nredirectURL: /docs/agent/latest/\\ntype: redirect\\nversioned: true\\n---\\n" > /hugo/content/docs/agent/_index.md && make hugo' + /bin/bash -c 'echo -e "---\\nredirectURL: /docs/alloy/latest/\\ntype: redirect\\nversioned: true\\n---\\n" > /hugo/content/docs/alloy/_index.md && make hugo' sync: runs-on: ubuntu-latest needs: test steps: - - name: "Checkout Agent repo" + - name: "Checkout Alloy repo" uses: actions/checkout@v4 with: fetch-depth: 0 @@ -80,7 +80,7 @@ jobs: # The IT helpdesk can update the organization secret. github_pat: "grafanabot:${{ secrets.PUBLISH_TO_WEBSITE_TOKEN }}" source_folder: docs/sources - target_folder: 'content/docs/agent/${{ steps.target.outputs.target }}' + target_folder: 'content/docs/alloy/${{ steps.target.outputs.target }}' allow_no_changes: true - shell: bash if: "steps.has-matching-release-tag.outputs.bool == 'true'" diff --git a/.github/workflows/trivy.yml b/.github/workflows/trivy.yml index ceea4dbcbe..b0351a97b3 100644 --- a/.github/workflows/trivy.yml +++ b/.github/workflows/trivy.yml @@ -28,7 +28,7 @@ jobs: - name: Run Trivy vulnerability scanner uses: aquasecurity/trivy-action@062f2592684a31eb3aa050cc61e7ca1451cecd3d with: - image-ref: 'grafana/agent:main' + image-ref: 'grafana/alloy:main' format: 'template' template: '@/contrib/sarif.tpl' output: 'trivy-results.sarif' @@ -37,4 +37,4 @@ jobs: - name: Upload Trivy scan results to GitHub Security tab uses: github/codeql-action/upload-sarif@v3 with: - sarif_file: 'trivy-results.sarif' \ No newline at end of file + sarif_file: 'trivy-results.sarif' diff --git a/.github/workflows/update-make-docs.yml b/.github/workflows/update-make-docs.yml index 188fd6bd80..ff8787ab9e 100644 --- a/.github/workflows/update-make-docs.yml +++ b/.github/workflows/update-make-docs.yml @@ -5,7 +5,7 @@ on: workflow_dispatch: jobs: main: - if: github.repository == 'grafana/agent' + if: github.repository == 'grafana/alloy' runs-on: ubuntu-latest steps: - uses: actions/checkout@v4 diff --git a/Makefile b/Makefile index 5fd875edc9..848f60077c 100644 --- a/Makefile +++ b/Makefile @@ -1,4 +1,4 @@ -## Build, test, and generate code for various parts of Grafana Agent. +## Build, test, and generate code for various parts of Alloy. ## ## At least Go 1.19, git, and a moderately recent version of Docker is required ## to be able to use the Makefile. This list isn't exhaustive and there are other @@ -22,20 +22,20 @@ ## Targets for building binaries: ## ## binaries Compiles all binaries. 
-## agent Compiles cmd/grafana-agent to $(AGENT_BINARY) -## agent-service Compiles cmd/grafana-agent-service to $(SERVICE_BINARY) +## alloy Compiles cmd/alloy to $(ALLOY_BINARY) +## alloy-service Compiles cmd/alloy-service to $(SERVICE_BINARY) ## ## Targets for building Docker images: ## ## images Builds all Docker images. -## agent-image Builds agent Docker image. +## alloy-image Builds alloy Docker image. ## ## Targets for packaging: ## ## dist Produce release assets for everything. -## dist-agent-binaries Produce release-ready agent binaries. +## dist-alloy-binaries Produce release-ready Alloy binaries. ## dist-packages Produce release-ready DEB and RPM packages. -## dist-agent-installer Produce a Windows installer for Grafana Agent. +## dist-alloy-installer Produce a Windows installer for Alloy. ## ## Targets for generating assets: ## @@ -58,10 +58,10 @@ ## Environment variables: ## ## USE_CONTAINER Set to 1 to enable proxying commands to build container -## AGENT_IMAGE Image name:tag built by `make agent-image` +## ALLOY_IMAGE Image name:tag built by `make alloy-image` ## BUILD_IMAGE Image name:tag used by USE_CONTAINER=1 -## AGENT_BINARY Output path of `make agent` (default build/grafana-agent) -## SERVICE_BINARY Output path of `make agent-service` (default build/grafana-agent-service) +## ALLOY_BINARY Output path of `make alloy` (default build/alloy) +## SERVICE_BINARY Output path of `make alloy-service` (default build/alloy-service) ## GOOS Override OS to build binaries for ## GOARCH Override target architecture to build binaries for ## GOARM Override ARM version (6 or 7) when GOARCH=arm @@ -74,9 +74,9 @@ include tools/make/*.mk -AGENT_IMAGE ?= grafana/agent:latest -AGENT_BINARY ?= build/grafana-agent -SERVICE_BINARY ?= build/grafana-agent-service +ALLOY_IMAGE ?= grafana/alloy:latest +ALLOY_BINARY ?= build/alloy +SERVICE_BINARY ?= build/alloy-service AGENTLINT_BINARY ?= build/agentlint GOOS ?= $(shell go env GOOS) GOARCH ?= $(shell go env GOARCH) @@ -88,9 +88,9 @@ GOEXPERIMENT ?= $(shell go env GOEXPERIMENT) # List of all environment variables which will propagate to the build # container. USE_CONTAINER must _not_ be included to avoid infinite recursion. PROPAGATE_VARS := \ - AGENT_IMAGE \ + ALLOY_IMAGE \ BUILD_IMAGE GOOS GOARCH GOARM CGO_ENABLED RELEASE_BUILD \ - AGENT_BINARY \ + ALLOY_BINARY \ VERSION GO_TAGS GOEXPERIMENT # @@ -151,22 +151,22 @@ integration-test: # Targets for building binaries # -.PHONY: binaries agent -binaries: agent +.PHONY: binaries alloy +binaries: alloy -agent: +alloy: ifeq ($(USE_CONTAINER),1) $(RERUN_IN_CONTAINER) else - $(GO_ENV) go build $(GO_FLAGS) -o $(AGENT_BINARY) ./cmd/grafana-agent + $(GO_ENV) go build $(GO_FLAGS) -o $(ALLOY_BINARY) ./cmd/alloy endif -# agent-service is not included in binaries since it's Windows-only. -agent-service: +# alloy-service is not included in binaries since it's Windows-only. +alloy-service: ifeq ($(USE_CONTAINER),1) $(RERUN_IN_CONTAINER) else - $(GO_ENV) go build $(GO_FLAGS) -o $(SERVICE_BINARY) ./cmd/grafana-agent-service + $(GO_ENV) go build $(GO_FLAGS) -o $(SERVICE_BINARY) ./cmd/alloy-service endif agentlint: @@ -186,11 +186,11 @@ ifneq ($(DOCKER_PLATFORM),) DOCKER_FLAGS += --platform=$(DOCKER_PLATFORM) endif -.PHONY: images agent-image -images: agent-image +.PHONY: images alloy-image +images: alloy-image -agent-image: - DOCKER_BUILDKIT=1 docker build $(DOCKER_FLAGS) -t $(AGENT_IMAGE) -f cmd/grafana-agent/Dockerfile . 
+alloy-image: + DOCKER_BUILDKIT=1 docker build $(DOCKER_FLAGS) -t $(ALLOY_IMAGE) -f cmd/alloy/Dockerfile . # # Targets for generating assets @@ -258,9 +258,9 @@ clean: clean-dist clean-build-container-cache .PHONY: info info: @printf "USE_CONTAINER = $(USE_CONTAINER)\n" - @printf "AGENT_IMAGE = $(AGENT_IMAGE)\n" + @printf "ALLOY_IMAGE = $(ALLOY_IMAGE)\n" @printf "BUILD_IMAGE = $(BUILD_IMAGE)\n" - @printf "AGENT_BINARY = $(AGENT_BINARY)\n" + @printf "ALLOY_BINARY = $(ALLOY_BINARY)\n" @printf "GOOS = $(GOOS)\n" @printf "GOARCH = $(GOARCH)\n" @printf "GOARM = $(GOARM)\n" diff --git a/build-image/Dockerfile b/build-image/Dockerfile index 4e8ae49b39..4b4f891974 100644 --- a/build-image/Dockerfile +++ b/build-image/Dockerfile @@ -36,7 +36,6 @@ RUN wget -nv https://nsis.sourceforge.io/mediawiki/images/4/4a/AccessControl.zip # Dependency: Go and Go dependencies FROM ${GO_RUNTIME} as golang -# Keep in sync with cmd/grafana-agent-operator/DEVELOPERS.md ENV CONTROLLER_GEN_VERSION v0.9.2 RUN go install sigs.k8s.io/controller-tools/cmd/controller-gen@$CONTROLLER_GEN_VERSION \ diff --git a/cmd/grafana-agent-service/config_windows.go b/cmd/alloy-service/config_windows.go similarity index 70% rename from cmd/grafana-agent-service/config_windows.go rename to cmd/alloy-service/config_windows.go index 90e0de4480..fb2a72122e 100644 --- a/cmd/grafana-agent-service/config_windows.go +++ b/cmd/alloy-service/config_windows.go @@ -9,20 +9,20 @@ import ( // config holds configuration options to run the service. type config struct { - // ServicePath points to the path of the managed Grafana Agent binary. + // ServicePath points to the path of the managed Alloy binary. ServicePath string - // Args holds arguments to pass to the Grafana Agent binary. os.Args[0] is - // not included. + // Args holds arguments to pass to the Alloy binary. os.Args[0] is not + // included. Args []string - // Environment holds environment variables for the Grafana Agent service. + // Environment holds environment variables for the Alloy service. // Each item represents an environment variable in form "key=value". // All environments variables from the current process with be merged into Environment Environment []string - // WorkingDirectory points to the working directory to run the Grafana Agent - // binary from. + // WorkingDirectory points to the working directory to run the Alloy binary + // from. WorkingDirectory string } @@ -32,22 +32,22 @@ func loadConfig() (*config, error) { // able to either migrate from the old key to the new key or supporting // both the old and the new key at the same time. 
- agentKey, err := registry.OpenKey(registry.LOCAL_MACHINE, `Software\Grafana\Grafana Agent`, registry.READ) + alloyKey, err := registry.OpenKey(registry.LOCAL_MACHINE, `Software\Grafana\Alloy`, registry.READ) if err != nil { return nil, fmt.Errorf("failed to open registry: %w", err) } - servicePath, _, err := agentKey.GetStringValue("") + servicePath, _, err := alloyKey.GetStringValue("") if err != nil { return nil, fmt.Errorf("failed to retrieve key (Default): %w", err) } - args, _, err := agentKey.GetStringsValue("Arguments") + args, _, err := alloyKey.GetStringsValue("Arguments") if err != nil { return nil, fmt.Errorf("failed to retrieve key Arguments: %w", err) } - env, _, err := agentKey.GetStringsValue("Environment") + env, _, err := alloyKey.GetStringsValue("Environment") if err != nil { return nil, fmt.Errorf("failed to retrieve key Environment: %w", err) } diff --git a/cmd/alloy-service/doc.go b/cmd/alloy-service/doc.go new file mode 100644 index 0000000000..04be971702 --- /dev/null +++ b/cmd/alloy-service/doc.go @@ -0,0 +1,3 @@ +// Command alloy-service is a Windows binary which manages Alloy as a Windows +// service. +package main diff --git a/cmd/grafana-agent-service/logger_windows.go b/cmd/alloy-service/logger_windows.go similarity index 100% rename from cmd/grafana-agent-service/logger_windows.go rename to cmd/alloy-service/logger_windows.go diff --git a/cmd/grafana-agent-service/main_windows.go b/cmd/alloy-service/main_windows.go similarity index 92% rename from cmd/grafana-agent-service/main_windows.go rename to cmd/alloy-service/main_windows.go index f37cb6f5d8..ea86ce234f 100644 --- a/cmd/grafana-agent-service/main_windows.go +++ b/cmd/alloy-service/main_windows.go @@ -11,7 +11,7 @@ import ( "golang.org/x/sys/windows/svc" ) -const serviceName = "Grafana Agent" +const serviceName = "Grafana Alloy" func main() { logger, err := newLogger() @@ -39,21 +39,21 @@ func main() { Stderr: logger, } - as := &agentService{logger: logger, cfg: cfg} + as := &alloyService{logger: logger, cfg: cfg} if err := svc.Run(serviceName, as); err != nil { level.Error(logger).Log("msg", "failed to run service", "err", err) os.Exit(1) } } -type agentService struct { +type alloyService struct { logger log.Logger cfg serviceManagerConfig } const cmdsAccepted = svc.AcceptStop | svc.AcceptShutdown -func (as *agentService) Execute(args []string, r <-chan svc.ChangeRequest, s chan<- svc.Status) (svcSpecificEC bool, exitCode uint32) { +func (as *alloyService) Execute(args []string, r <-chan svc.ChangeRequest, s chan<- svc.Status) (svcSpecificEC bool, exitCode uint32) { defer func() { s <- svc.Status{State: svc.Stopped} }() diff --git a/cmd/grafana-agent-service/service.go b/cmd/alloy-service/service.go similarity index 100% rename from cmd/grafana-agent-service/service.go rename to cmd/alloy-service/service.go diff --git a/cmd/grafana-agent-service/service_test.go b/cmd/alloy-service/service_test.go similarity index 100% rename from cmd/grafana-agent-service/service_test.go rename to cmd/alloy-service/service_test.go diff --git a/cmd/grafana-agent-service/testdata/example_service.go b/cmd/alloy-service/testdata/example_service.go similarity index 100% rename from cmd/grafana-agent-service/testdata/example_service.go rename to cmd/alloy-service/testdata/example_service.go diff --git a/cmd/grafana-agent/Dockerfile b/cmd/alloy/Dockerfile similarity index 62% rename from cmd/grafana-agent/Dockerfile rename to cmd/alloy/Dockerfile index e7cf42a67f..7b15b82548 100644 --- a/cmd/grafana-agent/Dockerfile +++ 
b/cmd/alloy/Dockerfile @@ -14,12 +14,12 @@ ARG RELEASE_BUILD=1 ARG VERSION ARG GOEXPERIMENT -COPY . /src/agent -WORKDIR /src/agent +COPY . /src/alloy +WORKDIR /src/alloy -# Build the UI before building the agent, which will then bake the final UI -# into the binary. -RUN --mount=type=cache,target=/src/agent/web/ui/node_modules,sharing=locked \ +# Build the UI before building Alloy, which will then bake the final UI into +# the binary. +RUN --mount=type=cache,target=/src/alloy/web/ui/node_modules,sharing=locked \ make generate-ui RUN --mount=type=cache,target=/root/.cache/go-build \ @@ -28,15 +28,15 @@ RUN --mount=type=cache,target=/root/.cache/go-build \ RELEASE_BUILD=${RELEASE_BUILD} VERSION=${VERSION} \ GO_TAGS="netgo builtinassets promtail_journal_enabled" \ GOEXPERIMENT=${GOEXPERIMENT} \ - make agent + make alloy FROM public.ecr.aws/ubuntu/ubuntu:mantic -#Username and uid for grafana-agent user +# Username and uid for alloy user ARG UID=473 -ARG USERNAME="grafana-agent" +ARG USERNAME="alloy" -LABEL org.opencontainers.image.source="https://github.com/grafana/agent" +LABEL org.opencontainers.image.source="https://github.com/grafana/alloy" # Install dependencies needed at runtime. RUN < +go build ./cmd/alloy/ +./alloy -config.file= # For testing: make lint test # Make sure all the tests pass before you commit and push :) @@ -62,8 +62,8 @@ To build Grafana Agent from source code, please install the following tools: You can directly use the go tool to download and install the agent binary into your GOPATH: - $ GO111MODULE=on go install github.com/grafana/agent/cmd/grafana-agent - $ grafana-agent -config.file=your_config.yml + $ GO111MODULE=on go install github.com/grafana/agent/cmd/alloy + $ alloy run your_config.river An example of the above configuration file can be found [here][example-config]. @@ -74,7 +74,7 @@ You can also clone the repository yourself and build using `make agent`: $ git clone https://github.com/grafana/agent.git $ cd agent $ make agent - $ ./build/grafana-agent -config.file=your_config.yml + $ ./build/alloy run your_config.river The Makefile provides several targets: @@ -205,5 +205,5 @@ a hard fork (i.e., creating a new Go module with the same source). [CLA]: https://cla-assistant.io/grafana/agent [good-first-issue]: https://github.com/grafana/agent/issues?q=is%3Aopen+is%3Aissue+label%3A%22good+first+issue%22 [community-slack]: https://slack.grafana.com/ -[example-config]: ../../cmd/grafana-agent/agent-local-config.yaml +[example-config]: ../../cmd/alloy/example-config.river [go-modules]: https://golang.org/cmd/go/#hdr-Modules__module_versions__and_more diff --git a/internal/cmd/integration-tests/README.md b/internal/cmd/integration-tests/README.md index 353c439d44..e4df4c257b 100644 --- a/internal/cmd/integration-tests/README.md +++ b/internal/cmd/integration-tests/README.md @@ -2,7 +2,7 @@ This document provides an outline of how to run and add new integration tests to the project. -The purpose of these tests is to verify simple, happy-path pipelines to catch issues between the agent and external dependencies. +The purpose of these tests is to verify simple, happy-path pipelines to catch issues between Alloy and external dependencies. The external dependencies are launched as Docker containers. 
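Taken together, the renamed Makefile targets and the updated contributing docs earlier in this series amount to the following local development loop, shown here only as a sketch (your_config.river is the docs' own placeholder, and build/alloy is the ALLOY_BINARY default from the Makefile):

    # Sketch of the build-and-run loop after the rename (assumed paths).
    git clone https://github.com/grafana/agent.git
    cd agent
    make alloy                           # compiles ./cmd/alloy to build/alloy
    ./build/alloy run your_config.river  # run with a River configuration
    make lint test                       # checks expected before pushing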
@@ -16,7 +16,7 @@ Execute the integration tests using the following command: ### Flags -* `--skip-build`: Run the integration tests without building the agent (default: `false`) +* `--skip-build`: Run the integration tests without building Alloy (default: `false`) * `--test`: Specifies a particular directory within the tests directory to run (default: runs all tests) ## Adding new tests @@ -28,4 +28,4 @@ Follow these steps to add a new integration test to the project: 3. Within the new test directory, create a file named `config.river` to hold the pipeline configuration you want to test. 4. Create a `_test.go` file within the new test directory. This file should contain the Go code necessary to run the test and verify the data processing through the pipeline. - _NOTE_: The tests run concurrently. Each agent must tag its data with a label that corresponds to its specific configuration. This ensures the correct data verification during the Go testing process. + _NOTE_: The tests run concurrently. Each Alloy instance must tag its data with a label that corresponds to its specific configuration. This ensures the correct data verification during the Go testing process. diff --git a/internal/cmd/integration-tests/main.go b/internal/cmd/integration-tests/main.go index 7b48d21576..b21340ff34 100644 --- a/internal/cmd/integration-tests/main.go +++ b/internal/cmd/integration-tests/main.go @@ -20,7 +20,7 @@ func main() { } rootCmd.PersistentFlags().StringVar(&specificTest, "test", "", "Specific test directory to run") - rootCmd.PersistentFlags().BoolVar(&skipBuild, "skip-build", false, "Skip building the agent") + rootCmd.PersistentFlags().BoolVar(&skipBuild, "skip-build", false, "Skip building Alloy") if err := rootCmd.Execute(); err != nil { fmt.Println(err) @@ -33,7 +33,7 @@ func runIntegrationTests(cmd *cobra.Command, args []string) { defer cleanUpEnvironment() if !skipBuild { - buildAgent() + buildAlloy() } setupEnvironment() diff --git a/internal/cmd/integration-tests/utils.go b/internal/cmd/integration-tests/utils.go index 4b4291f3a2..baeeccb980 100644 --- a/internal/cmd/integration-tests/utils.go +++ b/internal/cmd/integration-tests/utils.go @@ -12,12 +12,12 @@ import ( ) const ( - agentBinaryPath = "../../../../../build/grafana-agent" + alloyBinaryPath = "../../../../../build/alloy" ) type TestLog struct { TestDir string - AgentLog string + AlloyLog string TestOutput string } @@ -33,8 +33,8 @@ func executeCommand(command string, args []string, taskDescription string) { } } -func buildAgent() { - executeCommand("make", []string{"-C", "../../..", "agent"}, "Building agent") +func buildAlloy() { + executeCommand("make", []string{"-C", "../../..", "alloy"}, "Building Alloy") } func setupEnvironment() { @@ -52,16 +52,16 @@ func runSingleTest(testDir string, port int) { dirName := filepath.Base(testDir) - var agentLogBuffer bytes.Buffer - cmd := exec.Command(agentBinaryPath, "run", "config.river", "--server.http.listen-addr", fmt.Sprintf("0.0.0.0:%d", port)) + var alloyLogBuffer bytes.Buffer + cmd := exec.Command(alloyBinaryPath, "run", "config.river", "--server.http.listen-addr", fmt.Sprintf("0.0.0.0:%d", port)) cmd.Dir = testDir - cmd.Stdout = &agentLogBuffer - cmd.Stderr = &agentLogBuffer + cmd.Stdout = &alloyLogBuffer + cmd.Stderr = &alloyLogBuffer if err := cmd.Start(); err != nil { logChan <- TestLog{ TestDir: dirName, - AgentLog: fmt.Sprintf("Failed to start agent: %v", err), + AlloyLog: fmt.Sprintf("Failed to start Alloy: %v", err), } return } @@ -75,12 +75,12 @@ func runSingleTest(testDir 
string, port int) { panic(err) } - agentLog := agentLogBuffer.String() + alloyLog := alloyLogBuffer.String() if errTest != nil { logChan <- TestLog{ TestDir: dirName, - AgentLog: agentLog, + AlloyLog: alloyLog, TestOutput: string(testOutput), } } @@ -129,7 +129,7 @@ func reportResults() { } fmt.Printf("Failure detected in %s:\n", log.TestDir) fmt.Println("Test output:", log.TestOutput) - fmt.Println("Agent logs:", log.AgentLog) + fmt.Println("Alloy logs:", log.AlloyLog) testsFailed++ } diff --git a/internal/tools/packaging_test/agent_linux_packages_test.go b/internal/tools/packaging_test/agent_linux_packages_test.go index 448daa698b..9bee704566 100644 --- a/internal/tools/packaging_test/agent_linux_packages_test.go +++ b/internal/tools/packaging_test/agent_linux_packages_test.go @@ -15,12 +15,12 @@ import ( "github.com/stretchr/testify/require" ) -// TestAgentLinuxPackages runs the entire test suite for the Linux packages. -func TestAgentLinuxPackages(t *testing.T) { - packageName := "grafana-agent" +// TestAlloyLinuxPackages runs the entire test suite for the Linux packages. +func TestAlloyLinuxPackages(t *testing.T) { + packageName := "alloy" fmt.Println("Building packages (this may take a while...)") - buildAgentPackages(t) + buildAlloyPackages(t) dockerPool, err := dockertest.NewPool("") require.NoError(t, err) @@ -53,7 +53,7 @@ func TestAgentLinuxPackages(t *testing.T) { } } -func buildAgentPackages(t *testing.T) { +func buildAlloyPackages(t *testing.T) { t.Helper() wd, err := os.Getwd() diff --git a/packaging/grafana-agent/grafana-agent.river b/packaging/alloy/alloy.river similarity index 87% rename from packaging/grafana-agent/grafana-agent.river rename to packaging/alloy/alloy.river index 56a8ea5b4a..34f6d0abe3 100644 --- a/packaging/grafana-agent/grafana-agent.river +++ b/packaging/alloy/alloy.river @@ -1,6 +1,6 @@ -// Sample config for Grafana Agent. +// Sample config for Alloy. // -// For a full configuration reference, see https://grafana.com/docs/agent +// For a full configuration reference, see https://grafana.com/docs/alloy logging { level = "warn" } @@ -15,7 +15,7 @@ prometheus.scrape "default" { prometheus.exporter.unix.default.targets, [{ // Self-collect metrics - job = "agent", + job = "alloy", __address__ = "127.0.0.1:12345", }], ) diff --git a/packaging/alloy/deb/alloy.service b/packaging/alloy/deb/alloy.service new file mode 100644 index 0000000000..d525e159e5 --- /dev/null +++ b/packaging/alloy/deb/alloy.service @@ -0,0 +1,20 @@ +[Unit] +Description= Vendor-agnostic OpenTelemetry Collector distribution with programmable pipelines +Documentation=https://grafana.com/docs/alloy +Wants=network-online.target +After=network-online.target + +[Service] +Restart=always +User=alloy +Environment=HOSTNAME=%H +Environment=AGENT_DEPLOY_MODE=deb +EnvironmentFile=/etc/default/alloy +WorkingDirectory=/var/lib/alloy +ExecStart=/usr/bin/alloy run $CUSTOM_ARGS --storage.path=/var/lib/alloy $CONFIG_FILE +ExecReload=/usr/bin/env kill -HUP $MAINPID +TimeoutStopSec=20s +SendSIGKILL=no + +[Install] +WantedBy=multi-user.target diff --git a/packaging/alloy/deb/control/postinst b/packaging/alloy/deb/control/postinst new file mode 100644 index 0000000000..79631bec0c --- /dev/null +++ b/packaging/alloy/deb/control/postinst @@ -0,0 +1,41 @@ +#!/bin/sh + +set -e + +# shellcheck disable=SC1091 +[ -f /etc/default/alloy ] && . 
/etc/default/alloy + +# Initial installation: $1 == configure +# Upgrade: $1 == configure, $2 == old version +case "$1" in + configure) + [ -z "$ALLOY_USER" ] && ALLOY_USER="alloy" + [ -z "$ALLOY_GROUP" ] && ALLOY_GROUP="alloy" + if ! getent group "$ALLOY_GROUP" > /dev/null 2>&1 ; then + groupadd -r "$ALLOY_GROUP" + fi + if ! getent passwd "$ALLOY_USER" > /dev/null 2>&1 ; then + useradd -m -r -g "$ALLOY_GROUP" -d /var/lib/alloy -s /sbin/nologin -c "alloy user" "$ALLOY_USER" + fi + + # Add Alloy user to groups used for reading logs. + if getent group adm > /dev/null 2>&1 ; then + usermod -a -G adm "$ALLOY_USER" + fi + if getent group systemd-journal > /dev/null 2>&1 ; then + usermod -a -G systemd-journal "$ALLOY_USER" + fi + + chown $ALLOY_USER:$ALLOY_GROUP /var/lib/alloy + chmod 770 /var/lib/alloy + + chmod 640 /etc/alloy.river + chown root:$ALLOY_GROUP /etc/alloy.river + + if [ -z ${2+x} ] && [ "$RESTART_ON_UPGRADE" = "true" ]; then + if command -v systemctl 2>/dev/null; then + systemctl daemon-reload + systemctl restart alloy + fi + fi +esac diff --git a/packaging/grafana-agent/deb/control/prerm b/packaging/alloy/deb/control/prerm similarity index 50% rename from packaging/grafana-agent/deb/control/prerm rename to packaging/alloy/deb/control/prerm index 1641bcc1b3..9730285248 100644 --- a/packaging/grafana-agent/deb/control/prerm +++ b/packaging/alloy/deb/control/prerm @@ -3,10 +3,10 @@ set -e # shellcheck disable=SC1091 -[ -f /etc/default/grafana-agent ] && . /etc/default/grafana-agent +[ -f /etc/default/alloy ] && . /etc/default/alloy if [ "$1" = "remove" ]; then if command -v systemctl 2>/dev/null; then - systemctl stop grafana-agent.service > /dev/null 2>&1 || : + systemctl stop alloy.service > /dev/null 2>&1 || : fi fi diff --git a/packaging/alloy/environment-file b/packaging/alloy/environment-file new file mode 100644 index 0000000000..2f2f7826f6 --- /dev/null +++ b/packaging/alloy/environment-file @@ -0,0 +1,16 @@ +## Path: +## Description: Grafana Alloy settings +## Type: string +## Default: "" +## ServiceRestart: alloy +# +# Command line options for Alloy. +# +# The configuration file holding the Alloy config. +CONFIG_FILE="/etc/alloy.river" + +# User-defined arguments to pass to the run command. +CUSTOM_ARGS="" + +# Restart on system upgrade. Defaults to true. +RESTART_ON_UPGRADE=true diff --git a/packaging/alloy/rpm/alloy.service b/packaging/alloy/rpm/alloy.service new file mode 100644 index 0000000000..c42dcb6a6a --- /dev/null +++ b/packaging/alloy/rpm/alloy.service @@ -0,0 +1,20 @@ +[Unit] +Description= Vendor-agnostic OpenTelemetry Collector distribution with programmable pipelines +Documentation=https://grafana.com/docs/alloy +Wants=network-online.target +After=network-online.target + +[Service] +Restart=always +User=alloy +Environment=HOSTNAME=%H +Environment=AGENT_DEPLOY_MODE=rpm +EnvironmentFile=/etc/sysconfig/alloy +WorkingDirectory=/var/lib/alloy +ExecStart=/usr/bin/alloy run $CUSTOM_ARGS --storage.path=/var/lib/alloy $CONFIG_FILE +ExecReload=/usr/bin/env kill -HUP $MAINPID +TimeoutStopSec=20s +SendSIGKILL=no + +[Install] +WantedBy=multi-user.target diff --git a/packaging/alloy/rpm/control/postinst b/packaging/alloy/rpm/control/postinst new file mode 100644 index 0000000000..963664c935 --- /dev/null +++ b/packaging/alloy/rpm/control/postinst @@ -0,0 +1,45 @@ +#!/bin/sh + +set -e + +# shellcheck disable=SC1091 +[ -f /etc/sysconfig/alloy ] && . 
/etc/sysconfig/alloy +[ -z "$ALLOY_USER" ] && ALLOY_USER="alloy" +[ -z "$ALLOY_GROUP" ] && ALLOY_GROUP="alloy" + +add_to_logging_groups() { + # Add Alloy user to groups used for reading logs. + if getent group adm > /dev/null 2>&1 ; then + usermod -a -G adm "$ALLOY_USER" + fi + if getent group systemd-journal > /dev/null 2>&1 ; then + usermod -a -G systemd-journal "$ALLOY_USER" + fi +} + +# Initial installation: $1 == 1 +# Upgrade: $1 == 2, and configured to restart on upgrade +if [ "$1" -eq 1 ] ; then + if ! getent group "$ALLOY_GROUP" > /dev/null 2>&1 ; then + groupadd -r "$ALLOY_GROUP" + fi + if ! getent passwd "$ALLOY_USER" > /dev/null 2>&1 ; then + useradd -r -m -g "$ALLOY_GROUP" -d /var/lib/alloy -s /sbin/nologin -c "alloy user" "$ALLOY_USER" + fi + + add_to_logging_groups + + chown $ALLOY_USER:$ALLOY_GROUP /var/lib/alloy + chmod 770 /var/lib/alloy + + chmod 640 /etc/alloy.river + chown root:$ALLOY_GROUP /etc/alloy.river + +elif [ "$1" -ge 2 ] ; then + add_to_logging_groups + + if [ "$RESTART_ON_UPGRADE" = "true" ]; then + systemctl daemon-reload + systemctl restart alloy + fi +fi diff --git a/packaging/alloy/rpm/control/prerm b/packaging/alloy/rpm/control/prerm new file mode 100644 index 0000000000..adc876ab40 --- /dev/null +++ b/packaging/alloy/rpm/control/prerm @@ -0,0 +1,20 @@ +#!/bin/sh + +set -e + +# shellcheck disable=SC1091 +[ -f /etc/sysconfig/alloy ] && . /etc/sysconfig/alloy + +# final uninstallation $1=0 +# If other copies of this RPM are installed, then $1>0 + +if [ "$1" -eq 0 ] ; then + if [ -x /bin/systemctl ] ; then + /bin/systemctl stop alloy.service > /dev/null 2>&1 || : + elif [ -x /etc/init.d/alloy ] ; then + /etc/init.d/alloy stop + elif [ -x /etc/rc.d/init.d/alloy ] ; then + /etc/rc.d/init.d/alloy stop + fi +fi +exit 0 diff --git a/packaging/grafana-agent/rpm/gpg-sign.sh b/packaging/alloy/rpm/gpg-sign.sh similarity index 100% rename from packaging/grafana-agent/rpm/gpg-sign.sh rename to packaging/alloy/rpm/gpg-sign.sh diff --git a/packaging/grafana-agent/windows/config.river b/packaging/alloy/windows/config.river similarity index 100% rename from packaging/grafana-agent/windows/config.river rename to packaging/alloy/windows/config.river diff --git a/packaging/grafana-agent/windows/install_script.nsis b/packaging/alloy/windows/install_script.nsis similarity index 88% rename from packaging/grafana-agent/windows/install_script.nsis rename to packaging/alloy/windows/install_script.nsis index f4b1aa19c3..099eace2d3 100644 --- a/packaging/grafana-agent/windows/install_script.nsis +++ b/packaging/alloy/windows/install_script.nsis @@ -1,6 +1,6 @@ # This script does the following: # -# 1. Installs grafana-agent-windows-amd64.exe, grafana-agent-service-amd64.exe, and logo.ico. +# 1. Installs alloy-windows-amd64.exe, alloy-service-amd64.exe, and logo.ico. # 2. Creates a Start Menu shortcut. # 3. Builds an uninstaller. # 4. Adds uninstall information to the registry for Add/Remove Programs. @@ -12,7 +12,7 @@ Unicode true !include FileFunc.nsh !include .\macros.nsis -!define APPNAME "Grafana Agent" +!define APPNAME "Grafana Alloy" !define HELPURL "https://grafana.com/docs/alloy/latest" !define UPDATEURL "https://github.com/grafana/alloy/releases" !define ABOUTURL "https://github.com/grafana/alloy" @@ -59,7 +59,7 @@ Section "install" # stack, and must be popped after calling. # Preemptively stop the existing service if it's running. - nsExec::ExecToLog 'sc stop "Grafana Agent"' + nsExec::ExecToLog 'sc stop "Grafana Alloy"' Pop $0 # Configure the out path and copy files to it. 
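The deb and rpm postinst scripts above provision the runtime user the same way; condensed into one sketch (user, group, and paths exactly as in those scripts):

    # Sketch of the provisioning pattern shared by the postinst scripts.
    getent group alloy > /dev/null 2>&1 || groupadd -r alloy
    getent passwd alloy > /dev/null 2>&1 || \
        useradd -r -m -g alloy -d /var/lib/alloy -s /sbin/nologin -c "alloy user" alloy
    # adm and systemd-journal membership grants read access to host logs.
    getent group adm > /dev/null 2>&1 && usermod -a -G adm alloy
    getent group systemd-journal > /dev/null 2>&1 && usermod -a -G systemd-journal alloy
    chown alloy:alloy /var/lib/alloy && chmod 770 /var/lib/alloy
    chown root:alloy /etc/alloy.river && chmod 640 /etc/alloy.river

The configuration file stays root-owned and group-readable (640), so the service account can read /etc/alloy.river but cannot rewrite its own configuration.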
@@ -70,8 +70,8 @@ Section "install" Exists: SetOutPath "$INSTDIR" - File "..\..\..\dist.temp\grafana-agent-windows-amd64.exe" - File "..\..\..\dist.temp\grafana-agent-service-windows-amd64.exe" + File "..\..\..\dist.temp\alloy-windows-amd64.exe" + File "..\..\..\dist.temp\alloy-service-windows-amd64.exe" File "logo.ico" # Create an uninstaller at the same pathFunctionEnd @@ -97,15 +97,15 @@ Section "install" Call InitializeRegistry # Create the service. - nsExec::ExecToLog 'sc create "Grafana Agent" start= delayed-auto binpath= "$INSTDIR\grafana-agent-service-windows-amd64.exe"' + nsExec::ExecToLog 'sc create "Grafana Alloy" start= delayed-auto binpath= "$INSTDIR\alloy-service-windows-amd64.exe"' Pop $0 # Start the service. - nsExec::ExecToLog 'sc start "Grafana Agent"' + nsExec::ExecToLog 'sc start "Grafana Alloy"' Pop $0 - # Auto-restart agent on failure. Reset failure counter after 60 seconds without failure - nsExec::ExecToLog `sc failure "Grafana Agent" reset= 60 actions= restart/5000 reboot= "Grafana Agent has failed. Restarting in 5 seconds"` + # Auto-restart Alloy on failure. Reset failure counter after 60 seconds without failure + nsExec::ExecToLog `sc failure "Grafana Alloy" reset= 60 actions= restart/5000 reboot= "Grafana Alloy has failed. Restarting in 5 seconds"` Pop $0 SectionEnd @@ -129,13 +129,13 @@ FunctionEnd # InitializeRegistry initializes the keys in the registry that the service # runner uses. If the registry values already exist, they are not overwritten. Function InitializeRegistry - !define REGKEY "HKLM\Software\Grafana\Grafana Agent" + !define REGKEY "HKLM\Software\Grafana\Alloy" # Define the default key, which points to the service. nsExec::ExecToLog 'Reg.exe query "${REGKEY}" /reg:64 /ve' Pop $0 ${If} $0 == 1 - nsExec::ExecToLog 'Reg.exe add "${REGKEY}" /reg:64 /ve /d "$INSTDIR\grafana-agent-windows-amd64.exe"' + nsExec::ExecToLog 'Reg.exe add "${REGKEY}" /reg:64 /ve /d "$INSTDIR\alloy-windows-amd64.exe"' Pop $0 # Ignore return result ${EndIf} @@ -209,9 +209,9 @@ Section "uninstall" DetailPrint "Starting uninstaller." # Stop and remove service. - nsExec::ExecToLog 'sc stop "Grafana Agent"' + nsExec::ExecToLog 'sc stop "Grafana Alloy"' Pop $0 - nsExec::ExecToLog 'sc delete "Grafana Agent"' + nsExec::ExecToLog 'sc delete "Grafana Alloy"' Pop $0 RMDir /r "$SMPROGRAMS\${APPNAME}" # Start Menu folder. @@ -219,7 +219,7 @@ Section "uninstall" RMDir /r "$APPDATA\${APPNAME}" # Application data. # Remove service and uninstaller information from the registry. 
- nsExec::ExecToLog 'Reg.exe delete "HKLM\SOFTWARE\Grafana\Grafana Agent" /reg:64 /f' + nsExec::ExecToLog 'Reg.exe delete "HKLM\SOFTWARE\Grafana\Alloy" /reg:64 /f' Pop $0 nsExec::ExecToLog 'Reg.exe delete "HKLM\SOFTWARE\Microsoft\Windows\CurrentVersion\Uninstall\${APPNAME}" /reg:64 /f' Pop $0 diff --git a/packaging/grafana-agent/windows/logo.ico b/packaging/alloy/windows/logo.ico similarity index 100% rename from packaging/grafana-agent/windows/logo.ico rename to packaging/alloy/windows/logo.ico diff --git a/packaging/grafana-agent/windows/macros.nsis b/packaging/alloy/windows/macros.nsis similarity index 100% rename from packaging/grafana-agent/windows/macros.nsis rename to packaging/alloy/windows/macros.nsis diff --git a/packaging/grafana-agent/deb/control/postinst b/packaging/grafana-agent/deb/control/postinst deleted file mode 100644 index a06af7a86f..0000000000 --- a/packaging/grafana-agent/deb/control/postinst +++ /dev/null @@ -1,41 +0,0 @@ -#!/bin/sh - -set -e - -# shellcheck disable=SC1091 -[ -f /etc/default/grafana-agent ] && . /etc/default/grafana-agent - -# Initial installation: $1 == configure -# Upgrade: $1 == configure, $2 == old version -case "$1" in - configure) - [ -z "$GRAFANA_AGENT_USER" ] && GRAFANA_AGENT_USER="grafana-agent" - [ -z "$GRAFANA_AGENT_GROUP" ] && GRAFANA_AGENT_GROUP="grafana-agent" - if ! getent group "$GRAFANA_AGENT_GROUP" > /dev/null 2>&1 ; then - groupadd -r "$GRAFANA_AGENT_GROUP" - fi - if ! getent passwd "$GRAFANA_AGENT_USER" > /dev/null 2>&1 ; then - useradd -m -r -g "$GRAFANA_AGENT_GROUP" -d /var/lib/grafana-agent -s /sbin/nologin -c "grafana-agent user" "$GRAFANA_AGENT_USER" - fi - - # Add grafana agent user to groups used for reading logs. - if getent group adm > /dev/null 2>&1 ; then - usermod -a -G adm "$GRAFANA_AGENT_USER" - fi - if getent group systemd-journal > /dev/null 2>&1 ; then - usermod -a -G systemd-journal "$GRAFANA_AGENT_USER" - fi - - chown $GRAFANA_AGENT_USER:$GRAFANA_AGENT_GROUP /var/lib/grafana-agent - chmod 770 /var/lib/grafana-agent - - chmod 640 /etc/grafana-agent.river - chown root:$GRAFANA_AGENT_GROUP /etc/grafana-agent.river - - if [ -z ${2+x} ] && [ "$RESTART_ON_UPGRADE" = "true" ]; then - if command -v systemctl 2>/dev/null; then - systemctl daemon-reload - systemctl restart grafana-agent - fi - fi -esac diff --git a/packaging/grafana-agent/deb/grafana-agent.service b/packaging/grafana-agent/deb/grafana-agent.service deleted file mode 100644 index 7177972b45..0000000000 --- a/packaging/grafana-agent/deb/grafana-agent.service +++ /dev/null @@ -1,20 +0,0 @@ -[Unit] -Description=Vendor-neutral programmable observability pipelines. 
-Documentation=https://grafana.com/docs/agent -Wants=network-online.target -After=network-online.target - -[Service] -Restart=always -User=grafana-agent -Environment=HOSTNAME=%H -Environment=AGENT_DEPLOY_MODE=deb -EnvironmentFile=/etc/default/grafana-agent -WorkingDirectory=/var/lib/grafana-agent -ExecStart=/usr/bin/grafana-agent run $CUSTOM_ARGS --storage.path=/var/lib/grafana-agent $CONFIG_FILE -ExecReload=/usr/bin/env kill -HUP $MAINPID -TimeoutStopSec=20s -SendSIGKILL=no - -[Install] -WantedBy=multi-user.target diff --git a/packaging/grafana-agent/environment-file b/packaging/grafana-agent/environment-file deleted file mode 100644 index e8b15ea74a..0000000000 --- a/packaging/grafana-agent/environment-file +++ /dev/null @@ -1,16 +0,0 @@ -## Path: -## Description: Grafana Agent settings -## Type: string -## Default: "" -## ServiceRestart: grafana-agent -# -# Command line options for grafana-agent -# -# The configuration file holding the agent config. -CONFIG_FILE="/etc/grafana-agent.river" - -# User-defined arguments to pass to the run command. -CUSTOM_ARGS="" - -# Restart on system upgrade. Defaults to true. -RESTART_ON_UPGRADE=true diff --git a/packaging/grafana-agent/rpm/control/postinst b/packaging/grafana-agent/rpm/control/postinst deleted file mode 100644 index e908923dc2..0000000000 --- a/packaging/grafana-agent/rpm/control/postinst +++ /dev/null @@ -1,49 +0,0 @@ -#!/bin/sh - -set -e - -# NOTE: the grafana-agent group is shared with the grafana-agent package to -# make it easier to migrate between the two. A unique user is still used to -# give them different home directories. - -# shellcheck disable=SC1091 -[ -f /etc/sysconfig/grafana-agent ] && . /etc/sysconfig/grafana-agent -[ -z "$AGENT_USER" ] && AGENT_USER="grafana-agent" -[ -z "$AGENT_GROUP" ] && AGENT_GROUP="grafana-agent" - -add_to_logging_groups() { - # Add grafana agent user to groups used for reading logs. - if getent group adm > /dev/null 2>&1 ; then - usermod -a -G adm "$AGENT_USER" - fi - if getent group systemd-journal > /dev/null 2>&1 ; then - usermod -a -G systemd-journal "$AGENT_USER" - fi -} - -# Initial installation: $1 == 1 -# Upgrade: $1 == 2, and configured to restart on upgrade -if [ "$1" -eq 1 ] ; then - if ! getent group "$AGENT_GROUP" > /dev/null 2>&1 ; then - groupadd -r "$AGENT_GROUP" - fi - if ! getent passwd "$AGENT_USER" > /dev/null 2>&1 ; then - useradd -r -m -g "$AGENT_GROUP" -d /var/lib/grafana-agent -s /sbin/nologin -c "grafana-agent user" "$AGENT_USER" - fi - - add_to_logging_groups - - chown $AGENT_USER:$AGENT_GROUP /var/lib/grafana-agent - chmod 770 /var/lib/grafana-agent - - chmod 640 /etc/grafana-agent.river - chown root:$AGENT_GROUP /etc/grafana-agent.river - -elif [ "$1" -ge 2 ] ; then - add_to_logging_groups - - if [ "$RESTART_ON_UPGRADE" = "true" ]; then - systemctl daemon-reload - systemctl restart grafana-agent - fi -fi diff --git a/packaging/grafana-agent/rpm/control/prerm b/packaging/grafana-agent/rpm/control/prerm deleted file mode 100644 index 7acc646efe..0000000000 --- a/packaging/grafana-agent/rpm/control/prerm +++ /dev/null @@ -1,20 +0,0 @@ -#!/bin/sh - -set -e - -# shellcheck disable=SC1091 -[ -f /etc/sysconfig/grafana-agent ] && . 
/etc/sysconfig/grafana-agent - -# final uninstallation $1=0 -# If other copies of this RPM are installed, then $1>0 - -if [ "$1" -eq 0 ] ; then - if [ -x /bin/systemctl ] ; then - /bin/systemctl stop grafana-agent.service > /dev/null 2>&1 || : - elif [ -x /etc/init.d/grafana-agent ] ; then - /etc/init.d/grafana-agent stop - elif [ -x /etc/rc.d/init.d/grafana-agent ] ; then - /etc/rc.d/init.d/grafana-agent stop - fi -fi -exit 0 diff --git a/packaging/grafana-agent/rpm/grafana-agent.service b/packaging/grafana-agent/rpm/grafana-agent.service deleted file mode 100644 index ea3165626d..0000000000 --- a/packaging/grafana-agent/rpm/grafana-agent.service +++ /dev/null @@ -1,20 +0,0 @@ -[Unit] -Description=Vendor-neutral programmable observability pipelines. -Documentation=https://grafana.com/docs/agent -Wants=network-online.target -After=network-online.target - -[Service] -Restart=always -User=grafana-agent -Environment=HOSTNAME=%H -Environment=AGENT_DEPLOY_MODE=rpm -EnvironmentFile=/etc/sysconfig/grafana-agent -WorkingDirectory=/var/lib/grafana-agent -ExecStart=/usr/bin/grafana-agent run $CUSTOM_ARGS --storage.path=/var/lib/grafana-agent $CONFIG_FILE -ExecReload=/usr/bin/env kill -HUP $MAINPID -TimeoutStopSec=20s -SendSIGKILL=no - -[Install] -WantedBy=multi-user.target diff --git a/tools/ci/docker-containers b/tools/ci/docker-containers index a712dba8cc..cbd9d7430e 100755 --- a/tools/ci/docker-containers +++ b/tools/ci/docker-containers @@ -7,10 +7,10 @@ # from a Drone trigger. set -euxo pipefail -RELEASE_AGENT_IMAGE=grafana/agent -DEVELOPMENT_AGENT_IMAGE=us-docker.pkg.dev/grafanalabs-dev/docker-alloy-dev/alloy +RELEASE_ALLOY_IMAGE=grafana/alloy +DEVELOPMENT_ALLOY_IMAGE=us-docker.pkg.dev/grafanalabs-dev/docker-alloy-dev/alloy -DEFAULT_AGENT_IMAGE=${RELEASE_AGENT_IMAGE} +DEFAULT_ALLOY_IMAGE=${RELEASE_ALLOY_IMAGE} # Environment variables used throughout this script. These must be set # otherwise bash will fail with an "unbound variable" error because of the `set @@ -23,10 +23,10 @@ export DRONE_TAG=${DRONE_TAG:-} export DEVELOPMENT=${DEVELOPMENT:-} if [ -n "$DEVELOPMENT" ]; then - DEFAULT_AGENT_IMAGE=${DEVELOPMENT_AGENT_IMAGE} + DEFAULT_ALLOY_IMAGE=${DEVELOPMENT_ALLOY_IMAGE} fi -export AGENT_IMAGE=${DEFAULT_AGENT_IMAGE} +export ALLOY_IMAGE=${DEFAULT_ALLOY_IMAGE} # We need to determine what version to assign to built binaries. If containers # are being built from a Drone tag trigger, we force the version to come from the @@ -62,19 +62,19 @@ fi export BUILD_PLATFORMS=linux/amd64,linux/arm64,linux/ppc64le,linux/s390x case "$TARGET_CONTAINER" in - agent) + alloy) docker buildx build --push \ --platform $BUILD_PLATFORMS \ --build-arg RELEASE_BUILD=1 \ --build-arg VERSION="$VERSION" \ - -t "$AGENT_IMAGE:$TAG_VERSION" \ - -t "$AGENT_IMAGE:$BRANCH_TAG" \ - -f cmd/grafana-agent/Dockerfile \ + -t "$ALLOY_IMAGE:$TAG_VERSION" \ + -t "$ALLOY_IMAGE:$BRANCH_TAG" \ + -f cmd/alloy/Dockerfile \ . 
;; *) - echo "Usage: $0 agent" + echo "Usage: $0 alloy" exit 1 ;; esac diff --git a/tools/ci/docker-containers-windows b/tools/ci/docker-containers-windows index e1b9f3eaed..f43db76536 100644 --- a/tools/ci/docker-containers-windows +++ b/tools/ci/docker-containers-windows @@ -22,8 +22,7 @@ export DRONE_BRANCH=${DRONE_BRANCH:-} export DOCKER_LOGIN=${DOCKER_LOGIN:-} export DOCKER_PASSWORD=${DOCKER_PASSWORD:-} -export AGENT_IMAGE=grafana/agent -export AGENTCTL_IMAGE=grafana/agentctl +export ALLOY_IMAGE=grafana/alloy if [ -n "$DRONE_TAG" ]; then VERSION=$DRONE_TAG @@ -50,28 +49,18 @@ else fi case "$TARGET_CONTAINER" in - agent) - docker build \ - -t "$AGENT_IMAGE:$VERSION_TAG" \ - -t "$AGENT_IMAGE:$BRANCH_TAG" \ - --build-arg VERSION="$VERSION" \ - --build-arg RELEASE_BUILD=1 \ - -f ./cmd/grafana-agent/Dockerfile.windows \ - . - ;; - - agentctl) - docker build \ - -t "$AGENTCTL_IMAGE:$VERSION_TAG" \ - -t "$AGENTCTL_IMAGE:$BRANCH_TAG" \ - --build-arg VERSION="$VERSION" \ - --build-arg RELEASE_BUILD=1 \ - -f ./cmd/grafana-agentctl/Dockerfile.windows \ + alloy) + docker build \ + -t "$ALLOY_IMAGE:$VERSION_TAG" \ + -t "$ALLOY_IMAGE:$BRANCH_TAG" \ + --build-arg VERSION="$VERSION" \ + --build-arg RELEASE_BUILD=1 \ + -f ./cmd/alloy/Dockerfile.windows \ . ;; *) - echo "Usage: $0 agent|agentctl" + echo "Usage: $0 alloy" exit 1 ;; esac @@ -81,18 +70,13 @@ if [ -n "$DOCKER_LOGIN" ] && [ -n "$DOCKER_PASSWORD" ]; then docker login -u "$DOCKER_LOGIN" -p "$DOCKER_PASSWORD" case "$TARGET_CONTAINER" in - agent) - docker push "$AGENT_IMAGE:$VERSION_TAG" - docker push "$AGENT_IMAGE:$BRANCH_TAG" - ;; - - agentctl) - docker push "$AGENTCTL_IMAGE:$VERSION_TAG" - docker push "$AGENTCTL_IMAGE:$BRANCH_TAG" + alloy) + docker push "$ALLOY_IMAGE:$VERSION_TAG" + docker push "$ALLOY_IMAGE:$BRANCH_TAG" ;; *) - echo "Usage: $0 agent|agentctl" + echo "Usage: $0 alloy" exit 1 ;; esac diff --git a/tools/make/build-container.mk b/tools/make/build-container.mk index 0daf5becb0..58d23e00b0 100644 --- a/tools/make/build-container.mk +++ b/tools/make/build-container.mk @@ -47,8 +47,8 @@ DOCKER_OPTS ?= -it # DOCKER_OPTS if they do. 
# -GO_CACHE_VOLUME := grafana-agent-build-container-gocache -GO_MODCACHE_VOLUME := grafana-agent-build-container-gomodcache +GO_CACHE_VOLUME := alloy-build-container-gocache +GO_MODCACHE_VOLUME := alloy-build-container-gomodcache define volume_exists $(shell docker volume inspect $(1) >/dev/null 2>&1 && echo 1 || echo "") diff --git a/tools/make/packaging.mk b/tools/make/packaging.mk index bd372f8165..dc24661f8a 100644 --- a/tools/make/packaging.mk +++ b/tools/make/packaging.mk @@ -3,7 +3,7 @@ PARENT_MAKEFILE := $(firstword $(MAKEFILE_LIST)) .PHONY: dist clean-dist -dist: dist-agent-binaries dist-agent-packages dist-agent-installer +dist: dist-alloy-binaries dist-alloy-packages dist-alloy-installer clean-dist: rm -rf ./dist/* ./dist.temp/* @@ -17,179 +17,179 @@ clean-dist: PACKAGING_VARS = RELEASE_BUILD=1 GO_TAGS="$(GO_TAGS)" GOOS=$(GOOS) GOARCH=$(GOARCH) GOARM=$(GOARM) GOEXPERIMENT=$(GOEXPERIMENT) # -# agent release binaries +# Alloy release binaries # -dist-agent-binaries: dist/grafana-agent-linux-amd64 \ - dist/grafana-agent-linux-arm64 \ - dist/grafana-agent-linux-ppc64le \ - dist/grafana-agent-linux-s390x \ - dist/grafana-agent-darwin-amd64 \ - dist/grafana-agent-darwin-arm64 \ - dist/grafana-agent-windows-amd64.exe \ - dist/grafana-agent-freebsd-amd64 - -dist/grafana-agent-linux-amd64: GO_TAGS += netgo builtinassets promtail_journal_enabled -dist/grafana-agent-linux-amd64: GOOS := linux -dist/grafana-agent-linux-amd64: GOARCH := amd64 -dist/grafana-agent-linux-amd64: generate-ui - $(PACKAGING_VARS) AGENT_BINARY=$@ "$(MAKE)" -f $(PARENT_MAKEFILE) agent - -dist/grafana-agent-linux-arm64: GO_TAGS += netgo builtinassets promtail_journal_enabled -dist/grafana-agent-linux-arm64: GOOS := linux -dist/grafana-agent-linux-arm64: GOARCH := arm64 -dist/grafana-agent-linux-arm64: generate-ui - $(PACKAGING_VARS) AGENT_BINARY=$@ "$(MAKE)" -f $(PARENT_MAKEFILE) agent - -dist/grafana-agent-linux-ppc64le: GO_TAGS += netgo builtinassets promtail_journal_enabled -dist/grafana-agent-linux-ppc64le: GOOS := linux -dist/grafana-agent-linux-ppc64le: GOARCH := ppc64le -dist/grafana-agent-linux-ppc64le: generate-ui - $(PACKAGING_VARS) AGENT_BINARY=$@ "$(MAKE)" -f $(PARENT_MAKEFILE) agent - -dist/grafana-agent-linux-s390x: GO_TAGS += netgo builtinassets promtail_journal_enabled -dist/grafana-agent-linux-s390x: GOOS := linux -dist/grafana-agent-linux-s390x: GOARCH := s390x -dist/grafana-agent-linux-s390x: generate-ui - $(PACKAGING_VARS) AGENT_BINARY=$@ "$(MAKE)" -f $(PARENT_MAKEFILE) agent - -dist/grafana-agent-darwin-amd64: GO_TAGS += netgo builtinassets -dist/grafana-agent-darwin-amd64: GOOS := darwin -dist/grafana-agent-darwin-amd64: GOARCH := amd64 -dist/grafana-agent-darwin-amd64: generate-ui - $(PACKAGING_VARS) AGENT_BINARY=$@ "$(MAKE)" -f $(PARENT_MAKEFILE) agent - -dist/grafana-agent-darwin-arm64: GO_TAGS += netgo builtinassets -dist/grafana-agent-darwin-arm64: GOOS := darwin -dist/grafana-agent-darwin-arm64: GOARCH := arm64 -dist/grafana-agent-darwin-arm64: generate-ui - $(PACKAGING_VARS) AGENT_BINARY=$@ "$(MAKE)" -f $(PARENT_MAKEFILE) agent +dist-alloy-binaries: dist/alloy-linux-amd64 \ + dist/alloy-linux-arm64 \ + dist/alloy-linux-ppc64le \ + dist/alloy-linux-s390x \ + dist/alloy-darwin-amd64 \ + dist/alloy-darwin-arm64 \ + dist/alloy-windows-amd64.exe \ + dist/alloy-freebsd-amd64 + +dist/alloy-linux-amd64: GO_TAGS += netgo builtinassets promtail_journal_enabled +dist/alloy-linux-amd64: GOOS := linux +dist/alloy-linux-amd64: GOARCH := amd64 +dist/alloy-linux-amd64: generate-ui + $(PACKAGING_VARS) 
ALLOY_BINARY=$@ "$(MAKE)" -f $(PARENT_MAKEFILE) alloy + +dist/alloy-linux-arm64: GO_TAGS += netgo builtinassets promtail_journal_enabled +dist/alloy-linux-arm64: GOOS := linux +dist/alloy-linux-arm64: GOARCH := arm64 +dist/alloy-linux-arm64: generate-ui + $(PACKAGING_VARS) ALLOY_BINARY=$@ "$(MAKE)" -f $(PARENT_MAKEFILE) alloy + +dist/alloy-linux-ppc64le: GO_TAGS += netgo builtinassets promtail_journal_enabled +dist/alloy-linux-ppc64le: GOOS := linux +dist/alloy-linux-ppc64le: GOARCH := ppc64le +dist/alloy-linux-ppc64le: generate-ui + $(PACKAGING_VARS) ALLOY_BINARY=$@ "$(MAKE)" -f $(PARENT_MAKEFILE) alloy + +dist/alloy-linux-s390x: GO_TAGS += netgo builtinassets promtail_journal_enabled +dist/alloy-linux-s390x: GOOS := linux +dist/alloy-linux-s390x: GOARCH := s390x +dist/alloy-linux-s390x: generate-ui + $(PACKAGING_VARS) ALLOY_BINARY=$@ "$(MAKE)" -f $(PARENT_MAKEFILE) alloy + +dist/alloy-darwin-amd64: GO_TAGS += netgo builtinassets +dist/alloy-darwin-amd64: GOOS := darwin +dist/alloy-darwin-amd64: GOARCH := amd64 +dist/alloy-darwin-amd64: generate-ui + $(PACKAGING_VARS) ALLOY_BINARY=$@ "$(MAKE)" -f $(PARENT_MAKEFILE) alloy + +dist/alloy-darwin-arm64: GO_TAGS += netgo builtinassets +dist/alloy-darwin-arm64: GOOS := darwin +dist/alloy-darwin-arm64: GOARCH := arm64 +dist/alloy-darwin-arm64: generate-ui + $(PACKAGING_VARS) ALLOY_BINARY=$@ "$(MAKE)" -f $(PARENT_MAKEFILE) alloy # NOTE(rfratto): do not use netgo when building Windows binaries, which # prevents DNS short names from being resovable. See grafana/agent#4665. # # TODO(rfratto): add netgo back to Windows builds if a version of Go is # released which natively supports resolving DNS short names on Windows. -dist/grafana-agent-windows-amd64.exe: GO_TAGS += builtinassets -dist/grafana-agent-windows-amd64.exe: GOOS := windows -dist/grafana-agent-windows-amd64.exe: GOARCH := amd64 -dist/grafana-agent-windows-amd64.exe: generate-ui - $(PACKAGING_VARS) AGENT_BINARY=$@ "$(MAKE)" -f $(PARENT_MAKEFILE) agent +dist/alloy-windows-amd64.exe: GO_TAGS += builtinassets +dist/alloy-windows-amd64.exe: GOOS := windows +dist/alloy-windows-amd64.exe: GOARCH := amd64 +dist/alloy-windows-amd64.exe: generate-ui + $(PACKAGING_VARS) ALLOY_BINARY=$@ "$(MAKE)" -f $(PARENT_MAKEFILE) alloy # NOTE(rfratto): do not use netgo when building Windows binaries, which # prevents DNS short names from being resovable. See grafana/agent#4665. # # TODO(rfratto): add netgo back to Windows builds if a version of Go is # released which natively supports resolving DNS short names on Windows. -dist/grafana-agent-freebsd-amd64: GO_TAGS += netgo builtinassets -dist/grafana-agent-freebsd-amd64: GOOS := freebsd -dist/grafana-agent-freebsd-amd64: GOARCH := amd64 -dist/grafana-agent-freebsd-amd64: generate-ui - $(PACKAGING_VARS) AGENT_BINARY=$@ "$(MAKE)" -f $(PARENT_MAKEFILE) agent +dist/alloy-freebsd-amd64: GO_TAGS += netgo builtinassets +dist/alloy-freebsd-amd64: GOOS := freebsd +dist/alloy-freebsd-amd64: GOARCH := amd64 +dist/alloy-freebsd-amd64: generate-ui + $(PACKAGING_VARS) ALLOY_BINARY=$@ "$(MAKE)" -f $(PARENT_MAKEFILE) alloy # -# agent-service release binaries. +# alloy-service release binaries. # -# agent-service release binaries are intermediate build assets used for +# alloy-service release binaries are intermediate build assets used for # producing Windows system packages. As such, they are built in a dist.temp # directory instead of the normal dist directory. # # Only targets needed for system packages are used here. 
# -dist-agent-service-binaries: dist.temp/grafana-agent-service-windows-amd64.exe +dist-alloy-service-binaries: dist.temp/alloy-service-windows-amd64.exe -dist.temp/grafana-agent-service-windows-amd64.exe: GO_TAGS += builtinassets -dist.temp/grafana-agent-service-windows-amd64.exe: GOOS := windows -dist.temp/grafana-agent-service-windows-amd64.exe: GOARCH := amd64 -dist.temp/grafana-agent-service-windows-amd64.exe: generate-ui - $(PACKAGING_VARS) SERVICE_BINARY=$@ "$(MAKE)" -f $(PARENT_MAKEFILE) agent-service +dist.temp/alloy-service-windows-amd64.exe: GO_TAGS += builtinassets +dist.temp/alloy-service-windows-amd64.exe: GOOS := windows +dist.temp/alloy-service-windows-amd64.exe: GOARCH := amd64 +dist.temp/alloy-service-windows-amd64.exe: generate-ui + $(PACKAGING_VARS) SERVICE_BINARY=$@ "$(MAKE)" -f $(PARENT_MAKEFILE) alloy-service # -# DEB and RPM grafana-agent packages. +# DEB and RPM alloy packages. # -AGENT_ENVIRONMENT_FILE_rpm := /etc/sysconfig/grafana-agent -AGENT_ENVIRONMENT_FILE_deb := /etc/default/grafana-agent +ALLOY_ENVIRONMENT_FILE_rpm := /etc/sysconfig/alloy +ALLOY_ENVIRONMENT_FILE_deb := /etc/default/alloy -# generate_agent_fpm(deb|rpm, package arch, agent arch, output file) -define generate_agent_fpm = - fpm -s dir -v $(AGENT_PACKAGE_VERSION) -a $(2) \ - -n grafana-agent --iteration $(AGENT_PACKAGE_RELEASE) -f \ +# generate_alloy_fpm(deb|rpm, package arch, Alloy arch, output file) +define generate_alloy_fpm = + fpm -s dir -v $(ALLOY_PACKAGE_VERSION) -a $(2) \ + -n alloy --iteration $(ALLOY_PACKAGE_RELEASE) -f \ --log error \ --license "Apache 2.0" \ --vendor "Grafana Labs" \ - --url "https://github.com/grafana/agent" \ + --url "https://github.com/grafana/alloy" \ --rpm-digest sha256 \ -t $(1) \ - --after-install packaging/grafana-agent/$(1)/control/postinst \ - --before-remove packaging/grafana-agent/$(1)/control/prerm \ - --config-files /etc/grafana-agent.river \ - --config-files $(AGENT_ENVIRONMENT_FILE_$(1)) \ + --after-install packaging/alloy/$(1)/control/postinst \ + --before-remove packaging/alloy/$(1)/control/prerm \ + --config-files /etc/alloy.river \ + --config-files $(ALLOY_ENVIRONMENT_FILE_$(1)) \ --rpm-rpmbuild-define "_build_id_links none" \ --package $(4) \ - dist/grafana-agent-linux-$(3)=/usr/bin/grafana-agent \ - packaging/grafana-agent-/grafana-agent.river=/etc/grafana-agent.river \ - packaging/grafana-agent-/environment-file=$(AGENT_ENVIRONMENT_FILE_$(1)) \ - packaging/grafana-agent-/$(1)/grafana-agent.service=/usr/lib/systemd/system/grafana-agent.service + dist/alloy-linux-$(3)=/usr/bin/alloy \ + packaging/alloy/alloy.river=/etc/alloy.river \ + packaging/alloy/environment-file=$(ALLOY_ENVIRONMENT_FILE_$(1)) \ + packaging/alloy/$(1)/alloy.service=/usr/lib/systemd/system/alloy.service endef -AGENT_PACKAGE_VERSION := $(patsubst v%,%,$(VERSION)) -AGENT_PACKAGE_RELEASE := 1 -AGENT_PACKAGE_PREFIX := dist/grafana-agent-$(AGENT_PACKAGE_VERSION)-$(AGENT_PACKAGE_RELEASE) +ALLOY_PACKAGE_VERSION := $(patsubst v%,%,$(VERSION)) +ALLOY_PACKAGE_RELEASE := 1 +ALLOY_PACKAGE_PREFIX := dist/alloy-$(ALLOY_PACKAGE_VERSION)-$(ALLOY_PACKAGE_RELEASE) -.PHONY: dist-agent-packages -dist-agent-packages: dist-agent-packages-amd64 \ - dist-agent-packages-arm64 \ - dist-agent-packages-ppc64le \ - dist-agent-packages-s390x +.PHONY: dist-alloy-packages +dist-alloy-packages: dist-alloy-packages-amd64 \ + dist-alloy-packages-arm64 \ + dist-alloy-packages-ppc64le \ + dist-alloy-packages-s390x -.PHONY: dist-agent-packages-amd64 -dist-agent-packages-amd64: dist/grafana-agent-linux-amd64 
+.PHONY: dist-alloy-packages-amd64 +dist-alloy-packages-amd64: dist/alloy-linux-amd64 ifeq ($(USE_CONTAINER),1) $(RERUN_IN_CONTAINER) else - $(call generate_agent_fpm,deb,amd64,amd64,$(AGENT_PACKAGE_PREFIX).amd64.deb) - $(call generate_agent_fpm,rpm,x86_64,amd64,$(AGENT_PACKAGE_PREFIX).amd64.rpm) + $(call generate_alloy_fpm,deb,amd64,amd64,$(ALLOY_PACKAGE_PREFIX).amd64.deb) + $(call generate_alloy_fpm,rpm,x86_64,amd64,$(ALLOY_PACKAGE_PREFIX).amd64.rpm) endif -.PHONY: dist-agent-packages-arm64 -dist-agent-packages-arm64: dist/grafana-agent-linux-arm64 +.PHONY: dist-alloy-packages-arm64 +dist-alloy-packages-arm64: dist/alloy-linux-arm64 ifeq ($(USE_CONTAINER),1) $(RERUN_IN_CONTAINER) else - $(call generate_agent_fpm,deb,arm64,arm64,$(AGENT_PACKAGE_PREFIX).arm64.deb) - $(call generate_agent_fpm,rpm,aarch64,arm64,$(AGENT_PACKAGE_PREFIX).arm64.rpm) + $(call generate_alloy_fpm,deb,arm64,arm64,$(ALLOY_PACKAGE_PREFIX).arm64.deb) + $(call generate_alloy_fpm,rpm,aarch64,arm64,$(ALLOY_PACKAGE_PREFIX).arm64.rpm) endif -.PHONY: dist-agent-packages-ppc64le -dist-agent-packages-ppc64le: dist/grafana-agent-linux-ppc64le +.PHONY: dist-alloy-packages-ppc64le +dist-alloy-packages-ppc64le: dist/alloy-linux-ppc64le ifeq ($(USE_CONTAINER),1) $(RERUN_IN_CONTAINER) else - $(call generate_agent_fpm,deb,ppc64el,ppc64le,$(AGENT_PACKAGE_PREFIX).ppc64el.deb) - $(call generate_agent_fpm,rpm,ppc64le,ppc64le,$(AGENT_PACKAGE_PREFIX).ppc64le.rpm) + $(call generate_alloy_fpm,deb,ppc64el,ppc64le,$(ALLOY_PACKAGE_PREFIX).ppc64el.deb) + $(call generate_alloy_fpm,rpm,ppc64le,ppc64le,$(ALLOY_PACKAGE_PREFIX).ppc64le.rpm) endif -.PHONY: dist-agent-packages-s390x -dist-agent-packages-s390x: dist/grafana-agent-linux-s390x +.PHONY: dist-alloy-packages-s390x +dist-alloy-packages-s390x: dist/alloy-linux-s390x ifeq ($(USE_CONTAINER),1) $(RERUN_IN_CONTAINER) else - $(call generate_agent_fpm,deb,s390x,s390x,$(AGENT_PACKAGE_PREFIX).s390x.deb) - $(call generate_agent_fpm,rpm,s390x,s390x,$(AGENT_PACKAGE_PREFIX).s390x.rpm) + $(call generate_alloy_fpm,deb,s390x,s390x,$(ALLOY_PACKAGE_PREFIX).s390x.deb) + $(call generate_alloy_fpm,rpm,s390x,s390x,$(ALLOY_PACKAGE_PREFIX).s390x.rpm) endif # # Windows installer # -.PHONY: dist-agent-installer -dist-agent-installer: dist/grafana-agent-windows-amd64.exe dist.temp/grafana-agent-service-windows-amd64.exe +.PHONY: dist-alloy-installer +dist-alloy-installer: dist/alloy-windows-amd64.exe dist.temp/alloy-service-windows-amd64.exe ifeq ($(USE_CONTAINER),1) $(RERUN_IN_CONTAINER) else # quotes around mkdir are mandatory.
ref: https://github.com/grafana/agent/pull/5664#discussion_r1378796371 "mkdir" -p dist - makensis -V4 -DVERSION=$(VERSION) -DOUT="../../../dist/grafana-agent-installer.exe" ./packaging/grafana-agent/windows/install_script.nsis + makensis -V4 -DVERSION=$(VERSION) -DOUT="../../../dist/alloy-installer.exe" ./packaging/alloy/windows/install_script.nsis endif From ed59d412e3fb08e55fed31b6101eba8c62d6b97f Mon Sep 17 00:00:00 2001 From: Robert Fratto Date: Mon, 25 Mar 2024 14:12:47 -0400 Subject: [PATCH 040/136] syntax: rename module to github.com/grafana/alloy/syntax --- .../optional_secret.go | 8 ++-- .../optional_secret_test.go | 40 +++++++++---------- syntax/{rivertypes => alloytypes}/secret.go | 8 ++-- .../{rivertypes => alloytypes}/secret_test.go | 20 +++++----- syntax/ast/ast.go | 2 +- syntax/cmd/riverfmt/main.go | 6 +-- syntax/diag/diag.go | 2 +- syntax/diag/printer.go | 2 +- syntax/diag/printer_test.go | 4 +- syntax/encoding/riverjson/riverjson.go | 8 ++-- syntax/encoding/riverjson/riverjson_test.go | 10 ++--- syntax/go.mod | 2 +- syntax/internal/reflectutil/walk.go | 2 +- syntax/internal/reflectutil/walk_test.go | 4 +- syntax/internal/stdlib/stdlib.go | 6 +-- syntax/internal/syntaxtags/syntaxtags_test.go | 2 +- syntax/internal/value/decode.go | 2 +- .../internal/value/decode_benchmarks_test.go | 2 +- syntax/internal/value/decode_test.go | 2 +- syntax/internal/value/tag_cache.go | 2 +- syntax/internal/value/type_test.go | 2 +- syntax/internal/value/value.go | 2 +- syntax/internal/value/value_object.go | 2 +- syntax/internal/value/value_object_test.go | 2 +- syntax/internal/value/value_test.go | 2 +- syntax/parser/error_test.go | 6 +-- syntax/parser/internal.go | 8 ++-- syntax/parser/parser.go | 4 +- syntax/printer/printer.go | 4 +- syntax/printer/printer_test.go | 4 +- syntax/printer/walker.go | 4 +- syntax/scanner/identifier.go | 2 +- syntax/scanner/identifier_test.go | 2 +- syntax/scanner/scanner.go | 2 +- syntax/scanner/scanner_test.go | 2 +- syntax/{river.go => syntax.go} | 10 ++--- syntax/{river_test.go => syntax_test.go} | 4 +- syntax/token/builder/builder.go | 8 ++-- syntax/token/builder/builder_test.go | 8 ++-- syntax/token/builder/nested_defaults_test.go | 8 ++-- syntax/token/builder/token.go | 6 +-- syntax/token/builder/value_tokens.go | 6 +-- syntax/types.go | 4 +- syntax/vm/constant.go | 4 +- syntax/vm/error.go | 10 ++--- syntax/vm/op_binary.go | 12 +++--- syntax/vm/op_binary_test.go | 10 ++--- syntax/vm/op_unary.go | 4 +- syntax/vm/struct_decoder.go | 10 ++--- syntax/vm/tag_cache.go | 2 +- syntax/vm/vm.go | 12 +++--- syntax/vm/vm_benchmarks_test.go | 4 +- syntax/vm/vm_block_test.go | 6 +-- syntax/vm/vm_errors_test.go | 4 +- syntax/vm/vm_stdlib_test.go | 12 +++--- syntax/vm/vm_test.go | 8 ++-- 56 files changed, 167 insertions(+), 167 deletions(-) rename syntax/{rivertypes => alloytypes}/optional_secret.go (93%) rename syntax/{rivertypes => alloytypes}/optional_secret_test.go (62%) rename syntax/{rivertypes => alloytypes}/secret.go (91%) rename syntax/{rivertypes => alloytypes}/secret_test.go (65%) rename syntax/{river.go => syntax.go} (98%) rename syntax/{river_test.go => syntax_test.go} (98%) diff --git a/syntax/rivertypes/optional_secret.go b/syntax/alloytypes/optional_secret.go similarity index 93% rename from syntax/rivertypes/optional_secret.go rename to syntax/alloytypes/optional_secret.go index 75648af046..113ab91ba9 100644 --- a/syntax/rivertypes/optional_secret.go +++ b/syntax/alloytypes/optional_secret.go @@ -1,11 +1,11 @@ -package rivertypes +package alloytypes 
import ( "fmt" - "github.com/grafana/river/internal/value" - "github.com/grafana/river/token" - "github.com/grafana/river/token/builder" + "github.com/grafana/alloy/syntax/internal/value" + "github.com/grafana/alloy/syntax/token" + "github.com/grafana/alloy/syntax/token/builder" ) // OptionalSecret holds a potentially sensitive value. When IsSecret is true, diff --git a/syntax/rivertypes/optional_secret_test.go b/syntax/alloytypes/optional_secret_test.go similarity index 62% rename from syntax/rivertypes/optional_secret_test.go rename to syntax/alloytypes/optional_secret_test.go index bd8a0baeea..6382b109df 100644 --- a/syntax/rivertypes/optional_secret_test.go +++ b/syntax/alloytypes/optional_secret_test.go @@ -1,16 +1,16 @@ -package rivertypes_test +package alloytypes_test import ( "testing" - "github.com/grafana/river/rivertypes" - "github.com/grafana/river/token/builder" + "github.com/grafana/alloy/syntax/alloytypes" + "github.com/grafana/alloy/syntax/token/builder" "github.com/stretchr/testify/require" ) func TestOptionalSecret(t *testing.T) { t.Run("non-sensitive conversion to string is allowed", func(t *testing.T) { - input := rivertypes.OptionalSecret{IsSecret: false, Value: "testval"} + input := alloytypes.OptionalSecret{IsSecret: false, Value: "testval"} var s string err := decodeTo(t, input, &s) @@ -19,7 +19,7 @@ func TestOptionalSecret(t *testing.T) { }) t.Run("sensitive conversion to string is disallowed", func(t *testing.T) { - input := rivertypes.OptionalSecret{IsSecret: true, Value: "testval"} + input := alloytypes.OptionalSecret{IsSecret: true, Value: "testval"} var s string err := decodeTo(t, input, &s) @@ -28,29 +28,29 @@ func TestOptionalSecret(t *testing.T) { }) t.Run("non-sensitive conversion to secret is allowed", func(t *testing.T) { - input := rivertypes.OptionalSecret{IsSecret: false, Value: "testval"} + input := alloytypes.OptionalSecret{IsSecret: false, Value: "testval"} - var s rivertypes.Secret + var s alloytypes.Secret err := decodeTo(t, input, &s) require.NoError(t, err) - require.Equal(t, rivertypes.Secret("testval"), s) + require.Equal(t, alloytypes.Secret("testval"), s) }) t.Run("sensitive conversion to secret is allowed", func(t *testing.T) { - input := rivertypes.OptionalSecret{IsSecret: true, Value: "testval"} + input := alloytypes.OptionalSecret{IsSecret: true, Value: "testval"} - var s rivertypes.Secret + var s alloytypes.Secret err := decodeTo(t, input, &s) require.NoError(t, err) - require.Equal(t, rivertypes.Secret("testval"), s) + require.Equal(t, alloytypes.Secret("testval"), s) }) t.Run("conversion from string is allowed", func(t *testing.T) { - var s rivertypes.OptionalSecret + var s alloytypes.OptionalSecret err := decodeTo(t, string("Hello, world!"), &s) require.NoError(t, err) - expect := rivertypes.OptionalSecret{ + expect := alloytypes.OptionalSecret{ IsSecret: false, Value: "Hello, world!", } @@ -58,11 +58,11 @@ func TestOptionalSecret(t *testing.T) { }) t.Run("conversion from secret is allowed", func(t *testing.T) { - var s rivertypes.OptionalSecret - err := decodeTo(t, rivertypes.Secret("Hello, world!"), &s) + var s alloytypes.OptionalSecret + err := decodeTo(t, alloytypes.Secret("Hello, world!"), &s) require.NoError(t, err) - expect := rivertypes.OptionalSecret{ + expect := alloytypes.OptionalSecret{ IsSecret: true, Value: "Hello, world!", } @@ -76,10 +76,10 @@ func TestOptionalSecret_Write(t *testing.T) { value interface{} expect string }{ - {"non-sensitive", rivertypes.OptionalSecret{Value: "foobar"}, `"foobar"`}, - {"sensitive", 
rivertypes.OptionalSecret{IsSecret: true, Value: "foobar"}, `(secret)`}, - {"non-sensitive pointer", &rivertypes.OptionalSecret{Value: "foobar"}, `"foobar"`}, - {"sensitive pointer", &rivertypes.OptionalSecret{IsSecret: true, Value: "foobar"}, `(secret)`}, + {"non-sensitive", alloytypes.OptionalSecret{Value: "foobar"}, `"foobar"`}, + {"sensitive", alloytypes.OptionalSecret{IsSecret: true, Value: "foobar"}, `(secret)`}, + {"non-sensitive pointer", &alloytypes.OptionalSecret{Value: "foobar"}, `"foobar"`}, + {"sensitive pointer", &alloytypes.OptionalSecret{IsSecret: true, Value: "foobar"}, `(secret)`}, } for _, tc := range tt { diff --git a/syntax/rivertypes/secret.go b/syntax/alloytypes/secret.go similarity index 91% rename from syntax/rivertypes/secret.go rename to syntax/alloytypes/secret.go index c2eb357d03..2cb8abc9a2 100644 --- a/syntax/rivertypes/secret.go +++ b/syntax/alloytypes/secret.go @@ -1,11 +1,11 @@ -package rivertypes +package alloytypes import ( "fmt" - "github.com/grafana/river/internal/value" - "github.com/grafana/river/token" - "github.com/grafana/river/token/builder" + "github.com/grafana/alloy/syntax/internal/value" + "github.com/grafana/alloy/syntax/token" + "github.com/grafana/alloy/syntax/token/builder" ) // Secret is a River capsule holding a sensitive string. The contents of a diff --git a/syntax/rivertypes/secret_test.go b/syntax/alloytypes/secret_test.go similarity index 65% rename from syntax/rivertypes/secret_test.go rename to syntax/alloytypes/secret_test.go index cade74647b..69b770a615 100644 --- a/syntax/rivertypes/secret_test.go +++ b/syntax/alloytypes/secret_test.go @@ -1,34 +1,34 @@ -package rivertypes_test +package alloytypes_test import ( "testing" - "github.com/grafana/river/parser" - "github.com/grafana/river/rivertypes" - "github.com/grafana/river/vm" + "github.com/grafana/alloy/syntax/alloytypes" + "github.com/grafana/alloy/syntax/parser" + "github.com/grafana/alloy/syntax/vm" "github.com/stretchr/testify/require" ) func TestSecret(t *testing.T) { t.Run("strings can be converted to secret", func(t *testing.T) { - var s rivertypes.Secret + var s alloytypes.Secret err := decodeTo(t, string("Hello, world!"), &s) require.NoError(t, err) - require.Equal(t, rivertypes.Secret("Hello, world!"), s) + require.Equal(t, alloytypes.Secret("Hello, world!"), s) }) t.Run("secrets cannot be converted to strings", func(t *testing.T) { var s string - err := decodeTo(t, rivertypes.Secret("Hello, world!"), &s) + err := decodeTo(t, alloytypes.Secret("Hello, world!"), &s) require.NotNil(t, err) require.Contains(t, err.Error(), "secrets may not be converted into strings") }) t.Run("secrets can be passed to secrets", func(t *testing.T) { - var s rivertypes.Secret - err := decodeTo(t, rivertypes.Secret("Hello, world!"), &s) + var s alloytypes.Secret + err := decodeTo(t, alloytypes.Secret("Hello, world!"), &s) require.NoError(t, err) - require.Equal(t, rivertypes.Secret("Hello, world!"), s) + require.Equal(t, alloytypes.Secret("Hello, world!"), s) }) } diff --git a/syntax/ast/ast.go b/syntax/ast/ast.go index 992ee0c71a..5e71137f79 100644 --- a/syntax/ast/ast.go +++ b/syntax/ast/ast.go @@ -9,7 +9,7 @@ import ( "reflect" "strings" - "github.com/grafana/river/token" + "github.com/grafana/alloy/syntax/token" ) // Node represents any node in the AST. 
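The alloytypes tests above pin down the one-way secrecy rule that survives the rename: plain strings may convert into a Secret, but a Secret never converts back into a string. For readers following the rename, a minimal sketch of consuming the renamed package (a hypothetical standalone program, assuming syntax.Unmarshal and the river:"...,attr" struct tags keep their pre-rename behavior):

package main

import (
    "fmt"

    syntax "github.com/grafana/alloy/syntax"
    "github.com/grafana/alloy/syntax/alloytypes"
)

type Config struct {
    // Decoded from a plain string, but stored as a Secret capsule so the
    // VM refuses to convert it back into a string later.
    Password alloytypes.Secret `river:"password,attr"`
}

func main() {
    var c Config
    if err := syntax.Unmarshal([]byte(`password = "hunter2"`), &c); err != nil {
        panic(err)
    }
    fmt.Printf("loaded a %d-byte secret\n", len(c.Password))
}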
diff --git a/syntax/cmd/riverfmt/main.go b/syntax/cmd/riverfmt/main.go index d7b4433f5a..84fc4986bc 100644 --- a/syntax/cmd/riverfmt/main.go +++ b/syntax/cmd/riverfmt/main.go @@ -8,9 +8,9 @@ import ( "io" "os" - "github.com/grafana/river/diag" - "github.com/grafana/river/parser" - "github.com/grafana/river/printer" + "github.com/grafana/alloy/syntax/diag" + "github.com/grafana/alloy/syntax/parser" + "github.com/grafana/alloy/syntax/printer" ) func main() { diff --git a/syntax/diag/diag.go b/syntax/diag/diag.go index a49487af61..9c57e8c9e0 100644 --- a/syntax/diag/diag.go +++ b/syntax/diag/diag.go @@ -5,7 +5,7 @@ package diag import ( "fmt" - "github.com/grafana/river/token" + "github.com/grafana/alloy/syntax/token" ) // Severity denotes the severity level of a diagnostic. The zero value of diff --git a/syntax/diag/printer.go b/syntax/diag/printer.go index 03994d68cf..4191ba702a 100644 --- a/syntax/diag/printer.go +++ b/syntax/diag/printer.go @@ -8,7 +8,7 @@ import ( "strings" "github.com/fatih/color" - "github.com/grafana/river/token" + "github.com/grafana/alloy/syntax/token" ) const tabWidth = 4 diff --git a/syntax/diag/printer_test.go b/syntax/diag/printer_test.go index 4558e666ef..b6de2ed57b 100644 --- a/syntax/diag/printer_test.go +++ b/syntax/diag/printer_test.go @@ -5,8 +5,8 @@ import ( "fmt" "testing" - "github.com/grafana/river/diag" - "github.com/grafana/river/token" + "github.com/grafana/alloy/syntax/diag" + "github.com/grafana/alloy/syntax/token" "github.com/stretchr/testify/require" ) diff --git a/syntax/encoding/riverjson/riverjson.go b/syntax/encoding/riverjson/riverjson.go index 7b4ef76dc3..9afdb0aba1 100644 --- a/syntax/encoding/riverjson/riverjson.go +++ b/syntax/encoding/riverjson/riverjson.go @@ -8,10 +8,10 @@ import ( "sort" "strings" - "github.com/grafana/river/internal/reflectutil" - "github.com/grafana/river/internal/syntaxtags" - "github.com/grafana/river/internal/value" - "github.com/grafana/river/token/builder" + "github.com/grafana/alloy/syntax/internal/reflectutil" + "github.com/grafana/alloy/syntax/internal/syntaxtags" + "github.com/grafana/alloy/syntax/internal/value" + "github.com/grafana/alloy/syntax/token/builder" ) var goRiverDefaulter = reflect.TypeOf((*value.Defaulter)(nil)).Elem() diff --git a/syntax/encoding/riverjson/riverjson_test.go b/syntax/encoding/riverjson/riverjson_test.go index 0eeb321b59..e567f8a5cc 100644 --- a/syntax/encoding/riverjson/riverjson_test.go +++ b/syntax/encoding/riverjson/riverjson_test.go @@ -3,9 +3,9 @@ package riverjson_test import ( "testing" - river "github.com/grafana/river" - "github.com/grafana/river/encoding/riverjson" - "github.com/grafana/river/rivertypes" + river "github.com/grafana/alloy/syntax" + "github.com/grafana/alloy/syntax/alloytypes" + "github.com/grafana/alloy/syntax/encoding/riverjson" "github.com/stretchr/testify/require" ) @@ -86,7 +86,7 @@ func TestValues(t *testing.T) { }, { name: "capsule", - input: rivertypes.Secret("foo"), + input: alloytypes.Secret("foo"), expectJSON: `{ "type": "capsule", "value": "(secret)" }`, }, { @@ -349,7 +349,7 @@ func TestRawMap(t *testing.T) { } func TestRawMap_Capsule(t *testing.T) { - val := map[string]any{"capsule": rivertypes.Secret("foo")} + val := map[string]any{"capsule": alloytypes.Secret("foo")} expect := `[{ "name": "capsule", diff --git a/syntax/go.mod b/syntax/go.mod index 6b85c2e5da..0e754f52b3 100644 --- a/syntax/go.mod +++ b/syntax/go.mod @@ -1,4 +1,4 @@ -module github.com/grafana/river +module github.com/grafana/alloy/syntax go 1.21.0 diff --git 
a/syntax/internal/reflectutil/walk.go b/syntax/internal/reflectutil/walk.go index ff7f9b927b..73ed597d1a 100644 --- a/syntax/internal/reflectutil/walk.go +++ b/syntax/internal/reflectutil/walk.go @@ -3,7 +3,7 @@ package reflectutil import ( "reflect" - "github.com/grafana/river/internal/syntaxtags" + "github.com/grafana/alloy/syntax/internal/syntaxtags" ) // GetOrAlloc returns the nested field of value corresponding to index. diff --git a/syntax/internal/reflectutil/walk_test.go b/syntax/internal/reflectutil/walk_test.go index 8381125d9e..dd15ff3737 100644 --- a/syntax/internal/reflectutil/walk_test.go +++ b/syntax/internal/reflectutil/walk_test.go @@ -4,8 +4,8 @@ import ( "reflect" "testing" - "github.com/grafana/river/internal/reflectutil" - "github.com/grafana/river/internal/syntaxtags" + "github.com/grafana/alloy/syntax/internal/reflectutil" + "github.com/grafana/alloy/syntax/internal/syntaxtags" "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" ) diff --git a/syntax/internal/stdlib/stdlib.go b/syntax/internal/stdlib/stdlib.go index e73b950af8..7d7e004f35 100644 --- a/syntax/internal/stdlib/stdlib.go +++ b/syntax/internal/stdlib/stdlib.go @@ -7,8 +7,8 @@ import ( "os" "strings" - "github.com/grafana/river/internal/value" - "github.com/grafana/river/rivertypes" + "github.com/grafana/alloy/syntax/alloytypes" + "github.com/grafana/alloy/syntax/internal/value" "github.com/ohler55/ojg/jp" "github.com/ohler55/ojg/oj" ) @@ -25,7 +25,7 @@ var Identifiers = map[string]interface{}{ "env": os.Getenv, - "nonsensitive": func(secret rivertypes.Secret) string { + "nonsensitive": func(secret alloytypes.Secret) string { return string(secret) }, diff --git a/syntax/internal/syntaxtags/syntaxtags_test.go b/syntax/internal/syntaxtags/syntaxtags_test.go index 654512d791..6c9b1b4633 100644 --- a/syntax/internal/syntaxtags/syntaxtags_test.go +++ b/syntax/internal/syntaxtags/syntaxtags_test.go @@ -4,7 +4,7 @@ import ( "reflect" "testing" - "github.com/grafana/river/internal/syntaxtags" + "github.com/grafana/alloy/syntax/internal/syntaxtags" "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" ) diff --git a/syntax/internal/value/decode.go b/syntax/internal/value/decode.go index 20df78eb6a..e2bb9c33a0 100644 --- a/syntax/internal/value/decode.go +++ b/syntax/internal/value/decode.go @@ -8,7 +8,7 @@ import ( "reflect" "time" - "github.com/grafana/river/internal/reflectutil" + "github.com/grafana/alloy/syntax/internal/reflectutil" ) // The Defaulter interface allows a type to implement default functionality diff --git a/syntax/internal/value/decode_benchmarks_test.go b/syntax/internal/value/decode_benchmarks_test.go index 9a33239329..2c9cb97ebc 100644 --- a/syntax/internal/value/decode_benchmarks_test.go +++ b/syntax/internal/value/decode_benchmarks_test.go @@ -4,7 +4,7 @@ import ( "fmt" "testing" - "github.com/grafana/river/internal/value" + "github.com/grafana/alloy/syntax/internal/value" ) func BenchmarkObjectDecode(b *testing.B) { diff --git a/syntax/internal/value/decode_test.go b/syntax/internal/value/decode_test.go index 5b84838bd0..a53b9553af 100644 --- a/syntax/internal/value/decode_test.go +++ b/syntax/internal/value/decode_test.go @@ -8,7 +8,7 @@ import ( "time" "unsafe" - "github.com/grafana/river/internal/value" + "github.com/grafana/alloy/syntax/internal/value" "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" ) diff --git a/syntax/internal/value/tag_cache.go b/syntax/internal/value/tag_cache.go index 491a74b81d..1e537deba2 
100644 --- a/syntax/internal/value/tag_cache.go +++ b/syntax/internal/value/tag_cache.go @@ -3,7 +3,7 @@ package value import ( "reflect" - "github.com/grafana/river/internal/syntaxtags" + "github.com/grafana/alloy/syntax/internal/syntaxtags" ) // tagsCache caches the river tags for a struct type. This is never cleared, diff --git a/syntax/internal/value/type_test.go b/syntax/internal/value/type_test.go index 10ee04bc75..7a595d1a51 100644 --- a/syntax/internal/value/type_test.go +++ b/syntax/internal/value/type_test.go @@ -4,7 +4,7 @@ import ( "reflect" "testing" - "github.com/grafana/river/internal/value" + "github.com/grafana/alloy/syntax/internal/value" "github.com/stretchr/testify/require" ) diff --git a/syntax/internal/value/value.go b/syntax/internal/value/value.go index bdd8492c09..73a7b90125 100644 --- a/syntax/internal/value/value.go +++ b/syntax/internal/value/value.go @@ -10,7 +10,7 @@ import ( "strings" "time" - "github.com/grafana/river/internal/reflectutil" + "github.com/grafana/alloy/syntax/internal/reflectutil" ) // Go types used throughout the package. diff --git a/syntax/internal/value/value_object.go b/syntax/internal/value/value_object.go index 6a642cb22f..465272d23e 100644 --- a/syntax/internal/value/value_object.go +++ b/syntax/internal/value/value_object.go @@ -3,7 +3,7 @@ package value import ( "reflect" - "github.com/grafana/river/internal/reflectutil" + "github.com/grafana/alloy/syntax/internal/reflectutil" ) // structWrapper allows for partially traversing structs which contain fields diff --git a/syntax/internal/value/value_object_test.go b/syntax/internal/value/value_object_test.go index 56d72a6102..1cb90eb872 100644 --- a/syntax/internal/value/value_object_test.go +++ b/syntax/internal/value/value_object_test.go @@ -3,7 +3,7 @@ package value_test import ( "testing" - "github.com/grafana/river/internal/value" + "github.com/grafana/alloy/syntax/internal/value" "github.com/stretchr/testify/require" ) diff --git a/syntax/internal/value/value_test.go b/syntax/internal/value/value_test.go index 4583e5196d..ef75a8403f 100644 --- a/syntax/internal/value/value_test.go +++ b/syntax/internal/value/value_test.go @@ -5,7 +5,7 @@ import ( "io" "testing" - "github.com/grafana/river/internal/value" + "github.com/grafana/alloy/syntax/internal/value" "github.com/stretchr/testify/require" ) diff --git a/syntax/parser/error_test.go b/syntax/parser/error_test.go index feb8602e31..a170be005a 100644 --- a/syntax/parser/error_test.go +++ b/syntax/parser/error_test.go @@ -7,9 +7,9 @@ import ( "strings" "testing" - "github.com/grafana/river/diag" - "github.com/grafana/river/scanner" - "github.com/grafana/river/token" + "github.com/grafana/alloy/syntax/diag" + "github.com/grafana/alloy/syntax/scanner" + "github.com/grafana/alloy/syntax/token" "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" ) diff --git a/syntax/parser/internal.go b/syntax/parser/internal.go index 1a8b7b7467..af6704a40b 100644 --- a/syntax/parser/internal.go +++ b/syntax/parser/internal.go @@ -4,10 +4,10 @@ import ( "fmt" "strings" - "github.com/grafana/river/ast" - "github.com/grafana/river/diag" - "github.com/grafana/river/scanner" - "github.com/grafana/river/token" + "github.com/grafana/alloy/syntax/ast" + "github.com/grafana/alloy/syntax/diag" + "github.com/grafana/alloy/syntax/scanner" + "github.com/grafana/alloy/syntax/token" ) // parser implements the River parser. 
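Only import paths change in these parser hunks; the API itself is untouched. A short sketch of invoking the parser under its new module path (assuming ParseFile keeps its pre-rename shape: a filename plus the raw data in, an *ast.File or a diagnostics-bearing error out):

package main

import (
    "fmt"
    "os"

    "github.com/grafana/alloy/syntax/parser"
)

func main() {
    data, err := os.ReadFile("config.river")
    if err != nil {
        panic(err)
    }
    // Parse the whole file; on failure the returned error carries the
    // position information consumed by the diag printer shown earlier.
    file, err := parser.ParseFile("config.river", data)
    if err != nil {
        fmt.Fprintln(os.Stderr, err)
        os.Exit(1)
    }
    fmt.Printf("parsed %d top-level statements\n", len(file.Body))
}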
diff --git a/syntax/parser/parser.go b/syntax/parser/parser.go index 66d2199d4b..480e2b5fab 100644 --- a/syntax/parser/parser.go +++ b/syntax/parser/parser.go @@ -2,8 +2,8 @@ package parser import ( - "github.com/grafana/river/ast" - "github.com/grafana/river/token" + "github.com/grafana/alloy/syntax/ast" + "github.com/grafana/alloy/syntax/token" ) // ParseFile parses an entire River configuration file. The data parameter diff --git a/syntax/printer/printer.go b/syntax/printer/printer.go index 8faeb22006..c7f1638913 100644 --- a/syntax/printer/printer.go +++ b/syntax/printer/printer.go @@ -7,8 +7,8 @@ import ( "math" "text/tabwriter" - "github.com/grafana/river/ast" - "github.com/grafana/river/token" + "github.com/grafana/alloy/syntax/ast" + "github.com/grafana/alloy/syntax/token" ) // Config configures behavior of the printer. diff --git a/syntax/printer/printer_test.go b/syntax/printer/printer_test.go index 38f69217b1..9ffad8a03d 100644 --- a/syntax/printer/printer_test.go +++ b/syntax/printer/printer_test.go @@ -9,8 +9,8 @@ import ( "testing" "unicode" - "github.com/grafana/river/parser" - "github.com/grafana/river/printer" + "github.com/grafana/alloy/syntax/parser" + "github.com/grafana/alloy/syntax/printer" "github.com/stretchr/testify/require" ) diff --git a/syntax/printer/walker.go b/syntax/printer/walker.go index 01f71b21bd..f7495e08cb 100644 --- a/syntax/printer/walker.go +++ b/syntax/printer/walker.go @@ -4,8 +4,8 @@ import ( "fmt" "strings" - "github.com/grafana/river/ast" - "github.com/grafana/river/token" + "github.com/grafana/alloy/syntax/ast" + "github.com/grafana/alloy/syntax/token" ) // A walker walks an AST and sends lexical tokens and formatting information to diff --git a/syntax/scanner/identifier.go b/syntax/scanner/identifier.go index ed2239e060..fc5366aa61 100644 --- a/syntax/scanner/identifier.go +++ b/syntax/scanner/identifier.go @@ -3,7 +3,7 @@ package scanner import ( "fmt" - "github.com/grafana/river/token" + "github.com/grafana/alloy/syntax/token" ) // IsValidIdentifier returns true if the given string is a valid river diff --git a/syntax/scanner/identifier_test.go b/syntax/scanner/identifier_test.go index e1dfead833..badf08c08e 100644 --- a/syntax/scanner/identifier_test.go +++ b/syntax/scanner/identifier_test.go @@ -3,7 +3,7 @@ package scanner_test import ( "testing" - "github.com/grafana/river/scanner" + "github.com/grafana/alloy/syntax/scanner" "github.com/stretchr/testify/require" ) diff --git a/syntax/scanner/scanner.go b/syntax/scanner/scanner.go index e637a785b9..8a4deb953b 100644 --- a/syntax/scanner/scanner.go +++ b/syntax/scanner/scanner.go @@ -6,7 +6,7 @@ import ( "unicode" "unicode/utf8" - "github.com/grafana/river/token" + "github.com/grafana/alloy/syntax/token" ) // EBNF for the scanner: diff --git a/syntax/scanner/scanner_test.go b/syntax/scanner/scanner_test.go index 38ddcf58ca..711bb34856 100644 --- a/syntax/scanner/scanner_test.go +++ b/syntax/scanner/scanner_test.go @@ -4,7 +4,7 @@ import ( "path/filepath" "testing" - "github.com/grafana/river/token" + "github.com/grafana/alloy/syntax/token" "github.com/stretchr/testify/assert" ) diff --git a/syntax/river.go b/syntax/syntax.go similarity index 98% rename from syntax/river.go rename to syntax/syntax.go index 0944a9e3be..44a256603e 100644 --- a/syntax/river.go +++ b/syntax/syntax.go @@ -1,4 +1,4 @@ -// Package river implements a high-level API for decoding and encoding River +// Package syntax implements a high-level API for decoding and encoding River // configuration files. 
The mapping between River and Go values is described in // the documentation for the Unmarshal and Marshal functions. // @@ -6,15 +6,15 @@ // available in the inner packages. The implementation of this package is // minimal and serves as a reference for how to consume the lower-level // packages. -package river +package syntax import ( "bytes" "io" - "github.com/grafana/river/parser" - "github.com/grafana/river/token/builder" - "github.com/grafana/river/vm" + "github.com/grafana/alloy/syntax/parser" + "github.com/grafana/alloy/syntax/token/builder" + "github.com/grafana/alloy/syntax/vm" ) // Marshal returns the pretty-printed encoding of v as a River configuration diff --git a/syntax/river_test.go b/syntax/syntax_test.go similarity index 98% rename from syntax/river_test.go rename to syntax/syntax_test.go index 99247f54da..95276ab43e 100644 --- a/syntax/river_test.go +++ b/syntax/syntax_test.go @@ -1,10 +1,10 @@ -package river_test +package syntax_test import ( "fmt" "os" - river "github.com/grafana/river" + river "github.com/grafana/alloy/syntax" ) func ExampleUnmarshal() { diff --git a/syntax/token/builder/builder.go b/syntax/token/builder/builder.go index b92408dbd4..89e3a1a096 100644 --- a/syntax/token/builder/builder.go +++ b/syntax/token/builder/builder.go @@ -9,10 +9,10 @@ import ( "reflect" "strings" - "github.com/grafana/river/internal/reflectutil" - "github.com/grafana/river/internal/syntaxtags" - "github.com/grafana/river/internal/value" - "github.com/grafana/river/token" + "github.com/grafana/alloy/syntax/internal/reflectutil" + "github.com/grafana/alloy/syntax/internal/syntaxtags" + "github.com/grafana/alloy/syntax/internal/value" + "github.com/grafana/alloy/syntax/token" ) var goRiverDefaulter = reflect.TypeOf((*value.Defaulter)(nil)).Elem() diff --git a/syntax/token/builder/builder_test.go b/syntax/token/builder/builder_test.go index d363556929..9869ad6dea 100644 --- a/syntax/token/builder/builder_test.go +++ b/syntax/token/builder/builder_test.go @@ -6,10 +6,10 @@ import ( "testing" "time" - "github.com/grafana/river/parser" - "github.com/grafana/river/printer" - "github.com/grafana/river/token" - "github.com/grafana/river/token/builder" + "github.com/grafana/alloy/syntax/parser" + "github.com/grafana/alloy/syntax/printer" + "github.com/grafana/alloy/syntax/token" + "github.com/grafana/alloy/syntax/token/builder" "github.com/stretchr/testify/require" ) diff --git a/syntax/token/builder/nested_defaults_test.go b/syntax/token/builder/nested_defaults_test.go index 1fd8122b28..bd6048a188 100644 --- a/syntax/token/builder/nested_defaults_test.go +++ b/syntax/token/builder/nested_defaults_test.go @@ -5,10 +5,10 @@ import ( "reflect" "testing" - "github.com/grafana/river/ast" - "github.com/grafana/river/parser" - "github.com/grafana/river/token/builder" - "github.com/grafana/river/vm" + "github.com/grafana/alloy/syntax/ast" + "github.com/grafana/alloy/syntax/parser" + "github.com/grafana/alloy/syntax/token/builder" + "github.com/grafana/alloy/syntax/vm" "github.com/stretchr/testify/require" ) diff --git a/syntax/token/builder/token.go b/syntax/token/builder/token.go index 390b968959..cdbc7d70d4 100644 --- a/syntax/token/builder/token.go +++ b/syntax/token/builder/token.go @@ -4,9 +4,9 @@ import ( "bytes" "io" - "github.com/grafana/river/parser" - "github.com/grafana/river/printer" - "github.com/grafana/river/token" + "github.com/grafana/alloy/syntax/parser" + "github.com/grafana/alloy/syntax/printer" + "github.com/grafana/alloy/syntax/token" ) // A Token is a wrapper around 
token.Token which contains the token type diff --git a/syntax/token/builder/value_tokens.go b/syntax/token/builder/value_tokens.go index c73e34f7b6..cfc3bfd1dc 100644 --- a/syntax/token/builder/value_tokens.go +++ b/syntax/token/builder/value_tokens.go @@ -4,9 +4,9 @@ import ( "fmt" "sort" - "github.com/grafana/river/internal/value" - "github.com/grafana/river/scanner" - "github.com/grafana/river/token" + "github.com/grafana/alloy/syntax/internal/value" + "github.com/grafana/alloy/syntax/scanner" + "github.com/grafana/alloy/syntax/token" ) // TODO(rfratto): check for optional values diff --git a/syntax/types.go b/syntax/types.go index b0123010b8..1005b56da8 100644 --- a/syntax/types.go +++ b/syntax/types.go @@ -1,6 +1,6 @@ -package river +package syntax -import "github.com/grafana/river/internal/value" +import "github.com/grafana/alloy/syntax/internal/value" // Our types in this file are re-implementations of interfaces from // value.Capsule. They are *not* defined as type aliases, since pkg.go.dev diff --git a/syntax/vm/constant.go b/syntax/vm/constant.go index d2e54c717d..09ef0e842e 100644 --- a/syntax/vm/constant.go +++ b/syntax/vm/constant.go @@ -4,8 +4,8 @@ import ( "fmt" "strconv" - "github.com/grafana/river/internal/value" - "github.com/grafana/river/token" + "github.com/grafana/alloy/syntax/internal/value" + "github.com/grafana/alloy/syntax/token" ) func valueFromLiteral(lit string, tok token.Token) (value.Value, error) { diff --git a/syntax/vm/error.go b/syntax/vm/error.go index c82a5b418e..fe3543dbda 100644 --- a/syntax/vm/error.go +++ b/syntax/vm/error.go @@ -4,11 +4,11 @@ import ( "fmt" "strings" - "github.com/grafana/river/ast" - "github.com/grafana/river/diag" - "github.com/grafana/river/internal/value" - "github.com/grafana/river/printer" - "github.com/grafana/river/token/builder" + "github.com/grafana/alloy/syntax/ast" + "github.com/grafana/alloy/syntax/diag" + "github.com/grafana/alloy/syntax/internal/value" + "github.com/grafana/alloy/syntax/printer" + "github.com/grafana/alloy/syntax/token/builder" ) // makeDiagnostic tries to convert err into a diag.Diagnostic. err must be an diff --git a/syntax/vm/op_binary.go b/syntax/vm/op_binary.go index 75f53b3a4e..2b4cba251f 100644 --- a/syntax/vm/op_binary.go +++ b/syntax/vm/op_binary.go @@ -5,9 +5,9 @@ import ( "math" "reflect" - "github.com/grafana/river/internal/value" - "github.com/grafana/river/rivertypes" - "github.com/grafana/river/token" + "github.com/grafana/alloy/syntax/alloytypes" + "github.com/grafana/alloy/syntax/internal/value" + "github.com/grafana/alloy/syntax/token" ) func evalBinop(lhs value.Value, op token.Token, rhs value.Value) (value.Value, error) { @@ -208,13 +208,13 @@ func evalBinop(lhs value.Value, op token.Token, rhs value.Value) (value.Value, e } // tryUnwrapOptionalSecret accepts a value and, if it is a -// rivertypes.OptionalSecret where IsSecret is false, returns a string value +// alloytypes.OptionalSecret where IsSecret is false, returns a string value // instead. // -// If val is not a rivertypes.OptionalSecret or IsSecret is true, +// If val is not an alloytypes.OptionalSecret or IsSecret is true, // tryUnwrapOptionalSecret returns the input value unchanged.
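// An illustrative sketch of that rule, using the scope defined in
// TestVM_OptionalSecret_Conversion below: an expression such as
// string_val + " " + non_secret_val can evaluate to the plain string
// "hello world", because the non-secret operand unwraps to a string,
// while the same expression written with secret_val leaves the capsule
// wrapped, so the binary operation is rejected with a type error.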
func tryUnwrapOptionalSecret(val value.Value) value.Value { - optSecret, ok := val.Interface().(rivertypes.OptionalSecret) + optSecret, ok := val.Interface().(alloytypes.OptionalSecret) if !ok || optSecret.IsSecret { return val } diff --git a/syntax/vm/op_binary_test.go b/syntax/vm/op_binary_test.go index 45367777e4..11803c2283 100644 --- a/syntax/vm/op_binary_test.go +++ b/syntax/vm/op_binary_test.go @@ -4,9 +4,9 @@ import ( "reflect" "testing" - "github.com/grafana/river/parser" - "github.com/grafana/river/rivertypes" - "github.com/grafana/river/vm" + "github.com/grafana/alloy/syntax/alloytypes" + "github.com/grafana/alloy/syntax/parser" + "github.com/grafana/alloy/syntax/vm" "github.com/stretchr/testify/require" ) @@ -14,8 +14,8 @@ func TestVM_OptionalSecret_Conversion(t *testing.T) { scope := &vm.Scope{ Variables: map[string]any{ "string_val": "hello", - "non_secret_val": rivertypes.OptionalSecret{IsSecret: false, Value: "world"}, - "secret_val": rivertypes.OptionalSecret{IsSecret: true, Value: "secret"}, + "non_secret_val": alloytypes.OptionalSecret{IsSecret: false, Value: "world"}, + "secret_val": alloytypes.OptionalSecret{IsSecret: true, Value: "secret"}, }, } diff --git a/syntax/vm/op_unary.go b/syntax/vm/op_unary.go index 9e0ffbffc2..31ef54650c 100644 --- a/syntax/vm/op_unary.go +++ b/syntax/vm/op_unary.go @@ -1,8 +1,8 @@ package vm import ( - "github.com/grafana/river/internal/value" - "github.com/grafana/river/token" + "github.com/grafana/alloy/syntax/internal/value" + "github.com/grafana/alloy/syntax/token" ) func evalUnaryOp(op token.Token, val value.Value) (value.Value, error) { diff --git a/syntax/vm/struct_decoder.go b/syntax/vm/struct_decoder.go index cff40e2e93..249e371268 100644 --- a/syntax/vm/struct_decoder.go +++ b/syntax/vm/struct_decoder.go @@ -5,11 +5,11 @@ import ( "reflect" "strings" - "github.com/grafana/river/ast" - "github.com/grafana/river/diag" - "github.com/grafana/river/internal/reflectutil" - "github.com/grafana/river/internal/syntaxtags" - "github.com/grafana/river/internal/value" + "github.com/grafana/alloy/syntax/ast" + "github.com/grafana/alloy/syntax/diag" + "github.com/grafana/alloy/syntax/internal/reflectutil" + "github.com/grafana/alloy/syntax/internal/syntaxtags" + "github.com/grafana/alloy/syntax/internal/value" ) // structDecoder decodes a series of AST statements into a Go value. diff --git a/syntax/vm/tag_cache.go b/syntax/vm/tag_cache.go index 8f1b534556..53368497ed 100644 --- a/syntax/vm/tag_cache.go +++ b/syntax/vm/tag_cache.go @@ -5,7 +5,7 @@ import ( "strings" "sync" - "github.com/grafana/river/internal/syntaxtags" + "github.com/grafana/alloy/syntax/internal/syntaxtags" ) // tagsCache caches the river tags for a struct type. This is never cleared, diff --git a/syntax/vm/vm.go b/syntax/vm/vm.go index 42d2a3b942..e649b2fefc 100644 --- a/syntax/vm/vm.go +++ b/syntax/vm/vm.go @@ -6,12 +6,12 @@ import ( "reflect" "strings" - "github.com/grafana/river/ast" - "github.com/grafana/river/diag" - "github.com/grafana/river/internal/reflectutil" - "github.com/grafana/river/internal/stdlib" - "github.com/grafana/river/internal/syntaxtags" - "github.com/grafana/river/internal/value" + "github.com/grafana/alloy/syntax/ast" + "github.com/grafana/alloy/syntax/diag" + "github.com/grafana/alloy/syntax/internal/reflectutil" + "github.com/grafana/alloy/syntax/internal/stdlib" + "github.com/grafana/alloy/syntax/internal/syntaxtags" + "github.com/grafana/alloy/syntax/internal/value" ) // Evaluator evaluates River AST nodes into Go values. 
Each Evaluator is bound diff --git a/syntax/vm/vm_benchmarks_test.go b/syntax/vm/vm_benchmarks_test.go index e5530ccb37..24938b6206 100644 --- a/syntax/vm/vm_benchmarks_test.go +++ b/syntax/vm/vm_benchmarks_test.go @@ -6,8 +6,8 @@ import ( "reflect" "testing" - "github.com/grafana/river/parser" - "github.com/grafana/river/vm" + "github.com/grafana/alloy/syntax/parser" + "github.com/grafana/alloy/syntax/vm" "github.com/stretchr/testify/require" ) diff --git a/syntax/vm/vm_block_test.go b/syntax/vm/vm_block_test.go index ebc2ff0e6b..7b4cd34a68 100644 --- a/syntax/vm/vm_block_test.go +++ b/syntax/vm/vm_block_test.go @@ -6,9 +6,9 @@ import ( "reflect" "testing" - "github.com/grafana/river/ast" - "github.com/grafana/river/parser" - "github.com/grafana/river/vm" + "github.com/grafana/alloy/syntax/ast" + "github.com/grafana/alloy/syntax/parser" + "github.com/grafana/alloy/syntax/vm" "github.com/stretchr/testify/require" ) diff --git a/syntax/vm/vm_errors_test.go b/syntax/vm/vm_errors_test.go index 87acdd7b1b..bec22de001 100644 --- a/syntax/vm/vm_errors_test.go +++ b/syntax/vm/vm_errors_test.go @@ -3,8 +3,8 @@ package vm_test import ( "testing" - "github.com/grafana/river/parser" - "github.com/grafana/river/vm" + "github.com/grafana/alloy/syntax/parser" + "github.com/grafana/alloy/syntax/vm" "github.com/stretchr/testify/require" ) diff --git a/syntax/vm/vm_stdlib_test.go b/syntax/vm/vm_stdlib_test.go index f395f199e5..591a7bdd27 100644 --- a/syntax/vm/vm_stdlib_test.go +++ b/syntax/vm/vm_stdlib_test.go @@ -5,10 +5,10 @@ import ( "reflect" "testing" - "github.com/grafana/river/internal/value" - "github.com/grafana/river/parser" - "github.com/grafana/river/rivertypes" - "github.com/grafana/river/vm" + "github.com/grafana/alloy/syntax/alloytypes" + "github.com/grafana/alloy/syntax/internal/value" + "github.com/grafana/alloy/syntax/parser" + "github.com/grafana/alloy/syntax/vm" "github.com/stretchr/testify/require" ) @@ -108,8 +108,8 @@ func TestStdlibJsonPath(t *testing.T) { func TestStdlib_Nonsensitive(t *testing.T) { scope := &vm.Scope{ Variables: map[string]any{ - "secret": rivertypes.Secret("foo"), - "optionalSecret": rivertypes.OptionalSecret{Value: "bar"}, + "secret": alloytypes.Secret("foo"), + "optionalSecret": alloytypes.OptionalSecret{Value: "bar"}, }, } diff --git a/syntax/vm/vm_test.go b/syntax/vm/vm_test.go index 5591b08d88..f073b94d4b 100644 --- a/syntax/vm/vm_test.go +++ b/syntax/vm/vm_test.go @@ -6,10 +6,10 @@ import ( "testing" "unicode" - "github.com/grafana/river/parser" - "github.com/grafana/river/scanner" - "github.com/grafana/river/token" - "github.com/grafana/river/vm" + "github.com/grafana/alloy/syntax/parser" + "github.com/grafana/alloy/syntax/scanner" + "github.com/grafana/alloy/syntax/token" + "github.com/grafana/alloy/syntax/vm" "github.com/stretchr/testify/require" ) From cb46c615111312364387460b3352874321c0e2c5 Mon Sep 17 00:00:00 2001 From: Robert Fratto Date: Mon, 25 Mar 2024 14:19:22 -0400 Subject: [PATCH 041/136] all: use github.com/grafana/alloy/syntax submodule --- go.mod | 20 ++++++++----------- go.sum | 2 -- internal/cmd/rivereval/main.go | 6 +++--- internal/component/all/all_test.go | 2 +- internal/component/common/config/types.go | 2 +- .../component/common/config/types_test.go | 2 +- .../common/kubernetes/kubernetes_test.go | 2 +- internal/component/common/net/config_test.go | 2 +- .../component/common/relabel/relabel_test.go | 2 +- internal/component/component_provider.go | 2 +- internal/component/discovery/aws/ec2.go | 2 +- 
internal/component/discovery/aws/lightsail.go | 2 +- internal/component/discovery/azure/azure.go | 2 +- .../component/discovery/azure/azure_test.go | 2 +- internal/component/discovery/consul/consul.go | 2 +- .../component/discovery/consul/consul_test.go | 2 +- .../discovery/consulagent/consulagent.go | 2 +- .../discovery/consulagent/consulagent_test.go | 2 +- .../discovery/digitalocean/digitalocean.go | 2 +- .../digitalocean/digitalocean_test.go | 2 +- internal/component/discovery/dns/dns_test.go | 2 +- .../component/discovery/docker/docker_test.go | 2 +- .../discovery/dockerswarm/dockerswarm_test.go | 4 ++-- .../component/discovery/eureka/eureka_test.go | 4 ++-- .../component/discovery/file/file_test.go | 2 +- internal/component/discovery/gce/gce_test.go | 2 +- .../discovery/hetzner/hetzner_test.go | 2 +- .../component/discovery/http/http_test.go | 2 +- .../component/discovery/ionos/ionos_test.go | 4 ++-- .../discovery/kubelet/kubelet_test.go | 2 +- .../discovery/kubernetes/kubernetes_test.go | 2 +- .../component/discovery/kuma/kuma_test.go | 2 +- .../component/discovery/linode/linode_test.go | 2 +- .../component/discovery/marathon/marathon.go | 2 +- .../discovery/marathon/marathon_test.go | 4 ++-- .../component/discovery/nerve/nerve_test.go | 2 +- .../component/discovery/nomad/nomad_test.go | 2 +- .../discovery/openstack/openstack.go | 2 +- .../discovery/openstack/openstack_test.go | 2 +- .../component/discovery/ovhcloud/ovhcloud.go | 2 +- .../discovery/ovhcloud/ovhcloud_test.go | 2 +- .../discovery/puppetdb/puppetdb_test.go | 2 +- .../discovery/relabel/relabel_test.go | 2 +- .../component/discovery/scaleway/scaleway.go | 2 +- .../discovery/scaleway/scaleway_test.go | 2 +- .../discovery/serverset/serverset_test.go | 2 +- .../component/discovery/triton/triton_test.go | 2 +- internal/component/discovery/uyuni/uyuni.go | 2 +- .../component/discovery/uyuni/uyuni_test.go | 2 +- internal/component/faro/receiver/arguments.go | 4 ++-- internal/component/local/file/file.go | 2 +- internal/component/local/file/file_test.go | 2 +- .../component/loki/process/process_test.go | 2 +- .../process/stages/eventlogmessage_test.go | 2 +- .../loki/process/stages/extensions.go | 2 +- .../loki/process/stages/json_test.go | 2 +- .../loki/process/stages/pipeline_test.go | 2 +- .../component/loki/relabel/relabel_test.go | 2 +- .../loki/rules/kubernetes/rules_test.go | 2 +- .../internal/lokipush/push_api_server_test.go | 2 +- .../loki/source/aws_firehose/component.go | 2 +- .../azure_event_hubs/azure_event_hubs_test.go | 2 +- .../loki/source/cloudflare/cloudflare.go | 2 +- .../loki/source/docker/docker_test.go | 2 +- internal/component/loki/source/kafka/kafka.go | 2 +- .../component/loki/source/kafka/kafka_test.go | 2 +- .../loki/source/kubernetes/kubernetes_test.go | 2 +- .../loki/source/podlogs/podlogs_test.go | 2 +- internal/component/loki/write/write_test.go | 2 +- .../mimir/rules/kubernetes/rules_test.go | 2 +- internal/component/otelcol/auth/auth.go | 2 +- .../component/otelcol/auth/basic/basic.go | 2 +- .../otelcol/auth/basic/basic_test.go | 2 +- .../component/otelcol/auth/bearer/bearer.go | 2 +- .../otelcol/auth/bearer/bearer_test.go | 2 +- .../component/otelcol/auth/headers/headers.go | 4 ++-- .../otelcol/auth/headers/headers_test.go | 2 +- .../component/otelcol/auth/oauth2/oauth2.go | 2 +- .../otelcol/auth/oauth2/oauth2_test.go | 2 +- .../otelcol/auth/sigv4/sigv4_test.go | 2 +- .../component/otelcol/config_filter_test.go | 2 +- internal/component/otelcol/config_retry.go | 2 +- 
internal/component/otelcol/config_tls.go | 2 +- .../otelcol/connector/host_info/host_info.go | 2 +- .../connector/host_info/host_info_test.go | 2 +- .../connector/servicegraph/servicegraph.go | 2 +- .../servicegraph/servicegraph_test.go | 2 +- .../otelcol/connector/spanlogs/spanlogs.go | 2 +- .../connector/spanlogs/spanlogs_test.go | 2 +- .../connector/spanmetrics/spanmetrics.go | 2 +- .../connector/spanmetrics/spanmetrics_test.go | 2 +- .../otelcol/connector/spanmetrics/types.go | 2 +- .../exporter/loadbalancing/loadbalancing.go | 2 +- .../loadbalancing/loadbalancing_test.go | 2 +- .../otelcol/exporter/otlp/otlp_test.go | 2 +- .../exporter/otlphttp/otlphttp_test.go | 2 +- .../exporter/prometheus/prometheus_test.go | 2 +- .../jaeger_remote_sampling_test.go | 2 +- .../processor/attributes/attributes_test.go | 2 +- .../otelcol/processor/batch/batch_test.go | 2 +- .../otelcol/processor/discovery/discovery.go | 2 +- .../processor/discovery/discovery_test.go | 2 +- .../otelcol/processor/filter/filter_test.go | 2 +- .../k8sattributes/k8sattributes_test.go | 2 +- .../memorylimiter/memorylimiter_test.go | 2 +- .../probabilistic_sampler.go | 2 +- .../probabilistic_sampler_test.go | 2 +- .../internal/aws/ec2/config.go | 2 +- .../internal/aws/ecs/config.go | 2 +- .../internal/aws/eks/config.go | 2 +- .../internal/aws/elasticbeanstalk/config.go | 2 +- .../internal/aws/lambda/config.go | 2 +- .../internal/azure/aks/config.go | 2 +- .../internal/azure/config.go | 2 +- .../internal/consul/config.go | 4 ++-- .../internal/docker/config.go | 2 +- .../resourcedetection/internal/gcp/config.go | 2 +- .../internal/heroku/config.go | 2 +- .../internal/k8snode/config.go | 2 +- .../internal/openshift/config.go | 2 +- .../internal/system/config.go | 2 +- .../resourcedetection/resourcedetection.go | 2 +- .../resourcedetection_test.go | 2 +- .../otelcol/processor/span/span_test.go | 2 +- .../tail_sampling/tail_sampling_test.go | 2 +- .../otelcol/processor/tail_sampling/types.go | 2 +- .../processor/transform/transform_test.go | 2 +- .../otelcol/receiver/jaeger/jaeger_test.go | 2 +- .../component/otelcol/receiver/kafka/kafka.go | 2 +- .../otelcol/receiver/kafka/kafka_test.go | 2 +- .../otelcol/receiver/loki/loki_test.go | 2 +- .../receiver/opencensus/opencensus_test.go | 2 +- .../otelcol/receiver/otlp/otlp_test.go | 2 +- .../receiver/prometheus/prometheus_test.go | 2 +- .../otelcol/receiver/vcenter/vcenter.go | 2 +- .../otelcol/receiver/vcenter/vcenter_test.go | 2 +- .../otelcol/receiver/zipkin/zipkin_test.go | 2 +- .../prometheus/exporter/blackbox/blackbox.go | 2 +- .../exporter/blackbox/blackbox_test.go | 2 +- .../exporter/cadvisor/cadvisor_test.go | 2 +- .../prometheus/exporter/cloudwatch/config.go | 2 +- .../exporter/cloudwatch/config_test.go | 2 +- .../exporter/dnsmasq/dnsmasq_test.go | 2 +- .../elasticsearch/elasticsearch_test.go | 4 ++-- .../prometheus/exporter/gcp/gcp_test.go | 2 +- .../prometheus/exporter/github/github.go | 2 +- .../prometheus/exporter/github/github_test.go | 2 +- .../prometheus/exporter/kafka/kafka.go | 2 +- .../prometheus/exporter/kafka/kafka_test.go | 2 +- .../exporter/memcached/memcached_test.go | 2 +- .../prometheus/exporter/mongodb/mongodb.go | 2 +- .../exporter/mongodb/mongodb_test.go | 2 +- .../prometheus/exporter/mssql/mssql.go | 2 +- .../prometheus/exporter/mssql/mssql_test.go | 4 ++-- .../prometheus/exporter/mysql/mysql.go | 2 +- .../prometheus/exporter/mysql/mysql_test.go | 4 ++-- .../prometheus/exporter/oracledb/oracledb.go | 2 +- .../exporter/oracledb/oracledb_test.go | 4 ++-- 
.../prometheus/exporter/postgres/postgres.go | 2 +- .../exporter/postgres/postgres_test.go | 4 ++-- .../exporter/process/process_test.go | 2 +- .../prometheus/exporter/redis/redis.go | 2 +- .../prometheus/exporter/redis/redis_test.go | 2 +- .../prometheus/exporter/snmp/snmp.go | 2 +- .../prometheus/exporter/snmp/snmp_test.go | 2 +- .../exporter/snowflake/snowflake.go | 2 +- .../exporter/snowflake/snowflake_test.go | 4 ++-- .../prometheus/exporter/squid/squid.go | 2 +- .../prometheus/exporter/squid/squid_test.go | 4 ++-- .../prometheus/exporter/statsd/statsd_test.go | 2 +- .../windows/config_default_windows_test.go | 2 +- .../exporter/windows/windows_test.go | 2 +- .../prometheus/operator/types_test.go | 2 +- .../prometheus/relabel/relabel_test.go | 2 +- .../remotewrite/remote_write_test.go | 2 +- .../component/prometheus/remotewrite/types.go | 2 +- .../prometheus/remotewrite/types_test.go | 2 +- .../prometheus/scrape/scrape_test.go | 2 +- .../pyroscope/ebpf/ebpf_linux_test.go | 2 +- .../component/pyroscope/scrape/scrape_test.go | 2 +- .../component/pyroscope/write/write_test.go | 2 +- internal/component/remote/http/http.go | 2 +- internal/component/remote/http/http_test.go | 4 ++-- .../component/remote/kubernetes/kubernetes.go | 2 +- .../remote/kubernetes/kubernetes_test.go | 2 +- internal/component/remote/s3/s3.go | 2 +- internal/component/remote/s3/types.go | 2 +- internal/component/remote/vault/auth.go | 2 +- internal/component/remote/vault/vault.go | 2 +- internal/component/remote/vault/vault_test.go | 4 ++-- .../internal/common/convert_appendable.go | 6 +++--- .../internal/common/convert_logs_receiver.go | 6 +++--- .../internal/common/convert_targets.go | 6 +++--- .../internal/common/convert_targets_test.go | 2 +- .../internal/common/custom_tokenizer.go | 4 ++-- .../internal/common/http_client_config.go | 2 +- .../converter/internal/common/river_utils.go | 12 +++++------ .../internal/common/river_utils_test.go | 2 +- .../converter/internal/common/validate.go | 2 +- .../internal/otelcolconvert/converter.go | 2 +- .../converter_basicauthextension.go | 2 +- .../converter_bearertokenauthextension.go | 4 ++-- .../converter_headerssetterextension.go | 2 +- .../otelcolconvert/converter_helpers.go | 4 ++-- .../otelcolconvert/converter_kafkareceiver.go | 2 +- .../converter_oauth2clientauthextension.go | 2 +- .../otelcolconvert/converter_otlpreceiver.go | 2 +- .../internal/otelcolconvert/otelcolconvert.go | 2 +- .../internal/otelcolconvert/utils.go | 2 +- .../build/prometheus_blocks.go | 2 +- .../prometheusconvert/component/azure.go | 2 +- .../prometheusconvert/component/consul.go | 2 +- .../component/digitalocean.go | 2 +- .../prometheusconvert/component/ec2.go | 2 +- .../prometheusconvert/component/lightsail.go | 2 +- .../prometheusconvert/component/marathon.go | 2 +- .../prometheusconvert/component/openstack.go | 2 +- .../prometheusconvert/component/ovhcloud.go | 2 +- .../component/remote_write.go | 2 +- .../prometheusconvert/component/scaleway.go | 2 +- .../prometheusconvert/prometheusconvert.go | 2 +- .../internal/build/cloudflare.go | 2 +- .../internal/build/consul_agent.go | 2 +- .../promtailconvert/internal/build/kafka.go | 2 +- .../internal/build/loki_write.go | 2 +- .../internal/build/scrape_builder.go | 4 ++-- .../promtailconvert/promtailconvert.go | 2 +- .../internal/build/app_agent_receiver.go | 4 ++-- .../internal/build/blackbox_exporter.go | 2 +- .../staticconvert/internal/build/builder.go | 2 +- .../internal/build/builder_integrations.go | 2 +- 
.../internal/build/elasticsearch_exporter.go | 2 +- .../internal/build/eventhandler.go | 2 +- .../internal/build/github_exporter.go | 2 +- .../internal/build/kafka_exporter.go | 2 +- .../internal/build/mongodb_exporter.go | 2 +- .../internal/build/mssql_exporter.go | 2 +- .../internal/build/mysqld_exporter.go | 2 +- .../internal/build/oracledb_exporter.go | 2 +- .../internal/build/postgres_exporter.go | 2 +- .../internal/build/redis_exporter.go | 2 +- .../internal/build/snmp_exporter.go | 2 +- .../internal/build/snowflake_exporter.go | 2 +- .../internal/build/squid_exporter.go | 2 +- .../internal/staticconvert/staticconvert.go | 4 ++-- .../flow/internal/controller/block_node.go | 4 ++-- .../controller/component_node_manager.go | 2 +- .../controller/component_references.go | 6 +++--- .../controller/custom_component_registry.go | 2 +- internal/flow/internal/controller/loader.go | 4 ++-- .../flow/internal/controller/loader_test.go | 6 +++--- internal/flow/internal/controller/module.go | 2 +- .../controller/node_builtin_component.go | 4 ++-- .../flow/internal/controller/node_config.go | 4 ++-- .../controller/node_config_argument.go | 4 ++-- .../internal/controller/node_config_export.go | 4 ++-- .../internal/controller/node_config_import.go | 6 +++--- .../controller/node_config_logging.go | 4 ++-- .../controller/node_config_tracing.go | 4 ++-- .../controller/node_custom_component.go | 4 ++-- .../flow/internal/controller/node_declare.go | 4 ++-- .../flow/internal/controller/node_service.go | 4 ++-- .../internal/controller/scheduler_test.go | 4 ++-- .../flow/internal/controller/value_cache.go | 2 +- .../flow/internal/importsource/import_file.go | 2 +- .../flow/internal/importsource/import_git.go | 2 +- .../flow/internal/importsource/import_http.go | 2 +- .../internal/importsource/import_source.go | 2 +- .../internal/importsource/import_string.go | 4 ++-- .../testcomponents/module/file/file.go | 2 +- .../testcomponents/module/http/http.go | 2 +- .../testcomponents/module/string/string.go | 2 +- internal/flow/logging/options.go | 2 +- internal/flow/module.go | 4 ++-- internal/flow/source.go | 6 +++--- internal/flow/source_test.go | 4 ++-- internal/flowmode/cmd_convert.go | 2 +- internal/flowmode/cmd_fmt.go | 6 +++--- internal/flowmode/cmd_run.go | 2 +- internal/service/http/http_test.go | 2 +- internal/service/http/tls.go | 4 ++-- internal/service/remotecfg/remotecfg.go | 2 +- internal/service/remotecfg/remotecfg_test.go | 2 +- internal/vcs/auth.go | 2 +- 284 files changed, 351 insertions(+), 357 deletions(-) diff --git a/go.mod b/go.mod index b98da58626..5b2eb5eacf 100644 --- a/go.mod +++ b/go.mod @@ -62,13 +62,12 @@ require ( github.com/grafana/pyroscope/api v0.4.0 github.com/grafana/pyroscope/ebpf v0.4.3 github.com/grafana/regexp v0.0.0-20221123153739-15dc172cd2db - github.com/grafana/river v0.3.1-0.20240123144725-960753160cd1 github.com/grafana/snowflake-prometheus-exporter v0.0.0-20221213150626-862cad8e9538 github.com/grafana/tail v0.0.0-20230510142333-77b18831edf0 github.com/grafana/vmware_exporter v0.0.4-beta - github.com/grpc-ecosystem/go-grpc-middleware v1.4.0 + github.com/grpc-ecosystem/go-grpc-middleware v1.4.0 // indirect github.com/hashicorp/consul/api v1.25.1 - github.com/hashicorp/go-cleanhttp v0.5.2 + github.com/hashicorp/go-cleanhttp v0.5.2 // indirect github.com/hashicorp/go-discover v0.0.0-20230724184603-e89ebd1b2f65 github.com/hashicorp/go-multierror v1.1.1 github.com/hashicorp/golang-lru v1.0.2 @@ -89,10 +88,9 @@ require ( github.com/json-iterator/go v1.1.12 
github.com/klauspost/compress v1.17.4 github.com/lib/pq v1.10.9 - github.com/mackerelio/go-osstat v0.2.3 github.com/miekg/dns v1.1.56 github.com/mitchellh/mapstructure v1.5.1-0.20220423185008-bf980b35cac4 - github.com/mitchellh/reflectwalk v1.0.2 + github.com/mitchellh/reflectwalk v1.0.2 // indirect github.com/mwitkow/go-conntrack v0.0.0-20190716064945-2f068394615f github.com/ncabatoff/process-exporter v0.7.10 github.com/nerdswords/yet-another-cloudwatch-exporter v0.55.0 @@ -128,9 +126,9 @@ require ( github.com/open-telemetry/opentelemetry-collector-contrib/receiver/kafkareceiver v0.87.0 github.com/open-telemetry/opentelemetry-collector-contrib/receiver/opencensusreceiver v0.87.0 github.com/open-telemetry/opentelemetry-collector-contrib/receiver/zipkinreceiver v0.87.0 - github.com/opentracing-contrib/go-grpc v0.0.0-20210225150812-73cb765af46e + github.com/opentracing-contrib/go-grpc v0.0.0-20210225150812-73cb765af46e // indirect github.com/opentracing-contrib/go-stdlib v1.0.0 // indirect - github.com/opentracing/opentracing-go v1.2.0 + github.com/opentracing/opentracing-go v1.2.0 // indirect github.com/ory/dockertest/v3 v3.8.1 github.com/oschwald/geoip2-golang v1.9.0 github.com/percona/mongodb_exporter v0.39.1-0.20230706092307-28432707eb65 @@ -302,7 +300,7 @@ require ( github.com/beevik/ntp v1.3.0 // indirect github.com/beorn7/perks v1.0.1 // indirect github.com/blang/semver v3.5.2-0.20180723201105-3c1074078d32+incompatible // indirect - github.com/blang/semver/v4 v4.0.0 // indirect + github.com/blang/semver/v4 v4.0.0 github.com/boynux/squid-exporter v1.10.5-0.20230618153315-c1fae094e18e github.com/c2h5oh/datasize v0.0.0-20220606134207-859f65c6625b // indirect github.com/cenkalti/backoff/v3 v3.0.0 // indirect @@ -610,6 +608,7 @@ require ( connectrpc.com/connect v1.14.0 github.com/githubexporter/github-exporter v0.0.0-20231025122338-656e7dc33fe7 github.com/grafana/agent-remote-config v0.0.2 + github.com/grafana/alloy/syntax v0.0.0-00010101000000-000000000000 github.com/grafana/jfr-parser/pprof v0.0.0-20240126072739-986e71dc0361 github.com/grafana/jsonparser v0.0.0-20240209175146-098958973a2d github.com/natefinch/atomic v1.0.1 @@ -774,7 +773,4 @@ replace github.com/github/smimesign => github.com/grafana/smimesign v0.2.1-0.202 replace github.com/prometheus-community/stackdriver_exporter => github.com/grafana/stackdriver_exporter v0.0.0-20240228143257-3a2c9acef5a2 // Submodules. -// TODO(rfratto): Change all imports of github.com/grafana/river in favor of -// importing github.com/grafana/alloy/syntax and change module and package -// names to remove references of "river".
-replace github.com/grafana/river => ./syntax
+replace github.com/grafana/alloy/syntax => ./syntax
diff --git a/go.sum b/go.sum
index 5f14164a2b..c87f2262f8 100644
--- a/go.sum
+++ b/go.sum
@@ -1504,8 +1504,6 @@ github.com/lufia/plan9stats v0.0.0-20220913051719-115f729f3c8c h1:VtwQ41oftZwlMn
 github.com/lufia/plan9stats v0.0.0-20220913051719-115f729f3c8c/go.mod h1:JKx41uQRwqlTZabZc+kILPrO/3jlKnQ2Z8b7YiVw5cE=
 github.com/lyft/protoc-gen-validate v0.0.0-20180911180927-64fcb82c878e/go.mod h1:XbGvPuh87YZc5TdIa2/I4pLk0QoUACkjt2znoq26NVQ=
 github.com/lyft/protoc-gen-validate v0.0.13/go.mod h1:XbGvPuh87YZc5TdIa2/I4pLk0QoUACkjt2znoq26NVQ=
-github.com/mackerelio/go-osstat v0.2.3 h1:jAMXD5erlDE39kdX2CU7YwCGRcxIO33u/p8+Fhe5dJw=
-github.com/mackerelio/go-osstat v0.2.3/go.mod h1:DQbPOnsss9JHIXgBStc/dnhhir3gbd3YH+Dbdi7ptMA=
 github.com/magiconair/properties v1.8.7 h1:IeQXZAiQcpL9mgcAe1Nu6cX9LLw6ExEHKjN0VQdvPDY=
 github.com/magiconair/properties v1.8.7/go.mod h1:Dhd985XPs7jluiymwWYZ0G4Z61jb3vdS329zhj2hYo0=
 github.com/mailru/easyjson v0.0.0-20160728113105-d5b7844b561a/go.mod h1:C1wdFJiN94OJF2b5HbByQZoLdCWB1Yqtg26g4irojpc=
diff --git a/internal/cmd/rivereval/main.go b/internal/cmd/rivereval/main.go
index 8535f3ccb1..1a7fe8cc9c 100644
--- a/internal/cmd/rivereval/main.go
+++ b/internal/cmd/rivereval/main.go
@@ -6,9 +6,9 @@ import (
 	"fmt"
 	"os"
 
-	"github.com/grafana/river/parser"
-	"github.com/grafana/river/token/builder"
-	"github.com/grafana/river/vm"
+	"github.com/grafana/alloy/syntax/parser"
+	"github.com/grafana/alloy/syntax/token/builder"
+	"github.com/grafana/alloy/syntax/vm"
 )
 
 func main() {
diff --git a/internal/component/all/all_test.go b/internal/component/all/all_test.go
index bbb3d205f8..a6dbbf34f0 100644
--- a/internal/component/all/all_test.go
+++ b/internal/component/all/all_test.go
@@ -6,7 +6,7 @@ import (
 	"testing"
 
 	"github.com/grafana/agent/internal/component"
-	"github.com/grafana/river"
+	river "github.com/grafana/alloy/syntax"
 	"github.com/stretchr/testify/assert"
 	"github.com/stretchr/testify/require"
 )
diff --git a/internal/component/common/config/types.go b/internal/component/common/config/types.go
index 5ed0d2e8ae..416844b74d 100644
--- a/internal/component/common/config/types.go
+++ b/internal/component/common/config/types.go
@@ -7,7 +7,7 @@ import (
 	"net/url"
 	"strings"
 
-	"github.com/grafana/river/rivertypes"
+	rivertypes "github.com/grafana/alloy/syntax/alloytypes"
 	"github.com/prometheus/common/config"
 )
diff --git a/internal/component/common/config/types_test.go b/internal/component/common/config/types_test.go
index dbdbed6c28..a2f31f8439 100644
--- a/internal/component/common/config/types_test.go
+++ b/internal/component/common/config/types_test.go
@@ -3,7 +3,7 @@ package config
 import (
 	"testing"
 
-	"github.com/grafana/river"
+	river "github.com/grafana/alloy/syntax"
 	"github.com/stretchr/testify/require"
 )
diff --git a/internal/component/common/kubernetes/kubernetes_test.go b/internal/component/common/kubernetes/kubernetes_test.go
index 254fe9a9d7..596de1f698 100644
--- a/internal/component/common/kubernetes/kubernetes_test.go
+++ b/internal/component/common/kubernetes/kubernetes_test.go
@@ -3,7 +3,7 @@ package kubernetes
 import (
 	"testing"
 
-	"github.com/grafana/river"
+	river "github.com/grafana/alloy/syntax"
 	"github.com/stretchr/testify/require"
 )
diff --git a/internal/component/common/net/config_test.go b/internal/component/common/net/config_test.go
index 0eb8bf1082..7b79c02451 100644
--- a/internal/component/common/net/config_test.go
+++ b/internal/component/common/net/config_test.go
@@ -7,7 +7,7 @@ import (
 	dskit "github.com/grafana/dskit/server"
 	"github.com/stretchr/testify/require"
 
-	"github.com/grafana/river"
+	river "github.com/grafana/alloy/syntax"
 )
 
 // testArguments mimics an arguments type used by a component, applying the defaults to ServerConfig
diff --git a/internal/component/common/relabel/relabel_test.go b/internal/component/common/relabel/relabel_test.go
index 0a5d0c9879..2ae9672465 100644
--- a/internal/component/common/relabel/relabel_test.go
+++ b/internal/component/common/relabel/relabel_test.go
@@ -5,7 +5,7 @@ import (
 
 	"github.com/stretchr/testify/require"
 
-	"github.com/grafana/river"
+	river "github.com/grafana/alloy/syntax"
 )
 
 func TestParseConfig(t *testing.T) {
diff --git a/internal/component/component_provider.go b/internal/component/component_provider.go
index 82a60782df..a9f2d9f1d1 100644
--- a/internal/component/component_provider.go
+++ b/internal/component/component_provider.go
@@ -7,7 +7,7 @@ import (
 	"strings"
 	"time"
 
-	"github.com/grafana/river/encoding/riverjson"
+	"github.com/grafana/alloy/syntax/encoding/riverjson"
 )
 
 var (
diff --git a/internal/component/discovery/aws/ec2.go b/internal/component/discovery/aws/ec2.go
index 7f8deb3693..0a81afd082 100644
--- a/internal/component/discovery/aws/ec2.go
+++ b/internal/component/discovery/aws/ec2.go
@@ -11,7 +11,7 @@ import (
 	"github.com/grafana/agent/internal/component/common/config"
 	"github.com/grafana/agent/internal/component/discovery"
 	"github.com/grafana/agent/internal/featuregate"
-	"github.com/grafana/river/rivertypes"
+	rivertypes "github.com/grafana/alloy/syntax/alloytypes"
 	promcfg "github.com/prometheus/common/config"
 	"github.com/prometheus/common/model"
 	promaws "github.com/prometheus/prometheus/discovery/aws"
diff --git a/internal/component/discovery/aws/lightsail.go b/internal/component/discovery/aws/lightsail.go
index fa9c76a6ef..f04b6b0740 100644
--- a/internal/component/discovery/aws/lightsail.go
+++ b/internal/component/discovery/aws/lightsail.go
@@ -11,7 +11,7 @@ import (
 	"github.com/grafana/agent/internal/component/common/config"
 	"github.com/grafana/agent/internal/component/discovery"
 	"github.com/grafana/agent/internal/featuregate"
-	"github.com/grafana/river/rivertypes"
+	rivertypes "github.com/grafana/alloy/syntax/alloytypes"
 	promcfg "github.com/prometheus/common/config"
 	"github.com/prometheus/common/model"
 	promaws "github.com/prometheus/prometheus/discovery/aws"
diff --git a/internal/component/discovery/azure/azure.go b/internal/component/discovery/azure/azure.go
index 78a8a669e0..aaa6392f51 100644
--- a/internal/component/discovery/azure/azure.go
+++ b/internal/component/discovery/azure/azure.go
@@ -9,7 +9,7 @@ import (
 	"github.com/grafana/agent/internal/component/common/config"
 	"github.com/grafana/agent/internal/component/discovery"
 	"github.com/grafana/agent/internal/featuregate"
-	"github.com/grafana/river/rivertypes"
+	rivertypes "github.com/grafana/alloy/syntax/alloytypes"
 	common "github.com/prometheus/common/config"
 	"github.com/prometheus/common/model"
 	prom_discovery "github.com/prometheus/prometheus/discovery/azure"
diff --git a/internal/component/discovery/azure/azure_test.go b/internal/component/discovery/azure/azure_test.go
index df28d6765e..6bc8bc9afa 100644
--- a/internal/component/discovery/azure/azure_test.go
+++ b/internal/component/discovery/azure/azure_test.go
@@ -6,7 +6,7 @@ import (
 	"time"
 
 	"github.com/grafana/agent/internal/component/common/config"
-	"github.com/grafana/river"
+	river "github.com/grafana/alloy/syntax"
 	"github.com/prometheus/common/model"
"github.com/stretchr/testify/require" "gotest.tools/assert" diff --git a/internal/component/discovery/consul/consul.go b/internal/component/discovery/consul/consul.go index a7fe7b95ec..095e99173c 100644 --- a/internal/component/discovery/consul/consul.go +++ b/internal/component/discovery/consul/consul.go @@ -8,7 +8,7 @@ import ( "github.com/grafana/agent/internal/component/common/config" "github.com/grafana/agent/internal/component/discovery" "github.com/grafana/agent/internal/featuregate" - "github.com/grafana/river/rivertypes" + rivertypes "github.com/grafana/alloy/syntax/alloytypes" config_util "github.com/prometheus/common/config" "github.com/prometheus/common/model" prom_discovery "github.com/prometheus/prometheus/discovery/consul" diff --git a/internal/component/discovery/consul/consul_test.go b/internal/component/discovery/consul/consul_test.go index 98e72666cc..a265c6d51e 100644 --- a/internal/component/discovery/consul/consul_test.go +++ b/internal/component/discovery/consul/consul_test.go @@ -3,7 +3,7 @@ package consul import ( "testing" - "github.com/grafana/river" + river "github.com/grafana/alloy/syntax" "github.com/stretchr/testify/require" ) diff --git a/internal/component/discovery/consulagent/consulagent.go b/internal/component/discovery/consulagent/consulagent.go index 2610fbcc9c..61453edaac 100644 --- a/internal/component/discovery/consulagent/consulagent.go +++ b/internal/component/discovery/consulagent/consulagent.go @@ -8,7 +8,7 @@ import ( "github.com/grafana/agent/internal/component/common/config" "github.com/grafana/agent/internal/component/discovery" "github.com/grafana/agent/internal/featuregate" - "github.com/grafana/river/rivertypes" + rivertypes "github.com/grafana/alloy/syntax/alloytypes" promcfg "github.com/prometheus/common/config" "github.com/prometheus/common/model" ) diff --git a/internal/component/discovery/consulagent/consulagent_test.go b/internal/component/discovery/consulagent/consulagent_test.go index 40b4a265fe..4648059ec3 100644 --- a/internal/component/discovery/consulagent/consulagent_test.go +++ b/internal/component/discovery/consulagent/consulagent_test.go @@ -4,7 +4,7 @@ import ( "testing" "time" - "github.com/grafana/river" + river "github.com/grafana/alloy/syntax" promcfg "github.com/prometheus/common/config" "github.com/prometheus/common/model" "github.com/stretchr/testify/assert" diff --git a/internal/component/discovery/digitalocean/digitalocean.go b/internal/component/discovery/digitalocean/digitalocean.go index 1cae77f9be..a7f32f80f2 100644 --- a/internal/component/discovery/digitalocean/digitalocean.go +++ b/internal/component/discovery/digitalocean/digitalocean.go @@ -8,7 +8,7 @@ import ( "github.com/grafana/agent/internal/component/common/config" "github.com/grafana/agent/internal/component/discovery" "github.com/grafana/agent/internal/featuregate" - "github.com/grafana/river/rivertypes" + rivertypes "github.com/grafana/alloy/syntax/alloytypes" "github.com/prometheus/common/model" prom_discovery "github.com/prometheus/prometheus/discovery/digitalocean" ) diff --git a/internal/component/discovery/digitalocean/digitalocean_test.go b/internal/component/discovery/digitalocean/digitalocean_test.go index 1988858333..8d3bff8daa 100644 --- a/internal/component/discovery/digitalocean/digitalocean_test.go +++ b/internal/component/discovery/digitalocean/digitalocean_test.go @@ -6,7 +6,7 @@ import ( "time" "github.com/grafana/agent/internal/component/common/config" - "github.com/grafana/river" + river "github.com/grafana/alloy/syntax" 
prom_common_config "github.com/prometheus/common/config" "github.com/prometheus/common/model" "github.com/stretchr/testify/require" diff --git a/internal/component/discovery/dns/dns_test.go b/internal/component/discovery/dns/dns_test.go index 9ee3b1f9d6..7b9264ebdf 100644 --- a/internal/component/discovery/dns/dns_test.go +++ b/internal/component/discovery/dns/dns_test.go @@ -5,7 +5,7 @@ import ( "testing" "time" - "github.com/grafana/river" + river "github.com/grafana/alloy/syntax" "github.com/prometheus/common/model" "github.com/stretchr/testify/require" "gotest.tools/assert" diff --git a/internal/component/discovery/docker/docker_test.go b/internal/component/discovery/docker/docker_test.go index f86903080f..542844ecb6 100644 --- a/internal/component/discovery/docker/docker_test.go +++ b/internal/component/discovery/docker/docker_test.go @@ -3,7 +3,7 @@ package docker import ( "testing" - "github.com/grafana/river" + river "github.com/grafana/alloy/syntax" "github.com/stretchr/testify/require" ) diff --git a/internal/component/discovery/dockerswarm/dockerswarm_test.go b/internal/component/discovery/dockerswarm/dockerswarm_test.go index d15b6a94f3..7848107767 100644 --- a/internal/component/discovery/dockerswarm/dockerswarm_test.go +++ b/internal/component/discovery/dockerswarm/dockerswarm_test.go @@ -5,8 +5,8 @@ import ( "time" "github.com/grafana/agent/internal/component/common/config" - "github.com/grafana/river" - "github.com/grafana/river/rivertypes" + river "github.com/grafana/alloy/syntax" + rivertypes "github.com/grafana/alloy/syntax/alloytypes" promConfig "github.com/prometheus/common/config" "github.com/prometheus/common/model" "github.com/stretchr/testify/require" diff --git a/internal/component/discovery/eureka/eureka_test.go b/internal/component/discovery/eureka/eureka_test.go index ebe6c516b3..5492c690ba 100644 --- a/internal/component/discovery/eureka/eureka_test.go +++ b/internal/component/discovery/eureka/eureka_test.go @@ -5,8 +5,8 @@ import ( "time" "github.com/grafana/agent/internal/component/common/config" - "github.com/grafana/river" - "github.com/grafana/river/rivertypes" + river "github.com/grafana/alloy/syntax" + rivertypes "github.com/grafana/alloy/syntax/alloytypes" promcfg "github.com/prometheus/common/config" "github.com/prometheus/common/model" prom_sd "github.com/prometheus/prometheus/discovery/eureka" diff --git a/internal/component/discovery/file/file_test.go b/internal/component/discovery/file/file_test.go index 7cdfa20340..9594936603 100644 --- a/internal/component/discovery/file/file_test.go +++ b/internal/component/discovery/file/file_test.go @@ -4,7 +4,7 @@ import ( "testing" "time" - "github.com/grafana/river" + river "github.com/grafana/alloy/syntax" "github.com/prometheus/common/model" "github.com/stretchr/testify/require" ) diff --git a/internal/component/discovery/gce/gce_test.go b/internal/component/discovery/gce/gce_test.go index 27b4ceab40..018dbb6af7 100644 --- a/internal/component/discovery/gce/gce_test.go +++ b/internal/component/discovery/gce/gce_test.go @@ -4,7 +4,7 @@ import ( "testing" "time" - "github.com/grafana/river" + river "github.com/grafana/alloy/syntax" "github.com/stretchr/testify/require" ) diff --git a/internal/component/discovery/hetzner/hetzner_test.go b/internal/component/discovery/hetzner/hetzner_test.go index da1f3e1f8c..e4087ea22b 100644 --- a/internal/component/discovery/hetzner/hetzner_test.go +++ b/internal/component/discovery/hetzner/hetzner_test.go @@ -4,7 +4,7 @@ import ( "testing" "time" - 
"github.com/grafana/river" + river "github.com/grafana/alloy/syntax" "github.com/prometheus/common/model" "github.com/stretchr/testify/require" "gotest.tools/assert" diff --git a/internal/component/discovery/http/http_test.go b/internal/component/discovery/http/http_test.go index 2827f495d1..6c371e6897 100644 --- a/internal/component/discovery/http/http_test.go +++ b/internal/component/discovery/http/http_test.go @@ -12,7 +12,7 @@ import ( "github.com/grafana/agent/internal/component" "github.com/grafana/agent/internal/component/common/config" "github.com/grafana/agent/internal/component/discovery" - "github.com/grafana/river" + river "github.com/grafana/alloy/syntax" "github.com/prometheus/common/model" "github.com/stretchr/testify/require" "go.uber.org/atomic" diff --git a/internal/component/discovery/ionos/ionos_test.go b/internal/component/discovery/ionos/ionos_test.go index ab3a3c2726..d5a90b4eb6 100644 --- a/internal/component/discovery/ionos/ionos_test.go +++ b/internal/component/discovery/ionos/ionos_test.go @@ -5,8 +5,8 @@ import ( "time" "github.com/grafana/agent/internal/component/common/config" - "github.com/grafana/river" - "github.com/grafana/river/rivertypes" + river "github.com/grafana/alloy/syntax" + rivertypes "github.com/grafana/alloy/syntax/alloytypes" promConfig "github.com/prometheus/common/config" "github.com/prometheus/common/model" "github.com/stretchr/testify/require" diff --git a/internal/component/discovery/kubelet/kubelet_test.go b/internal/component/discovery/kubelet/kubelet_test.go index 89292f06be..a117a6b962 100644 --- a/internal/component/discovery/kubelet/kubelet_test.go +++ b/internal/component/discovery/kubelet/kubelet_test.go @@ -9,7 +9,7 @@ import ( metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "github.com/grafana/agent/internal/component/common/config" - "github.com/grafana/river" + river "github.com/grafana/alloy/syntax" "github.com/stretchr/testify/require" ) diff --git a/internal/component/discovery/kubernetes/kubernetes_test.go b/internal/component/discovery/kubernetes/kubernetes_test.go index ad6308f7c6..7d70e5a16f 100644 --- a/internal/component/discovery/kubernetes/kubernetes_test.go +++ b/internal/component/discovery/kubernetes/kubernetes_test.go @@ -3,7 +3,7 @@ package kubernetes import ( "testing" - "github.com/grafana/river" + river "github.com/grafana/alloy/syntax" "github.com/stretchr/testify/require" ) diff --git a/internal/component/discovery/kuma/kuma_test.go b/internal/component/discovery/kuma/kuma_test.go index c1c89a66a9..fed1de55ca 100644 --- a/internal/component/discovery/kuma/kuma_test.go +++ b/internal/component/discovery/kuma/kuma_test.go @@ -5,7 +5,7 @@ import ( "time" "github.com/grafana/agent/internal/component/common/config" - "github.com/grafana/river" + river "github.com/grafana/alloy/syntax" promConfig "github.com/prometheus/common/config" "github.com/prometheus/common/model" "github.com/stretchr/testify/require" diff --git a/internal/component/discovery/linode/linode_test.go b/internal/component/discovery/linode/linode_test.go index 3f26becc68..fc7daf7852 100644 --- a/internal/component/discovery/linode/linode_test.go +++ b/internal/component/discovery/linode/linode_test.go @@ -5,7 +5,7 @@ import ( "time" "github.com/grafana/agent/internal/component/common/config" - "github.com/grafana/river" + river "github.com/grafana/alloy/syntax" promconfig "github.com/prometheus/common/config" "github.com/prometheus/common/model" "github.com/stretchr/testify/require" diff --git a/internal/component/discovery/marathon/marathon.go 
b/internal/component/discovery/marathon/marathon.go index c948732925..7b842ccfc9 100644 --- a/internal/component/discovery/marathon/marathon.go +++ b/internal/component/discovery/marathon/marathon.go @@ -8,7 +8,7 @@ import ( "github.com/grafana/agent/internal/component/common/config" "github.com/grafana/agent/internal/component/discovery" "github.com/grafana/agent/internal/featuregate" - "github.com/grafana/river/rivertypes" + rivertypes "github.com/grafana/alloy/syntax/alloytypes" promcfg "github.com/prometheus/common/config" "github.com/prometheus/common/model" prom_discovery "github.com/prometheus/prometheus/discovery/marathon" diff --git a/internal/component/discovery/marathon/marathon_test.go b/internal/component/discovery/marathon/marathon_test.go index 3108e3171d..bfde1faad7 100644 --- a/internal/component/discovery/marathon/marathon_test.go +++ b/internal/component/discovery/marathon/marathon_test.go @@ -5,8 +5,8 @@ import ( "time" "github.com/grafana/agent/internal/component/common/config" - "github.com/grafana/river" - "github.com/grafana/river/rivertypes" + river "github.com/grafana/alloy/syntax" + rivertypes "github.com/grafana/alloy/syntax/alloytypes" promConfig "github.com/prometheus/common/config" "github.com/prometheus/common/model" "github.com/stretchr/testify/require" diff --git a/internal/component/discovery/nerve/nerve_test.go b/internal/component/discovery/nerve/nerve_test.go index 8c202c8905..25ca6c235f 100644 --- a/internal/component/discovery/nerve/nerve_test.go +++ b/internal/component/discovery/nerve/nerve_test.go @@ -3,7 +3,7 @@ package nerve import ( "testing" - "github.com/grafana/river" + river "github.com/grafana/alloy/syntax" "github.com/stretchr/testify/require" ) diff --git a/internal/component/discovery/nomad/nomad_test.go b/internal/component/discovery/nomad/nomad_test.go index 587a47c55c..880ac7801f 100644 --- a/internal/component/discovery/nomad/nomad_test.go +++ b/internal/component/discovery/nomad/nomad_test.go @@ -4,7 +4,7 @@ import ( "testing" "time" - "github.com/grafana/river" + river "github.com/grafana/alloy/syntax" "github.com/prometheus/common/model" "github.com/stretchr/testify/require" "gotest.tools/assert" diff --git a/internal/component/discovery/openstack/openstack.go b/internal/component/discovery/openstack/openstack.go index 885c151d83..b220c90b06 100644 --- a/internal/component/discovery/openstack/openstack.go +++ b/internal/component/discovery/openstack/openstack.go @@ -8,7 +8,7 @@ import ( "github.com/grafana/agent/internal/component/common/config" "github.com/grafana/agent/internal/component/discovery" "github.com/grafana/agent/internal/featuregate" - "github.com/grafana/river/rivertypes" + rivertypes "github.com/grafana/alloy/syntax/alloytypes" config_util "github.com/prometheus/common/config" "github.com/prometheus/common/model" prom_discovery "github.com/prometheus/prometheus/discovery/openstack" diff --git a/internal/component/discovery/openstack/openstack_test.go b/internal/component/discovery/openstack/openstack_test.go index 623c7b5be4..9b42836d46 100644 --- a/internal/component/discovery/openstack/openstack_test.go +++ b/internal/component/discovery/openstack/openstack_test.go @@ -5,7 +5,7 @@ import ( "time" "github.com/grafana/agent/internal/component/common/config" - "github.com/grafana/river" + river "github.com/grafana/alloy/syntax" promcfg "github.com/prometheus/common/config" "github.com/prometheus/common/model" "github.com/prometheus/prometheus/discovery/openstack" diff --git 
a/internal/component/discovery/ovhcloud/ovhcloud.go b/internal/component/discovery/ovhcloud/ovhcloud.go index ee5abd9902..2a3605d327 100644 --- a/internal/component/discovery/ovhcloud/ovhcloud.go +++ b/internal/component/discovery/ovhcloud/ovhcloud.go @@ -7,7 +7,7 @@ import ( "github.com/grafana/agent/internal/component" "github.com/grafana/agent/internal/component/discovery" "github.com/grafana/agent/internal/featuregate" - "github.com/grafana/river/rivertypes" + rivertypes "github.com/grafana/alloy/syntax/alloytypes" "github.com/prometheus/common/config" "github.com/prometheus/common/model" prom_discovery "github.com/prometheus/prometheus/discovery/ovhcloud" diff --git a/internal/component/discovery/ovhcloud/ovhcloud_test.go b/internal/component/discovery/ovhcloud/ovhcloud_test.go index e6fc4a1f51..3d644bb6eb 100644 --- a/internal/component/discovery/ovhcloud/ovhcloud_test.go +++ b/internal/component/discovery/ovhcloud/ovhcloud_test.go @@ -5,7 +5,7 @@ import ( "time" "github.com/grafana/agent/internal/component/discovery/ovhcloud" - "github.com/grafana/river" + river "github.com/grafana/alloy/syntax" "github.com/prometheus/common/model" prom_ovh "github.com/prometheus/prometheus/discovery/ovhcloud" "github.com/stretchr/testify/require" diff --git a/internal/component/discovery/puppetdb/puppetdb_test.go b/internal/component/discovery/puppetdb/puppetdb_test.go index f63efe5a06..8151dea5e2 100644 --- a/internal/component/discovery/puppetdb/puppetdb_test.go +++ b/internal/component/discovery/puppetdb/puppetdb_test.go @@ -4,7 +4,7 @@ import ( "testing" "time" - "github.com/grafana/river" + river "github.com/grafana/alloy/syntax" "github.com/prometheus/common/model" "github.com/stretchr/testify/require" "gotest.tools/assert" diff --git a/internal/component/discovery/relabel/relabel_test.go b/internal/component/discovery/relabel/relabel_test.go index a2478d1175..ace24bd895 100644 --- a/internal/component/discovery/relabel/relabel_test.go +++ b/internal/component/discovery/relabel/relabel_test.go @@ -8,7 +8,7 @@ import ( "github.com/grafana/agent/internal/component/discovery" "github.com/grafana/agent/internal/component/discovery/relabel" "github.com/grafana/agent/internal/flow/componenttest" - "github.com/grafana/river" + river "github.com/grafana/alloy/syntax" "github.com/stretchr/testify/require" ) diff --git a/internal/component/discovery/scaleway/scaleway.go b/internal/component/discovery/scaleway/scaleway.go index 2a20c71f85..303d7aeaf6 100644 --- a/internal/component/discovery/scaleway/scaleway.go +++ b/internal/component/discovery/scaleway/scaleway.go @@ -10,7 +10,7 @@ import ( "github.com/grafana/agent/internal/component/common/config" "github.com/grafana/agent/internal/component/discovery" "github.com/grafana/agent/internal/featuregate" - "github.com/grafana/river/rivertypes" + rivertypes "github.com/grafana/alloy/syntax/alloytypes" prom_config "github.com/prometheus/common/config" "github.com/prometheus/common/model" prom_discovery "github.com/prometheus/prometheus/discovery/scaleway" diff --git a/internal/component/discovery/scaleway/scaleway_test.go b/internal/component/discovery/scaleway/scaleway_test.go index b96955f715..85683dd50c 100644 --- a/internal/component/discovery/scaleway/scaleway_test.go +++ b/internal/component/discovery/scaleway/scaleway_test.go @@ -3,7 +3,7 @@ package scaleway import ( "testing" - "github.com/grafana/river" + river "github.com/grafana/alloy/syntax" "github.com/stretchr/testify/require" ) diff --git 
a/internal/component/discovery/serverset/serverset_test.go b/internal/component/discovery/serverset/serverset_test.go index 1f1f7a8c6e..9d7722a4f8 100644 --- a/internal/component/discovery/serverset/serverset_test.go +++ b/internal/component/discovery/serverset/serverset_test.go @@ -4,7 +4,7 @@ import ( "testing" "time" - "github.com/grafana/river" + river "github.com/grafana/alloy/syntax" "github.com/stretchr/testify/require" ) diff --git a/internal/component/discovery/triton/triton_test.go b/internal/component/discovery/triton/triton_test.go index 074ab0c214..6acff0c3b8 100644 --- a/internal/component/discovery/triton/triton_test.go +++ b/internal/component/discovery/triton/triton_test.go @@ -3,7 +3,7 @@ package triton import ( "testing" - "github.com/grafana/river" + river "github.com/grafana/alloy/syntax" "github.com/stretchr/testify/require" ) diff --git a/internal/component/discovery/uyuni/uyuni.go b/internal/component/discovery/uyuni/uyuni.go index 4d67ead59d..8aaf376fbe 100644 --- a/internal/component/discovery/uyuni/uyuni.go +++ b/internal/component/discovery/uyuni/uyuni.go @@ -9,7 +9,7 @@ import ( "github.com/grafana/agent/internal/component/common/config" "github.com/grafana/agent/internal/component/discovery" "github.com/grafana/agent/internal/featuregate" - "github.com/grafana/river/rivertypes" + rivertypes "github.com/grafana/alloy/syntax/alloytypes" promcfg "github.com/prometheus/common/config" "github.com/prometheus/common/model" prom_discovery "github.com/prometheus/prometheus/discovery/uyuni" diff --git a/internal/component/discovery/uyuni/uyuni_test.go b/internal/component/discovery/uyuni/uyuni_test.go index a66e1799a2..45d3e93772 100644 --- a/internal/component/discovery/uyuni/uyuni_test.go +++ b/internal/component/discovery/uyuni/uyuni_test.go @@ -5,7 +5,7 @@ import ( "time" "github.com/grafana/agent/internal/component/common/config" - "github.com/grafana/river" + river "github.com/grafana/alloy/syntax" promcfg "github.com/prometheus/common/config" "github.com/prometheus/common/model" "github.com/stretchr/testify/require" diff --git a/internal/component/faro/receiver/arguments.go b/internal/component/faro/receiver/arguments.go index 0169f0e80e..405c26c40d 100644 --- a/internal/component/faro/receiver/arguments.go +++ b/internal/component/faro/receiver/arguments.go @@ -6,8 +6,8 @@ import ( "github.com/alecthomas/units" "github.com/grafana/agent/internal/component/common/loki" "github.com/grafana/agent/internal/component/otelcol" - "github.com/grafana/river" - "github.com/grafana/river/rivertypes" + river "github.com/grafana/alloy/syntax" + rivertypes "github.com/grafana/alloy/syntax/alloytypes" ) // Arguments configures the app_agent_receiver component. 
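The hunk above shows the whole mechanism of this patch in miniature: github.com/grafana/alloy/syntax is imported under the alias river, and alloytypes under the alias rivertypes, so every existing identifier, struct tag, and call site keeps compiling with no further changes. Below is a minimal sketch of the pattern; the Config struct, its field names, and the input text are hypothetical and illustrative, not taken from this patch, and it assumes that at this point in the series the river struct tags and the Secret type still behave exactly as they did in github.com/grafana/river.

package main

import (
	"fmt"

	river "github.com/grafana/alloy/syntax"
	rivertypes "github.com/grafana/alloy/syntax/alloytypes"
)

// Config is a hypothetical arguments struct. Only the import paths
// changed; the aliases keep the river.* and rivertypes.* names valid.
type Config struct {
	Endpoint string            `river:"endpoint,attr"`
	APIKey   rivertypes.Secret `river:"api_key,attr,optional"`
}

func main() {
	src := []byte(`endpoint = "http://localhost:12345"
api_key  = "example-key"`)

	var cfg Config
	// river.Unmarshal resolves to syntax.Unmarshal through the alias.
	if err := river.Unmarshal(src, &cfg); err != nil {
		panic(err)
	}
	fmt.Println(cfg.Endpoint)
}

Because the alias preserves the package's public name at every call site, the 284-file change stays mechanical: only import blocks are touched, which is why nearly every hunk in this patch is a one- or two-line rewrite.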
diff --git a/internal/component/local/file/file.go b/internal/component/local/file/file.go
index 0655eea7c2..9533549198 100644
--- a/internal/component/local/file/file.go
+++ b/internal/component/local/file/file.go
@@ -14,7 +14,7 @@ import (
 	"github.com/grafana/agent/internal/featuregate"
 	filedetector "github.com/grafana/agent/internal/filedetector"
 	"github.com/grafana/agent/internal/flow/logging/level"
-	"github.com/grafana/river/rivertypes"
+	rivertypes "github.com/grafana/alloy/syntax/alloytypes"
 )
 
 // waitReadPeriod holds the time to wait before reading a file while the
diff --git a/internal/component/local/file/file_test.go b/internal/component/local/file/file_test.go
index 304fb82937..9eeff6e01b 100644
--- a/internal/component/local/file/file_test.go
+++ b/internal/component/local/file/file_test.go
@@ -11,7 +11,7 @@ import (
 	"github.com/grafana/agent/internal/component/local/file"
 	filedetector "github.com/grafana/agent/internal/filedetector"
 	"github.com/grafana/agent/internal/flow/componenttest"
-	"github.com/grafana/river/rivertypes"
+	rivertypes "github.com/grafana/alloy/syntax/alloytypes"
 	"github.com/stretchr/testify/require"
 )
diff --git a/internal/component/loki/process/process_test.go b/internal/component/loki/process/process_test.go
index 8a423c02af..bdefb8e7fe 100644
--- a/internal/component/loki/process/process_test.go
+++ b/internal/component/loki/process/process_test.go
@@ -15,8 +15,8 @@ import (
 	lsf "github.com/grafana/agent/internal/component/loki/source/file"
 	"github.com/grafana/agent/internal/flow/componenttest"
 	"github.com/grafana/agent/internal/util"
+	river "github.com/grafana/alloy/syntax"
 	"github.com/grafana/loki/pkg/logproto"
-	"github.com/grafana/river"
 	"github.com/prometheus/client_golang/prometheus"
 	"github.com/prometheus/common/model"
 	"github.com/stretchr/testify/require"
diff --git a/internal/component/loki/process/stages/eventlogmessage_test.go b/internal/component/loki/process/stages/eventlogmessage_test.go
index 8e6edba862..4ba93e6f01 100644
--- a/internal/component/loki/process/stages/eventlogmessage_test.go
+++ b/internal/component/loki/process/stages/eventlogmessage_test.go
@@ -8,8 +8,8 @@ import (
 	"testing"
 	"time"
 
+	river "github.com/grafana/alloy/syntax"
 	util_log "github.com/grafana/loki/pkg/util/log"
-	"github.com/grafana/river"
 	"github.com/prometheus/client_golang/prometheus"
 	"github.com/stretchr/testify/assert"
 	"github.com/stretchr/testify/require"
diff --git a/internal/component/loki/process/stages/extensions.go b/internal/component/loki/process/stages/extensions.go
index 3abbfd6243..cc802be61d 100644
--- a/internal/component/loki/process/stages/extensions.go
+++ b/internal/component/loki/process/stages/extensions.go
@@ -6,7 +6,7 @@ import (
 	"github.com/go-kit/log"
 	"github.com/grafana/agent/internal/flow/logging/level"
-	"github.com/grafana/river"
+	river "github.com/grafana/alloy/syntax"
 	"github.com/prometheus/client_golang/prometheus"
 	"github.com/prometheus/common/model"
 )
diff --git a/internal/component/loki/process/stages/json_test.go b/internal/component/loki/process/stages/json_test.go
index 982c70554c..c60db76157 100644
--- a/internal/component/loki/process/stages/json_test.go
+++ b/internal/component/loki/process/stages/json_test.go
@@ -11,7 +11,7 @@ import (
 	"github.com/stretchr/testify/assert"
 
 	"github.com/grafana/agent/internal/util"
-	"github.com/grafana/river"
+	river "github.com/grafana/alloy/syntax"
 )
 
 var testJSONRiverSingleStageWithoutSource = `
diff --git a/internal/component/loki/process/stages/pipeline_test.go b/internal/component/loki/process/stages/pipeline_test.go
index 52988ddeda..025e7826d0 100644
--- a/internal/component/loki/process/stages/pipeline_test.go
+++ b/internal/component/loki/process/stages/pipeline_test.go
@@ -11,9 +11,9 @@ import (
 	"github.com/go-kit/log"
 	"github.com/grafana/agent/internal/component/common/loki"
 	"github.com/grafana/agent/internal/flow/logging/level"
+	river "github.com/grafana/alloy/syntax"
 	"github.com/grafana/loki/pkg/logproto"
 	util_log "github.com/grafana/loki/pkg/util/log"
-	"github.com/grafana/river"
 	"github.com/prometheus/client_golang/prometheus"
 	"github.com/prometheus/common/model"
 	"github.com/stretchr/testify/assert"
diff --git a/internal/component/loki/relabel/relabel_test.go b/internal/component/loki/relabel/relabel_test.go
index e562bd03d8..73cc06f640 100644
--- a/internal/component/loki/relabel/relabel_test.go
+++ b/internal/component/loki/relabel/relabel_test.go
@@ -14,8 +14,8 @@ import (
 	lsf "github.com/grafana/agent/internal/component/loki/source/file"
 	"github.com/grafana/agent/internal/flow/componenttest"
 	"github.com/grafana/agent/internal/util"
+	river "github.com/grafana/alloy/syntax"
 	"github.com/grafana/loki/pkg/logproto"
-	"github.com/grafana/river"
 	"github.com/prometheus/client_golang/prometheus"
 	"github.com/prometheus/common/model"
 	"github.com/prometheus/prometheus/model/relabel"
diff --git a/internal/component/loki/rules/kubernetes/rules_test.go b/internal/component/loki/rules/kubernetes/rules_test.go
index 74ccd4cbeb..5339a22253 100644
--- a/internal/component/loki/rules/kubernetes/rules_test.go
+++ b/internal/component/loki/rules/kubernetes/rules_test.go
@@ -3,7 +3,7 @@ package rules
 import (
 	"testing"
 
-	"github.com/grafana/river"
+	river "github.com/grafana/alloy/syntax"
 	"github.com/stretchr/testify/require"
 )
diff --git a/internal/component/loki/source/api/internal/lokipush/push_api_server_test.go b/internal/component/loki/source/api/internal/lokipush/push_api_server_test.go
index de68ffaf64..0fc61f42f4 100644
--- a/internal/component/loki/source/api/internal/lokipush/push_api_server_test.go
+++ b/internal/component/loki/source/api/internal/lokipush/push_api_server_test.go
@@ -21,10 +21,10 @@ import (
 	"github.com/grafana/agent/internal/component/common/loki/client/fake"
 	fnet "github.com/grafana/agent/internal/component/common/net"
 	frelabel "github.com/grafana/agent/internal/component/common/relabel"
+	river "github.com/grafana/alloy/syntax"
 	"github.com/grafana/dskit/flagext"
 	"github.com/grafana/loki/pkg/logproto"
 	"github.com/grafana/loki/pkg/push"
-	"github.com/grafana/river"
 	"github.com/phayes/freeport"
 	"github.com/prometheus/client_golang/prometheus"
 	"github.com/prometheus/common/model"
diff --git a/internal/component/loki/source/aws_firehose/component.go b/internal/component/loki/source/aws_firehose/component.go
index f4ec7e31f1..a0215f7bef 100644
--- a/internal/component/loki/source/aws_firehose/component.go
+++ b/internal/component/loki/source/aws_firehose/component.go
@@ -18,7 +18,7 @@ import (
 	flow_relabel "github.com/grafana/agent/internal/component/common/relabel"
 	"github.com/grafana/agent/internal/component/loki/source/aws_firehose/internal"
 	"github.com/grafana/agent/internal/util"
-	"github.com/grafana/river/rivertypes"
+	rivertypes "github.com/grafana/alloy/syntax/alloytypes"
 )
 
 func init() {
diff --git a/internal/component/loki/source/azure_event_hubs/azure_event_hubs_test.go b/internal/component/loki/source/azure_event_hubs/azure_event_hubs_test.go
index 68ffb3a95a..69393115a7 100644
--- a/internal/component/loki/source/azure_event_hubs/azure_event_hubs_test.go
+++ b/internal/component/loki/source/azure_event_hubs/azure_event_hubs_test.go
@@ -3,7 +3,7 @@ package azure_event_hubs
 import (
 	"testing"
 
-	"github.com/grafana/river"
+	river "github.com/grafana/alloy/syntax"
 	"github.com/stretchr/testify/require"
 )
diff --git a/internal/component/loki/source/cloudflare/cloudflare.go b/internal/component/loki/source/cloudflare/cloudflare.go
index 937c650106..fa14de31fa 100644
--- a/internal/component/loki/source/cloudflare/cloudflare.go
+++ b/internal/component/loki/source/cloudflare/cloudflare.go
@@ -19,7 +19,7 @@ import (
 	cft "github.com/grafana/agent/internal/component/loki/source/cloudflare/internal/cloudflaretarget"
 	"github.com/grafana/agent/internal/featuregate"
 	"github.com/grafana/agent/internal/flow/logging/level"
-	"github.com/grafana/river/rivertypes"
+	rivertypes "github.com/grafana/alloy/syntax/alloytypes"
 	"github.com/prometheus/common/model"
 )
diff --git a/internal/component/loki/source/docker/docker_test.go b/internal/component/loki/source/docker/docker_test.go
index 722a720772..7457884db3 100644
--- a/internal/component/loki/source/docker/docker_test.go
+++ b/internal/component/loki/source/docker/docker_test.go
@@ -10,7 +10,7 @@ import (
 	"github.com/grafana/agent/internal/component"
 	"github.com/grafana/agent/internal/flow/componenttest"
 	"github.com/grafana/agent/internal/util"
-	"github.com/grafana/river"
+	river "github.com/grafana/alloy/syntax"
 	"github.com/prometheus/client_golang/prometheus"
 	"github.com/stretchr/testify/require"
 )
diff --git a/internal/component/loki/source/kafka/kafka.go b/internal/component/loki/source/kafka/kafka.go
index 067eb87142..10265c6f29 100644
--- a/internal/component/loki/source/kafka/kafka.go
+++ b/internal/component/loki/source/kafka/kafka.go
@@ -12,8 +12,8 @@ import (
 	kt "github.com/grafana/agent/internal/component/loki/source/internal/kafkatarget"
 	"github.com/grafana/agent/internal/featuregate"
 	"github.com/grafana/agent/internal/flow/logging/level"
+	rivertypes "github.com/grafana/alloy/syntax/alloytypes"
 	"github.com/grafana/dskit/flagext"
-	"github.com/grafana/river/rivertypes"
 	"github.com/prometheus/common/model"
 )
diff --git a/internal/component/loki/source/kafka/kafka_test.go b/internal/component/loki/source/kafka/kafka_test.go
index c97d05ffcb..db7386ebf8 100644
--- a/internal/component/loki/source/kafka/kafka_test.go
+++ b/internal/component/loki/source/kafka/kafka_test.go
@@ -3,7 +3,7 @@ package kafka
 import (
 	"testing"
 
-	"github.com/grafana/river"
+	river "github.com/grafana/alloy/syntax"
 	"github.com/stretchr/testify/require"
 )
diff --git a/internal/component/loki/source/kubernetes/kubernetes_test.go b/internal/component/loki/source/kubernetes/kubernetes_test.go
index ad1fc5872e..6896a26b3d 100644
--- a/internal/component/loki/source/kubernetes/kubernetes_test.go
+++ b/internal/component/loki/source/kubernetes/kubernetes_test.go
@@ -3,7 +3,7 @@ package kubernetes
 import (
 	"testing"
 
-	"github.com/grafana/river"
+	river "github.com/grafana/alloy/syntax"
 	"github.com/stretchr/testify/require"
 )
diff --git a/internal/component/loki/source/podlogs/podlogs_test.go b/internal/component/loki/source/podlogs/podlogs_test.go
index ffb0e2fa1d..0b616fb429 100644
--- a/internal/component/loki/source/podlogs/podlogs_test.go
+++ b/internal/component/loki/source/podlogs/podlogs_test.go
@@ -3,7 +3,7 @@ package podlogs
 import (
 	"testing"
 
-	"github.com/grafana/river"
+	river "github.com/grafana/alloy/syntax"
 	"github.com/stretchr/testify/require"
 )
diff --git a/internal/component/loki/write/write_test.go b/internal/component/loki/write/write_test.go
index f0a797ead8..f71d3e9df4 100644
--- a/internal/component/loki/write/write_test.go
+++ b/internal/component/loki/write/write_test.go
@@ -16,7 +16,7 @@ import (
 	lsf "github.com/grafana/agent/internal/component/loki/source/file"
 	"github.com/grafana/agent/internal/flow/componenttest"
 	"github.com/grafana/agent/internal/util"
-	"github.com/grafana/river"
+	river "github.com/grafana/alloy/syntax"
 	"github.com/prometheus/common/model"
 	"github.com/stretchr/testify/require"
 	"go.uber.org/atomic"
diff --git a/internal/component/mimir/rules/kubernetes/rules_test.go b/internal/component/mimir/rules/kubernetes/rules_test.go
index 74ccd4cbeb..5339a22253 100644
--- a/internal/component/mimir/rules/kubernetes/rules_test.go
+++ b/internal/component/mimir/rules/kubernetes/rules_test.go
@@ -3,7 +3,7 @@ package rules
 import (
 	"testing"
 
-	"github.com/grafana/river"
+	river "github.com/grafana/alloy/syntax"
 	"github.com/stretchr/testify/require"
 )
diff --git a/internal/component/otelcol/auth/auth.go b/internal/component/otelcol/auth/auth.go
index f824a92acd..2ae132438b 100644
--- a/internal/component/otelcol/auth/auth.go
+++ b/internal/component/otelcol/auth/auth.go
@@ -14,7 +14,7 @@ import (
 	"github.com/grafana/agent/internal/component/otelcol/internal/lazycollector"
 	"github.com/grafana/agent/internal/component/otelcol/internal/scheduler"
 	"github.com/grafana/agent/internal/util/zapadapter"
-	"github.com/grafana/river"
+	river "github.com/grafana/alloy/syntax"
 	"github.com/prometheus/client_golang/prometheus"
 	otelcomponent "go.opentelemetry.io/collector/component"
 	otelextension "go.opentelemetry.io/collector/extension"
diff --git a/internal/component/otelcol/auth/basic/basic.go b/internal/component/otelcol/auth/basic/basic.go
index 6cdc41bca8..2997c9b665 100644
--- a/internal/component/otelcol/auth/basic/basic.go
+++ b/internal/component/otelcol/auth/basic/basic.go
@@ -5,7 +5,7 @@ import (
 	"github.com/grafana/agent/internal/component"
 	"github.com/grafana/agent/internal/component/otelcol/auth"
 	"github.com/grafana/agent/internal/featuregate"
-	"github.com/grafana/river/rivertypes"
+	rivertypes "github.com/grafana/alloy/syntax/alloytypes"
 	"github.com/open-telemetry/opentelemetry-collector-contrib/extension/basicauthextension"
 	otelcomponent "go.opentelemetry.io/collector/component"
 	"go.opentelemetry.io/collector/config/configopaque"
diff --git a/internal/component/otelcol/auth/basic/basic_test.go b/internal/component/otelcol/auth/basic/basic_test.go
index 3754210db4..5a60be95a6 100644
--- a/internal/component/otelcol/auth/basic/basic_test.go
+++ b/internal/component/otelcol/auth/basic/basic_test.go
@@ -11,7 +11,7 @@ import (
 	"github.com/grafana/agent/internal/component/otelcol/auth/basic"
 	"github.com/grafana/agent/internal/flow/componenttest"
 	"github.com/grafana/agent/internal/util"
-	"github.com/grafana/river"
+	river "github.com/grafana/alloy/syntax"
 	"github.com/stretchr/testify/assert"
 	"github.com/stretchr/testify/require"
 	extauth "go.opentelemetry.io/collector/extension/auth"
diff --git a/internal/component/otelcol/auth/bearer/bearer.go b/internal/component/otelcol/auth/bearer/bearer.go
index 865c17f2d9..55510036fa 100644
--- a/internal/component/otelcol/auth/bearer/bearer.go
+++ b/internal/component/otelcol/auth/bearer/bearer.go
@@ -5,7 +5,7 @@ import (
 	"github.com/grafana/agent/internal/component"
 	"github.com/grafana/agent/internal/component/otelcol/auth"
 	"github.com/grafana/agent/internal/featuregate"
-	"github.com/grafana/river/rivertypes"
+	rivertypes "github.com/grafana/alloy/syntax/alloytypes"
 	"github.com/open-telemetry/opentelemetry-collector-contrib/extension/bearertokenauthextension"
 	otelcomponent "go.opentelemetry.io/collector/component"
 	"go.opentelemetry.io/collector/config/configopaque"
diff --git a/internal/component/otelcol/auth/bearer/bearer_test.go b/internal/component/otelcol/auth/bearer/bearer_test.go
index e40a9a7398..e56b6c37de 100644
--- a/internal/component/otelcol/auth/bearer/bearer_test.go
+++ b/internal/component/otelcol/auth/bearer/bearer_test.go
@@ -11,7 +11,7 @@ import (
 	"github.com/grafana/agent/internal/component/otelcol/auth/bearer"
 	"github.com/grafana/agent/internal/flow/componenttest"
 	"github.com/grafana/agent/internal/util"
-	"github.com/grafana/river"
+	river "github.com/grafana/alloy/syntax"
 	"github.com/stretchr/testify/assert"
 	"github.com/stretchr/testify/require"
 	extauth "go.opentelemetry.io/collector/extension/auth"
diff --git a/internal/component/otelcol/auth/headers/headers.go b/internal/component/otelcol/auth/headers/headers.go
index bf5883690d..f1f9d391ca 100644
--- a/internal/component/otelcol/auth/headers/headers.go
+++ b/internal/component/otelcol/auth/headers/headers.go
@@ -9,8 +9,8 @@ import (
 	"github.com/grafana/agent/internal/component"
 	"github.com/grafana/agent/internal/component/otelcol/auth"
 	"github.com/grafana/agent/internal/featuregate"
-	"github.com/grafana/river"
-	"github.com/grafana/river/rivertypes"
+	river "github.com/grafana/alloy/syntax"
+	rivertypes "github.com/grafana/alloy/syntax/alloytypes"
 	"github.com/open-telemetry/opentelemetry-collector-contrib/extension/headerssetterextension"
 	otelcomponent "go.opentelemetry.io/collector/component"
 	otelextension "go.opentelemetry.io/collector/extension"
diff --git a/internal/component/otelcol/auth/headers/headers_test.go b/internal/component/otelcol/auth/headers/headers_test.go
index 8796dfe3a2..2d39864354 100644
--- a/internal/component/otelcol/auth/headers/headers_test.go
+++ b/internal/component/otelcol/auth/headers/headers_test.go
@@ -11,7 +11,7 @@ import (
 	"github.com/grafana/agent/internal/component/otelcol/auth/headers"
 	"github.com/grafana/agent/internal/flow/componenttest"
 	"github.com/grafana/agent/internal/util"
-	"github.com/grafana/river"
+	river "github.com/grafana/alloy/syntax"
 	"github.com/open-telemetry/opentelemetry-collector-contrib/extension/headerssetterextension"
 	"github.com/stretchr/testify/assert"
 	"github.com/stretchr/testify/require"
diff --git a/internal/component/otelcol/auth/oauth2/oauth2.go b/internal/component/otelcol/auth/oauth2/oauth2.go
index b90164711b..642a6225a4 100644
--- a/internal/component/otelcol/auth/oauth2/oauth2.go
+++ b/internal/component/otelcol/auth/oauth2/oauth2.go
@@ -8,7 +8,7 @@ import (
 	"github.com/grafana/agent/internal/component/otelcol"
 	"github.com/grafana/agent/internal/component/otelcol/auth"
 	"github.com/grafana/agent/internal/featuregate"
-	"github.com/grafana/river/rivertypes"
+	rivertypes "github.com/grafana/alloy/syntax/alloytypes"
 	"github.com/open-telemetry/opentelemetry-collector-contrib/extension/oauth2clientauthextension"
 	otelcomponent "go.opentelemetry.io/collector/component"
 	"go.opentelemetry.io/collector/config/configopaque"
diff --git a/internal/component/otelcol/auth/oauth2/oauth2_test.go b/internal/component/otelcol/auth/oauth2/oauth2_test.go
index ed704ed3c6..3f12c762bd 100644
--- a/internal/component/otelcol/auth/oauth2/oauth2_test.go
+++ b/internal/component/otelcol/auth/oauth2/oauth2_test.go
@@ -12,7 +12,7 @@ import (
 	"github.com/grafana/agent/internal/component/otelcol/auth/oauth2"
 	"github.com/grafana/agent/internal/flow/componenttest"
 	"github.com/grafana/agent/internal/util"
-	"github.com/grafana/river"
+	river "github.com/grafana/alloy/syntax"
 	"github.com/stretchr/testify/require"
 	extauth "go.opentelemetry.io/collector/extension/auth"
 	"gotest.tools/assert"
diff --git a/internal/component/otelcol/auth/sigv4/sigv4_test.go b/internal/component/otelcol/auth/sigv4/sigv4_test.go
index 401ce0f63f..24ed7d6a9c 100644
--- a/internal/component/otelcol/auth/sigv4/sigv4_test.go
+++ b/internal/component/otelcol/auth/sigv4/sigv4_test.go
@@ -13,7 +13,7 @@ import (
 	"github.com/grafana/agent/internal/component/otelcol/auth/sigv4"
 	"github.com/grafana/agent/internal/flow/componenttest"
 	"github.com/grafana/agent/internal/util"
-	"github.com/grafana/river"
+	river "github.com/grafana/alloy/syntax"
 	"github.com/stretchr/testify/assert"
 	"github.com/stretchr/testify/require"
 	extauth "go.opentelemetry.io/collector/extension/auth"
diff --git a/internal/component/otelcol/config_filter_test.go b/internal/component/otelcol/config_filter_test.go
index 918fe96cbd..97409cef6d 100644
--- a/internal/component/otelcol/config_filter_test.go
+++ b/internal/component/otelcol/config_filter_test.go
@@ -7,7 +7,7 @@ import (
 	"github.com/grafana/agent/internal/component/otelcol"
 
-	"github.com/grafana/river"
+	river "github.com/grafana/alloy/syntax"
 	"github.com/stretchr/testify/require"
 	"go.opentelemetry.io/collector/pdata/plog"
 )
diff --git a/internal/component/otelcol/config_retry.go b/internal/component/otelcol/config_retry.go
index 12a2ffea85..7c94dba3d5 100644
--- a/internal/component/otelcol/config_retry.go
+++ b/internal/component/otelcol/config_retry.go
@@ -4,7 +4,7 @@ import (
 	"fmt"
 	"time"
 
-	"github.com/grafana/river"
+	river "github.com/grafana/alloy/syntax"
 	otelexporterhelper "go.opentelemetry.io/collector/exporter/exporterhelper"
 )
diff --git a/internal/component/otelcol/config_tls.go b/internal/component/otelcol/config_tls.go
index 58e02ad687..65a460d87b 100644
--- a/internal/component/otelcol/config_tls.go
+++ b/internal/component/otelcol/config_tls.go
@@ -4,7 +4,7 @@ import (
 	"fmt"
 	"time"
 
-	"github.com/grafana/river/rivertypes"
+	rivertypes "github.com/grafana/alloy/syntax/alloytypes"
 	"go.opentelemetry.io/collector/config/configopaque"
 	otelconfigtls "go.opentelemetry.io/collector/config/configtls"
 )
diff --git a/internal/component/otelcol/connector/host_info/host_info.go b/internal/component/otelcol/connector/host_info/host_info.go
index b97f55397b..0468830fe7 100644
--- a/internal/component/otelcol/connector/host_info/host_info.go
+++ b/internal/component/otelcol/connector/host_info/host_info.go
@@ -9,7 +9,7 @@ import (
 	"github.com/grafana/agent/internal/component/otelcol"
 	"github.com/grafana/agent/internal/component/otelcol/connector"
 	"github.com/grafana/agent/internal/featuregate"
-	"github.com/grafana/river"
+	river "github.com/grafana/alloy/syntax"
 	otelcomponent "go.opentelemetry.io/collector/component"
 	otelextension "go.opentelemetry.io/collector/extension"
 )
diff --git a/internal/component/otelcol/connector/host_info/host_info_test.go b/internal/component/otelcol/connector/host_info/host_info_test.go
index d13f3d0e27..b7db269e6d 100644
--- a/internal/component/otelcol/connector/host_info/host_info_test.go
+++ b/internal/component/otelcol/connector/host_info/host_info_test.go
@@ -4,7 +4,7 @@ import (
 	"testing"
 	"time"
 
-	"github.com/grafana/river"
+	river "github.com/grafana/alloy/syntax"
"github.com/stretchr/testify/require" ) diff --git a/internal/component/otelcol/connector/servicegraph/servicegraph.go b/internal/component/otelcol/connector/servicegraph/servicegraph.go index dfd25b2be4..96705966fb 100644 --- a/internal/component/otelcol/connector/servicegraph/servicegraph.go +++ b/internal/component/otelcol/connector/servicegraph/servicegraph.go @@ -8,7 +8,7 @@ import ( "github.com/grafana/agent/internal/component/otelcol" "github.com/grafana/agent/internal/component/otelcol/connector" "github.com/grafana/agent/internal/featuregate" - "github.com/grafana/river" + river "github.com/grafana/alloy/syntax" "github.com/open-telemetry/opentelemetry-collector-contrib/connector/servicegraphconnector" "github.com/open-telemetry/opentelemetry-collector-contrib/processor/servicegraphprocessor" otelcomponent "go.opentelemetry.io/collector/component" diff --git a/internal/component/otelcol/connector/servicegraph/servicegraph_test.go b/internal/component/otelcol/connector/servicegraph/servicegraph_test.go index f437c5ce45..adcc03c3a5 100644 --- a/internal/component/otelcol/connector/servicegraph/servicegraph_test.go +++ b/internal/component/otelcol/connector/servicegraph/servicegraph_test.go @@ -5,7 +5,7 @@ import ( "time" "github.com/grafana/agent/internal/component/otelcol/connector/servicegraph" - "github.com/grafana/river" + river "github.com/grafana/alloy/syntax" "github.com/open-telemetry/opentelemetry-collector-contrib/processor/servicegraphprocessor" "github.com/stretchr/testify/require" ) diff --git a/internal/component/otelcol/connector/spanlogs/spanlogs.go b/internal/component/otelcol/connector/spanlogs/spanlogs.go index a72a36eb2e..5ccf94e6a1 100644 --- a/internal/component/otelcol/connector/spanlogs/spanlogs.go +++ b/internal/component/otelcol/connector/spanlogs/spanlogs.go @@ -11,7 +11,7 @@ import ( "github.com/grafana/agent/internal/component/otelcol/internal/lazyconsumer" "github.com/grafana/agent/internal/featuregate" "github.com/grafana/agent/internal/flow/logging/level" - "github.com/grafana/river" + river "github.com/grafana/alloy/syntax" ) func init() { diff --git a/internal/component/otelcol/connector/spanlogs/spanlogs_test.go b/internal/component/otelcol/connector/spanlogs/spanlogs_test.go index 8caf182d8b..5b1033a84a 100644 --- a/internal/component/otelcol/connector/spanlogs/spanlogs_test.go +++ b/internal/component/otelcol/connector/spanlogs/spanlogs_test.go @@ -9,7 +9,7 @@ import ( "github.com/grafana/agent/internal/component/otelcol/processor/processortest" "github.com/grafana/agent/internal/flow/componenttest" "github.com/grafana/agent/internal/util" - "github.com/grafana/river" + river "github.com/grafana/alloy/syntax" "github.com/stretchr/testify/require" ) diff --git a/internal/component/otelcol/connector/spanmetrics/spanmetrics.go b/internal/component/otelcol/connector/spanmetrics/spanmetrics.go index cc376f6a1f..54e8fd78be 100644 --- a/internal/component/otelcol/connector/spanmetrics/spanmetrics.go +++ b/internal/component/otelcol/connector/spanmetrics/spanmetrics.go @@ -9,7 +9,7 @@ import ( "github.com/grafana/agent/internal/component/otelcol" "github.com/grafana/agent/internal/component/otelcol/connector" "github.com/grafana/agent/internal/featuregate" - "github.com/grafana/river" + river "github.com/grafana/alloy/syntax" "github.com/open-telemetry/opentelemetry-collector-contrib/connector/spanmetricsconnector" otelcomponent "go.opentelemetry.io/collector/component" otelextension "go.opentelemetry.io/collector/extension" diff --git 
a/internal/component/otelcol/connector/spanmetrics/spanmetrics_test.go b/internal/component/otelcol/connector/spanmetrics/spanmetrics_test.go index 1672172f76..4e8c20b158 100644 --- a/internal/component/otelcol/connector/spanmetrics/spanmetrics_test.go +++ b/internal/component/otelcol/connector/spanmetrics/spanmetrics_test.go @@ -9,7 +9,7 @@ import ( "github.com/grafana/agent/internal/component/otelcol/processor/processortest" "github.com/grafana/agent/internal/flow/componenttest" "github.com/grafana/agent/internal/util" - "github.com/grafana/river" + river "github.com/grafana/alloy/syntax" "github.com/open-telemetry/opentelemetry-collector-contrib/connector/spanmetricsconnector" "github.com/stretchr/testify/require" ) diff --git a/internal/component/otelcol/connector/spanmetrics/types.go b/internal/component/otelcol/connector/spanmetrics/types.go index bba03fbe04..8c88c19d8c 100644 --- a/internal/component/otelcol/connector/spanmetrics/types.go +++ b/internal/component/otelcol/connector/spanmetrics/types.go @@ -5,7 +5,7 @@ import ( "strings" "time" - "github.com/grafana/river" + river "github.com/grafana/alloy/syntax" "github.com/mitchellh/mapstructure" "github.com/open-telemetry/opentelemetry-collector-contrib/connector/spanmetricsconnector" ) diff --git a/internal/component/otelcol/exporter/loadbalancing/loadbalancing.go b/internal/component/otelcol/exporter/loadbalancing/loadbalancing.go index b069ba2f8f..51f1bc2a33 100644 --- a/internal/component/otelcol/exporter/loadbalancing/loadbalancing.go +++ b/internal/component/otelcol/exporter/loadbalancing/loadbalancing.go @@ -11,7 +11,7 @@ import ( "github.com/grafana/agent/internal/component/otelcol/auth" "github.com/grafana/agent/internal/component/otelcol/exporter" "github.com/grafana/agent/internal/featuregate" - "github.com/grafana/river" + river "github.com/grafana/alloy/syntax" "github.com/open-telemetry/opentelemetry-collector-contrib/exporter/loadbalancingexporter" otelcomponent "go.opentelemetry.io/collector/component" otelconfigauth "go.opentelemetry.io/collector/config/configauth" diff --git a/internal/component/otelcol/exporter/loadbalancing/loadbalancing_test.go b/internal/component/otelcol/exporter/loadbalancing/loadbalancing_test.go index 8034531ffb..aaee9ed9f1 100644 --- a/internal/component/otelcol/exporter/loadbalancing/loadbalancing_test.go +++ b/internal/component/otelcol/exporter/loadbalancing/loadbalancing_test.go @@ -6,7 +6,7 @@ import ( "github.com/grafana/agent/internal/component/otelcol" "github.com/grafana/agent/internal/component/otelcol/exporter/loadbalancing" - "github.com/grafana/river" + river "github.com/grafana/alloy/syntax" "github.com/open-telemetry/opentelemetry-collector-contrib/exporter/loadbalancingexporter" "github.com/stretchr/testify/require" "go.opentelemetry.io/collector/config/configgrpc" diff --git a/internal/component/otelcol/exporter/otlp/otlp_test.go b/internal/component/otelcol/exporter/otlp/otlp_test.go index 86c531e2e0..48add799d4 100644 --- a/internal/component/otelcol/exporter/otlp/otlp_test.go +++ b/internal/component/otelcol/exporter/otlp/otlp_test.go @@ -12,8 +12,8 @@ import ( "github.com/grafana/agent/internal/flow/componenttest" "github.com/grafana/agent/internal/flow/logging/level" "github.com/grafana/agent/internal/util" + river "github.com/grafana/alloy/syntax" "github.com/grafana/dskit/backoff" - "github.com/grafana/river" "github.com/stretchr/testify/require" "go.opentelemetry.io/collector/pdata/ptrace" "go.opentelemetry.io/collector/pdata/ptrace/ptraceotlp" diff --git 
a/internal/component/otelcol/exporter/otlphttp/otlphttp_test.go b/internal/component/otelcol/exporter/otlphttp/otlphttp_test.go index 664bae4e46..19a58725c2 100644 --- a/internal/component/otelcol/exporter/otlphttp/otlphttp_test.go +++ b/internal/component/otelcol/exporter/otlphttp/otlphttp_test.go @@ -13,8 +13,8 @@ import ( "github.com/grafana/agent/internal/flow/componenttest" "github.com/grafana/agent/internal/flow/logging/level" "github.com/grafana/agent/internal/util" + river "github.com/grafana/alloy/syntax" "github.com/grafana/dskit/backoff" - "github.com/grafana/river" "github.com/stretchr/testify/require" "go.opentelemetry.io/collector/pdata/ptrace" ) diff --git a/internal/component/otelcol/exporter/prometheus/prometheus_test.go b/internal/component/otelcol/exporter/prometheus/prometheus_test.go index 430c6965d5..8f78b0833f 100644 --- a/internal/component/otelcol/exporter/prometheus/prometheus_test.go +++ b/internal/component/otelcol/exporter/prometheus/prometheus_test.go @@ -5,7 +5,7 @@ import ( "time" "github.com/grafana/agent/internal/component/otelcol/exporter/prometheus" - "github.com/grafana/river" + river "github.com/grafana/alloy/syntax" "github.com/prometheus/prometheus/storage" "github.com/stretchr/testify/require" ) diff --git a/internal/component/otelcol/extension/jaeger_remote_sampling/jaeger_remote_sampling_test.go b/internal/component/otelcol/extension/jaeger_remote_sampling/jaeger_remote_sampling_test.go index 9b2bd7374f..dab1d1e093 100644 --- a/internal/component/otelcol/extension/jaeger_remote_sampling/jaeger_remote_sampling_test.go +++ b/internal/component/otelcol/extension/jaeger_remote_sampling/jaeger_remote_sampling_test.go @@ -14,7 +14,7 @@ import ( "github.com/grafana/agent/internal/component/otelcol/extension/jaeger_remote_sampling" "github.com/grafana/agent/internal/flow/componenttest" "github.com/grafana/agent/internal/util" - "github.com/grafana/river" + river "github.com/grafana/alloy/syntax" "github.com/phayes/freeport" "github.com/stretchr/testify/require" ) diff --git a/internal/component/otelcol/processor/attributes/attributes_test.go b/internal/component/otelcol/processor/attributes/attributes_test.go index 1e58776cd1..69731121f5 100644 --- a/internal/component/otelcol/processor/attributes/attributes_test.go +++ b/internal/component/otelcol/processor/attributes/attributes_test.go @@ -11,7 +11,7 @@ import ( "github.com/grafana/agent/internal/component/otelcol/processor/processortest" "github.com/grafana/agent/internal/flow/componenttest" "github.com/grafana/agent/internal/util" - "github.com/grafana/river" + river "github.com/grafana/alloy/syntax" "github.com/mitchellh/mapstructure" "github.com/open-telemetry/opentelemetry-collector-contrib/processor/attributesprocessor" "github.com/stretchr/testify/require" diff --git a/internal/component/otelcol/processor/batch/batch_test.go b/internal/component/otelcol/processor/batch/batch_test.go index 28e76085f5..bb14a136ba 100644 --- a/internal/component/otelcol/processor/batch/batch_test.go +++ b/internal/component/otelcol/processor/batch/batch_test.go @@ -11,8 +11,8 @@ import ( "github.com/grafana/agent/internal/flow/componenttest" "github.com/grafana/agent/internal/flow/logging/level" "github.com/grafana/agent/internal/util" + river "github.com/grafana/alloy/syntax" "github.com/grafana/dskit/backoff" - "github.com/grafana/river" "github.com/stretchr/testify/require" "go.opentelemetry.io/collector/pdata/ptrace" "go.opentelemetry.io/collector/processor/batchprocessor" diff --git 
a/internal/component/otelcol/processor/discovery/discovery.go b/internal/component/otelcol/processor/discovery/discovery.go index 2268991399..250156dee9 100644 --- a/internal/component/otelcol/processor/discovery/discovery.go +++ b/internal/component/otelcol/processor/discovery/discovery.go @@ -14,7 +14,7 @@ import ( "github.com/grafana/agent/internal/featuregate" "github.com/grafana/agent/internal/flow/logging/level" promsdconsumer "github.com/grafana/agent/internal/static/traces/promsdprocessor/consumer" - "github.com/grafana/river" + river "github.com/grafana/alloy/syntax" ) func init() { diff --git a/internal/component/otelcol/processor/discovery/discovery_test.go b/internal/component/otelcol/processor/discovery/discovery_test.go index ac74167332..a33d4c1573 100644 --- a/internal/component/otelcol/processor/discovery/discovery_test.go +++ b/internal/component/otelcol/processor/discovery/discovery_test.go @@ -10,7 +10,7 @@ import ( "github.com/grafana/agent/internal/flow/componenttest" promsdconsumer "github.com/grafana/agent/internal/static/traces/promsdprocessor/consumer" "github.com/grafana/agent/internal/util" - "github.com/grafana/river" + river "github.com/grafana/alloy/syntax" "github.com/stretchr/testify/require" semconv "go.opentelemetry.io/collector/semconv/v1.5.0" ) diff --git a/internal/component/otelcol/processor/filter/filter_test.go b/internal/component/otelcol/processor/filter/filter_test.go index a86d2d3fcd..58eb9861ed 100644 --- a/internal/component/otelcol/processor/filter/filter_test.go +++ b/internal/component/otelcol/processor/filter/filter_test.go @@ -4,7 +4,7 @@ import ( "testing" "github.com/grafana/agent/internal/component/otelcol/processor/filter" - "github.com/grafana/river" + river "github.com/grafana/alloy/syntax" "github.com/mitchellh/mapstructure" "github.com/open-telemetry/opentelemetry-collector-contrib/processor/filterprocessor" "github.com/stretchr/testify/require" diff --git a/internal/component/otelcol/processor/k8sattributes/k8sattributes_test.go b/internal/component/otelcol/processor/k8sattributes/k8sattributes_test.go index 1a5df80a63..cefcecc6dd 100644 --- a/internal/component/otelcol/processor/k8sattributes/k8sattributes_test.go +++ b/internal/component/otelcol/processor/k8sattributes/k8sattributes_test.go @@ -4,7 +4,7 @@ import ( "testing" "github.com/grafana/agent/internal/component/otelcol/processor/k8sattributes" - "github.com/grafana/river" + river "github.com/grafana/alloy/syntax" "github.com/open-telemetry/opentelemetry-collector-contrib/processor/k8sattributesprocessor" "github.com/stretchr/testify/require" ) diff --git a/internal/component/otelcol/processor/memorylimiter/memorylimiter_test.go b/internal/component/otelcol/processor/memorylimiter/memorylimiter_test.go index a5efd41467..03342d47f0 100644 --- a/internal/component/otelcol/processor/memorylimiter/memorylimiter_test.go +++ b/internal/component/otelcol/processor/memorylimiter/memorylimiter_test.go @@ -11,8 +11,8 @@ import ( "github.com/grafana/agent/internal/flow/componenttest" "github.com/grafana/agent/internal/flow/logging/level" "github.com/grafana/agent/internal/util" + river "github.com/grafana/alloy/syntax" "github.com/grafana/dskit/backoff" - "github.com/grafana/river" "github.com/stretchr/testify/require" "go.opentelemetry.io/collector/pdata/ptrace" ) diff --git a/internal/component/otelcol/processor/probabilistic_sampler/probabilistic_sampler.go b/internal/component/otelcol/processor/probabilistic_sampler/probabilistic_sampler.go index 0ec2860750..a3096e8c8b 100644 
--- a/internal/component/otelcol/processor/probabilistic_sampler/probabilistic_sampler.go +++ b/internal/component/otelcol/processor/probabilistic_sampler/probabilistic_sampler.go @@ -6,7 +6,7 @@ import ( "github.com/grafana/agent/internal/component/otelcol" "github.com/grafana/agent/internal/component/otelcol/processor" "github.com/grafana/agent/internal/featuregate" - "github.com/grafana/river" + river "github.com/grafana/alloy/syntax" "github.com/open-telemetry/opentelemetry-collector-contrib/processor/probabilisticsamplerprocessor" otelcomponent "go.opentelemetry.io/collector/component" otelextension "go.opentelemetry.io/collector/extension" diff --git a/internal/component/otelcol/processor/probabilistic_sampler/probabilistic_sampler_test.go b/internal/component/otelcol/processor/probabilistic_sampler/probabilistic_sampler_test.go index a1dc28eaf8..0f67443034 100644 --- a/internal/component/otelcol/processor/probabilistic_sampler/probabilistic_sampler_test.go +++ b/internal/component/otelcol/processor/probabilistic_sampler/probabilistic_sampler_test.go @@ -8,7 +8,7 @@ import ( "github.com/grafana/agent/internal/component/otelcol/processor/processortest" "github.com/grafana/agent/internal/flow/componenttest" "github.com/grafana/agent/internal/util" - "github.com/grafana/river" + river "github.com/grafana/alloy/syntax" "github.com/open-telemetry/opentelemetry-collector-contrib/processor/probabilisticsamplerprocessor" "github.com/stretchr/testify/require" ) diff --git a/internal/component/otelcol/processor/resourcedetection/internal/aws/ec2/config.go b/internal/component/otelcol/processor/resourcedetection/internal/aws/ec2/config.go index 6037c05696..e4fb57a449 100644 --- a/internal/component/otelcol/processor/resourcedetection/internal/aws/ec2/config.go +++ b/internal/component/otelcol/processor/resourcedetection/internal/aws/ec2/config.go @@ -2,7 +2,7 @@ package ec2 import ( rac "github.com/grafana/agent/internal/component/otelcol/processor/resourcedetection/internal/resource_attribute_config" - "github.com/grafana/river" + river "github.com/grafana/alloy/syntax" ) const Name = "ec2" diff --git a/internal/component/otelcol/processor/resourcedetection/internal/aws/ecs/config.go b/internal/component/otelcol/processor/resourcedetection/internal/aws/ecs/config.go index d5c82182a9..07f2dfb6e4 100644 --- a/internal/component/otelcol/processor/resourcedetection/internal/aws/ecs/config.go +++ b/internal/component/otelcol/processor/resourcedetection/internal/aws/ecs/config.go @@ -2,7 +2,7 @@ package ecs import ( rac "github.com/grafana/agent/internal/component/otelcol/processor/resourcedetection/internal/resource_attribute_config" - "github.com/grafana/river" + river "github.com/grafana/alloy/syntax" ) const Name = "ecs" diff --git a/internal/component/otelcol/processor/resourcedetection/internal/aws/eks/config.go b/internal/component/otelcol/processor/resourcedetection/internal/aws/eks/config.go index a0b2ca60c4..b357b793ca 100644 --- a/internal/component/otelcol/processor/resourcedetection/internal/aws/eks/config.go +++ b/internal/component/otelcol/processor/resourcedetection/internal/aws/eks/config.go @@ -2,7 +2,7 @@ package eks import ( rac "github.com/grafana/agent/internal/component/otelcol/processor/resourcedetection/internal/resource_attribute_config" - "github.com/grafana/river" + river "github.com/grafana/alloy/syntax" ) const Name = "eks" diff --git a/internal/component/otelcol/processor/resourcedetection/internal/aws/elasticbeanstalk/config.go 
b/internal/component/otelcol/processor/resourcedetection/internal/aws/elasticbeanstalk/config.go index 72bee032ef..8fa5ba5b18 100644 --- a/internal/component/otelcol/processor/resourcedetection/internal/aws/elasticbeanstalk/config.go +++ b/internal/component/otelcol/processor/resourcedetection/internal/aws/elasticbeanstalk/config.go @@ -2,7 +2,7 @@ package elasticbeanstalk import ( rac "github.com/grafana/agent/internal/component/otelcol/processor/resourcedetection/internal/resource_attribute_config" - "github.com/grafana/river" + river "github.com/grafana/alloy/syntax" ) const Name = "elasticbeanstalk" diff --git a/internal/component/otelcol/processor/resourcedetection/internal/aws/lambda/config.go b/internal/component/otelcol/processor/resourcedetection/internal/aws/lambda/config.go index 1e7d95b620..5b6654d5cd 100644 --- a/internal/component/otelcol/processor/resourcedetection/internal/aws/lambda/config.go +++ b/internal/component/otelcol/processor/resourcedetection/internal/aws/lambda/config.go @@ -2,7 +2,7 @@ package lambda import ( rac "github.com/grafana/agent/internal/component/otelcol/processor/resourcedetection/internal/resource_attribute_config" - "github.com/grafana/river" + river "github.com/grafana/alloy/syntax" ) const Name = "lambda" diff --git a/internal/component/otelcol/processor/resourcedetection/internal/azure/aks/config.go b/internal/component/otelcol/processor/resourcedetection/internal/azure/aks/config.go index b23e0fdba6..7592436521 100644 --- a/internal/component/otelcol/processor/resourcedetection/internal/azure/aks/config.go +++ b/internal/component/otelcol/processor/resourcedetection/internal/azure/aks/config.go @@ -2,7 +2,7 @@ package aks import ( rac "github.com/grafana/agent/internal/component/otelcol/processor/resourcedetection/internal/resource_attribute_config" - "github.com/grafana/river" + river "github.com/grafana/alloy/syntax" ) const Name = "aks" diff --git a/internal/component/otelcol/processor/resourcedetection/internal/azure/config.go b/internal/component/otelcol/processor/resourcedetection/internal/azure/config.go index 375863d347..dcfcfb7a03 100644 --- a/internal/component/otelcol/processor/resourcedetection/internal/azure/config.go +++ b/internal/component/otelcol/processor/resourcedetection/internal/azure/config.go @@ -2,7 +2,7 @@ package azure import ( rac "github.com/grafana/agent/internal/component/otelcol/processor/resourcedetection/internal/resource_attribute_config" - "github.com/grafana/river" + river "github.com/grafana/alloy/syntax" ) const Name = "azure" diff --git a/internal/component/otelcol/processor/resourcedetection/internal/consul/config.go b/internal/component/otelcol/processor/resourcedetection/internal/consul/config.go index af51b19e06..4862f728b2 100644 --- a/internal/component/otelcol/processor/resourcedetection/internal/consul/config.go +++ b/internal/component/otelcol/processor/resourcedetection/internal/consul/config.go @@ -2,8 +2,8 @@ package consul import ( rac "github.com/grafana/agent/internal/component/otelcol/processor/resourcedetection/internal/resource_attribute_config" - "github.com/grafana/river" - "github.com/grafana/river/rivertypes" + river "github.com/grafana/alloy/syntax" + rivertypes "github.com/grafana/alloy/syntax/alloytypes" "go.opentelemetry.io/collector/config/configopaque" ) diff --git a/internal/component/otelcol/processor/resourcedetection/internal/docker/config.go b/internal/component/otelcol/processor/resourcedetection/internal/docker/config.go index f67c79c39e..9f3f15c21d 100644 --- 
a/internal/component/otelcol/processor/resourcedetection/internal/docker/config.go +++ b/internal/component/otelcol/processor/resourcedetection/internal/docker/config.go @@ -2,7 +2,7 @@ package docker import ( rac "github.com/grafana/agent/internal/component/otelcol/processor/resourcedetection/internal/resource_attribute_config" - "github.com/grafana/river" + river "github.com/grafana/alloy/syntax" ) const Name = "docker" diff --git a/internal/component/otelcol/processor/resourcedetection/internal/gcp/config.go b/internal/component/otelcol/processor/resourcedetection/internal/gcp/config.go index d97848bc16..7b86d99a29 100644 --- a/internal/component/otelcol/processor/resourcedetection/internal/gcp/config.go +++ b/internal/component/otelcol/processor/resourcedetection/internal/gcp/config.go @@ -2,7 +2,7 @@ package gcp import ( rac "github.com/grafana/agent/internal/component/otelcol/processor/resourcedetection/internal/resource_attribute_config" - "github.com/grafana/river" + river "github.com/grafana/alloy/syntax" ) const Name = "gcp" diff --git a/internal/component/otelcol/processor/resourcedetection/internal/heroku/config.go b/internal/component/otelcol/processor/resourcedetection/internal/heroku/config.go index d009bac91e..4e66e85f38 100644 --- a/internal/component/otelcol/processor/resourcedetection/internal/heroku/config.go +++ b/internal/component/otelcol/processor/resourcedetection/internal/heroku/config.go @@ -2,7 +2,7 @@ package heroku import ( rac "github.com/grafana/agent/internal/component/otelcol/processor/resourcedetection/internal/resource_attribute_config" - "github.com/grafana/river" + river "github.com/grafana/alloy/syntax" ) const Name = "heroku" diff --git a/internal/component/otelcol/processor/resourcedetection/internal/k8snode/config.go b/internal/component/otelcol/processor/resourcedetection/internal/k8snode/config.go index 62303f94c5..4b53b48ced 100644 --- a/internal/component/otelcol/processor/resourcedetection/internal/k8snode/config.go +++ b/internal/component/otelcol/processor/resourcedetection/internal/k8snode/config.go @@ -3,7 +3,7 @@ package k8snode import ( "github.com/grafana/agent/internal/component/otelcol" rac "github.com/grafana/agent/internal/component/otelcol/processor/resourcedetection/internal/resource_attribute_config" - "github.com/grafana/river" + river "github.com/grafana/alloy/syntax" ) const Name = "kubernetes_node" diff --git a/internal/component/otelcol/processor/resourcedetection/internal/openshift/config.go b/internal/component/otelcol/processor/resourcedetection/internal/openshift/config.go index 94c594edf2..1ea7173cf1 100644 --- a/internal/component/otelcol/processor/resourcedetection/internal/openshift/config.go +++ b/internal/component/otelcol/processor/resourcedetection/internal/openshift/config.go @@ -3,7 +3,7 @@ package openshift import ( "github.com/grafana/agent/internal/component/otelcol" rac "github.com/grafana/agent/internal/component/otelcol/processor/resourcedetection/internal/resource_attribute_config" - "github.com/grafana/river" + river "github.com/grafana/alloy/syntax" ) const Name = "openshift" diff --git a/internal/component/otelcol/processor/resourcedetection/internal/system/config.go b/internal/component/otelcol/processor/resourcedetection/internal/system/config.go index 3c72a13228..091cd5e7ac 100644 --- a/internal/component/otelcol/processor/resourcedetection/internal/system/config.go +++ b/internal/component/otelcol/processor/resourcedetection/internal/system/config.go @@ -4,7 +4,7 @@ import ( "fmt" rac 
"github.com/grafana/agent/internal/component/otelcol/processor/resourcedetection/internal/resource_attribute_config" - "github.com/grafana/river" + river "github.com/grafana/alloy/syntax" ) const Name = "system" diff --git a/internal/component/otelcol/processor/resourcedetection/resourcedetection.go b/internal/component/otelcol/processor/resourcedetection/resourcedetection.go index e086886927..40b366db73 100644 --- a/internal/component/otelcol/processor/resourcedetection/resourcedetection.go +++ b/internal/component/otelcol/processor/resourcedetection/resourcedetection.go @@ -22,7 +22,7 @@ import ( "github.com/grafana/agent/internal/component/otelcol/processor/resourcedetection/internal/openshift" "github.com/grafana/agent/internal/component/otelcol/processor/resourcedetection/internal/system" "github.com/grafana/agent/internal/featuregate" - "github.com/grafana/river" + river "github.com/grafana/alloy/syntax" "github.com/mitchellh/mapstructure" "github.com/open-telemetry/opentelemetry-collector-contrib/processor/resourcedetectionprocessor" otelcomponent "go.opentelemetry.io/collector/component" diff --git a/internal/component/otelcol/processor/resourcedetection/resourcedetection_test.go b/internal/component/otelcol/processor/resourcedetection/resourcedetection_test.go index d52c3f65e3..64285055f5 100644 --- a/internal/component/otelcol/processor/resourcedetection/resourcedetection_test.go +++ b/internal/component/otelcol/processor/resourcedetection/resourcedetection_test.go @@ -19,7 +19,7 @@ import ( kubernetes_node "github.com/grafana/agent/internal/component/otelcol/processor/resourcedetection/internal/k8snode" "github.com/grafana/agent/internal/component/otelcol/processor/resourcedetection/internal/openshift" "github.com/grafana/agent/internal/component/otelcol/processor/resourcedetection/internal/system" - "github.com/grafana/river" + river "github.com/grafana/alloy/syntax" "github.com/mitchellh/mapstructure" "github.com/open-telemetry/opentelemetry-collector-contrib/processor/resourcedetectionprocessor" "github.com/stretchr/testify/require" diff --git a/internal/component/otelcol/processor/span/span_test.go b/internal/component/otelcol/processor/span/span_test.go index 7d4cd9c557..0b31f0dcc7 100644 --- a/internal/component/otelcol/processor/span/span_test.go +++ b/internal/component/otelcol/processor/span/span_test.go @@ -8,7 +8,7 @@ import ( "github.com/grafana/agent/internal/component/otelcol/processor/span" "github.com/grafana/agent/internal/flow/componenttest" "github.com/grafana/agent/internal/util" - "github.com/grafana/river" + river "github.com/grafana/alloy/syntax" "github.com/mitchellh/mapstructure" "github.com/open-telemetry/opentelemetry-collector-contrib/processor/spanprocessor" "github.com/stretchr/testify/require" diff --git a/internal/component/otelcol/processor/tail_sampling/tail_sampling_test.go b/internal/component/otelcol/processor/tail_sampling/tail_sampling_test.go index 37a9c1b27d..729a21c1e5 100644 --- a/internal/component/otelcol/processor/tail_sampling/tail_sampling_test.go +++ b/internal/component/otelcol/processor/tail_sampling/tail_sampling_test.go @@ -12,8 +12,8 @@ import ( "github.com/grafana/agent/internal/flow/componenttest" "github.com/grafana/agent/internal/flow/logging/level" "github.com/grafana/agent/internal/util" + river "github.com/grafana/alloy/syntax" "github.com/grafana/dskit/backoff" - "github.com/grafana/river" "github.com/stretchr/testify/require" "go.opentelemetry.io/collector/pdata/ptrace" ) diff --git 
a/internal/component/otelcol/processor/tail_sampling/types.go b/internal/component/otelcol/processor/tail_sampling/types.go index 90087cbbb6..c9c66fdf4c 100644 --- a/internal/component/otelcol/processor/tail_sampling/types.go +++ b/internal/component/otelcol/processor/tail_sampling/types.go @@ -5,7 +5,7 @@ import ( "fmt" "strings" - "github.com/grafana/river" + river "github.com/grafana/alloy/syntax" "github.com/mitchellh/mapstructure" "github.com/open-telemetry/opentelemetry-collector-contrib/pkg/ottl" tsp "github.com/open-telemetry/opentelemetry-collector-contrib/processor/tailsamplingprocessor" diff --git a/internal/component/otelcol/processor/transform/transform_test.go b/internal/component/otelcol/processor/transform/transform_test.go index 4c40f2d3a5..73a44bce43 100644 --- a/internal/component/otelcol/processor/transform/transform_test.go +++ b/internal/component/otelcol/processor/transform/transform_test.go @@ -4,7 +4,7 @@ import ( "testing" "github.com/grafana/agent/internal/component/otelcol/processor/transform" - "github.com/grafana/river" + river "github.com/grafana/alloy/syntax" "github.com/mitchellh/mapstructure" "github.com/open-telemetry/opentelemetry-collector-contrib/processor/transformprocessor" "github.com/stretchr/testify/require" diff --git a/internal/component/otelcol/receiver/jaeger/jaeger_test.go b/internal/component/otelcol/receiver/jaeger/jaeger_test.go index c2c7fb9b67..f07d319ba2 100644 --- a/internal/component/otelcol/receiver/jaeger/jaeger_test.go +++ b/internal/component/otelcol/receiver/jaeger/jaeger_test.go @@ -9,7 +9,7 @@ import ( "github.com/grafana/agent/internal/component/otelcol/receiver/jaeger" "github.com/grafana/agent/internal/flow/componenttest" "github.com/grafana/agent/internal/util" - "github.com/grafana/river" + river "github.com/grafana/alloy/syntax" "github.com/phayes/freeport" "github.com/stretchr/testify/require" ) diff --git a/internal/component/otelcol/receiver/kafka/kafka.go b/internal/component/otelcol/receiver/kafka/kafka.go index d52223f1ad..08a6cabc35 100644 --- a/internal/component/otelcol/receiver/kafka/kafka.go +++ b/internal/component/otelcol/receiver/kafka/kafka.go @@ -8,7 +8,7 @@ import ( "github.com/grafana/agent/internal/component/otelcol" "github.com/grafana/agent/internal/component/otelcol/receiver" "github.com/grafana/agent/internal/featuregate" - "github.com/grafana/river/rivertypes" + rivertypes "github.com/grafana/alloy/syntax/alloytypes" "github.com/mitchellh/mapstructure" "github.com/open-telemetry/opentelemetry-collector-contrib/exporter/kafkaexporter" "github.com/open-telemetry/opentelemetry-collector-contrib/receiver/kafkareceiver" diff --git a/internal/component/otelcol/receiver/kafka/kafka_test.go b/internal/component/otelcol/receiver/kafka/kafka_test.go index c03c8a63ef..16d93a773f 100644 --- a/internal/component/otelcol/receiver/kafka/kafka_test.go +++ b/internal/component/otelcol/receiver/kafka/kafka_test.go @@ -6,7 +6,7 @@ import ( "github.com/grafana/agent/internal/component/otelcol" "github.com/grafana/agent/internal/component/otelcol/receiver/kafka" - "github.com/grafana/river" + river "github.com/grafana/alloy/syntax" "github.com/mitchellh/mapstructure" "github.com/open-telemetry/opentelemetry-collector-contrib/exporter/kafkaexporter" "github.com/open-telemetry/opentelemetry-collector-contrib/receiver/kafkareceiver" diff --git a/internal/component/otelcol/receiver/loki/loki_test.go b/internal/component/otelcol/receiver/loki/loki_test.go index f69df27272..9ec47bb48a 100644 --- 
a/internal/component/otelcol/receiver/loki/loki_test.go +++ b/internal/component/otelcol/receiver/loki/loki_test.go @@ -10,8 +10,8 @@ import ( "github.com/grafana/agent/internal/component/otelcol/internal/fakeconsumer" "github.com/grafana/agent/internal/flow/componenttest" "github.com/grafana/agent/internal/util" + river "github.com/grafana/alloy/syntax" "github.com/grafana/loki/pkg/logproto" - "github.com/grafana/river" "github.com/prometheus/common/model" "github.com/stretchr/testify/require" "go.opentelemetry.io/collector/pdata/plog" diff --git a/internal/component/otelcol/receiver/opencensus/opencensus_test.go b/internal/component/otelcol/receiver/opencensus/opencensus_test.go index aa8536a3e3..b4eb5aa5e0 100644 --- a/internal/component/otelcol/receiver/opencensus/opencensus_test.go +++ b/internal/component/otelcol/receiver/opencensus/opencensus_test.go @@ -9,7 +9,7 @@ import ( "github.com/grafana/agent/internal/component/otelcol/receiver/opencensus" "github.com/grafana/agent/internal/flow/componenttest" "github.com/grafana/agent/internal/util" - "github.com/grafana/river" + river "github.com/grafana/alloy/syntax" "github.com/open-telemetry/opentelemetry-collector-contrib/receiver/opencensusreceiver" "github.com/phayes/freeport" "github.com/stretchr/testify/require" diff --git a/internal/component/otelcol/receiver/otlp/otlp_test.go b/internal/component/otelcol/receiver/otlp/otlp_test.go index 5b148d539d..fea621cb47 100644 --- a/internal/component/otelcol/receiver/otlp/otlp_test.go +++ b/internal/component/otelcol/receiver/otlp/otlp_test.go @@ -14,8 +14,8 @@ import ( "github.com/grafana/agent/internal/flow/componenttest" "github.com/grafana/agent/internal/flow/logging/level" "github.com/grafana/agent/internal/util" + river "github.com/grafana/alloy/syntax" "github.com/grafana/dskit/backoff" - "github.com/grafana/river" "github.com/phayes/freeport" "github.com/stretchr/testify/require" "go.opentelemetry.io/collector/pdata/ptrace" diff --git a/internal/component/otelcol/receiver/prometheus/prometheus_test.go b/internal/component/otelcol/receiver/prometheus/prometheus_test.go index 92263b1aa5..6ba3a43f65 100644 --- a/internal/component/otelcol/receiver/prometheus/prometheus_test.go +++ b/internal/component/otelcol/receiver/prometheus/prometheus_test.go @@ -11,7 +11,7 @@ import ( flowprometheus "github.com/grafana/agent/internal/component/prometheus" "github.com/grafana/agent/internal/flow/componenttest" "github.com/grafana/agent/internal/util" - "github.com/grafana/river" + river "github.com/grafana/alloy/syntax" "github.com/prometheus/common/model" "github.com/prometheus/prometheus/model/exemplar" "github.com/prometheus/prometheus/model/labels" diff --git a/internal/component/otelcol/receiver/vcenter/vcenter.go b/internal/component/otelcol/receiver/vcenter/vcenter.go index 68f3eea716..11e19b464b 100644 --- a/internal/component/otelcol/receiver/vcenter/vcenter.go +++ b/internal/component/otelcol/receiver/vcenter/vcenter.go @@ -9,7 +9,7 @@ import ( "github.com/grafana/agent/internal/component/otelcol" "github.com/grafana/agent/internal/component/otelcol/receiver" "github.com/grafana/agent/internal/featuregate" - "github.com/grafana/river/rivertypes" + rivertypes "github.com/grafana/alloy/syntax/alloytypes" "github.com/mitchellh/mapstructure" "github.com/open-telemetry/opentelemetry-collector-contrib/receiver/vcenterreceiver" otelcomponent "go.opentelemetry.io/collector/component" diff --git a/internal/component/otelcol/receiver/vcenter/vcenter_test.go 
b/internal/component/otelcol/receiver/vcenter/vcenter_test.go index 2104eb65b2..dc78595718 100644 --- a/internal/component/otelcol/receiver/vcenter/vcenter_test.go +++ b/internal/component/otelcol/receiver/vcenter/vcenter_test.go @@ -6,7 +6,7 @@ import ( "github.com/grafana/agent/internal/component/otelcol" "github.com/grafana/agent/internal/component/otelcol/receiver/vcenter" - "github.com/grafana/river" + river "github.com/grafana/alloy/syntax" "github.com/open-telemetry/opentelemetry-collector-contrib/receiver/vcenterreceiver" "github.com/stretchr/testify/require" ) diff --git a/internal/component/otelcol/receiver/zipkin/zipkin_test.go b/internal/component/otelcol/receiver/zipkin/zipkin_test.go index 4377bb5e33..5fec2fa441 100644 --- a/internal/component/otelcol/receiver/zipkin/zipkin_test.go +++ b/internal/component/otelcol/receiver/zipkin/zipkin_test.go @@ -9,7 +9,7 @@ import ( "github.com/grafana/agent/internal/component/otelcol/receiver/zipkin" "github.com/grafana/agent/internal/flow/componenttest" "github.com/grafana/agent/internal/util" - "github.com/grafana/river" + river "github.com/grafana/alloy/syntax" "github.com/open-telemetry/opentelemetry-collector-contrib/receiver/zipkinreceiver" "github.com/phayes/freeport" "github.com/stretchr/testify/require" diff --git a/internal/component/prometheus/exporter/blackbox/blackbox.go b/internal/component/prometheus/exporter/blackbox/blackbox.go index a429818b61..c71a93bd6c 100644 --- a/internal/component/prometheus/exporter/blackbox/blackbox.go +++ b/internal/component/prometheus/exporter/blackbox/blackbox.go @@ -15,7 +15,7 @@ import ( "github.com/grafana/agent/internal/static/integrations" "github.com/grafana/agent/internal/static/integrations/blackbox_exporter" "github.com/grafana/agent/internal/util" - "github.com/grafana/river/rivertypes" + rivertypes "github.com/grafana/alloy/syntax/alloytypes" ) func init() { diff --git a/internal/component/prometheus/exporter/blackbox/blackbox_test.go b/internal/component/prometheus/exporter/blackbox/blackbox_test.go index d9d78bd5a1..15440fbc23 100644 --- a/internal/component/prometheus/exporter/blackbox/blackbox_test.go +++ b/internal/component/prometheus/exporter/blackbox/blackbox_test.go @@ -6,7 +6,7 @@ import ( "github.com/grafana/agent/internal/component" "github.com/grafana/agent/internal/component/discovery" - "github.com/grafana/river" + river "github.com/grafana/alloy/syntax" blackbox_config "github.com/prometheus/blackbox_exporter/config" "github.com/prometheus/common/model" "github.com/stretchr/testify/require" diff --git a/internal/component/prometheus/exporter/cadvisor/cadvisor_test.go b/internal/component/prometheus/exporter/cadvisor/cadvisor_test.go index 7195140524..af53a3a935 100644 --- a/internal/component/prometheus/exporter/cadvisor/cadvisor_test.go +++ b/internal/component/prometheus/exporter/cadvisor/cadvisor_test.go @@ -5,7 +5,7 @@ import ( "time" "github.com/grafana/agent/internal/static/integrations/cadvisor" - "github.com/grafana/river" + river "github.com/grafana/alloy/syntax" "github.com/stretchr/testify/require" ) diff --git a/internal/component/prometheus/exporter/cloudwatch/config.go b/internal/component/prometheus/exporter/cloudwatch/config.go index 62a0757b2e..4e867374d5 100644 --- a/internal/component/prometheus/exporter/cloudwatch/config.go +++ b/internal/component/prometheus/exporter/cloudwatch/config.go @@ -6,7 +6,7 @@ import ( "time" "github.com/grafana/agent/internal/static/integrations/cloudwatch_exporter" - "github.com/grafana/river" + river 
"github.com/grafana/alloy/syntax" yaceConf "github.com/nerdswords/yet-another-cloudwatch-exporter/pkg/config" yaceModel "github.com/nerdswords/yet-another-cloudwatch-exporter/pkg/model" ) diff --git a/internal/component/prometheus/exporter/cloudwatch/config_test.go b/internal/component/prometheus/exporter/cloudwatch/config_test.go index 37ad2ce01e..ef5611da5a 100644 --- a/internal/component/prometheus/exporter/cloudwatch/config_test.go +++ b/internal/component/prometheus/exporter/cloudwatch/config_test.go @@ -3,7 +3,7 @@ package cloudwatch import ( "testing" - "github.com/grafana/river" + river "github.com/grafana/alloy/syntax" yaceConf "github.com/nerdswords/yet-another-cloudwatch-exporter/pkg/config" yaceModel "github.com/nerdswords/yet-another-cloudwatch-exporter/pkg/model" "github.com/stretchr/testify/require" diff --git a/internal/component/prometheus/exporter/dnsmasq/dnsmasq_test.go b/internal/component/prometheus/exporter/dnsmasq/dnsmasq_test.go index dbf6a904f7..01d3fe5715 100644 --- a/internal/component/prometheus/exporter/dnsmasq/dnsmasq_test.go +++ b/internal/component/prometheus/exporter/dnsmasq/dnsmasq_test.go @@ -4,7 +4,7 @@ import ( "testing" "github.com/grafana/agent/internal/static/integrations/dnsmasq_exporter" - "github.com/grafana/river" + river "github.com/grafana/alloy/syntax" "github.com/stretchr/testify/assert" ) diff --git a/internal/component/prometheus/exporter/elasticsearch/elasticsearch_test.go b/internal/component/prometheus/exporter/elasticsearch/elasticsearch_test.go index d6b18347ae..a7756191bb 100644 --- a/internal/component/prometheus/exporter/elasticsearch/elasticsearch_test.go +++ b/internal/component/prometheus/exporter/elasticsearch/elasticsearch_test.go @@ -6,8 +6,8 @@ import ( commonCfg "github.com/grafana/agent/internal/component/common/config" "github.com/grafana/agent/internal/static/integrations/elasticsearch_exporter" - "github.com/grafana/river" - "github.com/grafana/river/rivertypes" + river "github.com/grafana/alloy/syntax" + rivertypes "github.com/grafana/alloy/syntax/alloytypes" promCfg "github.com/prometheus/common/config" "github.com/stretchr/testify/require" ) diff --git a/internal/component/prometheus/exporter/gcp/gcp_test.go b/internal/component/prometheus/exporter/gcp/gcp_test.go index f18c140d9a..020c91422c 100644 --- a/internal/component/prometheus/exporter/gcp/gcp_test.go +++ b/internal/component/prometheus/exporter/gcp/gcp_test.go @@ -4,7 +4,7 @@ import ( "testing" "time" - "github.com/grafana/river" + river "github.com/grafana/alloy/syntax" "github.com/stretchr/testify/require" ) diff --git a/internal/component/prometheus/exporter/github/github.go b/internal/component/prometheus/exporter/github/github.go index 9519637733..b0d36af09b 100644 --- a/internal/component/prometheus/exporter/github/github.go +++ b/internal/component/prometheus/exporter/github/github.go @@ -6,7 +6,7 @@ import ( "github.com/grafana/agent/internal/featuregate" "github.com/grafana/agent/internal/static/integrations" "github.com/grafana/agent/internal/static/integrations/github_exporter" - "github.com/grafana/river/rivertypes" + rivertypes "github.com/grafana/alloy/syntax/alloytypes" config_util "github.com/prometheus/common/config" ) diff --git a/internal/component/prometheus/exporter/github/github_test.go b/internal/component/prometheus/exporter/github/github_test.go index 00f1648284..be63c76c95 100644 --- a/internal/component/prometheus/exporter/github/github_test.go +++ b/internal/component/prometheus/exporter/github/github_test.go @@ -3,7 +3,7 @@ 
package github import ( "testing" - "github.com/grafana/river" + river "github.com/grafana/alloy/syntax" "github.com/stretchr/testify/require" ) diff --git a/internal/component/prometheus/exporter/kafka/kafka.go b/internal/component/prometheus/exporter/kafka/kafka.go index fcd12fe85f..09982a742a 100644 --- a/internal/component/prometheus/exporter/kafka/kafka.go +++ b/internal/component/prometheus/exporter/kafka/kafka.go @@ -10,7 +10,7 @@ import ( "github.com/grafana/agent/internal/featuregate" "github.com/grafana/agent/internal/static/integrations" "github.com/grafana/agent/internal/static/integrations/kafka_exporter" - "github.com/grafana/river/rivertypes" + rivertypes "github.com/grafana/alloy/syntax/alloytypes" "github.com/prometheus/common/config" ) diff --git a/internal/component/prometheus/exporter/kafka/kafka_test.go b/internal/component/prometheus/exporter/kafka/kafka_test.go index 1bf0a88726..9d7b134b96 100644 --- a/internal/component/prometheus/exporter/kafka/kafka_test.go +++ b/internal/component/prometheus/exporter/kafka/kafka_test.go @@ -5,7 +5,7 @@ import ( "github.com/grafana/agent/internal/component/discovery" "github.com/grafana/agent/internal/static/integrations/kafka_exporter" - "github.com/grafana/river" + river "github.com/grafana/alloy/syntax" "github.com/stretchr/testify/require" ) diff --git a/internal/component/prometheus/exporter/memcached/memcached_test.go b/internal/component/prometheus/exporter/memcached/memcached_test.go index e3b4a3ae5a..f31789753f 100644 --- a/internal/component/prometheus/exporter/memcached/memcached_test.go +++ b/internal/component/prometheus/exporter/memcached/memcached_test.go @@ -6,7 +6,7 @@ import ( "github.com/grafana/agent/internal/component/common/config" "github.com/grafana/agent/internal/static/integrations/memcached_exporter" - "github.com/grafana/river" + river "github.com/grafana/alloy/syntax" "github.com/stretchr/testify/assert" ) diff --git a/internal/component/prometheus/exporter/mongodb/mongodb.go b/internal/component/prometheus/exporter/mongodb/mongodb.go index 46685c422a..888a569f3e 100644 --- a/internal/component/prometheus/exporter/mongodb/mongodb.go +++ b/internal/component/prometheus/exporter/mongodb/mongodb.go @@ -6,7 +6,7 @@ import ( "github.com/grafana/agent/internal/featuregate" "github.com/grafana/agent/internal/static/integrations" "github.com/grafana/agent/internal/static/integrations/mongodb_exporter" - "github.com/grafana/river/rivertypes" + rivertypes "github.com/grafana/alloy/syntax/alloytypes" config_util "github.com/prometheus/common/config" ) diff --git a/internal/component/prometheus/exporter/mongodb/mongodb_test.go b/internal/component/prometheus/exporter/mongodb/mongodb_test.go index b33590c84e..c2e4ce3e3e 100644 --- a/internal/component/prometheus/exporter/mongodb/mongodb_test.go +++ b/internal/component/prometheus/exporter/mongodb/mongodb_test.go @@ -4,7 +4,7 @@ import ( "testing" "github.com/grafana/agent/internal/static/integrations/mongodb_exporter" - "github.com/grafana/river" + river "github.com/grafana/alloy/syntax" "github.com/stretchr/testify/require" ) diff --git a/internal/component/prometheus/exporter/mssql/mssql.go b/internal/component/prometheus/exporter/mssql/mssql.go index 368e844294..0b159d876a 100644 --- a/internal/component/prometheus/exporter/mssql/mssql.go +++ b/internal/component/prometheus/exporter/mssql/mssql.go @@ -12,7 +12,7 @@ import ( "github.com/grafana/agent/internal/static/integrations" "github.com/grafana/agent/internal/static/integrations/mssql" 
"github.com/grafana/agent/internal/util" - "github.com/grafana/river/rivertypes" + rivertypes "github.com/grafana/alloy/syntax/alloytypes" config_util "github.com/prometheus/common/config" "gopkg.in/yaml.v2" ) diff --git a/internal/component/prometheus/exporter/mssql/mssql_test.go b/internal/component/prometheus/exporter/mssql/mssql_test.go index 0eb59a8b39..d6f99f0623 100644 --- a/internal/component/prometheus/exporter/mssql/mssql_test.go +++ b/internal/component/prometheus/exporter/mssql/mssql_test.go @@ -6,8 +6,8 @@ import ( "github.com/burningalchemist/sql_exporter/config" "github.com/grafana/agent/internal/static/integrations/mssql" - "github.com/grafana/river" - "github.com/grafana/river/rivertypes" + river "github.com/grafana/alloy/syntax" + rivertypes "github.com/grafana/alloy/syntax/alloytypes" config_util "github.com/prometheus/common/config" "github.com/stretchr/testify/require" "gopkg.in/yaml.v2" diff --git a/internal/component/prometheus/exporter/mysql/mysql.go b/internal/component/prometheus/exporter/mysql/mysql.go index 7eba014902..1145133186 100644 --- a/internal/component/prometheus/exporter/mysql/mysql.go +++ b/internal/component/prometheus/exporter/mysql/mysql.go @@ -7,7 +7,7 @@ import ( "github.com/grafana/agent/internal/featuregate" "github.com/grafana/agent/internal/static/integrations" "github.com/grafana/agent/internal/static/integrations/mysqld_exporter" - "github.com/grafana/river/rivertypes" + rivertypes "github.com/grafana/alloy/syntax/alloytypes" config_util "github.com/prometheus/common/config" ) diff --git a/internal/component/prometheus/exporter/mysql/mysql_test.go b/internal/component/prometheus/exporter/mysql/mysql_test.go index 45a69d8286..4b0940c158 100644 --- a/internal/component/prometheus/exporter/mysql/mysql_test.go +++ b/internal/component/prometheus/exporter/mysql/mysql_test.go @@ -4,8 +4,8 @@ import ( "testing" "github.com/grafana/agent/internal/static/integrations/mysqld_exporter" - "github.com/grafana/river" - "github.com/grafana/river/rivertypes" + river "github.com/grafana/alloy/syntax" + rivertypes "github.com/grafana/alloy/syntax/alloytypes" "github.com/stretchr/testify/require" ) diff --git a/internal/component/prometheus/exporter/oracledb/oracledb.go b/internal/component/prometheus/exporter/oracledb/oracledb.go index 008549bf72..3e316988c2 100644 --- a/internal/component/prometheus/exporter/oracledb/oracledb.go +++ b/internal/component/prometheus/exporter/oracledb/oracledb.go @@ -10,7 +10,7 @@ import ( "github.com/grafana/agent/internal/featuregate" "github.com/grafana/agent/internal/static/integrations" "github.com/grafana/agent/internal/static/integrations/oracledb_exporter" - "github.com/grafana/river/rivertypes" + rivertypes "github.com/grafana/alloy/syntax/alloytypes" config_util "github.com/prometheus/common/config" ) diff --git a/internal/component/prometheus/exporter/oracledb/oracledb_test.go b/internal/component/prometheus/exporter/oracledb/oracledb_test.go index d339151dac..52c1291846 100644 --- a/internal/component/prometheus/exporter/oracledb/oracledb_test.go +++ b/internal/component/prometheus/exporter/oracledb/oracledb_test.go @@ -5,8 +5,8 @@ import ( "testing" "github.com/grafana/agent/internal/static/integrations/oracledb_exporter" - "github.com/grafana/river" - "github.com/grafana/river/rivertypes" + river "github.com/grafana/alloy/syntax" + rivertypes "github.com/grafana/alloy/syntax/alloytypes" config_util "github.com/prometheus/common/config" "github.com/stretchr/testify/require" ) diff --git 
a/internal/component/prometheus/exporter/postgres/postgres.go b/internal/component/prometheus/exporter/postgres/postgres.go index d67eeafb61..4bd4b5557a 100644 --- a/internal/component/prometheus/exporter/postgres/postgres.go +++ b/internal/component/prometheus/exporter/postgres/postgres.go @@ -9,7 +9,7 @@ import ( "github.com/grafana/agent/internal/featuregate" "github.com/grafana/agent/internal/static/integrations" "github.com/grafana/agent/internal/static/integrations/postgres_exporter" - "github.com/grafana/river/rivertypes" + rivertypes "github.com/grafana/alloy/syntax/alloytypes" "github.com/lib/pq" config_util "github.com/prometheus/common/config" ) diff --git a/internal/component/prometheus/exporter/postgres/postgres_test.go b/internal/component/prometheus/exporter/postgres/postgres_test.go index b4a84f6c4f..0b2ccbe928 100644 --- a/internal/component/prometheus/exporter/postgres/postgres_test.go +++ b/internal/component/prometheus/exporter/postgres/postgres_test.go @@ -4,8 +4,8 @@ import ( "testing" "github.com/grafana/agent/internal/static/integrations/postgres_exporter" - "github.com/grafana/river" - "github.com/grafana/river/rivertypes" + river "github.com/grafana/alloy/syntax" + rivertypes "github.com/grafana/alloy/syntax/alloytypes" config_util "github.com/prometheus/common/config" "github.com/stretchr/testify/require" ) diff --git a/internal/component/prometheus/exporter/process/process_test.go b/internal/component/prometheus/exporter/process/process_test.go index 8f0da6c516..bcb8cc9011 100644 --- a/internal/component/prometheus/exporter/process/process_test.go +++ b/internal/component/prometheus/exporter/process/process_test.go @@ -3,7 +3,7 @@ package process import ( "testing" - "github.com/grafana/river" + river "github.com/grafana/alloy/syntax" "github.com/ncabatoff/process-exporter/config" "github.com/stretchr/testify/require" ) diff --git a/internal/component/prometheus/exporter/redis/redis.go b/internal/component/prometheus/exporter/redis/redis.go index 8085f4dda0..394727a529 100644 --- a/internal/component/prometheus/exporter/redis/redis.go +++ b/internal/component/prometheus/exporter/redis/redis.go @@ -10,7 +10,7 @@ import ( "github.com/grafana/agent/internal/featuregate" "github.com/grafana/agent/internal/static/integrations" "github.com/grafana/agent/internal/static/integrations/redis_exporter" - "github.com/grafana/river/rivertypes" + rivertypes "github.com/grafana/alloy/syntax/alloytypes" config_util "github.com/prometheus/common/config" ) diff --git a/internal/component/prometheus/exporter/redis/redis_test.go b/internal/component/prometheus/exporter/redis/redis_test.go index 35503de473..15986afcb9 100644 --- a/internal/component/prometheus/exporter/redis/redis_test.go +++ b/internal/component/prometheus/exporter/redis/redis_test.go @@ -5,7 +5,7 @@ import ( "time" "github.com/grafana/agent/internal/static/integrations/redis_exporter" - "github.com/grafana/river" + river "github.com/grafana/alloy/syntax" "github.com/stretchr/testify/require" ) diff --git a/internal/component/prometheus/exporter/snmp/snmp.go b/internal/component/prometheus/exporter/snmp/snmp.go index 0a455eea72..4025c98d69 100644 --- a/internal/component/prometheus/exporter/snmp/snmp.go +++ b/internal/component/prometheus/exporter/snmp/snmp.go @@ -11,7 +11,7 @@ import ( "github.com/grafana/agent/internal/featuregate" "github.com/grafana/agent/internal/static/integrations" "github.com/grafana/agent/internal/static/integrations/snmp_exporter" - "github.com/grafana/river/rivertypes" + rivertypes 
"github.com/grafana/alloy/syntax/alloytypes" snmp_config "github.com/prometheus/snmp_exporter/config" "gopkg.in/yaml.v2" ) diff --git a/internal/component/prometheus/exporter/snmp/snmp_test.go b/internal/component/prometheus/exporter/snmp/snmp_test.go index 6d5a0e97d7..6f4c6d36ae 100644 --- a/internal/component/prometheus/exporter/snmp/snmp_test.go +++ b/internal/component/prometheus/exporter/snmp/snmp_test.go @@ -6,7 +6,7 @@ import ( "github.com/grafana/agent/internal/component" "github.com/grafana/agent/internal/component/discovery" - "github.com/grafana/river" + river "github.com/grafana/alloy/syntax" "github.com/prometheus/common/model" "github.com/prometheus/snmp_exporter/config" diff --git a/internal/component/prometheus/exporter/snowflake/snowflake.go b/internal/component/prometheus/exporter/snowflake/snowflake.go index bb93d9df1c..aff54c510f 100644 --- a/internal/component/prometheus/exporter/snowflake/snowflake.go +++ b/internal/component/prometheus/exporter/snowflake/snowflake.go @@ -6,7 +6,7 @@ import ( "github.com/grafana/agent/internal/featuregate" "github.com/grafana/agent/internal/static/integrations" "github.com/grafana/agent/internal/static/integrations/snowflake_exporter" - "github.com/grafana/river/rivertypes" + rivertypes "github.com/grafana/alloy/syntax/alloytypes" config_util "github.com/prometheus/common/config" ) diff --git a/internal/component/prometheus/exporter/snowflake/snowflake_test.go b/internal/component/prometheus/exporter/snowflake/snowflake_test.go index 83ecf189ca..62ca26d739 100644 --- a/internal/component/prometheus/exporter/snowflake/snowflake_test.go +++ b/internal/component/prometheus/exporter/snowflake/snowflake_test.go @@ -4,8 +4,8 @@ import ( "testing" "github.com/grafana/agent/internal/static/integrations/snowflake_exporter" - "github.com/grafana/river" - "github.com/grafana/river/rivertypes" + river "github.com/grafana/alloy/syntax" + rivertypes "github.com/grafana/alloy/syntax/alloytypes" config_util "github.com/prometheus/common/config" "github.com/stretchr/testify/require" ) diff --git a/internal/component/prometheus/exporter/squid/squid.go b/internal/component/prometheus/exporter/squid/squid.go index 6ef589005e..4945e59eb7 100644 --- a/internal/component/prometheus/exporter/squid/squid.go +++ b/internal/component/prometheus/exporter/squid/squid.go @@ -8,7 +8,7 @@ import ( "github.com/grafana/agent/internal/featuregate" "github.com/grafana/agent/internal/static/integrations" "github.com/grafana/agent/internal/static/integrations/squid_exporter" - "github.com/grafana/river/rivertypes" + rivertypes "github.com/grafana/alloy/syntax/alloytypes" "github.com/prometheus/common/config" ) diff --git a/internal/component/prometheus/exporter/squid/squid_test.go b/internal/component/prometheus/exporter/squid/squid_test.go index 78dc2af2bf..39e8a30cf2 100644 --- a/internal/component/prometheus/exporter/squid/squid_test.go +++ b/internal/component/prometheus/exporter/squid/squid_test.go @@ -5,8 +5,8 @@ import ( "testing" "github.com/grafana/agent/internal/static/integrations/squid_exporter" - "github.com/grafana/river" - "github.com/grafana/river/rivertypes" + river "github.com/grafana/alloy/syntax" + rivertypes "github.com/grafana/alloy/syntax/alloytypes" "github.com/prometheus/common/config" "github.com/stretchr/testify/require" ) diff --git a/internal/component/prometheus/exporter/statsd/statsd_test.go b/internal/component/prometheus/exporter/statsd/statsd_test.go index 942caf812e..7721265436 100644 --- 
a/internal/component/prometheus/exporter/statsd/statsd_test.go +++ b/internal/component/prometheus/exporter/statsd/statsd_test.go @@ -4,7 +4,7 @@ import ( "testing" "time" - "github.com/grafana/river" + river "github.com/grafana/alloy/syntax" "github.com/stretchr/testify/require" ) diff --git a/internal/component/prometheus/exporter/windows/config_default_windows_test.go b/internal/component/prometheus/exporter/windows/config_default_windows_test.go index 6cdd9cf602..ba31ed8ea9 100644 --- a/internal/component/prometheus/exporter/windows/config_default_windows_test.go +++ b/internal/component/prometheus/exporter/windows/config_default_windows_test.go @@ -3,7 +3,7 @@ package windows import ( "testing" - "github.com/grafana/river" + river "github.com/grafana/alloy/syntax" "github.com/stretchr/testify/require" ) diff --git a/internal/component/prometheus/exporter/windows/windows_test.go b/internal/component/prometheus/exporter/windows/windows_test.go index 6f15ad6e75..042cf982e3 100644 --- a/internal/component/prometheus/exporter/windows/windows_test.go +++ b/internal/component/prometheus/exporter/windows/windows_test.go @@ -3,7 +3,7 @@ package windows import ( "testing" - "github.com/grafana/river" + river "github.com/grafana/alloy/syntax" "github.com/stretchr/testify/require" ) diff --git a/internal/component/prometheus/operator/types_test.go b/internal/component/prometheus/operator/types_test.go index d827d61e70..cab515b2ca 100644 --- a/internal/component/prometheus/operator/types_test.go +++ b/internal/component/prometheus/operator/types_test.go @@ -3,7 +3,7 @@ package operator import ( "testing" - "github.com/grafana/river" + river "github.com/grafana/alloy/syntax" "github.com/stretchr/testify/require" ) diff --git a/internal/component/prometheus/relabel/relabel_test.go b/internal/component/prometheus/relabel/relabel_test.go index b1e660b79e..dcd1e27f52 100644 --- a/internal/component/prometheus/relabel/relabel_test.go +++ b/internal/component/prometheus/relabel/relabel_test.go @@ -14,7 +14,7 @@ import ( "github.com/grafana/agent/internal/flow/componenttest" "github.com/grafana/agent/internal/service/labelstore" "github.com/grafana/agent/internal/util" - "github.com/grafana/river" + river "github.com/grafana/alloy/syntax" prom "github.com/prometheus/client_golang/prometheus" "github.com/prometheus/prometheus/model/labels" "github.com/prometheus/prometheus/model/relabel" diff --git a/internal/component/prometheus/remotewrite/remote_write_test.go b/internal/component/prometheus/remotewrite/remote_write_test.go index 8920f98562..819cd0cd53 100644 --- a/internal/component/prometheus/remotewrite/remote_write_test.go +++ b/internal/component/prometheus/remotewrite/remote_write_test.go @@ -11,7 +11,7 @@ import ( "github.com/grafana/agent/internal/component/prometheus/remotewrite" "github.com/grafana/agent/internal/flow/componenttest" "github.com/grafana/agent/internal/util" - "github.com/grafana/river" + river "github.com/grafana/alloy/syntax" "github.com/prometheus/prometheus/model/labels" "github.com/prometheus/prometheus/prompb" "github.com/prometheus/prometheus/storage/remote" diff --git a/internal/component/prometheus/remotewrite/types.go b/internal/component/prometheus/remotewrite/types.go index 67f550ee88..1b681c4c82 100644 --- a/internal/component/prometheus/remotewrite/types.go +++ b/internal/component/prometheus/remotewrite/types.go @@ -8,7 +8,7 @@ import ( types "github.com/grafana/agent/internal/component/common/config" flow_relabel 
"github.com/grafana/agent/internal/component/common/relabel" - "github.com/grafana/river/rivertypes" + rivertypes "github.com/grafana/alloy/syntax/alloytypes" "github.com/google/uuid" common "github.com/prometheus/common/config" diff --git a/internal/component/prometheus/remotewrite/types_test.go b/internal/component/prometheus/remotewrite/types_test.go index 2c013c4c06..ed7f4ec751 100644 --- a/internal/component/prometheus/remotewrite/types_test.go +++ b/internal/component/prometheus/remotewrite/types_test.go @@ -5,7 +5,7 @@ import ( "testing" "time" - "github.com/grafana/river" + river "github.com/grafana/alloy/syntax" commonconfig "github.com/prometheus/common/config" "github.com/prometheus/common/model" "github.com/prometheus/common/sigv4" diff --git a/internal/component/prometheus/scrape/scrape_test.go b/internal/component/prometheus/scrape/scrape_test.go index 5a96d412d1..fbfda5799d 100644 --- a/internal/component/prometheus/scrape/scrape_test.go +++ b/internal/component/prometheus/scrape/scrape_test.go @@ -14,8 +14,8 @@ import ( http_service "github.com/grafana/agent/internal/service/http" "github.com/grafana/agent/internal/service/labelstore" "github.com/grafana/agent/internal/util" + river "github.com/grafana/alloy/syntax" "github.com/grafana/ckit/memconn" - "github.com/grafana/river" prometheus_client "github.com/prometheus/client_golang/prometheus" "github.com/prometheus/client_golang/prometheus/promhttp" "github.com/prometheus/prometheus/model/labels" diff --git a/internal/component/pyroscope/ebpf/ebpf_linux_test.go b/internal/component/pyroscope/ebpf/ebpf_linux_test.go index 1a7db5e31c..05c8a21fb9 100644 --- a/internal/component/pyroscope/ebpf/ebpf_linux_test.go +++ b/internal/component/pyroscope/ebpf/ebpf_linux_test.go @@ -12,10 +12,10 @@ import ( "github.com/grafana/agent/internal/component" "github.com/grafana/agent/internal/component/pyroscope" "github.com/grafana/agent/internal/util" + river "github.com/grafana/alloy/syntax" ebpfspy "github.com/grafana/pyroscope/ebpf" "github.com/grafana/pyroscope/ebpf/pprof" "github.com/grafana/pyroscope/ebpf/sd" - "github.com/grafana/river" "github.com/oklog/run" "github.com/prometheus/client_golang/prometheus" "github.com/stretchr/testify/require" diff --git a/internal/component/pyroscope/scrape/scrape_test.go b/internal/component/pyroscope/scrape/scrape_test.go index 719629ed2f..b9daf6a4dc 100644 --- a/internal/component/pyroscope/scrape/scrape_test.go +++ b/internal/component/pyroscope/scrape/scrape_test.go @@ -15,7 +15,7 @@ import ( "github.com/grafana/agent/internal/component/pyroscope" "github.com/grafana/agent/internal/service/cluster" "github.com/grafana/agent/internal/util" - "github.com/grafana/river" + river "github.com/grafana/alloy/syntax" "github.com/prometheus/client_golang/prometheus" "github.com/prometheus/common/model" "github.com/stretchr/testify/require" diff --git a/internal/component/pyroscope/write/write_test.go b/internal/component/pyroscope/write/write_test.go index 1a02834440..68f28a34f9 100644 --- a/internal/component/pyroscope/write/write_test.go +++ b/internal/component/pyroscope/write/write_test.go @@ -13,10 +13,10 @@ import ( "github.com/grafana/agent/internal/component" "github.com/grafana/agent/internal/component/pyroscope" "github.com/grafana/agent/internal/util" + river "github.com/grafana/alloy/syntax" pushv1 "github.com/grafana/pyroscope/api/gen/proto/go/push/v1" "github.com/grafana/pyroscope/api/gen/proto/go/push/v1/pushv1connect" typesv1 "github.com/grafana/pyroscope/api/gen/proto/go/types/v1" - 
"github.com/grafana/river" "github.com/prometheus/client_golang/prometheus" "github.com/prometheus/prometheus/model/labels" "github.com/stretchr/testify/require" diff --git a/internal/component/remote/http/http.go b/internal/component/remote/http/http.go index b4a4d87540..9bb83b4c71 100644 --- a/internal/component/remote/http/http.go +++ b/internal/component/remote/http/http.go @@ -16,7 +16,7 @@ import ( "github.com/grafana/agent/internal/featuregate" "github.com/grafana/agent/internal/flow/logging/level" "github.com/grafana/agent/internal/useragent" - "github.com/grafana/river/rivertypes" + rivertypes "github.com/grafana/alloy/syntax/alloytypes" prom_config "github.com/prometheus/common/config" ) diff --git a/internal/component/remote/http/http_test.go b/internal/component/remote/http/http_test.go index 003ee8008c..1b2d4b7df1 100644 --- a/internal/component/remote/http/http_test.go +++ b/internal/component/remote/http/http_test.go @@ -14,9 +14,9 @@ import ( "github.com/grafana/agent/internal/flow/componenttest" "github.com/grafana/agent/internal/flow/logging/level" "github.com/grafana/agent/internal/util" + river "github.com/grafana/alloy/syntax" + rivertypes "github.com/grafana/alloy/syntax/alloytypes" "github.com/grafana/dskit/backoff" - "github.com/grafana/river" - "github.com/grafana/river/rivertypes" "github.com/stretchr/testify/require" ) diff --git a/internal/component/remote/kubernetes/kubernetes.go b/internal/component/remote/kubernetes/kubernetes.go index 9e5de1d537..94c390f179 100644 --- a/internal/component/remote/kubernetes/kubernetes.go +++ b/internal/component/remote/kubernetes/kubernetes.go @@ -12,7 +12,7 @@ import ( "github.com/grafana/agent/internal/component" "github.com/grafana/agent/internal/component/common/kubernetes" - "github.com/grafana/river/rivertypes" + rivertypes "github.com/grafana/alloy/syntax/alloytypes" v1 "k8s.io/apimachinery/pkg/apis/meta/v1" client_go "k8s.io/client-go/kubernetes" diff --git a/internal/component/remote/kubernetes/kubernetes_test.go b/internal/component/remote/kubernetes/kubernetes_test.go index 2cf564fe12..583b480e5c 100644 --- a/internal/component/remote/kubernetes/kubernetes_test.go +++ b/internal/component/remote/kubernetes/kubernetes_test.go @@ -4,7 +4,7 @@ import ( "testing" "time" - "github.com/grafana/river" + river "github.com/grafana/alloy/syntax" "github.com/stretchr/testify/require" "gotest.tools/assert" ) diff --git a/internal/component/remote/s3/s3.go b/internal/component/remote/s3/s3.go index b572bd43d0..4bd1b72fa3 100644 --- a/internal/component/remote/s3/s3.go +++ b/internal/component/remote/s3/s3.go @@ -14,7 +14,7 @@ import ( "github.com/aws/aws-sdk-go-v2/service/s3" "github.com/grafana/agent/internal/component" "github.com/grafana/agent/internal/featuregate" - "github.com/grafana/river/rivertypes" + rivertypes "github.com/grafana/alloy/syntax/alloytypes" "github.com/prometheus/client_golang/prometheus" ) diff --git a/internal/component/remote/s3/types.go b/internal/component/remote/s3/types.go index 5d66962d81..8341d08a3a 100644 --- a/internal/component/remote/s3/types.go +++ b/internal/component/remote/s3/types.go @@ -4,7 +4,7 @@ import ( "fmt" "time" - "github.com/grafana/river/rivertypes" + rivertypes "github.com/grafana/alloy/syntax/alloytypes" ) // Arguments implements the input for the S3 component. 
diff --git a/internal/component/remote/vault/auth.go b/internal/component/remote/vault/auth.go index 20ee9d530f..13a955799a 100644 --- a/internal/component/remote/vault/auth.go +++ b/internal/component/remote/vault/auth.go @@ -4,7 +4,7 @@ import ( "context" "fmt" - "github.com/grafana/river/rivertypes" + rivertypes "github.com/grafana/alloy/syntax/alloytypes" vault "github.com/hashicorp/vault/api" "github.com/hashicorp/vault/api/auth/approle" "github.com/hashicorp/vault/api/auth/aws" diff --git a/internal/component/remote/vault/vault.go b/internal/component/remote/vault/vault.go index af6ab0da0b..2da4d882a5 100644 --- a/internal/component/remote/vault/vault.go +++ b/internal/component/remote/vault/vault.go @@ -10,7 +10,7 @@ import ( "github.com/grafana/agent/internal/component" "github.com/grafana/agent/internal/featuregate" "github.com/grafana/agent/internal/flow/logging/level" - "github.com/grafana/river/rivertypes" + rivertypes "github.com/grafana/alloy/syntax/alloytypes" "github.com/oklog/run" vault "github.com/hashicorp/vault/api" diff --git a/internal/component/remote/vault/vault_test.go b/internal/component/remote/vault/vault_test.go index 09b3170ddf..8b9a234767 100644 --- a/internal/component/remote/vault/vault_test.go +++ b/internal/component/remote/vault/vault_test.go @@ -14,8 +14,8 @@ import ( "github.com/go-kit/log" "github.com/grafana/agent/internal/flow/componenttest" "github.com/grafana/agent/internal/util" - "github.com/grafana/river" - "github.com/grafana/river/rivertypes" + river "github.com/grafana/alloy/syntax" + rivertypes "github.com/grafana/alloy/syntax/alloytypes" "github.com/stretchr/testify/require" "github.com/testcontainers/testcontainers-go" "github.com/testcontainers/testcontainers-go/wait" diff --git a/internal/converter/internal/common/convert_appendable.go b/internal/converter/internal/common/convert_appendable.go index 59652fab63..2d159b599f 100644 --- a/internal/converter/internal/common/convert_appendable.go +++ b/internal/converter/internal/common/convert_appendable.go @@ -1,9 +1,9 @@ package common import ( - "github.com/grafana/river" - "github.com/grafana/river/token" - "github.com/grafana/river/token/builder" + river "github.com/grafana/alloy/syntax" + "github.com/grafana/alloy/syntax/token" + "github.com/grafana/alloy/syntax/token/builder" "github.com/prometheus/prometheus/storage" ) diff --git a/internal/converter/internal/common/convert_logs_receiver.go b/internal/converter/internal/common/convert_logs_receiver.go index ad08ecb15f..e66c440786 100644 --- a/internal/converter/internal/common/convert_logs_receiver.go +++ b/internal/converter/internal/common/convert_logs_receiver.go @@ -2,9 +2,9 @@ package common import ( "github.com/grafana/agent/internal/component/common/loki" - "github.com/grafana/river" - "github.com/grafana/river/token" - "github.com/grafana/river/token/builder" + river "github.com/grafana/alloy/syntax" + "github.com/grafana/alloy/syntax/token" + "github.com/grafana/alloy/syntax/token/builder" ) // ConvertLogsReceiver allows us to override how the loki.LogsReceiver is tokenized. 
diff --git a/internal/converter/internal/common/convert_targets.go b/internal/converter/internal/common/convert_targets.go index bd26fb7667..0819ea9404 100644 --- a/internal/converter/internal/common/convert_targets.go +++ b/internal/converter/internal/common/convert_targets.go @@ -2,9 +2,9 @@ package common import ( "github.com/grafana/agent/internal/component/discovery" - "github.com/grafana/river" - "github.com/grafana/river/token" - "github.com/grafana/river/token/builder" + river "github.com/grafana/alloy/syntax" + "github.com/grafana/alloy/syntax/token" + "github.com/grafana/alloy/syntax/token/builder" ) // NewDiscoveryExports will return a new [discovery.Exports] with a specific diff --git a/internal/converter/internal/common/convert_targets_test.go b/internal/converter/internal/common/convert_targets_test.go index 5a8b4ef793..ecc847f2d8 100644 --- a/internal/converter/internal/common/convert_targets_test.go +++ b/internal/converter/internal/common/convert_targets_test.go @@ -5,7 +5,7 @@ import ( "github.com/grafana/agent/internal/component/discovery" "github.com/grafana/agent/internal/converter/internal/common" - "github.com/grafana/river/token/builder" + "github.com/grafana/alloy/syntax/token/builder" "github.com/stretchr/testify/require" ) diff --git a/internal/converter/internal/common/custom_tokenizer.go b/internal/converter/internal/common/custom_tokenizer.go index 3fe73dcd0e..f2da2e6d25 100644 --- a/internal/converter/internal/common/custom_tokenizer.go +++ b/internal/converter/internal/common/custom_tokenizer.go @@ -1,8 +1,8 @@ package common import ( - "github.com/grafana/river/token" - "github.com/grafana/river/token/builder" + "github.com/grafana/alloy/syntax/token" + "github.com/grafana/alloy/syntax/token/builder" ) type CustomTokenizer struct { diff --git a/internal/converter/internal/common/http_client_config.go b/internal/converter/internal/common/http_client_config.go index 93ff1f50df..6f1631cf74 100644 --- a/internal/converter/internal/common/http_client_config.go +++ b/internal/converter/internal/common/http_client_config.go @@ -5,7 +5,7 @@ import ( "github.com/grafana/agent/internal/component/common/config" "github.com/grafana/agent/internal/converter/diag" - "github.com/grafana/river/rivertypes" + rivertypes "github.com/grafana/alloy/syntax/alloytypes" prom_config "github.com/prometheus/common/config" ) diff --git a/internal/converter/internal/common/river_utils.go b/internal/converter/internal/common/river_utils.go index eb87be7308..82544c8b85 100644 --- a/internal/converter/internal/common/river_utils.go +++ b/internal/converter/internal/common/river_utils.go @@ -5,17 +5,17 @@ import ( "fmt" "strings" - "github.com/grafana/river" - "github.com/grafana/river/parser" - "github.com/grafana/river/printer" - "github.com/grafana/river/scanner" + river "github.com/grafana/alloy/syntax" + "github.com/grafana/alloy/syntax/parser" + "github.com/grafana/alloy/syntax/printer" + "github.com/grafana/alloy/syntax/scanner" "github.com/grafana/agent/internal/component" flow_relabel "github.com/grafana/agent/internal/component/common/relabel" "github.com/grafana/agent/internal/component/discovery" "github.com/grafana/agent/internal/converter/diag" - "github.com/grafana/river/rivertypes" - "github.com/grafana/river/token/builder" + rivertypes "github.com/grafana/alloy/syntax/alloytypes" + "github.com/grafana/alloy/syntax/token/builder" ) // NewBlockWithOverride generates a new [*builder.Block] using a hook to diff --git a/internal/converter/internal/common/river_utils_test.go 
b/internal/converter/internal/common/river_utils_test.go index 8c130420b9..40d7694457 100644 --- a/internal/converter/internal/common/river_utils_test.go +++ b/internal/converter/internal/common/river_utils_test.go @@ -4,7 +4,7 @@ import ( "testing" "github.com/grafana/agent/internal/converter/internal/common" - "github.com/grafana/river" + river "github.com/grafana/alloy/syntax" "github.com/stretchr/testify/require" ) diff --git a/internal/converter/internal/common/validate.go b/internal/converter/internal/common/validate.go index 38d5cd65bc..57e750f6c1 100644 --- a/internal/converter/internal/common/validate.go +++ b/internal/converter/internal/common/validate.go @@ -6,7 +6,7 @@ import ( "strings" "github.com/grafana/agent/internal/converter/diag" - "github.com/grafana/river/token/builder" + "github.com/grafana/alloy/syntax/token/builder" ) const ( diff --git a/internal/converter/internal/otelcolconvert/converter.go b/internal/converter/internal/otelcolconvert/converter.go index 77d74f61c2..147a18ad65 100644 --- a/internal/converter/internal/otelcolconvert/converter.go +++ b/internal/converter/internal/otelcolconvert/converter.go @@ -6,7 +6,7 @@ import ( "github.com/grafana/agent/internal/converter/diag" "github.com/grafana/agent/internal/converter/internal/common" - "github.com/grafana/river/token/builder" + "github.com/grafana/alloy/syntax/token/builder" "go.opentelemetry.io/collector/component" "go.opentelemetry.io/collector/otelcol" ) diff --git a/internal/converter/internal/otelcolconvert/converter_basicauthextension.go b/internal/converter/internal/otelcolconvert/converter_basicauthextension.go index ac07fc6566..52083e204a 100644 --- a/internal/converter/internal/otelcolconvert/converter_basicauthextension.go +++ b/internal/converter/internal/otelcolconvert/converter_basicauthextension.go @@ -6,7 +6,7 @@ import ( "github.com/grafana/agent/internal/component/otelcol/auth/basic" "github.com/grafana/agent/internal/converter/diag" "github.com/grafana/agent/internal/converter/internal/common" - "github.com/grafana/river/rivertypes" + rivertypes "github.com/grafana/alloy/syntax/alloytypes" "github.com/open-telemetry/opentelemetry-collector-contrib/extension/basicauthextension" "go.opentelemetry.io/collector/component" ) diff --git a/internal/converter/internal/otelcolconvert/converter_bearertokenauthextension.go b/internal/converter/internal/otelcolconvert/converter_bearertokenauthextension.go index 63f134ec8a..b754639444 100644 --- a/internal/converter/internal/otelcolconvert/converter_bearertokenauthextension.go +++ b/internal/converter/internal/otelcolconvert/converter_bearertokenauthextension.go @@ -8,8 +8,8 @@ import ( "github.com/grafana/agent/internal/component/otelcol/auth/bearer" "github.com/grafana/agent/internal/converter/diag" "github.com/grafana/agent/internal/converter/internal/common" - "github.com/grafana/river/rivertypes" - "github.com/grafana/river/token/builder" + rivertypes "github.com/grafana/alloy/syntax/alloytypes" + "github.com/grafana/alloy/syntax/token/builder" "github.com/open-telemetry/opentelemetry-collector-contrib/extension/bearertokenauthextension" "go.opentelemetry.io/collector/component" ) diff --git a/internal/converter/internal/otelcolconvert/converter_headerssetterextension.go b/internal/converter/internal/otelcolconvert/converter_headerssetterextension.go index 799bc96042..60f4cdc3e6 100644 --- a/internal/converter/internal/otelcolconvert/converter_headerssetterextension.go +++ 
b/internal/converter/internal/otelcolconvert/converter_headerssetterextension.go @@ -6,7 +6,7 @@ import ( "github.com/grafana/agent/internal/component/otelcol/auth/headers" "github.com/grafana/agent/internal/converter/diag" "github.com/grafana/agent/internal/converter/internal/common" - "github.com/grafana/river/rivertypes" + rivertypes "github.com/grafana/alloy/syntax/alloytypes" "github.com/open-telemetry/opentelemetry-collector-contrib/extension/headerssetterextension" "go.opentelemetry.io/collector/component" ) diff --git a/internal/converter/internal/otelcolconvert/converter_helpers.go b/internal/converter/internal/otelcolconvert/converter_helpers.go index dbef1c481a..7942e74254 100644 --- a/internal/converter/internal/otelcolconvert/converter_helpers.go +++ b/internal/converter/internal/otelcolconvert/converter_helpers.go @@ -5,8 +5,8 @@ import ( "strings" "github.com/grafana/agent/internal/component/otelcol" - "github.com/grafana/river/token" - "github.com/grafana/river/token/builder" + "github.com/grafana/alloy/syntax/token" + "github.com/grafana/alloy/syntax/token/builder" "github.com/mitchellh/mapstructure" ) diff --git a/internal/converter/internal/otelcolconvert/converter_kafkareceiver.go b/internal/converter/internal/otelcolconvert/converter_kafkareceiver.go index ae941514e9..50faf5a373 100644 --- a/internal/converter/internal/otelcolconvert/converter_kafkareceiver.go +++ b/internal/converter/internal/otelcolconvert/converter_kafkareceiver.go @@ -8,7 +8,7 @@ import ( "github.com/grafana/agent/internal/component/otelcol/receiver/kafka" "github.com/grafana/agent/internal/converter/diag" "github.com/grafana/agent/internal/converter/internal/common" - "github.com/grafana/river/rivertypes" + rivertypes "github.com/grafana/alloy/syntax/alloytypes" "github.com/mitchellh/mapstructure" "github.com/open-telemetry/opentelemetry-collector-contrib/exporter/kafkaexporter" "github.com/open-telemetry/opentelemetry-collector-contrib/receiver/kafkareceiver" diff --git a/internal/converter/internal/otelcolconvert/converter_oauth2clientauthextension.go b/internal/converter/internal/otelcolconvert/converter_oauth2clientauthextension.go index 14ba01ea91..1373df33c6 100644 --- a/internal/converter/internal/otelcolconvert/converter_oauth2clientauthextension.go +++ b/internal/converter/internal/otelcolconvert/converter_oauth2clientauthextension.go @@ -6,7 +6,7 @@ import ( "github.com/grafana/agent/internal/component/otelcol/auth/oauth2" "github.com/grafana/agent/internal/converter/diag" "github.com/grafana/agent/internal/converter/internal/common" - "github.com/grafana/river/rivertypes" + rivertypes "github.com/grafana/alloy/syntax/alloytypes" "github.com/open-telemetry/opentelemetry-collector-contrib/extension/oauth2clientauthextension" "go.opentelemetry.io/collector/component" ) diff --git a/internal/converter/internal/otelcolconvert/converter_otlpreceiver.go b/internal/converter/internal/otelcolconvert/converter_otlpreceiver.go index 6bbb1d7526..768787d175 100644 --- a/internal/converter/internal/otelcolconvert/converter_otlpreceiver.go +++ b/internal/converter/internal/otelcolconvert/converter_otlpreceiver.go @@ -8,7 +8,7 @@ import ( "github.com/grafana/agent/internal/component/otelcol/receiver/otlp" "github.com/grafana/agent/internal/converter/diag" "github.com/grafana/agent/internal/converter/internal/common" - "github.com/grafana/river/rivertypes" + rivertypes "github.com/grafana/alloy/syntax/alloytypes" "go.opentelemetry.io/collector/component" "go.opentelemetry.io/collector/config/configgrpc" 
"go.opentelemetry.io/collector/config/confighttp" diff --git a/internal/converter/internal/otelcolconvert/otelcolconvert.go b/internal/converter/internal/otelcolconvert/otelcolconvert.go index 8262053c0c..97ae03a387 100644 --- a/internal/converter/internal/otelcolconvert/otelcolconvert.go +++ b/internal/converter/internal/otelcolconvert/otelcolconvert.go @@ -8,7 +8,7 @@ import ( "github.com/grafana/agent/internal/converter/diag" "github.com/grafana/agent/internal/converter/internal/common" - "github.com/grafana/river/token/builder" + "github.com/grafana/alloy/syntax/token/builder" "go.opentelemetry.io/collector/component" "go.opentelemetry.io/collector/confmap" "go.opentelemetry.io/collector/confmap/converter/expandconverter" diff --git a/internal/converter/internal/otelcolconvert/utils.go b/internal/converter/internal/otelcolconvert/utils.go index d3515919ff..8008001981 100644 --- a/internal/converter/internal/otelcolconvert/utils.go +++ b/internal/converter/internal/otelcolconvert/utils.go @@ -4,7 +4,7 @@ import ( "fmt" "strings" - "github.com/grafana/river/token/builder" + "github.com/grafana/alloy/syntax/token/builder" "go.opentelemetry.io/collector/component" ) diff --git a/internal/converter/internal/prometheusconvert/build/prometheus_blocks.go b/internal/converter/internal/prometheusconvert/build/prometheus_blocks.go index adbe26077f..69e3493186 100644 --- a/internal/converter/internal/prometheusconvert/build/prometheus_blocks.go +++ b/internal/converter/internal/prometheusconvert/build/prometheus_blocks.go @@ -5,7 +5,7 @@ import ( "strings" "github.com/grafana/agent/internal/converter/diag" - "github.com/grafana/river/token/builder" + "github.com/grafana/alloy/syntax/token/builder" ) // PrometheusBlocks is a type for categorizing River Blocks before appending diff --git a/internal/converter/internal/prometheusconvert/component/azure.go b/internal/converter/internal/prometheusconvert/component/azure.go index 92c3945f4e..4601b32c85 100644 --- a/internal/converter/internal/prometheusconvert/component/azure.go +++ b/internal/converter/internal/prometheusconvert/component/azure.go @@ -8,7 +8,7 @@ import ( "github.com/grafana/agent/internal/converter/diag" "github.com/grafana/agent/internal/converter/internal/common" "github.com/grafana/agent/internal/converter/internal/prometheusconvert/build" - "github.com/grafana/river/rivertypes" + rivertypes "github.com/grafana/alloy/syntax/alloytypes" prom_azure "github.com/prometheus/prometheus/discovery/azure" ) diff --git a/internal/converter/internal/prometheusconvert/component/consul.go b/internal/converter/internal/prometheusconvert/component/consul.go index 6dcaa457ab..684af0dd14 100644 --- a/internal/converter/internal/prometheusconvert/component/consul.go +++ b/internal/converter/internal/prometheusconvert/component/consul.go @@ -8,7 +8,7 @@ import ( "github.com/grafana/agent/internal/converter/diag" "github.com/grafana/agent/internal/converter/internal/common" "github.com/grafana/agent/internal/converter/internal/prometheusconvert/build" - "github.com/grafana/river/rivertypes" + rivertypes "github.com/grafana/alloy/syntax/alloytypes" prom_consul "github.com/prometheus/prometheus/discovery/consul" ) diff --git a/internal/converter/internal/prometheusconvert/component/digitalocean.go b/internal/converter/internal/prometheusconvert/component/digitalocean.go index f6ec587162..cb3cd27912 100644 --- a/internal/converter/internal/prometheusconvert/component/digitalocean.go +++ 
b/internal/converter/internal/prometheusconvert/component/digitalocean.go @@ -8,7 +8,7 @@ import ( "github.com/grafana/agent/internal/converter/diag" "github.com/grafana/agent/internal/converter/internal/common" "github.com/grafana/agent/internal/converter/internal/prometheusconvert/build" - "github.com/grafana/river/rivertypes" + rivertypes "github.com/grafana/alloy/syntax/alloytypes" prom_config "github.com/prometheus/common/config" prom_digitalocean "github.com/prometheus/prometheus/discovery/digitalocean" ) diff --git a/internal/converter/internal/prometheusconvert/component/ec2.go b/internal/converter/internal/prometheusconvert/component/ec2.go index 3a38c55084..d64000a594 100644 --- a/internal/converter/internal/prometheusconvert/component/ec2.go +++ b/internal/converter/internal/prometheusconvert/component/ec2.go @@ -8,7 +8,7 @@ import ( "github.com/grafana/agent/internal/converter/diag" "github.com/grafana/agent/internal/converter/internal/common" "github.com/grafana/agent/internal/converter/internal/prometheusconvert/build" - "github.com/grafana/river/rivertypes" + rivertypes "github.com/grafana/alloy/syntax/alloytypes" prom_aws "github.com/prometheus/prometheus/discovery/aws" ) diff --git a/internal/converter/internal/prometheusconvert/component/lightsail.go b/internal/converter/internal/prometheusconvert/component/lightsail.go index 209e259f8a..60ad944ad2 100644 --- a/internal/converter/internal/prometheusconvert/component/lightsail.go +++ b/internal/converter/internal/prometheusconvert/component/lightsail.go @@ -8,7 +8,7 @@ import ( "github.com/grafana/agent/internal/converter/diag" "github.com/grafana/agent/internal/converter/internal/common" "github.com/grafana/agent/internal/converter/internal/prometheusconvert/build" - "github.com/grafana/river/rivertypes" + rivertypes "github.com/grafana/alloy/syntax/alloytypes" prom_aws "github.com/prometheus/prometheus/discovery/aws" ) diff --git a/internal/converter/internal/prometheusconvert/component/marathon.go b/internal/converter/internal/prometheusconvert/component/marathon.go index 9090685eb0..c9249f1d42 100644 --- a/internal/converter/internal/prometheusconvert/component/marathon.go +++ b/internal/converter/internal/prometheusconvert/component/marathon.go @@ -8,7 +8,7 @@ import ( "github.com/grafana/agent/internal/converter/diag" "github.com/grafana/agent/internal/converter/internal/common" "github.com/grafana/agent/internal/converter/internal/prometheusconvert/build" - "github.com/grafana/river/rivertypes" + rivertypes "github.com/grafana/alloy/syntax/alloytypes" prom_marathon "github.com/prometheus/prometheus/discovery/marathon" ) diff --git a/internal/converter/internal/prometheusconvert/component/openstack.go b/internal/converter/internal/prometheusconvert/component/openstack.go index 1e393e4490..a0a7455cf8 100644 --- a/internal/converter/internal/prometheusconvert/component/openstack.go +++ b/internal/converter/internal/prometheusconvert/component/openstack.go @@ -8,7 +8,7 @@ import ( "github.com/grafana/agent/internal/converter/diag" "github.com/grafana/agent/internal/converter/internal/common" "github.com/grafana/agent/internal/converter/internal/prometheusconvert/build" - "github.com/grafana/river/rivertypes" + rivertypes "github.com/grafana/alloy/syntax/alloytypes" prom_openstack "github.com/prometheus/prometheus/discovery/openstack" ) diff --git a/internal/converter/internal/prometheusconvert/component/ovhcloud.go b/internal/converter/internal/prometheusconvert/component/ovhcloud.go index 9aad579c6c..c7f981d492 100644 
--- a/internal/converter/internal/prometheusconvert/component/ovhcloud.go +++ b/internal/converter/internal/prometheusconvert/component/ovhcloud.go @@ -8,7 +8,7 @@ import ( "github.com/grafana/agent/internal/converter/diag" "github.com/grafana/agent/internal/converter/internal/common" "github.com/grafana/agent/internal/converter/internal/prometheusconvert/build" - "github.com/grafana/river/rivertypes" + rivertypes "github.com/grafana/alloy/syntax/alloytypes" prom_discovery "github.com/prometheus/prometheus/discovery/ovhcloud" ) diff --git a/internal/converter/internal/prometheusconvert/component/remote_write.go b/internal/converter/internal/prometheusconvert/component/remote_write.go index 86efa78fe5..ee1d2f7027 100644 --- a/internal/converter/internal/prometheusconvert/component/remote_write.go +++ b/internal/converter/internal/prometheusconvert/component/remote_write.go @@ -9,7 +9,7 @@ import ( "github.com/grafana/agent/internal/converter/diag" "github.com/grafana/agent/internal/converter/internal/common" "github.com/grafana/agent/internal/converter/internal/prometheusconvert/build" - "github.com/grafana/river/rivertypes" + rivertypes "github.com/grafana/alloy/syntax/alloytypes" "github.com/prometheus/common/sigv4" prom_config "github.com/prometheus/prometheus/config" "github.com/prometheus/prometheus/storage/remote/azuread" diff --git a/internal/converter/internal/prometheusconvert/component/scaleway.go b/internal/converter/internal/prometheusconvert/component/scaleway.go index f0528baa3f..eb2c1cbe6e 100644 --- a/internal/converter/internal/prometheusconvert/component/scaleway.go +++ b/internal/converter/internal/prometheusconvert/component/scaleway.go @@ -8,7 +8,7 @@ import ( "github.com/grafana/agent/internal/converter/diag" "github.com/grafana/agent/internal/converter/internal/common" "github.com/grafana/agent/internal/converter/internal/prometheusconvert/build" - "github.com/grafana/river/rivertypes" + rivertypes "github.com/grafana/alloy/syntax/alloytypes" prom_scaleway "github.com/prometheus/prometheus/discovery/scaleway" ) diff --git a/internal/converter/internal/prometheusconvert/prometheusconvert.go b/internal/converter/internal/prometheusconvert/prometheusconvert.go index e5cdd84b4e..328da9903d 100644 --- a/internal/converter/internal/prometheusconvert/prometheusconvert.go +++ b/internal/converter/internal/prometheusconvert/prometheusconvert.go @@ -15,7 +15,7 @@ import ( prom_discover "github.com/prometheus/prometheus/discovery" "github.com/prometheus/prometheus/storage" - "github.com/grafana/river/token/builder" + "github.com/grafana/alloy/syntax/token/builder" _ "github.com/prometheus/prometheus/discovery/install" // Register Prometheus SDs ) diff --git a/internal/converter/internal/promtailconvert/internal/build/cloudflare.go b/internal/converter/internal/promtailconvert/internal/build/cloudflare.go index 5249d56883..fe89162f14 100644 --- a/internal/converter/internal/promtailconvert/internal/build/cloudflare.go +++ b/internal/converter/internal/promtailconvert/internal/build/cloudflare.go @@ -6,7 +6,7 @@ import ( "github.com/grafana/agent/internal/component/common/loki" "github.com/grafana/agent/internal/component/loki/source/cloudflare" "github.com/grafana/agent/internal/converter/internal/common" - "github.com/grafana/river/rivertypes" + rivertypes "github.com/grafana/alloy/syntax/alloytypes" ) func (s *ScrapeConfigBuilder) AppendCloudFlareConfig() { diff --git a/internal/converter/internal/promtailconvert/internal/build/consul_agent.go 
b/internal/converter/internal/promtailconvert/internal/build/consul_agent.go index a1c6c778b6..d346fe3993 100644 --- a/internal/converter/internal/promtailconvert/internal/build/consul_agent.go +++ b/internal/converter/internal/promtailconvert/internal/build/consul_agent.go @@ -6,8 +6,8 @@ import ( "github.com/grafana/agent/internal/component/discovery/consulagent" "github.com/grafana/agent/internal/converter/diag" "github.com/grafana/agent/internal/converter/internal/common" + rivertypes "github.com/grafana/alloy/syntax/alloytypes" promtail_consulagent "github.com/grafana/loki/clients/pkg/promtail/discovery/consulagent" - "github.com/grafana/river/rivertypes" ) func (s *ScrapeConfigBuilder) AppendConsulAgentSDs() { diff --git a/internal/converter/internal/promtailconvert/internal/build/kafka.go b/internal/converter/internal/promtailconvert/internal/build/kafka.go index 39f5c4c951..296889d313 100644 --- a/internal/converter/internal/promtailconvert/internal/build/kafka.go +++ b/internal/converter/internal/promtailconvert/internal/build/kafka.go @@ -4,8 +4,8 @@ import ( "github.com/grafana/agent/internal/component/common/relabel" "github.com/grafana/agent/internal/component/loki/source/kafka" "github.com/grafana/agent/internal/converter/internal/common" + rivertypes "github.com/grafana/alloy/syntax/alloytypes" "github.com/grafana/loki/clients/pkg/promtail/scrapeconfig" - "github.com/grafana/river/rivertypes" ) func (s *ScrapeConfigBuilder) AppendKafka() { diff --git a/internal/converter/internal/promtailconvert/internal/build/loki_write.go b/internal/converter/internal/promtailconvert/internal/build/loki_write.go index 2ba031e28e..acca65d958 100644 --- a/internal/converter/internal/promtailconvert/internal/build/loki_write.go +++ b/internal/converter/internal/promtailconvert/internal/build/loki_write.go @@ -8,9 +8,9 @@ import ( lokiwrite "github.com/grafana/agent/internal/component/loki/write" "github.com/grafana/agent/internal/converter/diag" "github.com/grafana/agent/internal/converter/internal/common" + "github.com/grafana/alloy/syntax/token/builder" "github.com/grafana/loki/clients/pkg/promtail/client" lokiflag "github.com/grafana/loki/pkg/util/flagext" - "github.com/grafana/river/token/builder" ) func NewLokiWrite(client *client.Config, diags *diag.Diagnostics, index int, labelPrefix string) (*builder.Block, loki.LogsReceiver) { diff --git a/internal/converter/internal/promtailconvert/internal/build/scrape_builder.go b/internal/converter/internal/promtailconvert/internal/build/scrape_builder.go index 91d0a34a48..4db2f44abc 100644 --- a/internal/converter/internal/promtailconvert/internal/build/scrape_builder.go +++ b/internal/converter/internal/promtailconvert/internal/build/scrape_builder.go @@ -16,10 +16,10 @@ import ( "github.com/grafana/agent/internal/converter/diag" "github.com/grafana/agent/internal/converter/internal/common" "github.com/grafana/agent/internal/converter/internal/prometheusconvert/component" + "github.com/grafana/alloy/syntax/scanner" + "github.com/grafana/alloy/syntax/token/builder" "github.com/grafana/loki/clients/pkg/promtail/scrapeconfig" "github.com/grafana/loki/clients/pkg/promtail/targets/file" - "github.com/grafana/river/scanner" - "github.com/grafana/river/token/builder" "github.com/prometheus/common/model" ) diff --git a/internal/converter/internal/promtailconvert/promtailconvert.go b/internal/converter/internal/promtailconvert/promtailconvert.go index 8c3664881c..de30dd7d17 100644 --- a/internal/converter/internal/promtailconvert/promtailconvert.go +++ 
b/internal/converter/internal/promtailconvert/promtailconvert.go @@ -9,6 +9,7 @@ import ( "github.com/grafana/agent/internal/converter/diag" "github.com/grafana/agent/internal/converter/internal/common" "github.com/grafana/agent/internal/converter/internal/promtailconvert/internal/build" + "github.com/grafana/alloy/syntax/token/builder" "github.com/grafana/dskit/flagext" promtailcfg "github.com/grafana/loki/clients/pkg/promtail/config" "github.com/grafana/loki/clients/pkg/promtail/limit" @@ -16,7 +17,6 @@ import ( "github.com/grafana/loki/clients/pkg/promtail/scrapeconfig" "github.com/grafana/loki/clients/pkg/promtail/targets/file" lokicfgutil "github.com/grafana/loki/pkg/util/cfg" - "github.com/grafana/river/token/builder" "gopkg.in/yaml.v2" ) diff --git a/internal/converter/internal/staticconvert/internal/build/app_agent_receiver.go b/internal/converter/internal/staticconvert/internal/build/app_agent_receiver.go index 1926fdb97d..640a37945f 100644 --- a/internal/converter/internal/staticconvert/internal/build/app_agent_receiver.go +++ b/internal/converter/internal/staticconvert/internal/build/app_agent_receiver.go @@ -10,8 +10,8 @@ import ( "github.com/grafana/agent/internal/converter/diag" "github.com/grafana/agent/internal/converter/internal/common" app_agent_receiver_v2 "github.com/grafana/agent/internal/static/integrations/v2/app_agent_receiver" - "github.com/grafana/river/rivertypes" - "github.com/grafana/river/scanner" + rivertypes "github.com/grafana/alloy/syntax/alloytypes" + "github.com/grafana/alloy/syntax/scanner" ) func (b *ConfigBuilder) appendAppAgentReceiverV2(config *app_agent_receiver_v2.Config) { diff --git a/internal/converter/internal/staticconvert/internal/build/blackbox_exporter.go b/internal/converter/internal/staticconvert/internal/build/blackbox_exporter.go index 0c2fb9b9f7..f7ffc9c364 100644 --- a/internal/converter/internal/staticconvert/internal/build/blackbox_exporter.go +++ b/internal/converter/internal/staticconvert/internal/build/blackbox_exporter.go @@ -7,7 +7,7 @@ import ( "github.com/grafana/agent/internal/component/prometheus/exporter/blackbox" "github.com/grafana/agent/internal/static/integrations/blackbox_exporter" blackbox_exporter_v2 "github.com/grafana/agent/internal/static/integrations/v2/blackbox_exporter" - "github.com/grafana/river/rivertypes" + rivertypes "github.com/grafana/alloy/syntax/alloytypes" ) func (b *ConfigBuilder) appendBlackboxExporter(config *blackbox_exporter.Config) discovery.Exports { diff --git a/internal/converter/internal/staticconvert/internal/build/builder.go b/internal/converter/internal/staticconvert/internal/build/builder.go index 677e28b91e..c36d51fa30 100644 --- a/internal/converter/internal/staticconvert/internal/build/builder.go +++ b/internal/converter/internal/staticconvert/internal/build/builder.go @@ -5,7 +5,7 @@ import ( "github.com/grafana/agent/internal/converter/diag" "github.com/grafana/agent/internal/static/config" - "github.com/grafana/river/token/builder" + "github.com/grafana/alloy/syntax/token/builder" ) type ConfigBuilder struct { diff --git a/internal/converter/internal/staticconvert/internal/build/builder_integrations.go b/internal/converter/internal/staticconvert/internal/build/builder_integrations.go index 1f268e299b..369210bcd7 100644 --- a/internal/converter/internal/staticconvert/internal/build/builder_integrations.go +++ b/internal/converter/internal/staticconvert/internal/build/builder_integrations.go @@ -46,7 +46,7 @@ import ( metricsutils_v2 
"github.com/grafana/agent/internal/static/integrations/v2/metricsutils" snmp_exporter_v2 "github.com/grafana/agent/internal/static/integrations/v2/snmp_exporter" "github.com/grafana/agent/internal/static/integrations/windows_exporter" - "github.com/grafana/river/scanner" + "github.com/grafana/alloy/syntax/scanner" "github.com/prometheus/common/model" prom_config "github.com/prometheus/prometheus/config" "github.com/prometheus/prometheus/model/relabel" diff --git a/internal/converter/internal/staticconvert/internal/build/elasticsearch_exporter.go b/internal/converter/internal/staticconvert/internal/build/elasticsearch_exporter.go index 4b39f46ca3..c2225f7a4f 100644 --- a/internal/converter/internal/staticconvert/internal/build/elasticsearch_exporter.go +++ b/internal/converter/internal/staticconvert/internal/build/elasticsearch_exporter.go @@ -5,7 +5,7 @@ import ( "github.com/grafana/agent/internal/component/discovery" "github.com/grafana/agent/internal/component/prometheus/exporter/elasticsearch" "github.com/grafana/agent/internal/static/integrations/elasticsearch_exporter" - "github.com/grafana/river/rivertypes" + rivertypes "github.com/grafana/alloy/syntax/alloytypes" ) func (b *ConfigBuilder) appendElasticsearchExporter(config *elasticsearch_exporter.Config, instanceKey *string) discovery.Exports { diff --git a/internal/converter/internal/staticconvert/internal/build/eventhandler.go b/internal/converter/internal/staticconvert/internal/build/eventhandler.go index 2381a23b00..a99bff8e62 100644 --- a/internal/converter/internal/staticconvert/internal/build/eventhandler.go +++ b/internal/converter/internal/staticconvert/internal/build/eventhandler.go @@ -10,7 +10,7 @@ import ( "github.com/grafana/agent/internal/converter/diag" "github.com/grafana/agent/internal/converter/internal/common" eventhandler_v2 "github.com/grafana/agent/internal/static/integrations/v2/eventhandler" - "github.com/grafana/river/scanner" + "github.com/grafana/alloy/syntax/scanner" ) func (b *ConfigBuilder) appendEventHandlerV2(config *eventhandler_v2.Config) { diff --git a/internal/converter/internal/staticconvert/internal/build/github_exporter.go b/internal/converter/internal/staticconvert/internal/build/github_exporter.go index 8759eb27c8..17dc312a53 100644 --- a/internal/converter/internal/staticconvert/internal/build/github_exporter.go +++ b/internal/converter/internal/staticconvert/internal/build/github_exporter.go @@ -4,7 +4,7 @@ import ( "github.com/grafana/agent/internal/component/discovery" "github.com/grafana/agent/internal/component/prometheus/exporter/github" "github.com/grafana/agent/internal/static/integrations/github_exporter" - "github.com/grafana/river/rivertypes" + rivertypes "github.com/grafana/alloy/syntax/alloytypes" ) func (b *ConfigBuilder) appendGithubExporter(config *github_exporter.Config, instanceKey *string) discovery.Exports { diff --git a/internal/converter/internal/staticconvert/internal/build/kafka_exporter.go b/internal/converter/internal/staticconvert/internal/build/kafka_exporter.go index b67aab7537..c520d44d85 100644 --- a/internal/converter/internal/staticconvert/internal/build/kafka_exporter.go +++ b/internal/converter/internal/staticconvert/internal/build/kafka_exporter.go @@ -4,7 +4,7 @@ import ( "github.com/grafana/agent/internal/component/discovery" "github.com/grafana/agent/internal/component/prometheus/exporter/kafka" "github.com/grafana/agent/internal/static/integrations/kafka_exporter" - "github.com/grafana/river/rivertypes" + rivertypes 
"github.com/grafana/alloy/syntax/alloytypes" ) func (b *ConfigBuilder) appendKafkaExporter(config *kafka_exporter.Config, instanceKey *string) discovery.Exports { diff --git a/internal/converter/internal/staticconvert/internal/build/mongodb_exporter.go b/internal/converter/internal/staticconvert/internal/build/mongodb_exporter.go index 5dfa296770..9dc15d45ce 100644 --- a/internal/converter/internal/staticconvert/internal/build/mongodb_exporter.go +++ b/internal/converter/internal/staticconvert/internal/build/mongodb_exporter.go @@ -4,7 +4,7 @@ import ( "github.com/grafana/agent/internal/component/discovery" "github.com/grafana/agent/internal/component/prometheus/exporter/mongodb" "github.com/grafana/agent/internal/static/integrations/mongodb_exporter" - "github.com/grafana/river/rivertypes" + rivertypes "github.com/grafana/alloy/syntax/alloytypes" ) func (b *ConfigBuilder) appendMongodbExporter(config *mongodb_exporter.Config, instanceKey *string) discovery.Exports { diff --git a/internal/converter/internal/staticconvert/internal/build/mssql_exporter.go b/internal/converter/internal/staticconvert/internal/build/mssql_exporter.go index 388e93cc7f..9af089a6c8 100644 --- a/internal/converter/internal/staticconvert/internal/build/mssql_exporter.go +++ b/internal/converter/internal/staticconvert/internal/build/mssql_exporter.go @@ -4,7 +4,7 @@ import ( "github.com/grafana/agent/internal/component/discovery" "github.com/grafana/agent/internal/component/prometheus/exporter/mssql" mssql_exporter "github.com/grafana/agent/internal/static/integrations/mssql" - "github.com/grafana/river/rivertypes" + rivertypes "github.com/grafana/alloy/syntax/alloytypes" ) func (b *ConfigBuilder) appendMssqlExporter(config *mssql_exporter.Config, instanceKey *string) discovery.Exports { diff --git a/internal/converter/internal/staticconvert/internal/build/mysqld_exporter.go b/internal/converter/internal/staticconvert/internal/build/mysqld_exporter.go index f7de2572d5..9f688fe757 100644 --- a/internal/converter/internal/staticconvert/internal/build/mysqld_exporter.go +++ b/internal/converter/internal/staticconvert/internal/build/mysqld_exporter.go @@ -4,7 +4,7 @@ import ( "github.com/grafana/agent/internal/component/discovery" "github.com/grafana/agent/internal/component/prometheus/exporter/mysql" "github.com/grafana/agent/internal/static/integrations/mysqld_exporter" - "github.com/grafana/river/rivertypes" + rivertypes "github.com/grafana/alloy/syntax/alloytypes" ) func (b *ConfigBuilder) appendMysqldExporter(config *mysqld_exporter.Config, instanceKey *string) discovery.Exports { diff --git a/internal/converter/internal/staticconvert/internal/build/oracledb_exporter.go b/internal/converter/internal/staticconvert/internal/build/oracledb_exporter.go index bc768c1b7d..05a4a35aa6 100644 --- a/internal/converter/internal/staticconvert/internal/build/oracledb_exporter.go +++ b/internal/converter/internal/staticconvert/internal/build/oracledb_exporter.go @@ -4,7 +4,7 @@ import ( "github.com/grafana/agent/internal/component/discovery" "github.com/grafana/agent/internal/component/prometheus/exporter/oracledb" "github.com/grafana/agent/internal/static/integrations/oracledb_exporter" - "github.com/grafana/river/rivertypes" + rivertypes "github.com/grafana/alloy/syntax/alloytypes" ) func (b *ConfigBuilder) appendOracledbExporter(config *oracledb_exporter.Config, instanceKey *string) discovery.Exports { diff --git a/internal/converter/internal/staticconvert/internal/build/postgres_exporter.go 
b/internal/converter/internal/staticconvert/internal/build/postgres_exporter.go index e73877e964..c5566071bf 100644 --- a/internal/converter/internal/staticconvert/internal/build/postgres_exporter.go +++ b/internal/converter/internal/staticconvert/internal/build/postgres_exporter.go @@ -4,7 +4,7 @@ import ( "github.com/grafana/agent/internal/component/discovery" "github.com/grafana/agent/internal/component/prometheus/exporter/postgres" "github.com/grafana/agent/internal/static/integrations/postgres_exporter" - "github.com/grafana/river/rivertypes" + rivertypes "github.com/grafana/alloy/syntax/alloytypes" ) func (b *ConfigBuilder) appendPostgresExporter(config *postgres_exporter.Config, instanceKey *string) discovery.Exports { diff --git a/internal/converter/internal/staticconvert/internal/build/redis_exporter.go b/internal/converter/internal/staticconvert/internal/build/redis_exporter.go index e54bc2f7a6..4e0bdbd38b 100644 --- a/internal/converter/internal/staticconvert/internal/build/redis_exporter.go +++ b/internal/converter/internal/staticconvert/internal/build/redis_exporter.go @@ -4,7 +4,7 @@ import ( "github.com/grafana/agent/internal/component/discovery" "github.com/grafana/agent/internal/component/prometheus/exporter/redis" "github.com/grafana/agent/internal/static/integrations/redis_exporter" - "github.com/grafana/river/rivertypes" + rivertypes "github.com/grafana/alloy/syntax/alloytypes" ) func (b *ConfigBuilder) appendRedisExporter(config *redis_exporter.Config, instanceKey *string) discovery.Exports { diff --git a/internal/converter/internal/staticconvert/internal/build/snmp_exporter.go b/internal/converter/internal/staticconvert/internal/build/snmp_exporter.go index cc7dbe3c03..7476bf69ae 100644 --- a/internal/converter/internal/staticconvert/internal/build/snmp_exporter.go +++ b/internal/converter/internal/staticconvert/internal/build/snmp_exporter.go @@ -6,7 +6,7 @@ import ( "github.com/grafana/agent/internal/converter/internal/common" "github.com/grafana/agent/internal/static/integrations/snmp_exporter" snmp_exporter_v2 "github.com/grafana/agent/internal/static/integrations/v2/snmp_exporter" - "github.com/grafana/river/rivertypes" + rivertypes "github.com/grafana/alloy/syntax/alloytypes" snmp_config "github.com/prometheus/snmp_exporter/config" ) diff --git a/internal/converter/internal/staticconvert/internal/build/snowflake_exporter.go b/internal/converter/internal/staticconvert/internal/build/snowflake_exporter.go index 3b0e204aa9..a59ee2992a 100644 --- a/internal/converter/internal/staticconvert/internal/build/snowflake_exporter.go +++ b/internal/converter/internal/staticconvert/internal/build/snowflake_exporter.go @@ -4,7 +4,7 @@ import ( "github.com/grafana/agent/internal/component/discovery" "github.com/grafana/agent/internal/component/prometheus/exporter/snowflake" "github.com/grafana/agent/internal/static/integrations/snowflake_exporter" - "github.com/grafana/river/rivertypes" + rivertypes "github.com/grafana/alloy/syntax/alloytypes" ) func (b *ConfigBuilder) appendSnowflakeExporter(config *snowflake_exporter.Config, instanceKey *string) discovery.Exports { diff --git a/internal/converter/internal/staticconvert/internal/build/squid_exporter.go b/internal/converter/internal/staticconvert/internal/build/squid_exporter.go index 2c93845620..37573399bc 100644 --- a/internal/converter/internal/staticconvert/internal/build/squid_exporter.go +++ b/internal/converter/internal/staticconvert/internal/build/squid_exporter.go @@ -4,7 +4,7 @@ import ( 
"github.com/grafana/agent/internal/component/discovery" "github.com/grafana/agent/internal/component/prometheus/exporter/squid" "github.com/grafana/agent/internal/static/integrations/squid_exporter" - "github.com/grafana/river/rivertypes" + rivertypes "github.com/grafana/alloy/syntax/alloytypes" ) func (b *ConfigBuilder) appendSquidExporter(config *squid_exporter.Config, instanceKey *string) discovery.Exports { diff --git a/internal/converter/internal/staticconvert/staticconvert.go b/internal/converter/internal/staticconvert/staticconvert.go index ccb7e36938..f2529386e3 100644 --- a/internal/converter/internal/staticconvert/staticconvert.go +++ b/internal/converter/internal/staticconvert/staticconvert.go @@ -13,11 +13,11 @@ import ( "github.com/grafana/agent/internal/converter/internal/staticconvert/internal/build" "github.com/grafana/agent/internal/static/config" "github.com/grafana/agent/internal/static/logs" + "github.com/grafana/alloy/syntax/scanner" + "github.com/grafana/alloy/syntax/token/builder" promtail_config "github.com/grafana/loki/clients/pkg/promtail/config" "github.com/grafana/loki/clients/pkg/promtail/limit" "github.com/grafana/loki/clients/pkg/promtail/targets/file" - "github.com/grafana/river/scanner" - "github.com/grafana/river/token/builder" prom_config "github.com/prometheus/prometheus/config" _ "github.com/grafana/agent/internal/static/integrations/install" // Install integrations diff --git a/internal/flow/internal/controller/block_node.go b/internal/flow/internal/controller/block_node.go index bab884f2ab..b43e639a5b 100644 --- a/internal/flow/internal/controller/block_node.go +++ b/internal/flow/internal/controller/block_node.go @@ -2,8 +2,8 @@ package controller import ( "github.com/grafana/agent/internal/flow/internal/dag" - "github.com/grafana/river/ast" - "github.com/grafana/river/vm" + "github.com/grafana/alloy/syntax/ast" + "github.com/grafana/alloy/syntax/vm" ) // BlockNode is a node in the DAG which manages a River block diff --git a/internal/flow/internal/controller/component_node_manager.go b/internal/flow/internal/controller/component_node_manager.go index dfb8625518..df92551614 100644 --- a/internal/flow/internal/controller/component_node_manager.go +++ b/internal/flow/internal/controller/component_node_manager.go @@ -4,7 +4,7 @@ import ( "fmt" "sync" - "github.com/grafana/river/ast" + "github.com/grafana/alloy/syntax/ast" ) // ComponentNodeManager is responsible for creating new component nodes and diff --git a/internal/flow/internal/controller/component_references.go b/internal/flow/internal/controller/component_references.go index c046895dda..c767535d96 100644 --- a/internal/flow/internal/controller/component_references.go +++ b/internal/flow/internal/controller/component_references.go @@ -4,9 +4,9 @@ import ( "fmt" "github.com/grafana/agent/internal/flow/internal/dag" - "github.com/grafana/river/ast" - "github.com/grafana/river/diag" - "github.com/grafana/river/vm" + "github.com/grafana/alloy/syntax/ast" + "github.com/grafana/alloy/syntax/diag" + "github.com/grafana/alloy/syntax/vm" ) // Traversal describes accessing a sequence of fields relative to a component. 
diff --git a/internal/flow/internal/controller/custom_component_registry.go b/internal/flow/internal/controller/custom_component_registry.go index a573e9e34a..63f6e83557 100644 --- a/internal/flow/internal/controller/custom_component_registry.go +++ b/internal/flow/internal/controller/custom_component_registry.go @@ -4,7 +4,7 @@ import ( "fmt" "sync" - "github.com/grafana/river/ast" + "github.com/grafana/alloy/syntax/ast" ) // CustomComponentRegistry holds custom component definitions that are available in the context. diff --git a/internal/flow/internal/controller/loader.go b/internal/flow/internal/controller/loader.go index 8921d5ff18..b265eb9408 100644 --- a/internal/flow/internal/controller/loader.go +++ b/internal/flow/internal/controller/loader.go @@ -16,9 +16,9 @@ import ( "github.com/grafana/agent/internal/flow/logging/level" "github.com/grafana/agent/internal/flow/tracing" "github.com/grafana/agent/internal/service" + "github.com/grafana/alloy/syntax/ast" + "github.com/grafana/alloy/syntax/diag" "github.com/grafana/dskit/backoff" - "github.com/grafana/river/ast" - "github.com/grafana/river/diag" "github.com/hashicorp/go-multierror" "go.opentelemetry.io/otel/attribute" "go.opentelemetry.io/otel/codes" diff --git a/internal/flow/internal/controller/loader_test.go b/internal/flow/internal/controller/loader_test.go index 398cd5cae0..b0eb2ac884 100644 --- a/internal/flow/internal/controller/loader_test.go +++ b/internal/flow/internal/controller/loader_test.go @@ -13,9 +13,9 @@ import ( "github.com/grafana/agent/internal/flow/internal/dag" "github.com/grafana/agent/internal/flow/logging" "github.com/grafana/agent/internal/service" - "github.com/grafana/river/ast" - "github.com/grafana/river/diag" - "github.com/grafana/river/parser" + "github.com/grafana/alloy/syntax/ast" + "github.com/grafana/alloy/syntax/diag" + "github.com/grafana/alloy/syntax/parser" "github.com/prometheus/client_golang/prometheus" "github.com/stretchr/testify/require" "go.opentelemetry.io/otel/trace/noop" diff --git a/internal/flow/internal/controller/module.go b/internal/flow/internal/controller/module.go index 36240eabc3..25a639e399 100644 --- a/internal/flow/internal/controller/module.go +++ b/internal/flow/internal/controller/module.go @@ -4,7 +4,7 @@ import ( "context" "github.com/grafana/agent/internal/component" - "github.com/grafana/river/ast" + "github.com/grafana/alloy/syntax/ast" ) // ModuleController is a lower-level interface for module controllers which diff --git a/internal/flow/internal/controller/node_builtin_component.go b/internal/flow/internal/controller/node_builtin_component.go index f646b608ba..2ba6a9b158 100644 --- a/internal/flow/internal/controller/node_builtin_component.go +++ b/internal/flow/internal/controller/node_builtin_component.go @@ -18,8 +18,8 @@ import ( "github.com/grafana/agent/internal/flow/logging" "github.com/grafana/agent/internal/flow/logging/level" "github.com/grafana/agent/internal/flow/tracing" - "github.com/grafana/river/ast" - "github.com/grafana/river/vm" + "github.com/grafana/alloy/syntax/ast" + "github.com/grafana/alloy/syntax/vm" "github.com/prometheus/client_golang/prometheus" "go.opentelemetry.io/otel/trace" ) diff --git a/internal/flow/internal/controller/node_config.go b/internal/flow/internal/controller/node_config.go index 127addb9ac..8bd233f517 100644 --- a/internal/flow/internal/controller/node_config.go +++ b/internal/flow/internal/controller/node_config.go @@ -4,8 +4,8 @@ import ( "fmt" "github.com/grafana/agent/internal/flow/internal/importsource" - 
"github.com/grafana/river/ast" - "github.com/grafana/river/diag" + "github.com/grafana/alloy/syntax/ast" + "github.com/grafana/alloy/syntax/diag" ) const ( diff --git a/internal/flow/internal/controller/node_config_argument.go b/internal/flow/internal/controller/node_config_argument.go index 56f8537d5a..75d8b7e6a2 100644 --- a/internal/flow/internal/controller/node_config_argument.go +++ b/internal/flow/internal/controller/node_config_argument.go @@ -5,8 +5,8 @@ import ( "strings" "sync" - "github.com/grafana/river/ast" - "github.com/grafana/river/vm" + "github.com/grafana/alloy/syntax/ast" + "github.com/grafana/alloy/syntax/vm" ) type ArgumentConfigNode struct { diff --git a/internal/flow/internal/controller/node_config_export.go b/internal/flow/internal/controller/node_config_export.go index dc05493113..8983c0e33c 100644 --- a/internal/flow/internal/controller/node_config_export.go +++ b/internal/flow/internal/controller/node_config_export.go @@ -5,8 +5,8 @@ import ( "strings" "sync" - "github.com/grafana/river/ast" - "github.com/grafana/river/vm" + "github.com/grafana/alloy/syntax/ast" + "github.com/grafana/alloy/syntax/vm" ) type ExportConfigNode struct { diff --git a/internal/flow/internal/controller/node_config_import.go b/internal/flow/internal/controller/node_config_import.go index 7cc95bdb4d..25efab3f86 100644 --- a/internal/flow/internal/controller/node_config_import.go +++ b/internal/flow/internal/controller/node_config_import.go @@ -19,9 +19,9 @@ import ( "github.com/grafana/agent/internal/flow/logging/level" "github.com/grafana/agent/internal/flow/tracing" "github.com/grafana/agent/internal/runner" - "github.com/grafana/river/ast" - "github.com/grafana/river/parser" - "github.com/grafana/river/vm" + "github.com/grafana/alloy/syntax/ast" + "github.com/grafana/alloy/syntax/parser" + "github.com/grafana/alloy/syntax/vm" "github.com/prometheus/client_golang/prometheus" ) diff --git a/internal/flow/internal/controller/node_config_logging.go b/internal/flow/internal/controller/node_config_logging.go index ac95eae5c1..dd923e01fd 100644 --- a/internal/flow/internal/controller/node_config_logging.go +++ b/internal/flow/internal/controller/node_config_logging.go @@ -7,8 +7,8 @@ import ( "github.com/go-kit/log" "github.com/grafana/agent/internal/flow/logging" - "github.com/grafana/river/ast" - "github.com/grafana/river/vm" + "github.com/grafana/alloy/syntax/ast" + "github.com/grafana/alloy/syntax/vm" ) var _ BlockNode = (*LoggingConfigNode)(nil) diff --git a/internal/flow/internal/controller/node_config_tracing.go b/internal/flow/internal/controller/node_config_tracing.go index 525e5c336f..8b4fe17ddf 100644 --- a/internal/flow/internal/controller/node_config_tracing.go +++ b/internal/flow/internal/controller/node_config_tracing.go @@ -6,8 +6,8 @@ import ( "sync" "github.com/grafana/agent/internal/flow/tracing" - "github.com/grafana/river/ast" - "github.com/grafana/river/vm" + "github.com/grafana/alloy/syntax/ast" + "github.com/grafana/alloy/syntax/vm" "go.opentelemetry.io/otel/trace" ) diff --git a/internal/flow/internal/controller/node_custom_component.go b/internal/flow/internal/controller/node_custom_component.go index 742f27324f..d53457c6e3 100644 --- a/internal/flow/internal/controller/node_custom_component.go +++ b/internal/flow/internal/controller/node_custom_component.go @@ -12,8 +12,8 @@ import ( "github.com/go-kit/log" "github.com/grafana/agent/internal/component" "github.com/grafana/agent/internal/flow/logging/level" - "github.com/grafana/river/ast" - 
"github.com/grafana/river/vm" + "github.com/grafana/alloy/syntax/ast" + "github.com/grafana/alloy/syntax/vm" ) // getCustomComponentConfig is used by the custom component to retrieve its template and the customComponentRegistry associated with it. diff --git a/internal/flow/internal/controller/node_declare.go b/internal/flow/internal/controller/node_declare.go index 0dec602839..8ed1585eb6 100644 --- a/internal/flow/internal/controller/node_declare.go +++ b/internal/flow/internal/controller/node_declare.go @@ -4,8 +4,8 @@ import ( "strings" "sync" - "github.com/grafana/river/ast" - "github.com/grafana/river/vm" + "github.com/grafana/alloy/syntax/ast" + "github.com/grafana/alloy/syntax/vm" ) // DeclareNode represents a declare block in the DAG. diff --git a/internal/flow/internal/controller/node_service.go b/internal/flow/internal/controller/node_service.go index e7eadaa498..c5f2bfc034 100644 --- a/internal/flow/internal/controller/node_service.go +++ b/internal/flow/internal/controller/node_service.go @@ -8,8 +8,8 @@ import ( "github.com/grafana/agent/internal/component" "github.com/grafana/agent/internal/service" - "github.com/grafana/river/ast" - "github.com/grafana/river/vm" + "github.com/grafana/alloy/syntax/ast" + "github.com/grafana/alloy/syntax/vm" ) // ServiceNode is a Flow DAG node which represents a running service. diff --git a/internal/flow/internal/controller/scheduler_test.go b/internal/flow/internal/controller/scheduler_test.go index ad5ebc23ca..aaf809a8f0 100644 --- a/internal/flow/internal/controller/scheduler_test.go +++ b/internal/flow/internal/controller/scheduler_test.go @@ -7,8 +7,8 @@ import ( "github.com/grafana/agent/internal/component" "github.com/grafana/agent/internal/flow/internal/controller" - "github.com/grafana/river/ast" - "github.com/grafana/river/vm" + "github.com/grafana/alloy/syntax/ast" + "github.com/grafana/alloy/syntax/vm" "github.com/stretchr/testify/require" ) diff --git a/internal/flow/internal/controller/value_cache.go b/internal/flow/internal/controller/value_cache.go index 0b0268cfb3..b9e66aca38 100644 --- a/internal/flow/internal/controller/value_cache.go +++ b/internal/flow/internal/controller/value_cache.go @@ -5,7 +5,7 @@ import ( "sync" "github.com/grafana/agent/internal/component" - "github.com/grafana/river/vm" + "github.com/grafana/alloy/syntax/vm" ) // valueCache caches component arguments and exports to expose as variables for diff --git a/internal/flow/internal/importsource/import_file.go b/internal/flow/internal/importsource/import_file.go index e0ffa09579..9887a38410 100644 --- a/internal/flow/internal/importsource/import_file.go +++ b/internal/flow/internal/importsource/import_file.go @@ -16,7 +16,7 @@ import ( "github.com/grafana/agent/internal/component" filedetector "github.com/grafana/agent/internal/filedetector" "github.com/grafana/agent/internal/flow/logging/level" - "github.com/grafana/river/vm" + "github.com/grafana/alloy/syntax/vm" ) // ImportFile imports a module from a file or a folder. 
diff --git a/internal/flow/internal/importsource/import_git.go b/internal/flow/internal/importsource/import_git.go index e69a0da9e6..93a5da668d 100644 --- a/internal/flow/internal/importsource/import_git.go +++ b/internal/flow/internal/importsource/import_git.go @@ -15,7 +15,7 @@ import ( "github.com/grafana/agent/internal/component" "github.com/grafana/agent/internal/flow/logging/level" "github.com/grafana/agent/internal/vcs" - "github.com/grafana/river/vm" + "github.com/grafana/alloy/syntax/vm" ) // ImportGit imports a module from a git repository. diff --git a/internal/flow/internal/importsource/import_http.go b/internal/flow/internal/importsource/import_http.go index a19b4e82cb..15e29a3d67 100644 --- a/internal/flow/internal/importsource/import_http.go +++ b/internal/flow/internal/importsource/import_http.go @@ -10,7 +10,7 @@ import ( "github.com/grafana/agent/internal/component" common_config "github.com/grafana/agent/internal/component/common/config" remote_http "github.com/grafana/agent/internal/component/remote/http" - "github.com/grafana/river/vm" + "github.com/grafana/alloy/syntax/vm" ) // ImportHTTP imports a module from a HTTP server via the remote.http component. diff --git a/internal/flow/internal/importsource/import_source.go b/internal/flow/internal/importsource/import_source.go index e25dfa175c..87faa86f35 100644 --- a/internal/flow/internal/importsource/import_source.go +++ b/internal/flow/internal/importsource/import_source.go @@ -5,7 +5,7 @@ import ( "fmt" "github.com/grafana/agent/internal/component" - "github.com/grafana/river/vm" + "github.com/grafana/alloy/syntax/vm" ) type SourceType int diff --git a/internal/flow/internal/importsource/import_string.go b/internal/flow/internal/importsource/import_string.go index 61d742d5d4..58debf4bb0 100644 --- a/internal/flow/internal/importsource/import_string.go +++ b/internal/flow/internal/importsource/import_string.go @@ -6,8 +6,8 @@ import ( "reflect" "github.com/grafana/agent/internal/component" - "github.com/grafana/river/rivertypes" - "github.com/grafana/river/vm" + rivertypes "github.com/grafana/alloy/syntax/alloytypes" + "github.com/grafana/alloy/syntax/vm" ) // ImportString imports a module from a string. 
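import_string.go above passes module content around as an optional secret. For reference, a sketch of the two secret types that alloytypes re-homes, assuming their shape is unchanged from rivertypes (Secret is a redacted string; OptionalSecret carries a flag plus the raw value):

package example

import (
	"fmt"

	rivertypes "github.com/grafana/alloy/syntax/alloytypes"
)

func main() {
	s := rivertypes.Secret("hunter2")                             // redacted when rendered back to configuration
	o := rivertypes.OptionalSecret{IsSecret: false, Value: "abc"} // usable as plain text unless flagged
	fmt.Println(len(s), o.Value)
}
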
diff --git a/internal/flow/internal/testcomponents/module/file/file.go b/internal/flow/internal/testcomponents/module/file/file.go index 89e6f23f7d..f5ae8c212f 100644 --- a/internal/flow/internal/testcomponents/module/file/file.go +++ b/internal/flow/internal/testcomponents/module/file/file.go @@ -10,7 +10,7 @@ import ( "github.com/grafana/agent/internal/component/local/file" "github.com/grafana/agent/internal/featuregate" "github.com/grafana/agent/internal/flow/internal/testcomponents/module" - "github.com/grafana/river/rivertypes" + rivertypes "github.com/grafana/alloy/syntax/alloytypes" ) func init() { diff --git a/internal/flow/internal/testcomponents/module/http/http.go b/internal/flow/internal/testcomponents/module/http/http.go index 04ca9043f8..b80a389eb8 100644 --- a/internal/flow/internal/testcomponents/module/http/http.go +++ b/internal/flow/internal/testcomponents/module/http/http.go @@ -10,7 +10,7 @@ import ( remote_http "github.com/grafana/agent/internal/component/remote/http" "github.com/grafana/agent/internal/featuregate" "github.com/grafana/agent/internal/flow/internal/testcomponents/module" - "github.com/grafana/river/rivertypes" + rivertypes "github.com/grafana/alloy/syntax/alloytypes" ) func init() { diff --git a/internal/flow/internal/testcomponents/module/string/string.go b/internal/flow/internal/testcomponents/module/string/string.go index 6a51c5cedb..df8f3e23d9 100644 --- a/internal/flow/internal/testcomponents/module/string/string.go +++ b/internal/flow/internal/testcomponents/module/string/string.go @@ -6,7 +6,7 @@ import ( "github.com/grafana/agent/internal/component" "github.com/grafana/agent/internal/featuregate" "github.com/grafana/agent/internal/flow/internal/testcomponents/module" - "github.com/grafana/river/rivertypes" + rivertypes "github.com/grafana/alloy/syntax/alloytypes" ) func init() { diff --git a/internal/flow/logging/options.go b/internal/flow/logging/options.go index 2c17c4b2b4..0607bb12c2 100644 --- a/internal/flow/logging/options.go +++ b/internal/flow/logging/options.go @@ -7,7 +7,7 @@ import ( "math" "github.com/grafana/agent/internal/component/common/loki" - "github.com/grafana/river" + river "github.com/grafana/alloy/syntax" ) // Options is a set of options used to construct and configure a Logger. 
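options.go above imports the root syntax package under the old name river, which keeps top-level helpers such as Unmarshal working untouched. A sketch of decoding configuration text into an options-style struct, assuming Unmarshal keeps its pre-move signature (the struct is illustrative):

package example

import (
	river "github.com/grafana/alloy/syntax"
)

// Options is a hypothetical logging configuration.
type Options struct {
	Level  string `river:"level,attr,optional"`
	Format string `river:"format,attr,optional"`
}

// decode unmarshals configuration text into Options.
func decode(src []byte) (Options, error) {
	var opts Options
	err := river.Unmarshal(src, &opts)
	return opts, err
}
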
diff --git a/internal/flow/module.go b/internal/flow/module.go index 9c99c62fc7..3744187beb 100644 --- a/internal/flow/module.go +++ b/internal/flow/module.go @@ -13,8 +13,8 @@ import ( "github.com/grafana/agent/internal/flow/logging" "github.com/grafana/agent/internal/flow/logging/level" "github.com/grafana/agent/internal/flow/tracing" - "github.com/grafana/river/ast" - "github.com/grafana/river/scanner" + "github.com/grafana/alloy/syntax/ast" + "github.com/grafana/alloy/syntax/scanner" "github.com/prometheus/client_golang/prometheus" "golang.org/x/exp/maps" ) diff --git a/internal/flow/source.go b/internal/flow/source.go index 1c1d8d897c..2ed0eee186 100644 --- a/internal/flow/source.go +++ b/internal/flow/source.go @@ -7,9 +7,9 @@ import ( "strings" "github.com/grafana/agent/internal/static/config/encoder" - "github.com/grafana/river/ast" - "github.com/grafana/river/diag" - "github.com/grafana/river/parser" + "github.com/grafana/alloy/syntax/ast" + "github.com/grafana/alloy/syntax/diag" + "github.com/grafana/alloy/syntax/parser" ) // A Source holds the contents of a parsed Flow source diff --git a/internal/flow/source_test.go b/internal/flow/source_test.go index be88b556c1..054e3bbcec 100644 --- a/internal/flow/source_test.go +++ b/internal/flow/source_test.go @@ -4,8 +4,8 @@ import ( "strings" "testing" - "github.com/grafana/river/ast" - "github.com/grafana/river/diag" + "github.com/grafana/alloy/syntax/ast" + "github.com/grafana/alloy/syntax/diag" "github.com/stretchr/testify/require" _ "github.com/grafana/agent/internal/flow/internal/testcomponents" // Include test components diff --git a/internal/flowmode/cmd_convert.go b/internal/flowmode/cmd_convert.go index be884cbbe1..fa9fe81a95 100644 --- a/internal/flowmode/cmd_convert.go +++ b/internal/flowmode/cmd_convert.go @@ -13,7 +13,7 @@ import ( "github.com/grafana/agent/internal/converter" convert_diag "github.com/grafana/agent/internal/converter/diag" - "github.com/grafana/river/diag" + "github.com/grafana/alloy/syntax/diag" ) func convertCommand() *cobra.Command { diff --git a/internal/flowmode/cmd_fmt.go b/internal/flowmode/cmd_fmt.go index caacc9cd4b..a8251cd58b 100644 --- a/internal/flowmode/cmd_fmt.go +++ b/internal/flowmode/cmd_fmt.go @@ -9,9 +9,9 @@ import ( "github.com/spf13/cobra" - "github.com/grafana/river/diag" - "github.com/grafana/river/parser" - "github.com/grafana/river/printer" + "github.com/grafana/alloy/syntax/diag" + "github.com/grafana/alloy/syntax/parser" + "github.com/grafana/alloy/syntax/printer" ) func fmtCommand() *cobra.Command { diff --git a/internal/flowmode/cmd_run.go b/internal/flowmode/cmd_run.go index 3530853d87..c9d7585e5c 100644 --- a/internal/flowmode/cmd_run.go +++ b/internal/flowmode/cmd_run.go @@ -35,9 +35,9 @@ import ( uiservice "github.com/grafana/agent/internal/service/ui" "github.com/grafana/agent/internal/static/config/instrumentation" "github.com/grafana/agent/internal/usagestats" + "github.com/grafana/alloy/syntax/diag" "github.com/grafana/ckit/advertise" "github.com/grafana/ckit/peer" - "github.com/grafana/river/diag" "github.com/prometheus/client_golang/prometheus" "github.com/spf13/cobra" "go.opentelemetry.io/otel" diff --git a/internal/service/http/http_test.go b/internal/service/http/http_test.go index 4239b01d4a..37b2e2c5ed 100644 --- a/internal/service/http/http_test.go +++ b/internal/service/http/http_test.go @@ -11,7 +11,7 @@ import ( "github.com/grafana/agent/internal/flow/componenttest" "github.com/grafana/agent/internal/service" "github.com/grafana/agent/internal/util" - 
"github.com/grafana/river" + river "github.com/grafana/alloy/syntax" "github.com/phayes/freeport" "github.com/prometheus/client_golang/prometheus" "github.com/prometheus/common/config" diff --git a/internal/service/http/tls.go b/internal/service/http/tls.go index a639967863..40f6332b1e 100644 --- a/internal/service/http/tls.go +++ b/internal/service/http/tls.go @@ -8,9 +8,9 @@ import ( "os" "time" + river "github.com/grafana/alloy/syntax" + rivertypes "github.com/grafana/alloy/syntax/alloytypes" "github.com/grafana/regexp" - "github.com/grafana/river" - "github.com/grafana/river/rivertypes" ) // TLSArguments configures TLS settings for the HTTP service. diff --git a/internal/service/remotecfg/remotecfg.go b/internal/service/remotecfg/remotecfg.go index 6ddad9b824..e840b850b6 100644 --- a/internal/service/remotecfg/remotecfg.go +++ b/internal/service/remotecfg/remotecfg.go @@ -20,7 +20,7 @@ import ( "github.com/grafana/agent/internal/featuregate" "github.com/grafana/agent/internal/flow/logging/level" "github.com/grafana/agent/internal/service" - "github.com/grafana/river" + river "github.com/grafana/alloy/syntax" commonconfig "github.com/prometheus/common/config" ) diff --git a/internal/service/remotecfg/remotecfg_test.go b/internal/service/remotecfg/remotecfg_test.go index c6b5fc8374..e469081f0e 100644 --- a/internal/service/remotecfg/remotecfg_test.go +++ b/internal/service/remotecfg/remotecfg_test.go @@ -19,7 +19,7 @@ import ( "github.com/grafana/agent/internal/flow/logging" "github.com/grafana/agent/internal/service" "github.com/grafana/agent/internal/util" - "github.com/grafana/river" + river "github.com/grafana/alloy/syntax" "github.com/prometheus/client_golang/prometheus" "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" diff --git a/internal/vcs/auth.go b/internal/vcs/auth.go index ef1dd6ddff..66da22dcad 100644 --- a/internal/vcs/auth.go +++ b/internal/vcs/auth.go @@ -6,7 +6,7 @@ import ( "github.com/go-git/go-git/v5/plumbing/transport" "github.com/go-git/go-git/v5/plumbing/transport/http" "github.com/go-git/go-git/v5/plumbing/transport/ssh" - "github.com/grafana/river/rivertypes" + rivertypes "github.com/grafana/alloy/syntax/alloytypes" ) type GitAuthConfig struct { From bfcaf3bd4b2e833f6f082dc6d8e28dd2a042202a Mon Sep 17 00:00:00 2001 From: Robert Fratto Date: Mon, 25 Mar 2024 14:23:11 -0400 Subject: [PATCH 042/136] all: remove rivertypes import alias in favor of alloytypes --- .../writing-exporter-flow-components.md | 40 +++++++++---------- internal/component/common/config/types.go | 14 +++---- internal/component/discovery/aws/ec2.go | 4 +- internal/component/discovery/aws/lightsail.go | 4 +- internal/component/discovery/azure/azure.go | 4 +- internal/component/discovery/consul/consul.go | 6 +-- .../discovery/consulagent/consulagent.go | 6 +-- .../discovery/digitalocean/digitalocean.go | 4 +- .../discovery/dockerswarm/dockerswarm_test.go | 4 +- .../component/discovery/eureka/eureka_test.go | 4 +- .../component/discovery/ionos/ionos_test.go | 4 +- .../component/discovery/marathon/marathon.go | 4 +- .../discovery/marathon/marathon_test.go | 6 +-- .../discovery/openstack/openstack.go | 6 +-- .../component/discovery/ovhcloud/ovhcloud.go | 6 +-- .../component/discovery/scaleway/scaleway.go | 4 +- internal/component/discovery/uyuni/uyuni.go | 4 +- internal/component/faro/receiver/arguments.go | 4 +- internal/component/local/file/file.go | 6 +-- internal/component/local/file/file_test.go | 10 ++--- .../loki/source/aws_firehose/component.go | 4 +- 
.../loki/source/cloudflare/cloudflare.go | 4 +- internal/component/loki/source/kafka/kafka.go | 4 +- .../component/otelcol/auth/basic/basic.go | 4 +- .../component/otelcol/auth/bearer/bearer.go | 4 +- .../component/otelcol/auth/headers/headers.go | 4 +- .../component/otelcol/auth/oauth2/oauth2.go | 4 +- internal/component/otelcol/config_tls.go | 4 +- .../internal/consul/config.go | 4 +- .../component/otelcol/receiver/kafka/kafka.go | 8 ++-- .../otelcol/receiver/vcenter/vcenter.go | 4 +- .../prometheus/exporter/blackbox/blackbox.go | 4 +- .../elasticsearch/elasticsearch_test.go | 4 +- .../prometheus/exporter/github/github.go | 4 +- .../prometheus/exporter/kafka/kafka.go | 4 +- .../prometheus/exporter/mongodb/mongodb.go | 4 +- .../prometheus/exporter/mssql/mssql.go | 6 +-- .../prometheus/exporter/mssql/mssql_test.go | 22 +++++----- .../prometheus/exporter/mysql/mysql.go | 4 +- .../prometheus/exporter/mysql/mysql_test.go | 6 +-- .../prometheus/exporter/oracledb/oracledb.go | 4 +- .../exporter/oracledb/oracledb_test.go | 14 +++---- .../prometheus/exporter/postgres/postgres.go | 4 +- .../exporter/postgres/postgres_test.go | 3 +- .../prometheus/exporter/redis/redis.go | 4 +- .../prometheus/exporter/snmp/snmp.go | 4 +- .../exporter/snowflake/snowflake.go | 4 +- .../exporter/snowflake/snowflake_test.go | 4 +- .../prometheus/exporter/squid/squid.go | 4 +- .../prometheus/exporter/squid/squid_test.go | 4 +- .../component/prometheus/remotewrite/types.go | 4 +- internal/component/remote/http/http.go | 6 +-- internal/component/remote/http/http_test.go | 6 +-- .../component/remote/kubernetes/kubernetes.go | 10 ++--- internal/component/remote/s3/s3.go | 4 +- internal/component/remote/s3/types.go | 6 +-- internal/component/remote/vault/auth.go | 12 +++--- internal/component/remote/vault/vault.go | 10 ++--- internal/component/remote/vault/vault_test.go | 14 +++---- .../internal/common/http_client_config.go | 20 +++++----- .../converter/internal/common/river_utils.go | 8 ++-- .../converter_basicauthextension.go | 4 +- .../converter_bearertokenauthextension.go | 6 +-- .../converter_headerssetterextension.go | 6 +-- .../otelcolconvert/converter_kafkareceiver.go | 8 ++-- .../converter_oauth2clientauthextension.go | 4 +- .../otelcolconvert/converter_otlpreceiver.go | 4 +- .../prometheusconvert/component/azure.go | 4 +- .../prometheusconvert/component/consul.go | 6 +-- .../component/digitalocean.go | 4 +- .../prometheusconvert/component/ec2.go | 4 +- .../prometheusconvert/component/lightsail.go | 4 +- .../prometheusconvert/component/marathon.go | 4 +- .../prometheusconvert/component/openstack.go | 6 +-- .../prometheusconvert/component/ovhcloud.go | 6 +-- .../component/remote_write.go | 4 +- .../prometheusconvert/component/scaleway.go | 4 +- .../internal/build/cloudflare.go | 6 +-- .../internal/build/consul_agent.go | 6 +-- .../promtailconvert/internal/build/kafka.go | 6 +-- .../internal/build/app_agent_receiver.go | 4 +- .../internal/build/blackbox_exporter.go | 6 +-- .../internal/build/elasticsearch_exporter.go | 4 +- .../internal/build/github_exporter.go | 4 +- .../internal/build/kafka_exporter.go | 4 +- .../internal/build/mongodb_exporter.go | 4 +- .../internal/build/mssql_exporter.go | 4 +- .../internal/build/mysqld_exporter.go | 4 +- .../internal/build/oracledb_exporter.go | 4 +- .../internal/build/postgres_exporter.go | 6 +-- .../internal/build/redis_exporter.go | 4 +- .../internal/build/snmp_exporter.go | 6 +-- .../internal/build/snowflake_exporter.go | 4 +- .../internal/build/squid_exporter.go | 4 +- 
.../internal/importsource/import_string.go | 4 +- .../testcomponents/module/file/file.go | 8 ++-- .../testcomponents/module/http/http.go | 8 ++-- .../testcomponents/module/string/string.go | 4 +- internal/service/http/tls.go | 4 +- internal/vcs/auth.go | 8 ++-- 100 files changed, 297 insertions(+), 296 deletions(-) diff --git a/docs/developer/writing-exporter-flow-components.md b/docs/developer/writing-exporter-flow-components.md index fb0681ae04..401b21b0d5 100644 --- a/docs/developer/writing-exporter-flow-components.md +++ b/docs/developer/writing-exporter-flow-components.md @@ -1,6 +1,6 @@ # Create Prometheus Exporter Flow Components -This guide will walk you through the process of creating a new Prometheus exporter Flow component and best practices for implementing it. +This guide will walk you through the process of creating a new Prometheus exporter Flow component and best practices for implementing it. It is required that the exporter has an existing [Agent integration](../sources/static/configuration/integrations/_index.md) in order to wrap it as a Flow component. In the future, we will drop this requirement and Flow components will expose the logic of the exporter directly. @@ -14,10 +14,10 @@ Use the following exporters as a reference: `Arguments` struct defines the arguments that can be passed to the component. In most cases, this would be exactly the same as the arguments that the integration for this exporter uses. Some recommendations: - Use `attr` tag for representing values. Use `attr,optional` tag for optional arguments. -- Use `rivertypes.Secret` type for sensitive arguments (e.g. API keys, passwords, etc). The original integration should have a similar field type called `Secret` from Prometheus. +- Use `alloytypes.Secret` type for sensitive arguments (e.g. API keys, passwords, etc). The original integration should have a similar field type called `Secret` from Prometheus. - Use `block` tag for representing nested values such slices or structs. For example, the [process_exporter](../../component/prometheus/exporter/process/process.go) `Arguments` struct has `ProcessExporter` param which is a `[]MatcherGroup`. The name of the parameter should be in singular. This will allow the user to define multiple blocks of the same type. -The river config would look like this using `matcher` block multiple times: +The config would look like this using `matcher` block multiple times: ```river prometheus.exporter.process "example" { @@ -27,42 +27,42 @@ prometheus.exporter.process "example" { } matcher { comm = ["firefox"] - } + } } ``` -- Use `label` tag in field of struct represented as block to define named blocks. For example, the [blackbox_exporter](../../component/prometheus/exporter/blackbox/blackbox.go) `BlackboxTarget` struct has a `Name` param which represents the name of the block. +- Use `label` tag in field of struct represented as block to define named blocks. For example, the [blackbox_exporter](../../component/prometheus/exporter/blackbox/blackbox.go) `BlackboxTarget` struct has a `Name` param which represents the name of the block. 
-The river config would look like this: +The config would look like this: ```river -prometheus.exporter.blackbox "example" { - config_file = "blackbox_modules.yml" - - target { - name = "example" - address = "http://example.com" - module = "http_2xx" - } +prometheus.exporter.blackbox "example" { + config_file = "blackbox_modules.yml" + + target { + name = "example" + address = "http://example.com" + module = "http_2xx" + } } ``` -- Define `DefaultArguments` as a global variable to define the default arguments for the component. +- Define `DefaultArguments` as a global variable to define the default arguments for the component. ## Functions -- Define `init` function to register the component using `component.Register`. +- Define `init` function to register the component using `component.Register`. - The `Build` param should be a function that returns a `component.Component` interface. - The name used in the second parameter of `exporter.New` when defining the `Build` function it's important as it will define the label `job` in the form of `integrations/`. - - Avoid creating components with `Singleton: true` as it will make it impossible to run multiple instances of the exporter. + - Avoid creating components with `Singleton: true` as it will make it impossible to run multiple instances of the exporter. - If the exporter follows the multi-target pattern, add a function to define Prometheus discovery targets and use `exporter.NewWithTargetBuilder` for the `Build` param of the `component.Register` function. - If the exporter implements a custom `InstanceKey`, add a function to customize the value of the instance label and use `exporter.NewWithTargetBuilder` for the `Build` param of the `component.Register` function. -- Define the `SetToDefault` function implementing river.Defaulter to specify the default arguments for the component. +- Define the `SetToDefault` function implementing syntax.Defaulter to specify the default arguments for the component. -- Define the `Validate` function implementing river.Validator to specify any validation rules for the component arguments. +- Define the `Validate` function implementing syntax.Validator to specify any validation rules for the component arguments. - Add a test to validate the unmarshalling covering as many cases as possible. @@ -74,4 +74,4 @@ In order to make the component visible for Agent Flow, it needs to be added to [ ## Documentation -Writing the documentation for the component is very important. Please, follow the [Writing documentation for Flow components](./writing-flow-component-documentation.md) and take a look at the existing documentation for other exporters. \ No newline at end of file +Writing the documentation for the component is very important. Please, follow the [Writing documentation for Flow components](./writing-flow-component-documentation.md) and take a look at the existing documentation for other exporters. 
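The guide above condenses into one sketch. A hypothetical exporter's `Arguments` illustrating its recommendations (component and field names here are illustrative, not an existing component): `attr` and `block` tags, `alloytypes.Secret` for credentials, a singular repeated block with a `label` field, a `DefaultArguments` variable, and `SetToDefault`/`Validate` implementing `syntax.Defaulter` and `syntax.Validator`:

```go
package example

import (
	"errors"
	"time"

	"github.com/grafana/alloy/syntax/alloytypes"
)

// Arguments configures the hypothetical prometheus.exporter.example component.
type Arguments struct {
	Address         string            `river:"address,attr"`
	APIKey          alloytypes.Secret `river:"api_key,attr,optional"` // sensitive: use Secret
	RefreshInterval time.Duration     `river:"refresh_interval,attr,optional"`
	Targets         []TargetBlock     `river:"target,block"` // singular name, so the block can repeat
}

// TargetBlock is a named block: the label becomes Name.
type TargetBlock struct {
	Name    string `river:",label"`
	Address string `river:"address,attr"`
}

// DefaultArguments holds the component's defaults.
var DefaultArguments = Arguments{
	RefreshInterval: 60 * time.Second,
}

// SetToDefault implements syntax.Defaulter.
func (a *Arguments) SetToDefault() { *a = DefaultArguments }

// Validate implements syntax.Validator.
func (a *Arguments) Validate() error {
	if a.Address == "" {
		return errors.New("address must not be empty")
	}
	return nil
}
```

Each `target "name" { ... }` block in the configuration then maps to one `TargetBlock` entry, mirroring the blackbox `target` example in the guide.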
diff --git a/internal/component/common/config/types.go b/internal/component/common/config/types.go index 416844b74d..561b9e7aa0 100644 --- a/internal/component/common/config/types.go +++ b/internal/component/common/config/types.go @@ -7,7 +7,7 @@ import ( "net/url" "strings" - rivertypes "github.com/grafana/alloy/syntax/alloytypes" + "github.com/grafana/alloy/syntax/alloytypes" "github.com/prometheus/common/config" ) @@ -18,7 +18,7 @@ type HTTPClientConfig struct { BasicAuth *BasicAuth `river:"basic_auth,block,optional"` Authorization *Authorization `river:"authorization,block,optional"` OAuth2 *OAuth2Config `river:"oauth2,block,optional"` - BearerToken rivertypes.Secret `river:"bearer_token,attr,optional"` + BearerToken alloytypes.Secret `river:"bearer_token,attr,optional"` BearerTokenFile string `river:"bearer_token_file,attr,optional"` ProxyConfig *ProxyConfig `river:",squash"` TLSConfig TLSConfig `river:"tls_config,block,optional"` @@ -110,7 +110,7 @@ var DefaultHTTPClientConfig = HTTPClientConfig{ // BasicAuth configures Basic HTTP authentication credentials. type BasicAuth struct { Username string `river:"username,attr,optional"` - Password rivertypes.Secret `river:"password,attr,optional"` + Password alloytypes.Secret `river:"password,attr,optional"` PasswordFile string `river:"password_file,attr,optional"` } @@ -215,7 +215,7 @@ func (u *URL) Convert() config.URL { } type Header struct { - Header map[string][]rivertypes.Secret `river:"proxy_connect_header,attr,optional"` + Header map[string][]alloytypes.Secret `river:"proxy_connect_header,attr,optional"` } func (h *Header) Convert() config.Header { @@ -239,7 +239,7 @@ func (h *Header) Convert() config.Header { // Authorization sets up HTTP authorization credentials. type Authorization struct { Type string `river:"type,attr,optional"` - Credentials rivertypes.Secret `river:"credentials,attr,optional"` + Credentials alloytypes.Secret `river:"credentials,attr,optional"` CredentialsFile string `river:"credentials_file,attr,optional"` } @@ -305,7 +305,7 @@ type TLSConfig struct { CAFile string `river:"ca_file,attr,optional"` Cert string `river:"cert_pem,attr,optional"` CertFile string `river:"cert_file,attr,optional"` - Key rivertypes.Secret `river:"key_pem,attr,optional"` + Key alloytypes.Secret `river:"key_pem,attr,optional"` KeyFile string `river:"key_file,attr,optional"` ServerName string `river:"server_name,attr,optional"` InsecureSkipVerify bool `river:"insecure_skip_verify,attr,optional"` @@ -359,7 +359,7 @@ func (t *TLSConfig) Validate() error { // OAuth2Config sets up the OAuth2 client. 
type OAuth2Config struct { ClientID string `river:"client_id,attr,optional"` - ClientSecret rivertypes.Secret `river:"client_secret,attr,optional"` + ClientSecret alloytypes.Secret `river:"client_secret,attr,optional"` ClientSecretFile string `river:"client_secret_file,attr,optional"` Scopes []string `river:"scopes,attr,optional"` TokenURL string `river:"token_url,attr,optional"` diff --git a/internal/component/discovery/aws/ec2.go b/internal/component/discovery/aws/ec2.go index 0a81afd082..72ce4464ab 100644 --- a/internal/component/discovery/aws/ec2.go +++ b/internal/component/discovery/aws/ec2.go @@ -11,7 +11,7 @@ import ( "github.com/grafana/agent/internal/component/common/config" "github.com/grafana/agent/internal/component/discovery" "github.com/grafana/agent/internal/featuregate" - rivertypes "github.com/grafana/alloy/syntax/alloytypes" + "github.com/grafana/alloy/syntax/alloytypes" promcfg "github.com/prometheus/common/config" "github.com/prometheus/common/model" promaws "github.com/prometheus/prometheus/discovery/aws" @@ -40,7 +40,7 @@ type EC2Arguments struct { Endpoint string `river:"endpoint,attr,optional"` Region string `river:"region,attr,optional"` AccessKey string `river:"access_key,attr,optional"` - SecretKey rivertypes.Secret `river:"secret_key,attr,optional"` + SecretKey alloytypes.Secret `river:"secret_key,attr,optional"` Profile string `river:"profile,attr,optional"` RoleARN string `river:"role_arn,attr,optional"` RefreshInterval time.Duration `river:"refresh_interval,attr,optional"` diff --git a/internal/component/discovery/aws/lightsail.go b/internal/component/discovery/aws/lightsail.go index f04b6b0740..ca35979fba 100644 --- a/internal/component/discovery/aws/lightsail.go +++ b/internal/component/discovery/aws/lightsail.go @@ -11,7 +11,7 @@ import ( "github.com/grafana/agent/internal/component/common/config" "github.com/grafana/agent/internal/component/discovery" "github.com/grafana/agent/internal/featuregate" - rivertypes "github.com/grafana/alloy/syntax/alloytypes" + "github.com/grafana/alloy/syntax/alloytypes" promcfg "github.com/prometheus/common/config" "github.com/prometheus/common/model" promaws "github.com/prometheus/prometheus/discovery/aws" @@ -34,7 +34,7 @@ type LightsailArguments struct { Endpoint string `river:"endpoint,attr,optional"` Region string `river:"region,attr,optional"` AccessKey string `river:"access_key,attr,optional"` - SecretKey rivertypes.Secret `river:"secret_key,attr,optional"` + SecretKey alloytypes.Secret `river:"secret_key,attr,optional"` Profile string `river:"profile,attr,optional"` RoleARN string `river:"role_arn,attr,optional"` RefreshInterval time.Duration `river:"refresh_interval,attr,optional"` diff --git a/internal/component/discovery/azure/azure.go b/internal/component/discovery/azure/azure.go index aaa6392f51..61b4857737 100644 --- a/internal/component/discovery/azure/azure.go +++ b/internal/component/discovery/azure/azure.go @@ -9,7 +9,7 @@ import ( "github.com/grafana/agent/internal/component/common/config" "github.com/grafana/agent/internal/component/discovery" "github.com/grafana/agent/internal/featuregate" - rivertypes "github.com/grafana/alloy/syntax/alloytypes" + "github.com/grafana/alloy/syntax/alloytypes" common "github.com/prometheus/common/config" "github.com/prometheus/common/model" prom_discovery "github.com/prometheus/prometheus/discovery/azure" @@ -46,7 +46,7 @@ type Arguments struct { type OAuth struct { ClientID string `river:"client_id,attr"` TenantID string `river:"tenant_id,attr"` - ClientSecret 
rivertypes.Secret `river:"client_secret,attr"` + ClientSecret alloytypes.Secret `river:"client_secret,attr"` } type ManagedIdentity struct { diff --git a/internal/component/discovery/consul/consul.go b/internal/component/discovery/consul/consul.go index 095e99173c..529b44af9e 100644 --- a/internal/component/discovery/consul/consul.go +++ b/internal/component/discovery/consul/consul.go @@ -8,7 +8,7 @@ import ( "github.com/grafana/agent/internal/component/common/config" "github.com/grafana/agent/internal/component/discovery" "github.com/grafana/agent/internal/featuregate" - rivertypes "github.com/grafana/alloy/syntax/alloytypes" + "github.com/grafana/alloy/syntax/alloytypes" config_util "github.com/prometheus/common/config" "github.com/prometheus/common/model" prom_discovery "github.com/prometheus/prometheus/discovery/consul" @@ -29,14 +29,14 @@ func init() { type Arguments struct { Server string `river:"server,attr,optional"` - Token rivertypes.Secret `river:"token,attr,optional"` + Token alloytypes.Secret `river:"token,attr,optional"` Datacenter string `river:"datacenter,attr,optional"` Namespace string `river:"namespace,attr,optional"` Partition string `river:"partition,attr,optional"` TagSeparator string `river:"tag_separator,attr,optional"` Scheme string `river:"scheme,attr,optional"` Username string `river:"username,attr,optional"` - Password rivertypes.Secret `river:"password,attr,optional"` + Password alloytypes.Secret `river:"password,attr,optional"` AllowStale bool `river:"allow_stale,attr,optional"` Services []string `river:"services,attr,optional"` ServiceTags []string `river:"tags,attr,optional"` diff --git a/internal/component/discovery/consulagent/consulagent.go b/internal/component/discovery/consulagent/consulagent.go index 61453edaac..76f96c7370 100644 --- a/internal/component/discovery/consulagent/consulagent.go +++ b/internal/component/discovery/consulagent/consulagent.go @@ -8,7 +8,7 @@ import ( "github.com/grafana/agent/internal/component/common/config" "github.com/grafana/agent/internal/component/discovery" "github.com/grafana/agent/internal/featuregate" - rivertypes "github.com/grafana/alloy/syntax/alloytypes" + "github.com/grafana/alloy/syntax/alloytypes" promcfg "github.com/prometheus/common/config" "github.com/prometheus/common/model" ) @@ -28,12 +28,12 @@ func init() { type Arguments struct { Server string `river:"server,attr,optional"` - Token rivertypes.Secret `river:"token,attr,optional"` + Token alloytypes.Secret `river:"token,attr,optional"` Datacenter string `river:"datacenter,attr,optional"` TagSeparator string `river:"tag_separator,attr,optional"` Scheme string `river:"scheme,attr,optional"` Username string `river:"username,attr,optional"` - Password rivertypes.Secret `river:"password,attr,optional"` + Password alloytypes.Secret `river:"password,attr,optional"` RefreshInterval time.Duration `river:"refresh_interval,attr,optional"` Services []string `river:"services,attr,optional"` ServiceTags []string `river:"tags,attr,optional"` diff --git a/internal/component/discovery/digitalocean/digitalocean.go b/internal/component/discovery/digitalocean/digitalocean.go index a7f32f80f2..7fe661fcf3 100644 --- a/internal/component/discovery/digitalocean/digitalocean.go +++ b/internal/component/discovery/digitalocean/digitalocean.go @@ -8,7 +8,7 @@ import ( "github.com/grafana/agent/internal/component/common/config" "github.com/grafana/agent/internal/component/discovery" "github.com/grafana/agent/internal/featuregate" - rivertypes 
"github.com/grafana/alloy/syntax/alloytypes" + "github.com/grafana/alloy/syntax/alloytypes" "github.com/prometheus/common/model" prom_discovery "github.com/prometheus/prometheus/discovery/digitalocean" ) @@ -30,7 +30,7 @@ type Arguments struct { RefreshInterval time.Duration `river:"refresh_interval,attr,optional"` Port int `river:"port,attr,optional"` - BearerToken rivertypes.Secret `river:"bearer_token,attr,optional"` + BearerToken alloytypes.Secret `river:"bearer_token,attr,optional"` BearerTokenFile string `river:"bearer_token_file,attr,optional"` ProxyConfig *config.ProxyConfig `river:",squash"` diff --git a/internal/component/discovery/dockerswarm/dockerswarm_test.go b/internal/component/discovery/dockerswarm/dockerswarm_test.go index 7848107767..de83ac9819 100644 --- a/internal/component/discovery/dockerswarm/dockerswarm_test.go +++ b/internal/component/discovery/dockerswarm/dockerswarm_test.go @@ -6,7 +6,7 @@ import ( "github.com/grafana/agent/internal/component/common/config" river "github.com/grafana/alloy/syntax" - rivertypes "github.com/grafana/alloy/syntax/alloytypes" + "github.com/grafana/alloy/syntax/alloytypes" promConfig "github.com/prometheus/common/config" "github.com/prometheus/common/model" "github.com/stretchr/testify/require" @@ -42,7 +42,7 @@ func TestRiverUnmarshal(t *testing.T) { assert.Equal(t, 81, args.Port) assert.Equal(t, 12*time.Second, args.RefreshInterval) assert.Equal(t, "username", args.HTTPClientConfig.BasicAuth.Username) - assert.Equal(t, rivertypes.Secret("pass"), args.HTTPClientConfig.BasicAuth.Password) + assert.Equal(t, alloytypes.Secret("pass"), args.HTTPClientConfig.BasicAuth.Password) } func TestConvert(t *testing.T) { diff --git a/internal/component/discovery/eureka/eureka_test.go b/internal/component/discovery/eureka/eureka_test.go index 5492c690ba..5bd367f08f 100644 --- a/internal/component/discovery/eureka/eureka_test.go +++ b/internal/component/discovery/eureka/eureka_test.go @@ -6,7 +6,7 @@ import ( "github.com/grafana/agent/internal/component/common/config" river "github.com/grafana/alloy/syntax" - rivertypes "github.com/grafana/alloy/syntax/alloytypes" + "github.com/grafana/alloy/syntax/alloytypes" promcfg "github.com/prometheus/common/config" "github.com/prometheus/common/model" prom_sd "github.com/prometheus/prometheus/discovery/eureka" @@ -28,7 +28,7 @@ func TestUnmarshal(t *testing.T) { require.Equal(t, "http://localhost:8080/eureka/v1", args.Server) require.Equal(t, "10s", args.RefreshInterval.String()) require.Equal(t, "exampleuser", args.HTTPClientConfig.BasicAuth.Username) - require.Equal(t, rivertypes.Secret("examplepassword"), args.HTTPClientConfig.BasicAuth.Password) + require.Equal(t, alloytypes.Secret("examplepassword"), args.HTTPClientConfig.BasicAuth.Password) } func TestValidate(t *testing.T) { diff --git a/internal/component/discovery/ionos/ionos_test.go b/internal/component/discovery/ionos/ionos_test.go index d5a90b4eb6..b732111896 100644 --- a/internal/component/discovery/ionos/ionos_test.go +++ b/internal/component/discovery/ionos/ionos_test.go @@ -6,7 +6,7 @@ import ( "github.com/grafana/agent/internal/component/common/config" river "github.com/grafana/alloy/syntax" - rivertypes "github.com/grafana/alloy/syntax/alloytypes" + "github.com/grafana/alloy/syntax/alloytypes" promConfig "github.com/prometheus/common/config" "github.com/prometheus/common/model" "github.com/stretchr/testify/require" @@ -31,7 +31,7 @@ func TestRiverUnmarshal(t *testing.T) { assert.Equal(t, 20*time.Second, args.RefreshInterval) assert.Equal(t, 
60, args.Port) assert.Equal(t, "username", args.HTTPClientConfig.BasicAuth.Username) - assert.Equal(t, rivertypes.Secret("pass"), args.HTTPClientConfig.BasicAuth.Password) + assert.Equal(t, alloytypes.Secret("pass"), args.HTTPClientConfig.BasicAuth.Password) } func TestConvert(t *testing.T) { diff --git a/internal/component/discovery/marathon/marathon.go b/internal/component/discovery/marathon/marathon.go index 7b842ccfc9..1c75473aaf 100644 --- a/internal/component/discovery/marathon/marathon.go +++ b/internal/component/discovery/marathon/marathon.go @@ -8,7 +8,7 @@ import ( "github.com/grafana/agent/internal/component/common/config" "github.com/grafana/agent/internal/component/discovery" "github.com/grafana/agent/internal/featuregate" - rivertypes "github.com/grafana/alloy/syntax/alloytypes" + "github.com/grafana/alloy/syntax/alloytypes" promcfg "github.com/prometheus/common/config" "github.com/prometheus/common/model" prom_discovery "github.com/prometheus/prometheus/discovery/marathon" @@ -30,7 +30,7 @@ func init() { type Arguments struct { Servers []string `river:"servers,attr"` RefreshInterval time.Duration `river:"refresh_interval,attr,optional"` - AuthToken rivertypes.Secret `river:"auth_token,attr,optional"` + AuthToken alloytypes.Secret `river:"auth_token,attr,optional"` AuthTokenFile string `river:"auth_token_file,attr,optional"` HTTPClientConfig config.HTTPClientConfig `river:",squash"` } diff --git a/internal/component/discovery/marathon/marathon_test.go b/internal/component/discovery/marathon/marathon_test.go index bfde1faad7..9ba2605a7b 100644 --- a/internal/component/discovery/marathon/marathon_test.go +++ b/internal/component/discovery/marathon/marathon_test.go @@ -6,7 +6,7 @@ import ( "github.com/grafana/agent/internal/component/common/config" river "github.com/grafana/alloy/syntax" - rivertypes "github.com/grafana/alloy/syntax/alloytypes" + "github.com/grafana/alloy/syntax/alloytypes" promConfig "github.com/prometheus/common/config" "github.com/prometheus/common/model" "github.com/stretchr/testify/require" @@ -26,7 +26,7 @@ func TestRiverUnmarshalWithAuthToken(t *testing.T) { require.ElementsMatch(t, []string{"serv1", "serv2"}, args.Servers) assert.Equal(t, 20*time.Second, args.RefreshInterval) - assert.Equal(t, rivertypes.Secret("auth_token"), args.AuthToken) + assert.Equal(t, alloytypes.Secret("auth_token"), args.AuthToken) } func TestRiverUnmarshalWithAuthTokenFile(t *testing.T) { @@ -62,7 +62,7 @@ func TestRiverUnmarshalWithBasicAuth(t *testing.T) { require.ElementsMatch(t, []string{"serv1", "serv2"}, args.Servers) assert.Equal(t, 20*time.Second, args.RefreshInterval) assert.Equal(t, "username", args.HTTPClientConfig.BasicAuth.Username) - assert.Equal(t, rivertypes.Secret("pass"), args.HTTPClientConfig.BasicAuth.Password) + assert.Equal(t, alloytypes.Secret("pass"), args.HTTPClientConfig.BasicAuth.Password) } func TestConvert(t *testing.T) { diff --git a/internal/component/discovery/openstack/openstack.go b/internal/component/discovery/openstack/openstack.go index b220c90b06..1ab76dceb0 100644 --- a/internal/component/discovery/openstack/openstack.go +++ b/internal/component/discovery/openstack/openstack.go @@ -8,7 +8,7 @@ import ( "github.com/grafana/agent/internal/component/common/config" "github.com/grafana/agent/internal/component/discovery" "github.com/grafana/agent/internal/featuregate" - rivertypes "github.com/grafana/alloy/syntax/alloytypes" + "github.com/grafana/alloy/syntax/alloytypes" config_util "github.com/prometheus/common/config" 
"github.com/prometheus/common/model" prom_discovery "github.com/prometheus/prometheus/discovery/openstack" @@ -31,14 +31,14 @@ type Arguments struct { IdentityEndpoint string `river:"identity_endpoint,attr,optional"` Username string `river:"username,attr,optional"` UserID string `river:"userid,attr,optional"` - Password rivertypes.Secret `river:"password,attr,optional"` + Password alloytypes.Secret `river:"password,attr,optional"` ProjectName string `river:"project_name,attr,optional"` ProjectID string `river:"project_id,attr,optional"` DomainName string `river:"domain_name,attr,optional"` DomainID string `river:"domain_id,attr,optional"` ApplicationCredentialName string `river:"application_credential_name,attr,optional"` ApplicationCredentialID string `river:"application_credential_id,attr,optional"` - ApplicationCredentialSecret rivertypes.Secret `river:"application_credential_secret,attr,optional"` + ApplicationCredentialSecret alloytypes.Secret `river:"application_credential_secret,attr,optional"` Role string `river:"role,attr"` Region string `river:"region,attr"` RefreshInterval time.Duration `river:"refresh_interval,attr,optional"` diff --git a/internal/component/discovery/ovhcloud/ovhcloud.go b/internal/component/discovery/ovhcloud/ovhcloud.go index 2a3605d327..9fc31defe8 100644 --- a/internal/component/discovery/ovhcloud/ovhcloud.go +++ b/internal/component/discovery/ovhcloud/ovhcloud.go @@ -7,7 +7,7 @@ import ( "github.com/grafana/agent/internal/component" "github.com/grafana/agent/internal/component/discovery" "github.com/grafana/agent/internal/featuregate" - rivertypes "github.com/grafana/alloy/syntax/alloytypes" + "github.com/grafana/alloy/syntax/alloytypes" "github.com/prometheus/common/config" "github.com/prometheus/common/model" prom_discovery "github.com/prometheus/prometheus/discovery/ovhcloud" @@ -30,8 +30,8 @@ func init() { type Arguments struct { Endpoint string `river:"endpoint,attr,optional"` ApplicationKey string `river:"application_key,attr"` - ApplicationSecret rivertypes.Secret `river:"application_secret,attr"` - ConsumerKey rivertypes.Secret `river:"consumer_key,attr"` + ApplicationSecret alloytypes.Secret `river:"application_secret,attr"` + ConsumerKey alloytypes.Secret `river:"consumer_key,attr"` RefreshInterval time.Duration `river:"refresh_interval,attr,optional"` Service string `river:"service,attr"` } diff --git a/internal/component/discovery/scaleway/scaleway.go b/internal/component/discovery/scaleway/scaleway.go index 303d7aeaf6..7a50f4073e 100644 --- a/internal/component/discovery/scaleway/scaleway.go +++ b/internal/component/discovery/scaleway/scaleway.go @@ -10,7 +10,7 @@ import ( "github.com/grafana/agent/internal/component/common/config" "github.com/grafana/agent/internal/component/discovery" "github.com/grafana/agent/internal/featuregate" - rivertypes "github.com/grafana/alloy/syntax/alloytypes" + "github.com/grafana/alloy/syntax/alloytypes" prom_config "github.com/prometheus/common/config" "github.com/prometheus/common/model" prom_discovery "github.com/prometheus/prometheus/discovery/scaleway" @@ -37,7 +37,7 @@ type Arguments struct { APIURL string `river:"api_url,attr,optional"` Zone string `river:"zone,attr,optional"` AccessKey string `river:"access_key,attr"` - SecretKey rivertypes.Secret `river:"secret_key,attr,optional"` + SecretKey alloytypes.Secret `river:"secret_key,attr,optional"` SecretKeyFile string `river:"secret_key_file,attr,optional"` NameFilter string `river:"name_filter,attr,optional"` TagsFilter []string 
`river:"tags_filter,attr,optional"` diff --git a/internal/component/discovery/uyuni/uyuni.go b/internal/component/discovery/uyuni/uyuni.go index 8aaf376fbe..84d155100f 100644 --- a/internal/component/discovery/uyuni/uyuni.go +++ b/internal/component/discovery/uyuni/uyuni.go @@ -9,7 +9,7 @@ import ( "github.com/grafana/agent/internal/component/common/config" "github.com/grafana/agent/internal/component/discovery" "github.com/grafana/agent/internal/featuregate" - rivertypes "github.com/grafana/alloy/syntax/alloytypes" + "github.com/grafana/alloy/syntax/alloytypes" promcfg "github.com/prometheus/common/config" "github.com/prometheus/common/model" prom_discovery "github.com/prometheus/prometheus/discovery/uyuni" @@ -31,7 +31,7 @@ func init() { type Arguments struct { Server string `river:"server,attr"` Username string `river:"username,attr"` - Password rivertypes.Secret `river:"password,attr"` + Password alloytypes.Secret `river:"password,attr"` Entitlement string `river:"entitlement,attr,optional"` Separator string `river:"separator,attr,optional"` RefreshInterval time.Duration `river:"refresh_interval,attr,optional"` diff --git a/internal/component/faro/receiver/arguments.go b/internal/component/faro/receiver/arguments.go index 405c26c40d..2bc233b363 100644 --- a/internal/component/faro/receiver/arguments.go +++ b/internal/component/faro/receiver/arguments.go @@ -7,7 +7,7 @@ import ( "github.com/grafana/agent/internal/component/common/loki" "github.com/grafana/agent/internal/component/otelcol" river "github.com/grafana/alloy/syntax" - rivertypes "github.com/grafana/alloy/syntax/alloytypes" + "github.com/grafana/alloy/syntax/alloytypes" ) // Arguments configures the app_agent_receiver component. @@ -33,7 +33,7 @@ type ServerArguments struct { Host string `river:"listen_address,attr,optional"` Port int `river:"listen_port,attr,optional"` CORSAllowedOrigins []string `river:"cors_allowed_origins,attr,optional"` - APIKey rivertypes.Secret `river:"api_key,attr,optional"` + APIKey alloytypes.Secret `river:"api_key,attr,optional"` MaxAllowedPayloadSize units.Base2Bytes `river:"max_allowed_payload_size,attr,optional"` RateLimiting RateLimitingArguments `river:"rate_limiting,block,optional"` diff --git a/internal/component/local/file/file.go b/internal/component/local/file/file.go index 9533549198..194f340474 100644 --- a/internal/component/local/file/file.go +++ b/internal/component/local/file/file.go @@ -14,7 +14,7 @@ import ( "github.com/grafana/agent/internal/featuregate" filedetector "github.com/grafana/agent/internal/filedetector" "github.com/grafana/agent/internal/flow/logging/level" - rivertypes "github.com/grafana/alloy/syntax/alloytypes" + "github.com/grafana/alloy/syntax/alloytypes" ) // waitReadPeriod holds the time to wait before reading a file while the @@ -66,7 +66,7 @@ func (a *Arguments) SetToDefault() { // Exports holds values which are exported by the local.file component. type Exports struct { // Content of the file. - Content rivertypes.OptionalSecret `river:"content,attr"` + Content alloytypes.OptionalSecret `river:"content,attr"` } // Component implements the local.file component. 
@@ -173,7 +173,7 @@ func (c *Component) readFile() error { c.lastAccessed.SetToCurrentTime() c.opts.OnStateChange(Exports{ - Content: rivertypes.OptionalSecret{ + Content: alloytypes.OptionalSecret{ IsSecret: c.args.IsSecret, Value: c.latestContent, }, diff --git a/internal/component/local/file/file_test.go b/internal/component/local/file/file_test.go index 9eeff6e01b..8f6e304d41 100644 --- a/internal/component/local/file/file_test.go +++ b/internal/component/local/file/file_test.go @@ -11,7 +11,7 @@ import ( "github.com/grafana/agent/internal/component/local/file" filedetector "github.com/grafana/agent/internal/filedetector" "github.com/grafana/agent/internal/flow/componenttest" - rivertypes "github.com/grafana/alloy/syntax/alloytypes" + "github.com/grafana/alloy/syntax/alloytypes" "github.com/stretchr/testify/require" ) @@ -48,7 +48,7 @@ func runFileTests(t *testing.T, ut filedetector.Detector) { // Swallow the initial exports notification. require.NoError(t, tc.WaitExports(time.Second)) require.Equal(t, file.Exports{ - Content: rivertypes.OptionalSecret{ + Content: alloytypes.OptionalSecret{ IsSecret: false, Value: "First load!", }, @@ -65,7 +65,7 @@ func runFileTests(t *testing.T, ut filedetector.Detector) { require.NoError(t, sc.WaitExports(time.Second)) require.Equal(t, file.Exports{ - Content: rivertypes.OptionalSecret{ + Content: alloytypes.OptionalSecret{ IsSecret: false, Value: "New content!", }, @@ -82,7 +82,7 @@ func runFileTests(t *testing.T, ut filedetector.Detector) { require.NoError(t, sc.WaitExports(time.Second)) require.Equal(t, file.Exports{ - Content: rivertypes.OptionalSecret{ + Content: alloytypes.OptionalSecret{ IsSecret: false, Value: "New content!", }, @@ -109,7 +109,7 @@ func TestFile_ImmediateExports(t *testing.T) { require.NoError(t, tc.WaitExports(time.Second)) require.Equal(t, file.Exports{ - Content: rivertypes.OptionalSecret{ + Content: alloytypes.OptionalSecret{ IsSecret: false, Value: "Hello, world!", }, diff --git a/internal/component/loki/source/aws_firehose/component.go b/internal/component/loki/source/aws_firehose/component.go index a0215f7bef..cd23ceb6a8 100644 --- a/internal/component/loki/source/aws_firehose/component.go +++ b/internal/component/loki/source/aws_firehose/component.go @@ -9,6 +9,7 @@ import ( "github.com/go-kit/log" "github.com/gorilla/mux" "github.com/grafana/agent/internal/featuregate" + "github.com/grafana/alloy/syntax/alloytypes" "github.com/prometheus/client_golang/prometheus" "github.com/prometheus/prometheus/model/relabel" @@ -18,7 +19,6 @@ import ( flow_relabel "github.com/grafana/agent/internal/component/common/relabel" "github.com/grafana/agent/internal/component/loki/source/aws_firehose/internal" "github.com/grafana/agent/internal/util" - rivertypes "github.com/grafana/alloy/syntax/alloytypes" ) func init() { @@ -35,7 +35,7 @@ func init() { type Arguments struct { Server *fnet.ServerConfig `river:",squash"` - AccessKey rivertypes.Secret `river:"access_key,attr,optional"` + AccessKey alloytypes.Secret `river:"access_key,attr,optional"` UseIncomingTimestamp bool `river:"use_incoming_timestamp,attr,optional"` ForwardTo []loki.LogsReceiver `river:"forward_to,attr"` RelabelRules flow_relabel.Rules `river:"relabel_rules,attr,optional"` diff --git a/internal/component/loki/source/cloudflare/cloudflare.go b/internal/component/loki/source/cloudflare/cloudflare.go index fa14de31fa..718ef89e07 100644 --- a/internal/component/loki/source/cloudflare/cloudflare.go +++ b/internal/component/loki/source/cloudflare/cloudflare.go @@ -19,7 
+19,7 @@ import ( cft "github.com/grafana/agent/internal/component/loki/source/cloudflare/internal/cloudflaretarget" "github.com/grafana/agent/internal/featuregate" "github.com/grafana/agent/internal/flow/logging/level" - rivertypes "github.com/grafana/alloy/syntax/alloytypes" + "github.com/grafana/alloy/syntax/alloytypes" "github.com/prometheus/common/model" ) @@ -38,7 +38,7 @@ func init() { // Arguments holds values which are used to configure the // loki.source.cloudflare component. type Arguments struct { - APIToken rivertypes.Secret `river:"api_token,attr"` + APIToken alloytypes.Secret `river:"api_token,attr"` ZoneID string `river:"zone_id,attr"` Labels map[string]string `river:"labels,attr,optional"` Workers int `river:"workers,attr,optional"` diff --git a/internal/component/loki/source/kafka/kafka.go b/internal/component/loki/source/kafka/kafka.go index 10265c6f29..5ac0240545 100644 --- a/internal/component/loki/source/kafka/kafka.go +++ b/internal/component/loki/source/kafka/kafka.go @@ -12,7 +12,7 @@ import ( kt "github.com/grafana/agent/internal/component/loki/source/internal/kafkatarget" "github.com/grafana/agent/internal/featuregate" "github.com/grafana/agent/internal/flow/logging/level" - rivertypes "github.com/grafana/alloy/syntax/alloytypes" + "github.com/grafana/alloy/syntax/alloytypes" "github.com/grafana/dskit/flagext" "github.com/prometheus/common/model" ) @@ -56,7 +56,7 @@ type KafkaAuthentication struct { type KafkaSASLConfig struct { Mechanism string `river:"mechanism,attr,optional"` User string `river:"user,attr,optional"` - Password rivertypes.Secret `river:"password,attr,optional"` + Password alloytypes.Secret `river:"password,attr,optional"` UseTLS bool `river:"use_tls,attr,optional"` TLSConfig config.TLSConfig `river:"tls_config,block,optional"` OAuthConfig OAuthConfigConfig `river:"oauth_config,block,optional"` diff --git a/internal/component/otelcol/auth/basic/basic.go b/internal/component/otelcol/auth/basic/basic.go index 2997c9b665..6ccec98a5d 100644 --- a/internal/component/otelcol/auth/basic/basic.go +++ b/internal/component/otelcol/auth/basic/basic.go @@ -5,7 +5,7 @@ import ( "github.com/grafana/agent/internal/component" "github.com/grafana/agent/internal/component/otelcol/auth" "github.com/grafana/agent/internal/featuregate" - rivertypes "github.com/grafana/alloy/syntax/alloytypes" + "github.com/grafana/alloy/syntax/alloytypes" "github.com/open-telemetry/opentelemetry-collector-contrib/extension/basicauthextension" otelcomponent "go.opentelemetry.io/collector/component" "go.opentelemetry.io/collector/config/configopaque" @@ -31,7 +31,7 @@ type Arguments struct { // TODO(rfratto): should we support htpasswd? 
Username string `river:"username,attr"` - Password rivertypes.Secret `river:"password,attr"` + Password alloytypes.Secret `river:"password,attr"` } var _ auth.Arguments = Arguments{} diff --git a/internal/component/otelcol/auth/bearer/bearer.go b/internal/component/otelcol/auth/bearer/bearer.go index 55510036fa..7e31dc05ed 100644 --- a/internal/component/otelcol/auth/bearer/bearer.go +++ b/internal/component/otelcol/auth/bearer/bearer.go @@ -5,7 +5,7 @@ import ( "github.com/grafana/agent/internal/component" "github.com/grafana/agent/internal/component/otelcol/auth" "github.com/grafana/agent/internal/featuregate" - rivertypes "github.com/grafana/alloy/syntax/alloytypes" + "github.com/grafana/alloy/syntax/alloytypes" "github.com/open-telemetry/opentelemetry-collector-contrib/extension/bearertokenauthextension" otelcomponent "go.opentelemetry.io/collector/component" "go.opentelemetry.io/collector/config/configopaque" @@ -30,7 +30,7 @@ func init() { type Arguments struct { // Do not include the "filename" attribute - users should use local.file instead. Scheme string `river:"scheme,attr,optional"` - Token rivertypes.Secret `river:"token,attr"` + Token alloytypes.Secret `river:"token,attr"` } var _ auth.Arguments = Arguments{} diff --git a/internal/component/otelcol/auth/headers/headers.go b/internal/component/otelcol/auth/headers/headers.go index f1f9d391ca..b216949721 100644 --- a/internal/component/otelcol/auth/headers/headers.go +++ b/internal/component/otelcol/auth/headers/headers.go @@ -10,7 +10,7 @@ import ( "github.com/grafana/agent/internal/component/otelcol/auth" "github.com/grafana/agent/internal/featuregate" river "github.com/grafana/alloy/syntax" - rivertypes "github.com/grafana/alloy/syntax/alloytypes" + "github.com/grafana/alloy/syntax/alloytypes" "github.com/open-telemetry/opentelemetry-collector-contrib/extension/headerssetterextension" otelcomponent "go.opentelemetry.io/collector/component" otelextension "go.opentelemetry.io/collector/extension" @@ -133,7 +133,7 @@ func (a *Action) UnmarshalText(text []byte) error { // Header is an individual Header to send along with requests. 
type Header struct { Key string `river:"key,attr"` - Value *rivertypes.OptionalSecret `river:"value,attr,optional"` + Value *alloytypes.OptionalSecret `river:"value,attr,optional"` FromContext *string `river:"from_context,attr,optional"` Action Action `river:"action,attr,optional"` } diff --git a/internal/component/otelcol/auth/oauth2/oauth2.go b/internal/component/otelcol/auth/oauth2/oauth2.go index 642a6225a4..b67deaed89 100644 --- a/internal/component/otelcol/auth/oauth2/oauth2.go +++ b/internal/component/otelcol/auth/oauth2/oauth2.go @@ -8,7 +8,7 @@ import ( "github.com/grafana/agent/internal/component/otelcol" "github.com/grafana/agent/internal/component/otelcol/auth" "github.com/grafana/agent/internal/featuregate" - rivertypes "github.com/grafana/alloy/syntax/alloytypes" + "github.com/grafana/alloy/syntax/alloytypes" "github.com/open-telemetry/opentelemetry-collector-contrib/extension/oauth2clientauthextension" otelcomponent "go.opentelemetry.io/collector/component" "go.opentelemetry.io/collector/config/configopaque" @@ -33,7 +33,7 @@ func init() { type Arguments struct { ClientID string `river:"client_id,attr,optional"` ClientIDFile string `river:"client_id_file,attr,optional"` - ClientSecret rivertypes.Secret `river:"client_secret,attr,optional"` + ClientSecret alloytypes.Secret `river:"client_secret,attr,optional"` ClientSecretFile string `river:"client_secret_file,attr,optional"` TokenURL string `river:"token_url,attr"` EndpointParams url.Values `river:"endpoint_params,attr,optional"` diff --git a/internal/component/otelcol/config_tls.go b/internal/component/otelcol/config_tls.go index 65a460d87b..04a3dbd653 100644 --- a/internal/component/otelcol/config_tls.go +++ b/internal/component/otelcol/config_tls.go @@ -4,7 +4,7 @@ import ( "fmt" "time" - rivertypes "github.com/grafana/alloy/syntax/alloytypes" + "github.com/grafana/alloy/syntax/alloytypes" "go.opentelemetry.io/collector/config/configopaque" otelconfigtls "go.opentelemetry.io/collector/config/configtls" ) @@ -58,7 +58,7 @@ type TLSSetting struct { CAFile string `river:"ca_file,attr,optional"` Cert string `river:"cert_pem,attr,optional"` CertFile string `river:"cert_file,attr,optional"` - Key rivertypes.Secret `river:"key_pem,attr,optional"` + Key alloytypes.Secret `river:"key_pem,attr,optional"` KeyFile string `river:"key_file,attr,optional"` MinVersion string `river:"min_version,attr,optional"` MaxVersion string `river:"max_version,attr,optional"` diff --git a/internal/component/otelcol/processor/resourcedetection/internal/consul/config.go b/internal/component/otelcol/processor/resourcedetection/internal/consul/config.go index 4862f728b2..70d88b7e77 100644 --- a/internal/component/otelcol/processor/resourcedetection/internal/consul/config.go +++ b/internal/component/otelcol/processor/resourcedetection/internal/consul/config.go @@ -3,7 +3,7 @@ package consul import ( rac "github.com/grafana/agent/internal/component/otelcol/processor/resourcedetection/internal/resource_attribute_config" river "github.com/grafana/alloy/syntax" - rivertypes "github.com/grafana/alloy/syntax/alloytypes" + "github.com/grafana/alloy/syntax/alloytypes" "go.opentelemetry.io/collector/config/configopaque" ) @@ -23,7 +23,7 @@ type Config struct { // agent's default (empty) token. Token is only required if // [Consul's ACL System](https://www.consul.io/docs/security/acl/acl-system) // is enabled. 
- Token rivertypes.Secret `river:"token,attr,optional"` + Token alloytypes.Secret `river:"token,attr,optional"` // TokenFile is not necessary in River because users can use the local.file // Flow component instead. diff --git a/internal/component/otelcol/receiver/kafka/kafka.go b/internal/component/otelcol/receiver/kafka/kafka.go index 08a6cabc35..109add84a1 100644 --- a/internal/component/otelcol/receiver/kafka/kafka.go +++ b/internal/component/otelcol/receiver/kafka/kafka.go @@ -8,7 +8,7 @@ import ( "github.com/grafana/agent/internal/component/otelcol" "github.com/grafana/agent/internal/component/otelcol/receiver" "github.com/grafana/agent/internal/featuregate" - rivertypes "github.com/grafana/alloy/syntax/alloytypes" + "github.com/grafana/alloy/syntax/alloytypes" "github.com/mitchellh/mapstructure" "github.com/open-telemetry/opentelemetry-collector-contrib/exporter/kafkaexporter" "github.com/open-telemetry/opentelemetry-collector-contrib/receiver/kafkareceiver" @@ -151,7 +151,7 @@ func (args AuthenticationArguments) Convert() map[string]interface{} { // broker. type PlaintextArguments struct { Username string `river:"username,attr"` - Password rivertypes.Secret `river:"password,attr"` + Password alloytypes.Secret `river:"password,attr"` } // Convert converts args into the upstream type. @@ -165,7 +165,7 @@ func (args PlaintextArguments) Convert() map[string]interface{} { // SASLArguments configures SASL authentication against the Kafka broker. type SASLArguments struct { Username string `river:"username,attr"` - Password rivertypes.Secret `river:"password,attr"` + Password alloytypes.Secret `river:"password,attr"` Mechanism string `river:"mechanism,attr"` Version int `river:"version,attr,optional"` AWSMSK AWSMSKArguments `river:"aws_msk,block,optional"` @@ -204,7 +204,7 @@ type KerberosArguments struct { Realm string `river:"realm,attr,optional"` UseKeyTab bool `river:"use_keytab,attr,optional"` Username string `river:"username,attr"` - Password rivertypes.Secret `river:"password,attr,optional"` + Password alloytypes.Secret `river:"password,attr,optional"` ConfigPath string `river:"config_file,attr,optional"` KeyTabPath string `river:"keytab_file,attr,optional"` } diff --git a/internal/component/otelcol/receiver/vcenter/vcenter.go b/internal/component/otelcol/receiver/vcenter/vcenter.go index 11e19b464b..b83f97f727 100644 --- a/internal/component/otelcol/receiver/vcenter/vcenter.go +++ b/internal/component/otelcol/receiver/vcenter/vcenter.go @@ -9,7 +9,7 @@ import ( "github.com/grafana/agent/internal/component/otelcol" "github.com/grafana/agent/internal/component/otelcol/receiver" "github.com/grafana/agent/internal/featuregate" - rivertypes "github.com/grafana/alloy/syntax/alloytypes" + "github.com/grafana/alloy/syntax/alloytypes" "github.com/mitchellh/mapstructure" "github.com/open-telemetry/opentelemetry-collector-contrib/receiver/vcenterreceiver" otelcomponent "go.opentelemetry.io/collector/component" @@ -259,7 +259,7 @@ func (args *MetricsBuilderConfig) Convert() map[string]interface{} { type Arguments struct { Endpoint string `river:"endpoint,attr"` Username string `river:"username,attr"` - Password rivertypes.Secret `river:"password,attr"` + Password alloytypes.Secret `river:"password,attr"` MetricsBuilderConfig MetricsBuilderConfig `river:",squash"` diff --git a/internal/component/prometheus/exporter/blackbox/blackbox.go b/internal/component/prometheus/exporter/blackbox/blackbox.go index c71a93bd6c..5e1e84c831 100644 --- 
a/internal/component/prometheus/exporter/blackbox/blackbox.go +++ b/internal/component/prometheus/exporter/blackbox/blackbox.go @@ -15,7 +15,7 @@ import ( "github.com/grafana/agent/internal/static/integrations" "github.com/grafana/agent/internal/static/integrations/blackbox_exporter" "github.com/grafana/agent/internal/util" - rivertypes "github.com/grafana/alloy/syntax/alloytypes" + "github.com/grafana/alloy/syntax/alloytypes" ) func init() { @@ -92,7 +92,7 @@ func (t TargetBlock) Convert() []blackbox_exporter.BlackboxTarget { type Arguments struct { ConfigFile string `river:"config_file,attr,optional"` - Config rivertypes.OptionalSecret `river:"config,attr,optional"` + Config alloytypes.OptionalSecret `river:"config,attr,optional"` Targets TargetBlock `river:"target,block"` ProbeTimeoutOffset time.Duration `river:"probe_timeout_offset,attr,optional"` } diff --git a/internal/component/prometheus/exporter/elasticsearch/elasticsearch_test.go b/internal/component/prometheus/exporter/elasticsearch/elasticsearch_test.go index a7756191bb..60708e447f 100644 --- a/internal/component/prometheus/exporter/elasticsearch/elasticsearch_test.go +++ b/internal/component/prometheus/exporter/elasticsearch/elasticsearch_test.go @@ -7,7 +7,7 @@ import ( commonCfg "github.com/grafana/agent/internal/component/common/config" "github.com/grafana/agent/internal/static/integrations/elasticsearch_exporter" river "github.com/grafana/alloy/syntax" - rivertypes "github.com/grafana/alloy/syntax/alloytypes" + "github.com/grafana/alloy/syntax/alloytypes" promCfg "github.com/prometheus/common/config" "github.com/stretchr/testify/require" ) @@ -59,7 +59,7 @@ func TestRiverUnmarshal(t *testing.T) { ExportSLM: true, BasicAuth: &commonCfg.BasicAuth{ Username: "username", - Password: rivertypes.Secret("pass"), + Password: alloytypes.Secret("pass"), }, } diff --git a/internal/component/prometheus/exporter/github/github.go b/internal/component/prometheus/exporter/github/github.go index b0d36af09b..eca1679719 100644 --- a/internal/component/prometheus/exporter/github/github.go +++ b/internal/component/prometheus/exporter/github/github.go @@ -6,7 +6,7 @@ import ( "github.com/grafana/agent/internal/featuregate" "github.com/grafana/agent/internal/static/integrations" "github.com/grafana/agent/internal/static/integrations/github_exporter" - rivertypes "github.com/grafana/alloy/syntax/alloytypes" + "github.com/grafana/alloy/syntax/alloytypes" config_util "github.com/prometheus/common/config" ) @@ -37,7 +37,7 @@ type Arguments struct { Repositories []string `river:"repositories,attr,optional"` Organizations []string `river:"organizations,attr,optional"` Users []string `river:"users,attr,optional"` - APIToken rivertypes.Secret `river:"api_token,attr,optional"` + APIToken alloytypes.Secret `river:"api_token,attr,optional"` APITokenFile string `river:"api_token_file,attr,optional"` } diff --git a/internal/component/prometheus/exporter/kafka/kafka.go b/internal/component/prometheus/exporter/kafka/kafka.go index 09982a742a..604b04e018 100644 --- a/internal/component/prometheus/exporter/kafka/kafka.go +++ b/internal/component/prometheus/exporter/kafka/kafka.go @@ -10,7 +10,7 @@ import ( "github.com/grafana/agent/internal/featuregate" "github.com/grafana/agent/internal/static/integrations" "github.com/grafana/agent/internal/static/integrations/kafka_exporter" - rivertypes "github.com/grafana/alloy/syntax/alloytypes" + "github.com/grafana/alloy/syntax/alloytypes" "github.com/prometheus/common/config" ) @@ -31,7 +31,7 @@ type Arguments struct { 
UseSASL bool `river:"use_sasl,attr,optional"` UseSASLHandshake bool `river:"use_sasl_handshake,attr,optional"` SASLUsername string `river:"sasl_username,attr,optional"` - SASLPassword rivertypes.Secret `river:"sasl_password,attr,optional"` + SASLPassword alloytypes.Secret `river:"sasl_password,attr,optional"` SASLMechanism string `river:"sasl_mechanism,attr,optional"` UseTLS bool `river:"use_tls,attr,optional"` CAFile string `river:"ca_file,attr,optional"` diff --git a/internal/component/prometheus/exporter/mongodb/mongodb.go b/internal/component/prometheus/exporter/mongodb/mongodb.go index 888a569f3e..cd46d1ebe1 100644 --- a/internal/component/prometheus/exporter/mongodb/mongodb.go +++ b/internal/component/prometheus/exporter/mongodb/mongodb.go @@ -6,7 +6,7 @@ import ( "github.com/grafana/agent/internal/featuregate" "github.com/grafana/agent/internal/static/integrations" "github.com/grafana/agent/internal/static/integrations/mongodb_exporter" - rivertypes "github.com/grafana/alloy/syntax/alloytypes" + "github.com/grafana/alloy/syntax/alloytypes" config_util "github.com/prometheus/common/config" ) @@ -27,7 +27,7 @@ func createExporter(opts component.Options, args component.Arguments, defaultIns } type Arguments struct { - URI rivertypes.Secret `river:"mongodb_uri,attr"` + URI alloytypes.Secret `river:"mongodb_uri,attr"` DirectConnect bool `river:"direct_connect,attr,optional"` DiscoveringMode bool `river:"discovering_mode,attr,optional"` TLSBasicAuthConfigPath string `river:"tls_basic_auth_config_path,attr,optional"` diff --git a/internal/component/prometheus/exporter/mssql/mssql.go b/internal/component/prometheus/exporter/mssql/mssql.go index 0b159d876a..dad15af1db 100644 --- a/internal/component/prometheus/exporter/mssql/mssql.go +++ b/internal/component/prometheus/exporter/mssql/mssql.go @@ -12,7 +12,7 @@ import ( "github.com/grafana/agent/internal/static/integrations" "github.com/grafana/agent/internal/static/integrations/mssql" "github.com/grafana/agent/internal/util" - rivertypes "github.com/grafana/alloy/syntax/alloytypes" + "github.com/grafana/alloy/syntax/alloytypes" config_util "github.com/prometheus/common/config" "gopkg.in/yaml.v2" ) @@ -42,11 +42,11 @@ var DefaultArguments = Arguments{ // Arguments controls the mssql exporter. type Arguments struct { - ConnectionString rivertypes.Secret `river:"connection_string,attr"` + ConnectionString alloytypes.Secret `river:"connection_string,attr"` MaxIdleConnections int `river:"max_idle_connections,attr,optional"` MaxOpenConnections int `river:"max_open_connections,attr,optional"` Timeout time.Duration `river:"timeout,attr,optional"` - QueryConfig rivertypes.OptionalSecret `river:"query_config,attr,optional"` + QueryConfig alloytypes.OptionalSecret `river:"query_config,attr,optional"` } // SetToDefault implements river.Defaulter. 
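The mssql `Arguments` above use both secret flavors this patch renames. A small sketch contrasting them, with the `OptionalSecret` field shapes (`IsSecret`, `Value`) taken from the literals earlier in this patch; the values are illustrative:

```go
package main

import (
	"fmt"

	"github.com/grafana/alloy/syntax/alloytypes"
)

func main() {
	// Secret is always treated as sensitive and redacted on export.
	conn := alloytypes.Secret("sqlserver://user:pass@localhost:1433")

	// OptionalSecret records whether the configuration wrapped the value as a
	// secret; plain string values stay exportable as text.
	query := alloytypes.OptionalSecret{
		IsSecret: false,
		Value:    "collector_name: mssql_standard",
	}

	fmt.Println(len(conn) > 0, query.IsSecret, query.Value)
}
```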
diff --git a/internal/component/prometheus/exporter/mssql/mssql_test.go b/internal/component/prometheus/exporter/mssql/mssql_test.go index d6f99f0623..c0fd7b8e25 100644 --- a/internal/component/prometheus/exporter/mssql/mssql_test.go +++ b/internal/component/prometheus/exporter/mssql/mssql_test.go @@ -7,7 +7,7 @@ import ( "github.com/burningalchemist/sql_exporter/config" "github.com/grafana/agent/internal/static/integrations/mssql" river "github.com/grafana/alloy/syntax" - rivertypes "github.com/grafana/alloy/syntax/alloytypes" + "github.com/grafana/alloy/syntax/alloytypes" config_util "github.com/prometheus/common/config" "github.com/stretchr/testify/require" "gopkg.in/yaml.v2" @@ -25,7 +25,7 @@ func TestRiverUnmarshal(t *testing.T) { require.NoError(t, err) expected := Arguments{ - ConnectionString: rivertypes.Secret("sqlserver://user:pass@localhost:1433"), + ConnectionString: alloytypes.Secret("sqlserver://user:pass@localhost:1433"), MaxIdleConnections: 3, MaxOpenConnections: 3, Timeout: 10 * time.Second, @@ -49,7 +49,7 @@ func TestRiverUnmarshalWithInlineQueryConfig(t *testing.T) { err = yaml.UnmarshalStrict([]byte(args.QueryConfig.Value), &collectorConfig) require.NoError(t, err) - require.Equal(t, rivertypes.Secret("sqlserver://user:pass@localhost:1433"), args.ConnectionString) + require.Equal(t, alloytypes.Secret("sqlserver://user:pass@localhost:1433"), args.ConnectionString) require.Equal(t, 3, args.MaxIdleConnections) require.Equal(t, 3, args.MaxOpenConnections) require.Equal(t, 10*time.Second, args.Timeout) @@ -78,7 +78,7 @@ func TestRiverUnmarshalWithInlineQueryConfigYaml(t *testing.T) { err = yaml.UnmarshalStrict([]byte(args.QueryConfig.Value), &collectorConfig) require.NoError(t, err) - require.Equal(t, rivertypes.Secret("sqlserver://user:pass@localhost:1433"), args.ConnectionString) + require.Equal(t, alloytypes.Secret("sqlserver://user:pass@localhost:1433"), args.ConnectionString) require.Equal(t, 3, args.MaxIdleConnections) require.Equal(t, 3, args.MaxOpenConnections) require.Equal(t, 10*time.Second, args.Timeout) @@ -145,7 +145,7 @@ func TestArgumentsValidate(t *testing.T) { { name: "invalid max open connections", args: Arguments{ - ConnectionString: rivertypes.Secret("test"), + ConnectionString: alloytypes.Secret("test"), MaxIdleConnections: 1, MaxOpenConnections: 0, Timeout: 10 * time.Second, @@ -155,7 +155,7 @@ func TestArgumentsValidate(t *testing.T) { { name: "invalid max idle connections", args: Arguments{ - ConnectionString: rivertypes.Secret("test"), + ConnectionString: alloytypes.Secret("test"), MaxIdleConnections: 0, MaxOpenConnections: 1, Timeout: 10 * time.Second, @@ -165,7 +165,7 @@ func TestArgumentsValidate(t *testing.T) { { name: "invalid timeout", args: Arguments{ - ConnectionString: rivertypes.Secret("test"), + ConnectionString: alloytypes.Secret("test"), MaxIdleConnections: 1, MaxOpenConnections: 1, Timeout: 0, @@ -175,11 +175,11 @@ func TestArgumentsValidate(t *testing.T) { { name: "valid", args: Arguments{ - ConnectionString: rivertypes.Secret("test"), + ConnectionString: alloytypes.Secret("test"), MaxIdleConnections: 1, MaxOpenConnections: 1, Timeout: 10 * time.Second, - QueryConfig: rivertypes.OptionalSecret{ + QueryConfig: alloytypes.OptionalSecret{ Value: `{ collector_name: mssql_standard, metrics: [ { metric_name: mssql_local_time_seconds, type: gauge, help: 'Local time in seconds since epoch (Unix time).', values: [ unix_time ], query: "SELECT DATEDIFF(second, '19700101', GETUTCDATE()) AS unix_time" } ] }`, }, }, @@ -209,11 +209,11 @@ metrics: 
query: "SELECT DATEDIFF(second, '19700101', GETUTCDATE()) AS unix_time"` args := Arguments{ - ConnectionString: rivertypes.Secret("sqlserver://user:pass@localhost:1433"), + ConnectionString: alloytypes.Secret("sqlserver://user:pass@localhost:1433"), MaxIdleConnections: 1, MaxOpenConnections: 1, Timeout: 10 * time.Second, - QueryConfig: rivertypes.OptionalSecret{ + QueryConfig: alloytypes.OptionalSecret{ Value: strQueryConfig, }, } diff --git a/internal/component/prometheus/exporter/mysql/mysql.go b/internal/component/prometheus/exporter/mysql/mysql.go index 1145133186..6f24e5d482 100644 --- a/internal/component/prometheus/exporter/mysql/mysql.go +++ b/internal/component/prometheus/exporter/mysql/mysql.go @@ -7,7 +7,7 @@ import ( "github.com/grafana/agent/internal/featuregate" "github.com/grafana/agent/internal/static/integrations" "github.com/grafana/agent/internal/static/integrations/mysqld_exporter" - rivertypes "github.com/grafana/alloy/syntax/alloytypes" + "github.com/grafana/alloy/syntax/alloytypes" config_util "github.com/prometheus/common/config" ) @@ -58,7 +58,7 @@ var DefaultArguments = Arguments{ // Arguments controls the mysql component. type Arguments struct { // DataSourceName to use to connect to MySQL. - DataSourceName rivertypes.Secret `river:"data_source_name,attr,optional"` + DataSourceName alloytypes.Secret `river:"data_source_name,attr,optional"` // Collectors to mark as enabled in addition to the default. EnableCollectors []string `river:"enable_collectors,attr,optional"` diff --git a/internal/component/prometheus/exporter/mysql/mysql_test.go b/internal/component/prometheus/exporter/mysql/mysql_test.go index 4b0940c158..eb2664d215 100644 --- a/internal/component/prometheus/exporter/mysql/mysql_test.go +++ b/internal/component/prometheus/exporter/mysql/mysql_test.go @@ -5,7 +5,7 @@ import ( "github.com/grafana/agent/internal/static/integrations/mysqld_exporter" river "github.com/grafana/alloy/syntax" - rivertypes "github.com/grafana/alloy/syntax/alloytypes" + "github.com/grafana/alloy/syntax/alloytypes" "github.com/stretchr/testify/require" ) @@ -160,14 +160,14 @@ func TestDefaultsSame(t *testing.T) { func TestValidate_ValidDataSource(t *testing.T) { args := Arguments{ - DataSourceName: rivertypes.Secret("root:secret_password@tcp(localhost:3306)/mydb"), + DataSourceName: alloytypes.Secret("root:secret_password@tcp(localhost:3306)/mydb"), } require.NoError(t, args.Validate()) } func TestValidate_InvalidDataSource(t *testing.T) { args := Arguments{ - DataSourceName: rivertypes.Secret("root:secret_password@invalid/mydb"), + DataSourceName: alloytypes.Secret("root:secret_password@invalid/mydb"), } require.Error(t, args.Validate()) } diff --git a/internal/component/prometheus/exporter/oracledb/oracledb.go b/internal/component/prometheus/exporter/oracledb/oracledb.go index 3e316988c2..21ea2909b6 100644 --- a/internal/component/prometheus/exporter/oracledb/oracledb.go +++ b/internal/component/prometheus/exporter/oracledb/oracledb.go @@ -10,7 +10,7 @@ import ( "github.com/grafana/agent/internal/featuregate" "github.com/grafana/agent/internal/static/integrations" "github.com/grafana/agent/internal/static/integrations/oracledb_exporter" - rivertypes "github.com/grafana/alloy/syntax/alloytypes" + "github.com/grafana/alloy/syntax/alloytypes" config_util "github.com/prometheus/common/config" ) @@ -44,7 +44,7 @@ var ( // Arguments controls the oracledb exporter. 
type Arguments struct { - ConnectionString rivertypes.Secret `river:"connection_string,attr"` + ConnectionString alloytypes.Secret `river:"connection_string,attr"` MaxIdleConns int `river:"max_idle_conns,attr,optional"` MaxOpenConns int `river:"max_open_conns,attr,optional"` QueryTimeout int `river:"query_timeout,attr,optional"` diff --git a/internal/component/prometheus/exporter/oracledb/oracledb_test.go b/internal/component/prometheus/exporter/oracledb/oracledb_test.go index 52c1291846..aea1120890 100644 --- a/internal/component/prometheus/exporter/oracledb/oracledb_test.go +++ b/internal/component/prometheus/exporter/oracledb/oracledb_test.go @@ -6,7 +6,7 @@ import ( "github.com/grafana/agent/internal/static/integrations/oracledb_exporter" river "github.com/grafana/alloy/syntax" - rivertypes "github.com/grafana/alloy/syntax/alloytypes" + "github.com/grafana/alloy/syntax/alloytypes" config_util "github.com/prometheus/common/config" "github.com/stretchr/testify/require" ) @@ -24,7 +24,7 @@ func TestRiverUnmarshal(t *testing.T) { require.NoError(t, err) expected := Arguments{ - ConnectionString: rivertypes.Secret("oracle://user:password@localhost:1521/orcl.localnet"), + ConnectionString: alloytypes.Secret("oracle://user:password@localhost:1521/orcl.localnet"), MaxIdleConns: 0, MaxOpenConns: 10, QueryTimeout: 5, @@ -43,7 +43,7 @@ func TestArgumentsValidate(t *testing.T) { { name: "no connection string", args: Arguments{ - ConnectionString: rivertypes.Secret(""), + ConnectionString: alloytypes.Secret(""), }, wantErr: true, err: errNoConnectionString, @@ -51,7 +51,7 @@ func TestArgumentsValidate(t *testing.T) { { name: "unable to parse connection string", args: Arguments{ - ConnectionString: rivertypes.Secret("oracle://user password@localhost:1521/orcl.localnet"), + ConnectionString: alloytypes.Secret("oracle://user password@localhost:1521/orcl.localnet"), }, wantErr: true, err: errors.New("unable to parse connection string:"), @@ -59,7 +59,7 @@ func TestArgumentsValidate(t *testing.T) { { name: "unexpected scheme", args: Arguments{ - ConnectionString: rivertypes.Secret("notoracle://user:password@localhost:1521/orcl.localnet"), + ConnectionString: alloytypes.Secret("notoracle://user:password@localhost:1521/orcl.localnet"), }, wantErr: true, err: errors.New("unexpected scheme of type"), @@ -67,7 +67,7 @@ func TestArgumentsValidate(t *testing.T) { { name: "no host name", args: Arguments{ - ConnectionString: rivertypes.Secret("oracle://user:password@:1521/orcl.localnet"), + ConnectionString: alloytypes.Secret("oracle://user:password@:1521/orcl.localnet"), }, wantErr: true, err: errNoHostname, @@ -75,7 +75,7 @@ func TestArgumentsValidate(t *testing.T) { { name: "valid", args: Arguments{ - ConnectionString: rivertypes.Secret("oracle://user:password@localhost:1521/orcl.localnet"), + ConnectionString: alloytypes.Secret("oracle://user:password@localhost:1521/orcl.localnet"), }, wantErr: false, }, diff --git a/internal/component/prometheus/exporter/postgres/postgres.go b/internal/component/prometheus/exporter/postgres/postgres.go index 4bd4b5557a..d5e2e0d1b7 100644 --- a/internal/component/prometheus/exporter/postgres/postgres.go +++ b/internal/component/prometheus/exporter/postgres/postgres.go @@ -9,7 +9,7 @@ import ( "github.com/grafana/agent/internal/featuregate" "github.com/grafana/agent/internal/static/integrations" "github.com/grafana/agent/internal/static/integrations/postgres_exporter" - rivertypes "github.com/grafana/alloy/syntax/alloytypes" + "github.com/grafana/alloy/syntax/alloytypes" 
"github.com/lib/pq" config_util "github.com/prometheus/common/config" ) @@ -77,7 +77,7 @@ type Arguments struct { // DataSourceNames to use to connect to Postgres. This is marked optional because it // may also be supplied by the POSTGRES_EXPORTER_DATA_SOURCE_NAME env var, // though it is not recommended to do so. - DataSourceNames []rivertypes.Secret `river:"data_source_names,attr,optional"` + DataSourceNames []alloytypes.Secret `river:"data_source_names,attr,optional"` // Attributes DisableSettingsMetrics bool `river:"disable_settings_metrics,attr,optional"` diff --git a/internal/component/prometheus/exporter/postgres/postgres_test.go b/internal/component/prometheus/exporter/postgres/postgres_test.go index 0b2ccbe928..758d0d3d23 100644 --- a/internal/component/prometheus/exporter/postgres/postgres_test.go +++ b/internal/component/prometheus/exporter/postgres/postgres_test.go @@ -5,6 +5,7 @@ import ( "github.com/grafana/agent/internal/static/integrations/postgres_exporter" river "github.com/grafana/alloy/syntax" + "github.com/grafana/alloy/syntax/alloytypes" rivertypes "github.com/grafana/alloy/syntax/alloytypes" config_util "github.com/prometheus/common/config" "github.com/stretchr/testify/require" @@ -28,7 +29,7 @@ func TestRiverConfigUnmarshal(t *testing.T) { require.NoError(t, err) expected := Arguments{ - DataSourceNames: []rivertypes.Secret{rivertypes.Secret("postgresql://username:password@localhost:5432/database?sslmode=disable")}, + DataSourceNames: []alloytypes.Secret{rivertypes.Secret("postgresql://username:password@localhost:5432/database?sslmode=disable")}, DisableSettingsMetrics: true, AutoDiscovery: AutoDiscovery{ Enabled: false, diff --git a/internal/component/prometheus/exporter/redis/redis.go b/internal/component/prometheus/exporter/redis/redis.go index 394727a529..cfb8f7459a 100644 --- a/internal/component/prometheus/exporter/redis/redis.go +++ b/internal/component/prometheus/exporter/redis/redis.go @@ -10,7 +10,7 @@ import ( "github.com/grafana/agent/internal/featuregate" "github.com/grafana/agent/internal/static/integrations" "github.com/grafana/agent/internal/static/integrations/redis_exporter" - rivertypes "github.com/grafana/alloy/syntax/alloytypes" + "github.com/grafana/alloy/syntax/alloytypes" config_util "github.com/prometheus/common/config" ) @@ -52,7 +52,7 @@ type Arguments struct { // are the only fields that are relevant to the exporter struct. 
RedisAddr string `river:"redis_addr,attr"` RedisUser string `river:"redis_user,attr,optional"` - RedisPassword rivertypes.Secret `river:"redis_password,attr,optional"` + RedisPassword alloytypes.Secret `river:"redis_password,attr,optional"` RedisPasswordFile string `river:"redis_password_file,attr,optional"` RedisPasswordMapFile string `river:"redis_password_map_file,attr,optional"` Namespace string `river:"namespace,attr,optional"` diff --git a/internal/component/prometheus/exporter/snmp/snmp.go b/internal/component/prometheus/exporter/snmp/snmp.go index 4025c98d69..286ae7fb35 100644 --- a/internal/component/prometheus/exporter/snmp/snmp.go +++ b/internal/component/prometheus/exporter/snmp/snmp.go @@ -11,7 +11,7 @@ import ( "github.com/grafana/agent/internal/featuregate" "github.com/grafana/agent/internal/static/integrations" "github.com/grafana/agent/internal/static/integrations/snmp_exporter" - rivertypes "github.com/grafana/alloy/syntax/alloytypes" + "github.com/grafana/alloy/syntax/alloytypes" snmp_config "github.com/prometheus/snmp_exporter/config" "gopkg.in/yaml.v2" ) @@ -113,7 +113,7 @@ func (w WalkParams) Convert() map[string]snmp_config.WalkParams { type Arguments struct { ConfigFile string `river:"config_file,attr,optional"` - Config rivertypes.OptionalSecret `river:"config,attr,optional"` + Config alloytypes.OptionalSecret `river:"config,attr,optional"` Targets TargetBlock `river:"target,block"` WalkParams WalkParams `river:"walk_param,block,optional"` ConfigStruct snmp_config.Config diff --git a/internal/component/prometheus/exporter/snowflake/snowflake.go b/internal/component/prometheus/exporter/snowflake/snowflake.go index aff54c510f..f377d4edd9 100644 --- a/internal/component/prometheus/exporter/snowflake/snowflake.go +++ b/internal/component/prometheus/exporter/snowflake/snowflake.go @@ -6,7 +6,7 @@ import ( "github.com/grafana/agent/internal/featuregate" "github.com/grafana/agent/internal/static/integrations" "github.com/grafana/agent/internal/static/integrations/snowflake_exporter" - rivertypes "github.com/grafana/alloy/syntax/alloytypes" + "github.com/grafana/alloy/syntax/alloytypes" config_util "github.com/prometheus/common/config" ) @@ -35,7 +35,7 @@ var DefaultArguments = Arguments{ type Arguments struct { AccountName string `river:"account_name,attr"` Username string `river:"username,attr"` - Password rivertypes.Secret `river:"password,attr"` + Password alloytypes.Secret `river:"password,attr"` Role string `river:"role,attr,optional"` Warehouse string `river:"warehouse,attr"` } diff --git a/internal/component/prometheus/exporter/snowflake/snowflake_test.go b/internal/component/prometheus/exporter/snowflake/snowflake_test.go index 62ca26d739..98b6927d77 100644 --- a/internal/component/prometheus/exporter/snowflake/snowflake_test.go +++ b/internal/component/prometheus/exporter/snowflake/snowflake_test.go @@ -5,7 +5,7 @@ import ( "github.com/grafana/agent/internal/static/integrations/snowflake_exporter" river "github.com/grafana/alloy/syntax" - rivertypes "github.com/grafana/alloy/syntax/alloytypes" + "github.com/grafana/alloy/syntax/alloytypes" config_util "github.com/prometheus/common/config" "github.com/stretchr/testify/require" ) @@ -26,7 +26,7 @@ func TestRiverUnmarshal(t *testing.T) { expected := Arguments{ AccountName: "some_account", Username: "some_user", - Password: rivertypes.Secret("some_password"), + Password: alloytypes.Secret("some_password"), Role: "some_role", Warehouse: "some_warehouse", } diff --git 
a/internal/component/prometheus/exporter/squid/squid.go b/internal/component/prometheus/exporter/squid/squid.go index 4945e59eb7..f5ba5a133c 100644 --- a/internal/component/prometheus/exporter/squid/squid.go +++ b/internal/component/prometheus/exporter/squid/squid.go @@ -8,7 +8,7 @@ import ( "github.com/grafana/agent/internal/featuregate" "github.com/grafana/agent/internal/static/integrations" "github.com/grafana/agent/internal/static/integrations/squid_exporter" - rivertypes "github.com/grafana/alloy/syntax/alloytypes" + "github.com/grafana/alloy/syntax/alloytypes" "github.com/prometheus/common/config" ) @@ -32,7 +32,7 @@ func createExporter(opts component.Options, args component.Arguments, defaultIns type Arguments struct { SquidAddr string `river:"address,attr"` SquidUser string `river:"username,attr,optional"` - SquidPassword rivertypes.Secret `river:"password,attr,optional"` + SquidPassword alloytypes.Secret `river:"password,attr,optional"` } // SetToDefault implements river.Defaulter. diff --git a/internal/component/prometheus/exporter/squid/squid_test.go b/internal/component/prometheus/exporter/squid/squid_test.go index 39e8a30cf2..9d0714a3e4 100644 --- a/internal/component/prometheus/exporter/squid/squid_test.go +++ b/internal/component/prometheus/exporter/squid/squid_test.go @@ -6,7 +6,7 @@ import ( "github.com/grafana/agent/internal/static/integrations/squid_exporter" river "github.com/grafana/alloy/syntax" - rivertypes "github.com/grafana/alloy/syntax/alloytypes" + "github.com/grafana/alloy/syntax/alloytypes" "github.com/prometheus/common/config" "github.com/stretchr/testify/require" ) @@ -25,7 +25,7 @@ func TestRiverUnmarshal(t *testing.T) { expected := Arguments{ SquidAddr: "some_address:port", SquidUser: "some_user", - SquidPassword: rivertypes.Secret("some_password"), + SquidPassword: alloytypes.Secret("some_password"), } require.Equal(t, expected, args) diff --git a/internal/component/prometheus/remotewrite/types.go b/internal/component/prometheus/remotewrite/types.go index 1b681c4c82..79b1649e2d 100644 --- a/internal/component/prometheus/remotewrite/types.go +++ b/internal/component/prometheus/remotewrite/types.go @@ -8,7 +8,7 @@ import ( types "github.com/grafana/agent/internal/component/common/config" flow_relabel "github.com/grafana/agent/internal/component/common/relabel" - rivertypes "github.com/grafana/alloy/syntax/alloytypes" + "github.com/grafana/alloy/syntax/alloytypes" "github.com/google/uuid" common "github.com/prometheus/common/config" @@ -323,7 +323,7 @@ func (a *AzureADConfig) toPrometheusType() *azuread.AzureADConfig { type SigV4Config struct { Region string `river:"region,attr,optional"` AccessKey string `river:"access_key,attr,optional"` - SecretKey rivertypes.Secret `river:"secret_key,attr,optional"` + SecretKey alloytypes.Secret `river:"secret_key,attr,optional"` Profile string `river:"profile,attr,optional"` RoleARN string `river:"role_arn,attr,optional"` } diff --git a/internal/component/remote/http/http.go b/internal/component/remote/http/http.go index 9bb83b4c71..2085f054fc 100644 --- a/internal/component/remote/http/http.go +++ b/internal/component/remote/http/http.go @@ -16,7 +16,7 @@ import ( "github.com/grafana/agent/internal/featuregate" "github.com/grafana/agent/internal/flow/logging/level" "github.com/grafana/agent/internal/useragent" - rivertypes "github.com/grafana/alloy/syntax/alloytypes" + "github.com/grafana/alloy/syntax/alloytypes" prom_config "github.com/prometheus/common/config" ) @@ -82,7 +82,7 @@ func (args *Arguments) Validate() 
error { // Exports holds settings exported by remote.http. type Exports struct { - Content rivertypes.OptionalSecret `river:"content,attr"` + Content alloytypes.OptionalSecret `river:"content,attr"` } // Component implements the remote.http component. @@ -231,7 +231,7 @@ func (c *Component) pollError() error { stringContent := strings.TrimSpace(string(bb)) newExports := Exports{ - Content: rivertypes.OptionalSecret{ + Content: alloytypes.OptionalSecret{ IsSecret: c.args.IsSecret, Value: stringContent, }, diff --git a/internal/component/remote/http/http_test.go b/internal/component/remote/http/http_test.go index 1b2d4b7df1..b4ed7a5eef 100644 --- a/internal/component/remote/http/http_test.go +++ b/internal/component/remote/http/http_test.go @@ -15,7 +15,7 @@ import ( "github.com/grafana/agent/internal/flow/logging/level" "github.com/grafana/agent/internal/util" river "github.com/grafana/alloy/syntax" - rivertypes "github.com/grafana/alloy/syntax/alloytypes" + "github.com/grafana/alloy/syntax/alloytypes" "github.com/grafana/dskit/backoff" "github.com/stretchr/testify/require" ) @@ -71,7 +71,7 @@ func Test(t *testing.T) { } requireExports(http_component.Exports{ - Content: rivertypes.OptionalSecret{ + Content: alloytypes.OptionalSecret{ IsSecret: false, Value: "Hello, world!", }, @@ -87,7 +87,7 @@ func Test(t *testing.T) { }) require.NoError(t, ctrl.WaitExports(time.Second), "component didn't update exports") requireExports(http_component.Exports{ - Content: rivertypes.OptionalSecret{ + Content: alloytypes.OptionalSecret{ IsSecret: false, Value: "Testing!\nMethod: PUT\nHeader: value", }, diff --git a/internal/component/remote/kubernetes/kubernetes.go b/internal/component/remote/kubernetes/kubernetes.go index 94c390f179..42761a02fc 100644 --- a/internal/component/remote/kubernetes/kubernetes.go +++ b/internal/component/remote/kubernetes/kubernetes.go @@ -12,7 +12,7 @@ import ( "github.com/grafana/agent/internal/component" "github.com/grafana/agent/internal/component/common/kubernetes" - rivertypes "github.com/grafana/alloy/syntax/alloytypes" + "github.com/grafana/alloy/syntax/alloytypes" v1 "k8s.io/apimachinery/pkg/apis/meta/v1" client_go "k8s.io/client-go/kubernetes" @@ -60,7 +60,7 @@ func (args *Arguments) Validate() error { // Exports holds settings exported by this component. type Exports struct { - Data map[string]rivertypes.OptionalSecret `river:"data,attr"` + Data map[string]alloytypes.OptionalSecret `river:"data,attr"` } // Component implements the remote.kubernetes.* component. 
@@ -171,14 +171,14 @@ func (c *Component) pollError() error { ctx, cancel := context.WithTimeout(context.Background(), c.args.PollTimeout) defer cancel() - data := map[string]rivertypes.OptionalSecret{} + data := map[string]alloytypes.OptionalSecret{} if c.kind == TypeSecret { secret, err := c.client.CoreV1().Secrets(c.args.Namespace).Get(ctx, c.args.Name, v1.GetOptions{}) if err != nil { return err } for k, v := range secret.Data { - data[k] = rivertypes.OptionalSecret{ + data[k] = alloytypes.OptionalSecret{ Value: string(v), IsSecret: true, } @@ -189,7 +189,7 @@ func (c *Component) pollError() error { return err } for k, v := range cmap.Data { - data[k] = rivertypes.OptionalSecret{ + data[k] = alloytypes.OptionalSecret{ Value: v, IsSecret: false, } diff --git a/internal/component/remote/s3/s3.go b/internal/component/remote/s3/s3.go index 4bd1b72fa3..d6ae76810c 100644 --- a/internal/component/remote/s3/s3.go +++ b/internal/component/remote/s3/s3.go @@ -14,7 +14,7 @@ import ( "github.com/aws/aws-sdk-go-v2/service/s3" "github.com/grafana/agent/internal/component" "github.com/grafana/agent/internal/featuregate" - rivertypes "github.com/grafana/alloy/syntax/alloytypes" + "github.com/grafana/alloy/syntax/alloytypes" "github.com/prometheus/client_golang/prometheus" ) @@ -205,7 +205,7 @@ func (s *Component) handleContentPolling(newContent string, err error) { if err == nil { s.opts.OnStateChange(Exports{ - Content: rivertypes.OptionalSecret{ + Content: alloytypes.OptionalSecret{ IsSecret: s.args.IsSecret, Value: newContent, }, diff --git a/internal/component/remote/s3/types.go b/internal/component/remote/s3/types.go index 8341d08a3a..2722e772af 100644 --- a/internal/component/remote/s3/types.go +++ b/internal/component/remote/s3/types.go @@ -4,7 +4,7 @@ import ( "fmt" "time" - rivertypes "github.com/grafana/alloy/syntax/alloytypes" + "github.com/grafana/alloy/syntax/alloytypes" ) // Arguments implements the input for the S3 component. @@ -22,7 +22,7 @@ type Arguments struct { // Client implements specific AWS configuration options type Client struct { AccessKey string `river:"key,attr,optional"` - Secret rivertypes.Secret `river:"secret,attr,optional"` + Secret alloytypes.Secret `river:"secret,attr,optional"` Endpoint string `river:"endpoint,attr,optional"` DisableSSL bool `river:"disable_ssl,attr,optional"` UsePathStyle bool `river:"use_path_style,attr,optional"` @@ -52,5 +52,5 @@ func (a *Arguments) Validate() error { // Exports implements the file content type Exports struct { - Content rivertypes.OptionalSecret `river:"content,attr"` + Content alloytypes.OptionalSecret `river:"content,attr"` } diff --git a/internal/component/remote/vault/auth.go b/internal/component/remote/vault/auth.go index 13a955799a..9d27fe26b9 100644 --- a/internal/component/remote/vault/auth.go +++ b/internal/component/remote/vault/auth.go @@ -4,7 +4,7 @@ import ( "context" "fmt" - rivertypes "github.com/grafana/alloy/syntax/alloytypes" + "github.com/grafana/alloy/syntax/alloytypes" vault "github.com/hashicorp/vault/api" "github.com/hashicorp/vault/api/auth/approle" "github.com/hashicorp/vault/api/auth/aws" @@ -70,7 +70,7 @@ func (a *AuthArguments) authMethod() authMethod { // AuthToken authenticates against Vault with a token. 
type AuthToken struct { - Token rivertypes.Secret `river:"token,attr"` + Token alloytypes.Secret `river:"token,attr"` } func (a *AuthToken) vaultAuthenticate(ctx context.Context, cli *vault.Client) (*vault.Secret, error) { @@ -81,7 +81,7 @@ func (a *AuthToken) vaultAuthenticate(ctx context.Context, cli *vault.Client) (* // AuthAppRole authenticates against Vault with AppRole. type AuthAppRole struct { RoleID string `river:"role_id,attr"` - Secret rivertypes.Secret `river:"secret,attr"` + Secret alloytypes.Secret `river:"secret,attr"` WrappingToken bool `river:"wrapping_token,attr,optional"` MountPath string `river:"mount_path,attr,optional"` } @@ -370,7 +370,7 @@ func (a *AuthKubernetes) vaultAuthenticate(ctx context.Context, cli *vault.Clien // AuthLDAP authenticates against Vault with LDAP. type AuthLDAP struct { Username string `river:"username,attr"` - Password rivertypes.Secret `river:"password,attr"` + Password alloytypes.Secret `river:"password,attr"` MountPath string `river:"mount_path,attr,optional"` } @@ -407,7 +407,7 @@ func (a *AuthLDAP) vaultAuthenticate(ctx context.Context, cli *vault.Client) (*v // AuthUserPass authenticates against Vault with a username and password. type AuthUserPass struct { Username string `river:"username,attr"` - Password rivertypes.Secret `river:"password,attr"` + Password alloytypes.Secret `river:"password,attr"` MountPath string `river:"mount_path,attr,optional"` } @@ -445,7 +445,7 @@ func (a *AuthUserPass) vaultAuthenticate(ctx context.Context, cli *vault.Client) type AuthCustom struct { // Path to use for logging in (e.g., auth/kubernetes/login, etc.) Path string `river:"path,attr"` - Data map[string]rivertypes.Secret `river:"data,attr"` + Data map[string]alloytypes.Secret `river:"data,attr"` } // Login implements vault.AuthMethod. diff --git a/internal/component/remote/vault/vault.go b/internal/component/remote/vault/vault.go index 2da4d882a5..ccc06173e5 100644 --- a/internal/component/remote/vault/vault.go +++ b/internal/component/remote/vault/vault.go @@ -10,7 +10,7 @@ import ( "github.com/grafana/agent/internal/component" "github.com/grafana/agent/internal/featuregate" "github.com/grafana/agent/internal/flow/logging/level" - rivertypes "github.com/grafana/alloy/syntax/alloytypes" + "github.com/grafana/alloy/syntax/alloytypes" "github.com/oklog/run" vault "github.com/hashicorp/vault/api" @@ -126,7 +126,7 @@ type Exports struct { // // However, it seems that most secrets engines don't actually return // arbitrary data, so this limitation shouldn't cause any issues in practice. - Data map[string]rivertypes.Secret `river:"data,attr"` + Data map[string]alloytypes.Secret `river:"data,attr"` } // Component implements the remote.vault component. @@ -276,15 +276,15 @@ func (c *Component) getSecret(ctx context.Context, cli *vault.Client) (*vault.Se // controller. func (c *Component) exportSecret(secret *vault.Secret) { newExports := Exports{ - Data: make(map[string]rivertypes.Secret), + Data: make(map[string]alloytypes.Secret), } for key, value := range secret.Data { switch value := value.(type) { case string: - newExports.Data[key] = rivertypes.Secret(value) + newExports.Data[key] = alloytypes.Secret(value) case []byte: - newExports.Data[key] = rivertypes.Secret(value) + newExports.Data[key] = alloytypes.Secret(value) default: // Non-string secrets are ignored. 
diff --git a/internal/component/remote/vault/vault_test.go b/internal/component/remote/vault/vault_test.go index 8b9a234767..b1279c2ddb 100644 --- a/internal/component/remote/vault/vault_test.go +++ b/internal/component/remote/vault/vault_test.go @@ -15,7 +15,7 @@ import ( "github.com/grafana/agent/internal/flow/componenttest" "github.com/grafana/agent/internal/util" river "github.com/grafana/alloy/syntax" - rivertypes "github.com/grafana/alloy/syntax/alloytypes" + "github.com/grafana/alloy/syntax/alloytypes" "github.com/stretchr/testify/require" "github.com/testcontainers/testcontainers-go" "github.com/testcontainers/testcontainers-go/wait" @@ -61,8 +61,8 @@ func Test_GetSecrets(t *testing.T) { var ( expectExports = Exports{ - Data: map[string]rivertypes.Secret{ - "key": rivertypes.Secret("value"), + Data: map[string]alloytypes.Secret{ + "key": alloytypes.Secret("value"), }, } actualExports = ctrl.Exports().(Exports) @@ -112,8 +112,8 @@ func Test_PollSecrets(t *testing.T) { var ( expectExports = Exports{ - Data: map[string]rivertypes.Secret{ - "key": rivertypes.Secret("value"), + Data: map[string]alloytypes.Secret{ + "key": alloytypes.Secret("value"), }, } actualExports = ctrl.Exports().(Exports) @@ -132,8 +132,8 @@ func Test_PollSecrets(t *testing.T) { var ( expectExports = Exports{ - Data: map[string]rivertypes.Secret{ - "key": rivertypes.Secret("newvalue"), + Data: map[string]alloytypes.Secret{ + "key": alloytypes.Secret("newvalue"), }, } actualExports = ctrl.Exports().(Exports) diff --git a/internal/converter/internal/common/http_client_config.go b/internal/converter/internal/common/http_client_config.go index 6f1631cf74..f144f748fc 100644 --- a/internal/converter/internal/common/http_client_config.go +++ b/internal/converter/internal/common/http_client_config.go @@ -5,7 +5,7 @@ import ( "github.com/grafana/agent/internal/component/common/config" "github.com/grafana/agent/internal/converter/diag" - rivertypes "github.com/grafana/alloy/syntax/alloytypes" + "github.com/grafana/alloy/syntax/alloytypes" prom_config "github.com/prometheus/common/config" ) @@ -18,7 +18,7 @@ func ToHttpClientConfig(httpClientConfig *prom_config.HTTPClientConfig) *config. 
BasicAuth: toBasicAuth(httpClientConfig.BasicAuth), Authorization: toAuthorization(httpClientConfig.Authorization), OAuth2: toOAuth2(httpClientConfig.OAuth2), - BearerToken: rivertypes.Secret(httpClientConfig.BearerToken), + BearerToken: alloytypes.Secret(httpClientConfig.BearerToken), BearerTokenFile: httpClientConfig.BearerTokenFile, ProxyConfig: ToProxyConfig(httpClientConfig.ProxyConfig), TLSConfig: *ToTLSConfig(&httpClientConfig.TLSConfig), @@ -44,7 +44,7 @@ func toBasicAuth(basicAuth *prom_config.BasicAuth) *config.BasicAuth { return &config.BasicAuth{ Username: basicAuth.Username, - Password: rivertypes.Secret(basicAuth.Password), + Password: alloytypes.Secret(basicAuth.Password), PasswordFile: basicAuth.PasswordFile, } } @@ -56,7 +56,7 @@ func toAuthorization(authorization *prom_config.Authorization) *config.Authoriza return &config.Authorization{ Type: authorization.Type, - Credentials: rivertypes.Secret(authorization.Credentials), + Credentials: alloytypes.Secret(authorization.Credentials), CredentialsFile: authorization.CredentialsFile, } } @@ -68,7 +68,7 @@ func toOAuth2(oAuth2 *prom_config.OAuth2) *config.OAuth2Config { return &config.OAuth2Config{ ClientID: oAuth2.ClientID, - ClientSecret: rivertypes.Secret(oAuth2.ClientSecret), + ClientSecret: alloytypes.Secret(oAuth2.ClientSecret), ClientSecretFile: oAuth2.ClientSecretFile, Scopes: oAuth2.Scopes, TokenURL: oAuth2.TokenURL, @@ -108,14 +108,14 @@ func toProxyConnectHeader(proxyConnectHeader prom_config.Header) config.Header { } header := config.Header{ - Header: make(map[string][]rivertypes.Secret), + Header: make(map[string][]alloytypes.Secret), } for name, values := range proxyConnectHeader { - var s []rivertypes.Secret + var s []alloytypes.Secret if values != nil { - s = make([]rivertypes.Secret, 0, len(values)) + s = make([]alloytypes.Secret, 0, len(values)) for _, value := range values { - s = append(s, rivertypes.Secret(value)) + s = append(s, alloytypes.Secret(value)) } } header.Header[name] = s @@ -133,7 +133,7 @@ func ToTLSConfig(tlsConfig *prom_config.TLSConfig) *config.TLSConfig { CAFile: tlsConfig.CAFile, Cert: tlsConfig.Cert, CertFile: tlsConfig.CertFile, - Key: rivertypes.Secret(tlsConfig.Key), + Key: alloytypes.Secret(tlsConfig.Key), KeyFile: tlsConfig.KeyFile, ServerName: tlsConfig.ServerName, InsecureSkipVerify: tlsConfig.InsecureSkipVerify, diff --git a/internal/converter/internal/common/river_utils.go b/internal/converter/internal/common/river_utils.go index 82544c8b85..09ae0d2d80 100644 --- a/internal/converter/internal/common/river_utils.go +++ b/internal/converter/internal/common/river_utils.go @@ -6,6 +6,7 @@ import ( "strings" river "github.com/grafana/alloy/syntax" + "github.com/grafana/alloy/syntax/alloytypes" "github.com/grafana/alloy/syntax/parser" "github.com/grafana/alloy/syntax/printer" "github.com/grafana/alloy/syntax/scanner" @@ -14,7 +15,6 @@ import ( flow_relabel "github.com/grafana/agent/internal/component/common/relabel" "github.com/grafana/agent/internal/component/discovery" "github.com/grafana/agent/internal/converter/diag" - rivertypes "github.com/grafana/alloy/syntax/alloytypes" "github.com/grafana/alloy/syntax/token/builder" ) @@ -38,15 +38,15 @@ func NewBlockWithOverrideFn(name []string, label string, args component.Argument func getValueOverrideHook() builder.ValueOverrideHook { return func(val interface{}) interface{} { switch value := val.(type) { - case rivertypes.Secret: + case alloytypes.Secret: return string(value) - case []rivertypes.Secret: + case []alloytypes.Secret: 
secrets := make([]string, 0, len(value)) for _, secret := range value { secrets = append(secrets, string(secret)) } return secrets - case map[string][]rivertypes.Secret: + case map[string][]alloytypes.Secret: secrets := make(map[string][]string, len(value)) for k, v := range value { secrets[k] = make([]string, 0, len(v)) diff --git a/internal/converter/internal/otelcolconvert/converter_basicauthextension.go b/internal/converter/internal/otelcolconvert/converter_basicauthextension.go index 52083e204a..9bbd6c29d2 100644 --- a/internal/converter/internal/otelcolconvert/converter_basicauthextension.go +++ b/internal/converter/internal/otelcolconvert/converter_basicauthextension.go @@ -6,7 +6,7 @@ import ( "github.com/grafana/agent/internal/component/otelcol/auth/basic" "github.com/grafana/agent/internal/converter/diag" "github.com/grafana/agent/internal/converter/internal/common" - rivertypes "github.com/grafana/alloy/syntax/alloytypes" + "github.com/grafana/alloy/syntax/alloytypes" "github.com/open-telemetry/opentelemetry-collector-contrib/extension/basicauthextension" "go.opentelemetry.io/collector/component" ) @@ -43,6 +43,6 @@ func (basicAuthConverterConverter) ConvertAndAppend(state *state, id component.I func toBasicAuthExtension(cfg *basicauthextension.Config) *basic.Arguments { return &basic.Arguments{ Username: cfg.ClientAuth.Username, - Password: rivertypes.Secret(string(cfg.ClientAuth.Password)), + Password: alloytypes.Secret(string(cfg.ClientAuth.Password)), } } diff --git a/internal/converter/internal/otelcolconvert/converter_bearertokenauthextension.go b/internal/converter/internal/otelcolconvert/converter_bearertokenauthextension.go index b754639444..5b498b4fa5 100644 --- a/internal/converter/internal/otelcolconvert/converter_bearertokenauthextension.go +++ b/internal/converter/internal/otelcolconvert/converter_bearertokenauthextension.go @@ -8,7 +8,7 @@ import ( "github.com/grafana/agent/internal/component/otelcol/auth/bearer" "github.com/grafana/agent/internal/converter/diag" "github.com/grafana/agent/internal/converter/internal/common" - rivertypes "github.com/grafana/alloy/syntax/alloytypes" + "github.com/grafana/alloy/syntax/alloytypes" "github.com/grafana/alloy/syntax/token/builder" "github.com/open-telemetry/opentelemetry-collector-contrib/extension/bearertokenauthextension" "go.opentelemetry.io/collector/component" @@ -41,7 +41,7 @@ func (bearerTokenAuthExtensionConverter) ConvertAndAppend(state *state, id compo args, fileContents := toBearerTokenAuthExtensionWithFilename(state, bcfg) overrideHook := func(val interface{}) interface{} { switch value := val.(type) { - case rivertypes.Secret: + case alloytypes.Secret: return common.CustomTokenizer{Expr: fileContents} default: return value @@ -62,7 +62,7 @@ func (bearerTokenAuthExtensionConverter) ConvertAndAppend(state *state, id compo func toBearerTokenAuthExtension(cfg *bearertokenauthextension.Config) *bearer.Arguments { return &bearer.Arguments{ Scheme: cfg.Scheme, - Token: rivertypes.Secret(string(cfg.BearerToken)), + Token: alloytypes.Secret(string(cfg.BearerToken)), } } func toBearerTokenAuthExtensionWithFilename(state *state, cfg *bearertokenauthextension.Config) (*bearer.Arguments, string) { diff --git a/internal/converter/internal/otelcolconvert/converter_headerssetterextension.go b/internal/converter/internal/otelcolconvert/converter_headerssetterextension.go index 60f4cdc3e6..658270ff1e 100644 --- a/internal/converter/internal/otelcolconvert/converter_headerssetterextension.go +++ 
b/internal/converter/internal/otelcolconvert/converter_headerssetterextension.go @@ -6,7 +6,7 @@ import ( "github.com/grafana/agent/internal/component/otelcol/auth/headers" "github.com/grafana/agent/internal/converter/diag" "github.com/grafana/agent/internal/converter/internal/common" - rivertypes "github.com/grafana/alloy/syntax/alloytypes" + "github.com/grafana/alloy/syntax/alloytypes" "github.com/open-telemetry/opentelemetry-collector-contrib/extension/headerssetterextension" "go.opentelemetry.io/collector/component" ) @@ -43,9 +43,9 @@ func (headersSetterExtensionConverter) ConvertAndAppend(state *state, id compone func toHeadersSetterExtension(cfg *headerssetterextension.Config) *headers.Arguments { res := make([]headers.Header, 0, len(cfg.HeadersConfig)) for _, h := range cfg.HeadersConfig { - var val *rivertypes.OptionalSecret + var val *alloytypes.OptionalSecret if h.Value != nil { - val = &rivertypes.OptionalSecret{ + val = &alloytypes.OptionalSecret{ IsSecret: false, // we default to non-secret so that the converted configuration includes the actual value instead of (secret). Value: *h.Value, } diff --git a/internal/converter/internal/otelcolconvert/converter_kafkareceiver.go b/internal/converter/internal/otelcolconvert/converter_kafkareceiver.go index 50faf5a373..7236438908 100644 --- a/internal/converter/internal/otelcolconvert/converter_kafkareceiver.go +++ b/internal/converter/internal/otelcolconvert/converter_kafkareceiver.go @@ -8,7 +8,7 @@ import ( "github.com/grafana/agent/internal/component/otelcol/receiver/kafka" "github.com/grafana/agent/internal/converter/diag" "github.com/grafana/agent/internal/converter/internal/common" - rivertypes "github.com/grafana/alloy/syntax/alloytypes" + "github.com/grafana/alloy/syntax/alloytypes" "github.com/mitchellh/mapstructure" "github.com/open-telemetry/opentelemetry-collector-contrib/exporter/kafkaexporter" "github.com/open-telemetry/opentelemetry-collector-contrib/receiver/kafkareceiver" @@ -93,7 +93,7 @@ func toKafkaPlaintext(cfg map[string]any) *kafka.PlaintextArguments { return &kafka.PlaintextArguments{ Username: cfg["username"].(string), - Password: rivertypes.Secret(cfg["password"].(string)), + Password: alloytypes.Secret(cfg["password"].(string)), } } @@ -104,7 +104,7 @@ func toKafkaSASL(cfg map[string]any) *kafka.SASLArguments { return &kafka.SASLArguments{ Username: cfg["username"].(string), - Password: rivertypes.Secret(cfg["password"].(string)), + Password: alloytypes.Secret(cfg["password"].(string)), Mechanism: cfg["mechanism"].(string), Version: cfg["version"].(int), AWSMSK: toKafkaAWSMSK(encodeMapstruct(cfg["aws_msk"])), @@ -147,7 +147,7 @@ func toKafkaKerberos(cfg map[string]any) *kafka.KerberosArguments { Realm: cfg["realm"].(string), UseKeyTab: cfg["use_keytab"].(bool), Username: cfg["username"].(string), - Password: rivertypes.Secret(cfg["password"].(string)), + Password: alloytypes.Secret(cfg["password"].(string)), ConfigPath: cfg["config_file"].(string), KeyTabPath: cfg["keytab_file"].(string), } diff --git a/internal/converter/internal/otelcolconvert/converter_oauth2clientauthextension.go b/internal/converter/internal/otelcolconvert/converter_oauth2clientauthextension.go index 1373df33c6..94eda15664 100644 --- a/internal/converter/internal/otelcolconvert/converter_oauth2clientauthextension.go +++ b/internal/converter/internal/otelcolconvert/converter_oauth2clientauthextension.go @@ -6,7 +6,7 @@ import ( "github.com/grafana/agent/internal/component/otelcol/auth/oauth2" 
"github.com/grafana/agent/internal/converter/diag" "github.com/grafana/agent/internal/converter/internal/common" - rivertypes "github.com/grafana/alloy/syntax/alloytypes" + "github.com/grafana/alloy/syntax/alloytypes" "github.com/open-telemetry/opentelemetry-collector-contrib/extension/oauth2clientauthextension" "go.opentelemetry.io/collector/component" ) @@ -43,7 +43,7 @@ func (oauth2ClientAuthExtensionConverter) ConvertAndAppend(state *state, id comp func toOAuth2ClientAuthExtension(cfg *oauth2clientauthextension.Config) *oauth2.Arguments { return &oauth2.Arguments{ ClientID: cfg.ClientID, - ClientSecret: rivertypes.Secret(cfg.ClientSecret), + ClientSecret: alloytypes.Secret(cfg.ClientSecret), TokenURL: cfg.TokenURL, EndpointParams: cfg.EndpointParams, Scopes: cfg.Scopes, diff --git a/internal/converter/internal/otelcolconvert/converter_otlpreceiver.go b/internal/converter/internal/otelcolconvert/converter_otlpreceiver.go index 768787d175..6e5202aeca 100644 --- a/internal/converter/internal/otelcolconvert/converter_otlpreceiver.go +++ b/internal/converter/internal/otelcolconvert/converter_otlpreceiver.go @@ -8,7 +8,7 @@ import ( "github.com/grafana/agent/internal/component/otelcol/receiver/otlp" "github.com/grafana/agent/internal/converter/diag" "github.com/grafana/agent/internal/converter/internal/common" - rivertypes "github.com/grafana/alloy/syntax/alloytypes" + "github.com/grafana/alloy/syntax/alloytypes" "go.opentelemetry.io/collector/component" "go.opentelemetry.io/collector/config/configgrpc" "go.opentelemetry.io/collector/config/confighttp" @@ -104,7 +104,7 @@ func toTLSSetting(cfg configtls.TLSSetting) otelcol.TLSSetting { CAFile: cfg.CAFile, Cert: string(cfg.CertPem), CertFile: cfg.CertFile, - Key: rivertypes.Secret(cfg.KeyPem), + Key: alloytypes.Secret(cfg.KeyPem), KeyFile: cfg.KeyFile, MinVersion: cfg.MinVersion, MaxVersion: cfg.MaxVersion, diff --git a/internal/converter/internal/prometheusconvert/component/azure.go b/internal/converter/internal/prometheusconvert/component/azure.go index 4601b32c85..5f98bbee56 100644 --- a/internal/converter/internal/prometheusconvert/component/azure.go +++ b/internal/converter/internal/prometheusconvert/component/azure.go @@ -8,7 +8,7 @@ import ( "github.com/grafana/agent/internal/converter/diag" "github.com/grafana/agent/internal/converter/internal/common" "github.com/grafana/agent/internal/converter/internal/prometheusconvert/build" - rivertypes "github.com/grafana/alloy/syntax/alloytypes" + "github.com/grafana/alloy/syntax/alloytypes" prom_azure "github.com/prometheus/prometheus/discovery/azure" ) @@ -58,6 +58,6 @@ func toDiscoveryAzureOauth2(clientId string, tenantId string, clientSecret strin return &azure.OAuth{ ClientID: clientId, TenantID: tenantId, - ClientSecret: rivertypes.Secret(clientSecret), + ClientSecret: alloytypes.Secret(clientSecret), } } diff --git a/internal/converter/internal/prometheusconvert/component/consul.go b/internal/converter/internal/prometheusconvert/component/consul.go index 684af0dd14..6e6bf672e8 100644 --- a/internal/converter/internal/prometheusconvert/component/consul.go +++ b/internal/converter/internal/prometheusconvert/component/consul.go @@ -8,7 +8,7 @@ import ( "github.com/grafana/agent/internal/converter/diag" "github.com/grafana/agent/internal/converter/internal/common" "github.com/grafana/agent/internal/converter/internal/prometheusconvert/build" - rivertypes "github.com/grafana/alloy/syntax/alloytypes" + "github.com/grafana/alloy/syntax/alloytypes" prom_consul 
"github.com/prometheus/prometheus/discovery/consul" ) @@ -31,14 +31,14 @@ func toDiscoveryConsul(sdConfig *prom_consul.SDConfig) *consul.Arguments { return &consul.Arguments{ Server: sdConfig.Server, - Token: rivertypes.Secret(sdConfig.Token), + Token: alloytypes.Secret(sdConfig.Token), Datacenter: sdConfig.Datacenter, Namespace: sdConfig.Namespace, Partition: sdConfig.Partition, TagSeparator: sdConfig.TagSeparator, Scheme: sdConfig.Scheme, Username: sdConfig.Username, - Password: rivertypes.Secret(sdConfig.Password), + Password: alloytypes.Secret(sdConfig.Password), AllowStale: sdConfig.AllowStale, Services: sdConfig.Services, ServiceTags: sdConfig.ServiceTags, diff --git a/internal/converter/internal/prometheusconvert/component/digitalocean.go b/internal/converter/internal/prometheusconvert/component/digitalocean.go index cb3cd27912..2d5d653d97 100644 --- a/internal/converter/internal/prometheusconvert/component/digitalocean.go +++ b/internal/converter/internal/prometheusconvert/component/digitalocean.go @@ -8,7 +8,7 @@ import ( "github.com/grafana/agent/internal/converter/diag" "github.com/grafana/agent/internal/converter/internal/common" "github.com/grafana/agent/internal/converter/internal/prometheusconvert/build" - rivertypes "github.com/grafana/alloy/syntax/alloytypes" + "github.com/grafana/alloy/syntax/alloytypes" prom_config "github.com/prometheus/common/config" prom_digitalocean "github.com/prometheus/prometheus/discovery/digitalocean" ) @@ -46,7 +46,7 @@ func toDiscoveryDigitalOcean(sdConfig *prom_digitalocean.SDConfig) *digitalocean return &digitalocean.Arguments{ RefreshInterval: time.Duration(sdConfig.RefreshInterval), Port: sdConfig.Port, - BearerToken: rivertypes.Secret(sdConfig.HTTPClientConfig.BearerToken), + BearerToken: alloytypes.Secret(sdConfig.HTTPClientConfig.BearerToken), BearerTokenFile: sdConfig.HTTPClientConfig.BearerTokenFile, ProxyConfig: common.ToProxyConfig(sdConfig.HTTPClientConfig.ProxyConfig), FollowRedirects: sdConfig.HTTPClientConfig.FollowRedirects, diff --git a/internal/converter/internal/prometheusconvert/component/ec2.go b/internal/converter/internal/prometheusconvert/component/ec2.go index d64000a594..46bf78ff11 100644 --- a/internal/converter/internal/prometheusconvert/component/ec2.go +++ b/internal/converter/internal/prometheusconvert/component/ec2.go @@ -8,7 +8,7 @@ import ( "github.com/grafana/agent/internal/converter/diag" "github.com/grafana/agent/internal/converter/internal/common" "github.com/grafana/agent/internal/converter/internal/prometheusconvert/build" - rivertypes "github.com/grafana/alloy/syntax/alloytypes" + "github.com/grafana/alloy/syntax/alloytypes" prom_aws "github.com/prometheus/prometheus/discovery/aws" ) @@ -33,7 +33,7 @@ func toDiscoveryEC2(sdConfig *prom_aws.EC2SDConfig) *aws.EC2Arguments { Endpoint: sdConfig.Endpoint, Region: sdConfig.Region, AccessKey: sdConfig.AccessKey, - SecretKey: rivertypes.Secret(sdConfig.SecretKey), + SecretKey: alloytypes.Secret(sdConfig.SecretKey), Profile: sdConfig.Profile, RoleARN: sdConfig.RoleARN, RefreshInterval: time.Duration(sdConfig.RefreshInterval), diff --git a/internal/converter/internal/prometheusconvert/component/lightsail.go b/internal/converter/internal/prometheusconvert/component/lightsail.go index 60ad944ad2..765d504ad8 100644 --- a/internal/converter/internal/prometheusconvert/component/lightsail.go +++ b/internal/converter/internal/prometheusconvert/component/lightsail.go @@ -8,7 +8,7 @@ import ( "github.com/grafana/agent/internal/converter/diag" 
"github.com/grafana/agent/internal/converter/internal/common" "github.com/grafana/agent/internal/converter/internal/prometheusconvert/build" - rivertypes "github.com/grafana/alloy/syntax/alloytypes" + "github.com/grafana/alloy/syntax/alloytypes" prom_aws "github.com/prometheus/prometheus/discovery/aws" ) @@ -33,7 +33,7 @@ func toDiscoveryLightsail(sdConfig *prom_aws.LightsailSDConfig) *aws.LightsailAr Endpoint: sdConfig.Endpoint, Region: sdConfig.Region, AccessKey: sdConfig.AccessKey, - SecretKey: rivertypes.Secret(sdConfig.SecretKey), + SecretKey: alloytypes.Secret(sdConfig.SecretKey), Profile: sdConfig.Profile, RoleARN: sdConfig.RoleARN, RefreshInterval: time.Duration(sdConfig.RefreshInterval), diff --git a/internal/converter/internal/prometheusconvert/component/marathon.go b/internal/converter/internal/prometheusconvert/component/marathon.go index c9249f1d42..21cb6be3ef 100644 --- a/internal/converter/internal/prometheusconvert/component/marathon.go +++ b/internal/converter/internal/prometheusconvert/component/marathon.go @@ -8,7 +8,7 @@ import ( "github.com/grafana/agent/internal/converter/diag" "github.com/grafana/agent/internal/converter/internal/common" "github.com/grafana/agent/internal/converter/internal/prometheusconvert/build" - rivertypes "github.com/grafana/alloy/syntax/alloytypes" + "github.com/grafana/alloy/syntax/alloytypes" prom_marathon "github.com/prometheus/prometheus/discovery/marathon" ) @@ -31,7 +31,7 @@ func toDiscoveryMarathon(sdConfig *prom_marathon.SDConfig) *marathon.Arguments { return &marathon.Arguments{ Servers: sdConfig.Servers, - AuthToken: rivertypes.Secret(sdConfig.AuthToken), + AuthToken: alloytypes.Secret(sdConfig.AuthToken), AuthTokenFile: sdConfig.AuthTokenFile, RefreshInterval: time.Duration(sdConfig.RefreshInterval), HTTPClientConfig: *common.ToHttpClientConfig(&sdConfig.HTTPClientConfig), diff --git a/internal/converter/internal/prometheusconvert/component/openstack.go b/internal/converter/internal/prometheusconvert/component/openstack.go index a0a7455cf8..0ae31971ea 100644 --- a/internal/converter/internal/prometheusconvert/component/openstack.go +++ b/internal/converter/internal/prometheusconvert/component/openstack.go @@ -8,7 +8,7 @@ import ( "github.com/grafana/agent/internal/converter/diag" "github.com/grafana/agent/internal/converter/internal/common" "github.com/grafana/agent/internal/converter/internal/prometheusconvert/build" - rivertypes "github.com/grafana/alloy/syntax/alloytypes" + "github.com/grafana/alloy/syntax/alloytypes" prom_openstack "github.com/prometheus/prometheus/discovery/openstack" ) @@ -33,14 +33,14 @@ func toDiscoveryOpenstack(sdConfig *prom_openstack.SDConfig) *openstack.Argument IdentityEndpoint: sdConfig.IdentityEndpoint, Username: sdConfig.Username, UserID: sdConfig.UserID, - Password: rivertypes.Secret(sdConfig.Password), + Password: alloytypes.Secret(sdConfig.Password), ProjectName: sdConfig.ProjectName, ProjectID: sdConfig.ProjectID, DomainName: sdConfig.DomainName, DomainID: sdConfig.DomainID, ApplicationCredentialName: sdConfig.ApplicationCredentialName, ApplicationCredentialID: sdConfig.ApplicationCredentialID, - ApplicationCredentialSecret: rivertypes.Secret(sdConfig.ApplicationCredentialSecret), + ApplicationCredentialSecret: alloytypes.Secret(sdConfig.ApplicationCredentialSecret), Role: string(sdConfig.Role), Region: sdConfig.Region, RefreshInterval: time.Duration(sdConfig.RefreshInterval), diff --git a/internal/converter/internal/prometheusconvert/component/ovhcloud.go 
b/internal/converter/internal/prometheusconvert/component/ovhcloud.go index c7f981d492..cef30cd058 100644 --- a/internal/converter/internal/prometheusconvert/component/ovhcloud.go +++ b/internal/converter/internal/prometheusconvert/component/ovhcloud.go @@ -8,7 +8,7 @@ import ( "github.com/grafana/agent/internal/converter/diag" "github.com/grafana/agent/internal/converter/internal/common" "github.com/grafana/agent/internal/converter/internal/prometheusconvert/build" - rivertypes "github.com/grafana/alloy/syntax/alloytypes" + "github.com/grafana/alloy/syntax/alloytypes" prom_discovery "github.com/prometheus/prometheus/discovery/ovhcloud" ) @@ -32,8 +32,8 @@ func toDiscoveryOvhcloud(sdConfig *prom_discovery.SDConfig) *ovhcloud.Arguments return &ovhcloud.Arguments{ Endpoint: sdConfig.Endpoint, ApplicationKey: sdConfig.ApplicationKey, - ApplicationSecret: rivertypes.Secret(sdConfig.ApplicationSecret), - ConsumerKey: rivertypes.Secret(sdConfig.ConsumerKey), + ApplicationSecret: alloytypes.Secret(sdConfig.ApplicationSecret), + ConsumerKey: alloytypes.Secret(sdConfig.ConsumerKey), RefreshInterval: time.Duration(sdConfig.RefreshInterval), Service: sdConfig.Service, } diff --git a/internal/converter/internal/prometheusconvert/component/remote_write.go b/internal/converter/internal/prometheusconvert/component/remote_write.go index ee1d2f7027..e2f44ad947 100644 --- a/internal/converter/internal/prometheusconvert/component/remote_write.go +++ b/internal/converter/internal/prometheusconvert/component/remote_write.go @@ -9,7 +9,7 @@ import ( "github.com/grafana/agent/internal/converter/diag" "github.com/grafana/agent/internal/converter/internal/common" "github.com/grafana/agent/internal/converter/internal/prometheusconvert/build" - rivertypes "github.com/grafana/alloy/syntax/alloytypes" + "github.com/grafana/alloy/syntax/alloytypes" "github.com/prometheus/common/sigv4" prom_config "github.com/prometheus/prometheus/config" "github.com/prometheus/prometheus/storage/remote/azuread" @@ -117,7 +117,7 @@ func toSigV4(sigv4Config *sigv4.SigV4Config) *remotewrite.SigV4Config { return &remotewrite.SigV4Config{ Region: sigv4Config.Region, AccessKey: sigv4Config.AccessKey, - SecretKey: rivertypes.Secret(sigv4Config.SecretKey), + SecretKey: alloytypes.Secret(sigv4Config.SecretKey), Profile: sigv4Config.Profile, RoleARN: sigv4Config.RoleARN, } diff --git a/internal/converter/internal/prometheusconvert/component/scaleway.go b/internal/converter/internal/prometheusconvert/component/scaleway.go index eb2c1cbe6e..aef4bc0187 100644 --- a/internal/converter/internal/prometheusconvert/component/scaleway.go +++ b/internal/converter/internal/prometheusconvert/component/scaleway.go @@ -8,7 +8,7 @@ import ( "github.com/grafana/agent/internal/converter/diag" "github.com/grafana/agent/internal/converter/internal/common" "github.com/grafana/agent/internal/converter/internal/prometheusconvert/build" - rivertypes "github.com/grafana/alloy/syntax/alloytypes" + "github.com/grafana/alloy/syntax/alloytypes" prom_scaleway "github.com/prometheus/prometheus/discovery/scaleway" ) @@ -35,7 +35,7 @@ func toDiscoveryScaleway(sdConfig *prom_scaleway.SDConfig) *scaleway.Arguments { APIURL: sdConfig.APIURL, Zone: sdConfig.Zone, AccessKey: sdConfig.AccessKey, - SecretKey: rivertypes.Secret(sdConfig.SecretKey), + SecretKey: alloytypes.Secret(sdConfig.SecretKey), SecretKeyFile: sdConfig.SecretKeyFile, NameFilter: sdConfig.NameFilter, TagsFilter: sdConfig.TagsFilter, diff --git 
a/internal/converter/internal/promtailconvert/internal/build/cloudflare.go b/internal/converter/internal/promtailconvert/internal/build/cloudflare.go index fe89162f14..00a6cd0f54 100644 --- a/internal/converter/internal/promtailconvert/internal/build/cloudflare.go +++ b/internal/converter/internal/promtailconvert/internal/build/cloudflare.go @@ -6,7 +6,7 @@ import ( "github.com/grafana/agent/internal/component/common/loki" "github.com/grafana/agent/internal/component/loki/source/cloudflare" "github.com/grafana/agent/internal/converter/internal/common" - rivertypes "github.com/grafana/alloy/syntax/alloytypes" + "github.com/grafana/alloy/syntax/alloytypes" ) func (s *ScrapeConfigBuilder) AppendCloudFlareConfig() { @@ -15,7 +15,7 @@ func (s *ScrapeConfigBuilder) AppendCloudFlareConfig() { } args := cloudflare.Arguments{ - APIToken: rivertypes.Secret(s.cfg.CloudflareConfig.APIToken), + APIToken: alloytypes.Secret(s.cfg.CloudflareConfig.APIToken), ZoneID: s.cfg.CloudflareConfig.ZoneID, Labels: convertPromLabels(s.cfg.CloudflareConfig.Labels), Workers: s.cfg.CloudflareConfig.Workers, @@ -26,7 +26,7 @@ func (s *ScrapeConfigBuilder) AppendCloudFlareConfig() { switch conv := val.(type) { case []loki.LogsReceiver: return common.CustomTokenizer{Expr: s.getOrNewLokiRelabel()} - case rivertypes.Secret: + case alloytypes.Secret: return string(conv) default: return val diff --git a/internal/converter/internal/promtailconvert/internal/build/consul_agent.go b/internal/converter/internal/promtailconvert/internal/build/consul_agent.go index d346fe3993..2b7d29a1e1 100644 --- a/internal/converter/internal/promtailconvert/internal/build/consul_agent.go +++ b/internal/converter/internal/promtailconvert/internal/build/consul_agent.go @@ -6,7 +6,7 @@ import ( "github.com/grafana/agent/internal/component/discovery/consulagent" "github.com/grafana/agent/internal/converter/diag" "github.com/grafana/agent/internal/converter/internal/common" - rivertypes "github.com/grafana/alloy/syntax/alloytypes" + "github.com/grafana/alloy/syntax/alloytypes" promtail_consulagent "github.com/grafana/loki/clients/pkg/promtail/discovery/consulagent" ) @@ -43,12 +43,12 @@ func toDiscoveryAgentConsul(sdConfig *promtail_consulagent.SDConfig, diags *diag return &consulagent.Arguments{ RefreshInterval: time.Duration(sdConfig.RefreshInterval), Server: sdConfig.Server, - Token: rivertypes.Secret(sdConfig.Token), + Token: alloytypes.Secret(sdConfig.Token), Datacenter: sdConfig.Datacenter, TagSeparator: sdConfig.TagSeparator, Scheme: sdConfig.Scheme, Username: sdConfig.Username, - Password: rivertypes.Secret(sdConfig.Password), + Password: alloytypes.Secret(sdConfig.Password), Services: sdConfig.Services, ServiceTags: sdConfig.ServiceTags, TLSConfig: *common.ToTLSConfig(&sdConfig.TLSConfig), diff --git a/internal/converter/internal/promtailconvert/internal/build/kafka.go b/internal/converter/internal/promtailconvert/internal/build/kafka.go index 296889d313..d00a6c98c4 100644 --- a/internal/converter/internal/promtailconvert/internal/build/kafka.go +++ b/internal/converter/internal/promtailconvert/internal/build/kafka.go @@ -4,7 +4,7 @@ import ( "github.com/grafana/agent/internal/component/common/relabel" "github.com/grafana/agent/internal/component/loki/source/kafka" "github.com/grafana/agent/internal/converter/internal/common" - rivertypes "github.com/grafana/alloy/syntax/alloytypes" + "github.com/grafana/alloy/syntax/alloytypes" "github.com/grafana/loki/clients/pkg/promtail/scrapeconfig" ) @@ -29,7 +29,7 @@ func (s *ScrapeConfigBuilder) 
AppendKafka() { switch value := val.(type) { case relabel.Rules: return common.CustomTokenizer{Expr: s.getOrNewDiscoveryRelabelRules()} - case rivertypes.Secret: + case alloytypes.Secret: return string(value) default: return val @@ -51,7 +51,7 @@ func convertKafkaAuthConfig(kafkaCfg *scrapeconfig.KafkaTargetConfig) kafka.Kafk SASLConfig: kafka.KafkaSASLConfig{ Mechanism: string(kafkaCfg.Authentication.SASLConfig.Mechanism), User: kafkaCfg.Authentication.SASLConfig.User, - Password: rivertypes.Secret(kafkaCfg.Authentication.SASLConfig.Password.String()), + Password: alloytypes.Secret(kafkaCfg.Authentication.SASLConfig.Password.String()), UseTLS: kafkaCfg.Authentication.SASLConfig.UseTLS, TLSConfig: *common.ToTLSConfig(&kafkaCfg.Authentication.SASLConfig.TLSConfig), }, diff --git a/internal/converter/internal/staticconvert/internal/build/app_agent_receiver.go b/internal/converter/internal/staticconvert/internal/build/app_agent_receiver.go index 640a37945f..f3ebf96185 100644 --- a/internal/converter/internal/staticconvert/internal/build/app_agent_receiver.go +++ b/internal/converter/internal/staticconvert/internal/build/app_agent_receiver.go @@ -10,7 +10,7 @@ import ( "github.com/grafana/agent/internal/converter/diag" "github.com/grafana/agent/internal/converter/internal/common" app_agent_receiver_v2 "github.com/grafana/agent/internal/static/integrations/v2/app_agent_receiver" - rivertypes "github.com/grafana/alloy/syntax/alloytypes" + "github.com/grafana/alloy/syntax/alloytypes" "github.com/grafana/alloy/syntax/scanner" ) @@ -51,7 +51,7 @@ func toAppAgentReceiverV2(config *app_agent_receiver_v2.Config) *receiver.Argume Host: config.Server.Host, Port: config.Server.Port, CORSAllowedOrigins: config.Server.CORSAllowedOrigins, - APIKey: rivertypes.Secret(config.Server.APIKey), + APIKey: alloytypes.Secret(config.Server.APIKey), MaxAllowedPayloadSize: units.Base2Bytes(config.Server.MaxAllowedPayloadSize), RateLimiting: receiver.RateLimitingArguments{ Enabled: config.Server.RateLimiting.Enabled, diff --git a/internal/converter/internal/staticconvert/internal/build/blackbox_exporter.go b/internal/converter/internal/staticconvert/internal/build/blackbox_exporter.go index f7ffc9c364..2de0c0de82 100644 --- a/internal/converter/internal/staticconvert/internal/build/blackbox_exporter.go +++ b/internal/converter/internal/staticconvert/internal/build/blackbox_exporter.go @@ -7,7 +7,7 @@ import ( "github.com/grafana/agent/internal/component/prometheus/exporter/blackbox" "github.com/grafana/agent/internal/static/integrations/blackbox_exporter" blackbox_exporter_v2 "github.com/grafana/agent/internal/static/integrations/v2/blackbox_exporter" - rivertypes "github.com/grafana/alloy/syntax/alloytypes" + "github.com/grafana/alloy/syntax/alloytypes" ) func (b *ConfigBuilder) appendBlackboxExporter(config *blackbox_exporter.Config) discovery.Exports { @@ -18,7 +18,7 @@ func (b *ConfigBuilder) appendBlackboxExporter(config *blackbox_exporter.Config) func toBlackboxExporter(config *blackbox_exporter.Config) *blackbox.Arguments { return &blackbox.Arguments{ ConfigFile: config.BlackboxConfigFile, - Config: rivertypes.OptionalSecret{ + Config: alloytypes.OptionalSecret{ IsSecret: false, Value: string(config.BlackboxConfig), }, @@ -35,7 +35,7 @@ func (b *ConfigBuilder) appendBlackboxExporterV2(config *blackbox_exporter_v2.Co func toBlackboxExporterV2(config *blackbox_exporter_v2.Config) *blackbox.Arguments { return &blackbox.Arguments{ ConfigFile: config.BlackboxConfigFile, - Config: rivertypes.OptionalSecret{ + Config: 
alloytypes.OptionalSecret{ IsSecret: false, Value: string(config.BlackboxConfig), }, diff --git a/internal/converter/internal/staticconvert/internal/build/elasticsearch_exporter.go b/internal/converter/internal/staticconvert/internal/build/elasticsearch_exporter.go index c2225f7a4f..4fa85919ad 100644 --- a/internal/converter/internal/staticconvert/internal/build/elasticsearch_exporter.go +++ b/internal/converter/internal/staticconvert/internal/build/elasticsearch_exporter.go @@ -5,7 +5,7 @@ import ( "github.com/grafana/agent/internal/component/discovery" "github.com/grafana/agent/internal/component/prometheus/exporter/elasticsearch" "github.com/grafana/agent/internal/static/integrations/elasticsearch_exporter" - rivertypes "github.com/grafana/alloy/syntax/alloytypes" + "github.com/grafana/alloy/syntax/alloytypes" ) func (b *ConfigBuilder) appendElasticsearchExporter(config *elasticsearch_exporter.Config, instanceKey *string) discovery.Exports { @@ -37,7 +37,7 @@ func toElasticsearchExporter(config *elasticsearch_exporter.Config) *elasticsear if config.BasicAuth != nil { arg.BasicAuth = &commonCfg.BasicAuth{ Username: config.BasicAuth.Username, - Password: rivertypes.Secret(config.BasicAuth.Password), + Password: alloytypes.Secret(config.BasicAuth.Password), PasswordFile: config.BasicAuth.PasswordFile, } } diff --git a/internal/converter/internal/staticconvert/internal/build/github_exporter.go b/internal/converter/internal/staticconvert/internal/build/github_exporter.go index 17dc312a53..860f0bde0d 100644 --- a/internal/converter/internal/staticconvert/internal/build/github_exporter.go +++ b/internal/converter/internal/staticconvert/internal/build/github_exporter.go @@ -4,7 +4,7 @@ import ( "github.com/grafana/agent/internal/component/discovery" "github.com/grafana/agent/internal/component/prometheus/exporter/github" "github.com/grafana/agent/internal/static/integrations/github_exporter" - rivertypes "github.com/grafana/alloy/syntax/alloytypes" + "github.com/grafana/alloy/syntax/alloytypes" ) func (b *ConfigBuilder) appendGithubExporter(config *github_exporter.Config, instanceKey *string) discovery.Exports { @@ -18,7 +18,7 @@ func toGithubExporter(config *github_exporter.Config) *github.Arguments { Repositories: config.Repositories, Organizations: config.Organizations, Users: config.Users, - APIToken: rivertypes.Secret(config.APIToken), + APIToken: alloytypes.Secret(config.APIToken), APITokenFile: config.APITokenFile, } } diff --git a/internal/converter/internal/staticconvert/internal/build/kafka_exporter.go b/internal/converter/internal/staticconvert/internal/build/kafka_exporter.go index c520d44d85..4842e936b7 100644 --- a/internal/converter/internal/staticconvert/internal/build/kafka_exporter.go +++ b/internal/converter/internal/staticconvert/internal/build/kafka_exporter.go @@ -4,7 +4,7 @@ import ( "github.com/grafana/agent/internal/component/discovery" "github.com/grafana/agent/internal/component/prometheus/exporter/kafka" "github.com/grafana/agent/internal/static/integrations/kafka_exporter" - rivertypes "github.com/grafana/alloy/syntax/alloytypes" + "github.com/grafana/alloy/syntax/alloytypes" ) func (b *ConfigBuilder) appendKafkaExporter(config *kafka_exporter.Config, instanceKey *string) discovery.Exports { @@ -18,7 +18,7 @@ func toKafkaExporter(config *kafka_exporter.Config) *kafka.Arguments { UseSASL: config.UseSASL, UseSASLHandshake: config.UseSASLHandshake, SASLUsername: config.SASLUsername, - SASLPassword: rivertypes.Secret(config.SASLPassword), + SASLPassword: 
alloytypes.Secret(config.SASLPassword), SASLMechanism: config.SASLMechanism, UseTLS: config.UseTLS, CAFile: config.CAFile, diff --git a/internal/converter/internal/staticconvert/internal/build/mongodb_exporter.go b/internal/converter/internal/staticconvert/internal/build/mongodb_exporter.go index 9dc15d45ce..0dfb2328b8 100644 --- a/internal/converter/internal/staticconvert/internal/build/mongodb_exporter.go +++ b/internal/converter/internal/staticconvert/internal/build/mongodb_exporter.go @@ -4,7 +4,7 @@ import ( "github.com/grafana/agent/internal/component/discovery" "github.com/grafana/agent/internal/component/prometheus/exporter/mongodb" "github.com/grafana/agent/internal/static/integrations/mongodb_exporter" - rivertypes "github.com/grafana/alloy/syntax/alloytypes" + "github.com/grafana/alloy/syntax/alloytypes" ) func (b *ConfigBuilder) appendMongodbExporter(config *mongodb_exporter.Config, instanceKey *string) discovery.Exports { @@ -14,7 +14,7 @@ func (b *ConfigBuilder) appendMongodbExporter(config *mongodb_exporter.Config, i func toMongodbExporter(config *mongodb_exporter.Config) *mongodb.Arguments { return &mongodb.Arguments{ - URI: rivertypes.Secret(config.URI), + URI: alloytypes.Secret(config.URI), DirectConnect: config.DirectConnect, DiscoveringMode: config.DiscoveringMode, TLSBasicAuthConfigPath: config.TLSBasicAuthConfigPath, diff --git a/internal/converter/internal/staticconvert/internal/build/mssql_exporter.go b/internal/converter/internal/staticconvert/internal/build/mssql_exporter.go index 9af089a6c8..6f5b4beb85 100644 --- a/internal/converter/internal/staticconvert/internal/build/mssql_exporter.go +++ b/internal/converter/internal/staticconvert/internal/build/mssql_exporter.go @@ -4,7 +4,7 @@ import ( "github.com/grafana/agent/internal/component/discovery" "github.com/grafana/agent/internal/component/prometheus/exporter/mssql" mssql_exporter "github.com/grafana/agent/internal/static/integrations/mssql" - rivertypes "github.com/grafana/alloy/syntax/alloytypes" + "github.com/grafana/alloy/syntax/alloytypes" ) func (b *ConfigBuilder) appendMssqlExporter(config *mssql_exporter.Config, instanceKey *string) discovery.Exports { @@ -14,7 +14,7 @@ func (b *ConfigBuilder) appendMssqlExporter(config *mssql_exporter.Config, insta func toMssqlExporter(config *mssql_exporter.Config) *mssql.Arguments { return &mssql.Arguments{ - ConnectionString: rivertypes.Secret(config.ConnectionString), + ConnectionString: alloytypes.Secret(config.ConnectionString), MaxIdleConnections: config.MaxIdleConnections, MaxOpenConnections: config.MaxOpenConnections, Timeout: config.Timeout, diff --git a/internal/converter/internal/staticconvert/internal/build/mysqld_exporter.go b/internal/converter/internal/staticconvert/internal/build/mysqld_exporter.go index 9f688fe757..91cfee1ae0 100644 --- a/internal/converter/internal/staticconvert/internal/build/mysqld_exporter.go +++ b/internal/converter/internal/staticconvert/internal/build/mysqld_exporter.go @@ -4,7 +4,7 @@ import ( "github.com/grafana/agent/internal/component/discovery" "github.com/grafana/agent/internal/component/prometheus/exporter/mysql" "github.com/grafana/agent/internal/static/integrations/mysqld_exporter" - rivertypes "github.com/grafana/alloy/syntax/alloytypes" + "github.com/grafana/alloy/syntax/alloytypes" ) func (b *ConfigBuilder) appendMysqldExporter(config *mysqld_exporter.Config, instanceKey *string) discovery.Exports { @@ -14,7 +14,7 @@ func (b *ConfigBuilder) appendMysqldExporter(config *mysqld_exporter.Config, ins func 
toMysqldExporter(config *mysqld_exporter.Config) *mysql.Arguments { return &mysql.Arguments{ - DataSourceName: rivertypes.Secret(config.DataSourceName), + DataSourceName: alloytypes.Secret(config.DataSourceName), EnableCollectors: config.EnableCollectors, DisableCollectors: config.DisableCollectors, SetCollectors: config.SetCollectors, diff --git a/internal/converter/internal/staticconvert/internal/build/oracledb_exporter.go b/internal/converter/internal/staticconvert/internal/build/oracledb_exporter.go index 05a4a35aa6..b1bd031ce1 100644 --- a/internal/converter/internal/staticconvert/internal/build/oracledb_exporter.go +++ b/internal/converter/internal/staticconvert/internal/build/oracledb_exporter.go @@ -4,7 +4,7 @@ import ( "github.com/grafana/agent/internal/component/discovery" "github.com/grafana/agent/internal/component/prometheus/exporter/oracledb" "github.com/grafana/agent/internal/static/integrations/oracledb_exporter" - rivertypes "github.com/grafana/alloy/syntax/alloytypes" + "github.com/grafana/alloy/syntax/alloytypes" ) func (b *ConfigBuilder) appendOracledbExporter(config *oracledb_exporter.Config, instanceKey *string) discovery.Exports { @@ -14,7 +14,7 @@ func (b *ConfigBuilder) appendOracledbExporter(config *oracledb_exporter.Config, func toOracledbExporter(config *oracledb_exporter.Config) *oracledb.Arguments { return &oracledb.Arguments{ - ConnectionString: rivertypes.Secret(config.ConnectionString), + ConnectionString: alloytypes.Secret(config.ConnectionString), MaxIdleConns: config.MaxIdleConns, MaxOpenConns: config.MaxOpenConns, QueryTimeout: config.QueryTimeout, diff --git a/internal/converter/internal/staticconvert/internal/build/postgres_exporter.go b/internal/converter/internal/staticconvert/internal/build/postgres_exporter.go index c5566071bf..13bc519b4a 100644 --- a/internal/converter/internal/staticconvert/internal/build/postgres_exporter.go +++ b/internal/converter/internal/staticconvert/internal/build/postgres_exporter.go @@ -4,7 +4,7 @@ import ( "github.com/grafana/agent/internal/component/discovery" "github.com/grafana/agent/internal/component/prometheus/exporter/postgres" "github.com/grafana/agent/internal/static/integrations/postgres_exporter" - rivertypes "github.com/grafana/alloy/syntax/alloytypes" + "github.com/grafana/alloy/syntax/alloytypes" ) func (b *ConfigBuilder) appendPostgresExporter(config *postgres_exporter.Config, instanceKey *string) discovery.Exports { @@ -13,9 +13,9 @@ func (b *ConfigBuilder) appendPostgresExporter(config *postgres_exporter.Config, } func toPostgresExporter(config *postgres_exporter.Config) *postgres.Arguments { - dataSourceNames := make([]rivertypes.Secret, 0) + dataSourceNames := make([]alloytypes.Secret, 0) for _, dsn := range config.DataSourceNames { - dataSourceNames = append(dataSourceNames, rivertypes.Secret(dsn)) + dataSourceNames = append(dataSourceNames, alloytypes.Secret(dsn)) } return &postgres.Arguments{ diff --git a/internal/converter/internal/staticconvert/internal/build/redis_exporter.go b/internal/converter/internal/staticconvert/internal/build/redis_exporter.go index 4e0bdbd38b..4d77d009c6 100644 --- a/internal/converter/internal/staticconvert/internal/build/redis_exporter.go +++ b/internal/converter/internal/staticconvert/internal/build/redis_exporter.go @@ -4,7 +4,7 @@ import ( "github.com/grafana/agent/internal/component/discovery" "github.com/grafana/agent/internal/component/prometheus/exporter/redis" "github.com/grafana/agent/internal/static/integrations/redis_exporter" - rivertypes 
"github.com/grafana/alloy/syntax/alloytypes" + "github.com/grafana/alloy/syntax/alloytypes" ) func (b *ConfigBuilder) appendRedisExporter(config *redis_exporter.Config, instanceKey *string) discovery.Exports { @@ -17,7 +17,7 @@ func toRedisExporter(config *redis_exporter.Config) *redis.Arguments { IncludeExporterMetrics: config.IncludeExporterMetrics, RedisAddr: config.RedisAddr, RedisUser: config.RedisUser, - RedisPassword: rivertypes.Secret(config.RedisPassword), + RedisPassword: alloytypes.Secret(config.RedisPassword), RedisPasswordFile: config.RedisPasswordFile, RedisPasswordMapFile: config.RedisPasswordMapFile, Namespace: config.Namespace, diff --git a/internal/converter/internal/staticconvert/internal/build/snmp_exporter.go b/internal/converter/internal/staticconvert/internal/build/snmp_exporter.go index 7476bf69ae..1f58d1097e 100644 --- a/internal/converter/internal/staticconvert/internal/build/snmp_exporter.go +++ b/internal/converter/internal/staticconvert/internal/build/snmp_exporter.go @@ -6,7 +6,7 @@ import ( "github.com/grafana/agent/internal/converter/internal/common" "github.com/grafana/agent/internal/static/integrations/snmp_exporter" snmp_exporter_v2 "github.com/grafana/agent/internal/static/integrations/v2/snmp_exporter" - rivertypes "github.com/grafana/alloy/syntax/alloytypes" + "github.com/grafana/alloy/syntax/alloytypes" snmp_config "github.com/prometheus/snmp_exporter/config" ) @@ -47,7 +47,7 @@ func toSnmpExporter(config *snmp_exporter.Config) *snmp.Arguments { return &snmp.Arguments{ ConfigFile: config.SnmpConfigFile, - Config: rivertypes.OptionalSecret{}, + Config: alloytypes.OptionalSecret{}, Targets: targets, WalkParams: walkParams, ConfigStruct: snmp_config.Config{ @@ -95,7 +95,7 @@ func toSnmpExporterV2(config *snmp_exporter_v2.Config) *snmp.Arguments { return &snmp.Arguments{ ConfigFile: config.SnmpConfigFile, - Config: rivertypes.OptionalSecret{}, + Config: alloytypes.OptionalSecret{}, Targets: targets, WalkParams: walkParams, ConfigStruct: snmp_config.Config{ diff --git a/internal/converter/internal/staticconvert/internal/build/snowflake_exporter.go b/internal/converter/internal/staticconvert/internal/build/snowflake_exporter.go index a59ee2992a..16eff2c7d7 100644 --- a/internal/converter/internal/staticconvert/internal/build/snowflake_exporter.go +++ b/internal/converter/internal/staticconvert/internal/build/snowflake_exporter.go @@ -4,7 +4,7 @@ import ( "github.com/grafana/agent/internal/component/discovery" "github.com/grafana/agent/internal/component/prometheus/exporter/snowflake" "github.com/grafana/agent/internal/static/integrations/snowflake_exporter" - rivertypes "github.com/grafana/alloy/syntax/alloytypes" + "github.com/grafana/alloy/syntax/alloytypes" ) func (b *ConfigBuilder) appendSnowflakeExporter(config *snowflake_exporter.Config, instanceKey *string) discovery.Exports { @@ -16,7 +16,7 @@ func toSnowflakeExporter(config *snowflake_exporter.Config) *snowflake.Arguments return &snowflake.Arguments{ AccountName: config.AccountName, Username: config.Username, - Password: rivertypes.Secret(config.Password), + Password: alloytypes.Secret(config.Password), Role: config.Role, Warehouse: config.Warehouse, } diff --git a/internal/converter/internal/staticconvert/internal/build/squid_exporter.go b/internal/converter/internal/staticconvert/internal/build/squid_exporter.go index 37573399bc..5e485b4972 100644 --- a/internal/converter/internal/staticconvert/internal/build/squid_exporter.go +++ 
b/internal/converter/internal/staticconvert/internal/build/squid_exporter.go @@ -4,7 +4,7 @@ import ( "github.com/grafana/agent/internal/component/discovery" "github.com/grafana/agent/internal/component/prometheus/exporter/squid" "github.com/grafana/agent/internal/static/integrations/squid_exporter" - rivertypes "github.com/grafana/alloy/syntax/alloytypes" + "github.com/grafana/alloy/syntax/alloytypes" ) func (b *ConfigBuilder) appendSquidExporter(config *squid_exporter.Config, instanceKey *string) discovery.Exports { @@ -16,6 +16,6 @@ func toSquidExporter(config *squid_exporter.Config) *squid.Arguments { return &squid.Arguments{ SquidAddr: config.Address, SquidUser: config.Username, - SquidPassword: rivertypes.Secret(config.Password), + SquidPassword: alloytypes.Secret(config.Password), } } diff --git a/internal/flow/internal/importsource/import_string.go b/internal/flow/internal/importsource/import_string.go index 58debf4bb0..38b52297f7 100644 --- a/internal/flow/internal/importsource/import_string.go +++ b/internal/flow/internal/importsource/import_string.go @@ -6,7 +6,7 @@ import ( "reflect" "github.com/grafana/agent/internal/component" - rivertypes "github.com/grafana/alloy/syntax/alloytypes" + "github.com/grafana/alloy/syntax/alloytypes" "github.com/grafana/alloy/syntax/vm" ) @@ -27,7 +27,7 @@ func NewImportString(eval *vm.Evaluator, onContentChange func(map[string]string) } type importStringConfigBlock struct { - Content rivertypes.OptionalSecret `river:"content,attr"` + Content alloytypes.OptionalSecret `river:"content,attr"` } func (im *ImportString) Evaluate(scope *vm.Scope) error { diff --git a/internal/flow/internal/testcomponents/module/file/file.go b/internal/flow/internal/testcomponents/module/file/file.go index f5ae8c212f..c211d56519 100644 --- a/internal/flow/internal/testcomponents/module/file/file.go +++ b/internal/flow/internal/testcomponents/module/file/file.go @@ -10,7 +10,7 @@ import ( "github.com/grafana/agent/internal/component/local/file" "github.com/grafana/agent/internal/featuregate" "github.com/grafana/agent/internal/flow/internal/testcomponents/module" - rivertypes "github.com/grafana/alloy/syntax/alloytypes" + "github.com/grafana/alloy/syntax/alloytypes" ) func init() { @@ -46,7 +46,7 @@ type Component struct { mut sync.RWMutex args Arguments - content rivertypes.OptionalSecret + content alloytypes.OptionalSecret managedLocalFile *file.Component inUpdate atomic.Bool @@ -169,14 +169,14 @@ func (c *Component) setArgs(args Arguments) { } // getContent is a goroutine safe way to get content -func (c *Component) getContent() rivertypes.OptionalSecret { +func (c *Component) getContent() alloytypes.OptionalSecret { c.mut.RLock() defer c.mut.RUnlock() return c.content } // setContent is a goroutine safe way to set content -func (c *Component) setContent(content rivertypes.OptionalSecret) { +func (c *Component) setContent(content alloytypes.OptionalSecret) { c.mut.Lock() c.content = content c.mut.Unlock() diff --git a/internal/flow/internal/testcomponents/module/http/http.go b/internal/flow/internal/testcomponents/module/http/http.go index b80a389eb8..5bfc76b79a 100644 --- a/internal/flow/internal/testcomponents/module/http/http.go +++ b/internal/flow/internal/testcomponents/module/http/http.go @@ -10,7 +10,7 @@ import ( remote_http "github.com/grafana/agent/internal/component/remote/http" "github.com/grafana/agent/internal/featuregate" "github.com/grafana/agent/internal/flow/internal/testcomponents/module" - rivertypes "github.com/grafana/alloy/syntax/alloytypes" + 
"github.com/grafana/alloy/syntax/alloytypes" ) func init() { @@ -45,7 +45,7 @@ type Component struct { mut sync.RWMutex args Arguments - content rivertypes.OptionalSecret + content alloytypes.OptionalSecret managedRemoteHTTP *remote_http.Component inUpdate atomic.Bool @@ -167,14 +167,14 @@ func (c *Component) setArgs(args Arguments) { } // getContent is a goroutine safe way to get content -func (c *Component) getContent() rivertypes.OptionalSecret { +func (c *Component) getContent() alloytypes.OptionalSecret { c.mut.RLock() defer c.mut.RUnlock() return c.content } // setContent is a goroutine safe way to set content -func (c *Component) setContent(content rivertypes.OptionalSecret) { +func (c *Component) setContent(content alloytypes.OptionalSecret) { c.mut.Lock() c.content = content c.mut.Unlock() diff --git a/internal/flow/internal/testcomponents/module/string/string.go b/internal/flow/internal/testcomponents/module/string/string.go index df8f3e23d9..65c25dd9dc 100644 --- a/internal/flow/internal/testcomponents/module/string/string.go +++ b/internal/flow/internal/testcomponents/module/string/string.go @@ -6,7 +6,7 @@ import ( "github.com/grafana/agent/internal/component" "github.com/grafana/agent/internal/featuregate" "github.com/grafana/agent/internal/flow/internal/testcomponents/module" - rivertypes "github.com/grafana/alloy/syntax/alloytypes" + "github.com/grafana/alloy/syntax/alloytypes" ) func init() { @@ -26,7 +26,7 @@ func init() { // component. type Arguments struct { // Content to load for the module. - Content rivertypes.OptionalSecret `river:"content,attr"` + Content alloytypes.OptionalSecret `river:"content,attr"` // Arguments to pass into the module. Arguments map[string]any `river:"arguments,block,optional"` diff --git a/internal/service/http/tls.go b/internal/service/http/tls.go index 40f6332b1e..bf428e4e6b 100644 --- a/internal/service/http/tls.go +++ b/internal/service/http/tls.go @@ -9,7 +9,7 @@ import ( "time" river "github.com/grafana/alloy/syntax" - rivertypes "github.com/grafana/alloy/syntax/alloytypes" + "github.com/grafana/alloy/syntax/alloytypes" "github.com/grafana/regexp" ) @@ -17,7 +17,7 @@ import ( type TLSArguments struct { Cert string `river:"cert_pem,attr,optional"` CertFile string `river:"cert_file,attr,optional"` - Key rivertypes.Secret `river:"key_pem,attr,optional"` + Key alloytypes.Secret `river:"key_pem,attr,optional"` KeyFile string `river:"key_file,attr,optional"` ClientCA string `river:"client_ca_pem,attr,optional"` ClientCAFile string `river:"client_ca_file,attr,optional"` diff --git a/internal/vcs/auth.go b/internal/vcs/auth.go index 66da22dcad..68b1646e64 100644 --- a/internal/vcs/auth.go +++ b/internal/vcs/auth.go @@ -6,7 +6,7 @@ import ( "github.com/go-git/go-git/v5/plumbing/transport" "github.com/go-git/go-git/v5/plumbing/transport/http" "github.com/go-git/go-git/v5/plumbing/transport/ssh" - rivertypes "github.com/grafana/alloy/syntax/alloytypes" + "github.com/grafana/alloy/syntax/alloytypes" ) type GitAuthConfig struct { @@ -34,7 +34,7 @@ func (h *GitAuthConfig) Convert() transport.AuthMethod { type BasicAuth struct { Username string `river:"username,attr"` - Password rivertypes.Secret `river:"password,attr"` + Password alloytypes.Secret `river:"password,attr"` } // Convert converts our type to the native prometheus type @@ -50,9 +50,9 @@ func (b *BasicAuth) Convert() (t transport.AuthMethod) { type SSHKey struct { Username string `river:"username,attr"` - Key rivertypes.Secret `river:"key,attr,optional"` + Key alloytypes.Secret 
`river:"key,attr,optional"` Keyfile string `river:"key_file,attr,optional"` - Passphrase rivertypes.Secret `river:"passphrase,attr,optional"` + Passphrase alloytypes.Secret `river:"passphrase,attr,optional"` } // Convert converts our type to the native prometheus type From b8fb94ec07dd5256f15e3dfad806c445c6c19fde Mon Sep 17 00:00:00 2001 From: Robert Fratto Date: Mon, 25 Mar 2024 14:26:38 -0400 Subject: [PATCH 043/136] all: remove river import alias in favor of syntax --- internal/component/all/all_test.go | 14 ++--- .../component/common/config/types_test.go | 22 ++++---- .../common/kubernetes/kubernetes_test.go | 8 +-- internal/component/common/net/config_test.go | 4 +- .../component/common/relabel/relabel_test.go | 4 +- .../component/discovery/azure/azure_test.go | 12 ++--- .../component/discovery/consul/consul_test.go | 6 +-- .../discovery/consulagent/consulagent_test.go | 8 +-- .../digitalocean/digitalocean_test.go | 10 ++-- internal/component/discovery/dns/dns_test.go | 6 +-- .../component/discovery/docker/docker_test.go | 6 +-- .../discovery/dockerswarm/dockerswarm_test.go | 4 +- .../component/discovery/eureka/eureka_test.go | 10 ++-- .../component/discovery/file/file_test.go | 6 +-- internal/component/discovery/gce/gce_test.go | 6 +-- .../discovery/hetzner/hetzner_test.go | 6 +-- .../component/discovery/http/http_test.go | 4 +- .../component/discovery/ionos/ionos_test.go | 4 +- .../discovery/kubelet/kubelet_test.go | 8 +-- .../discovery/kubernetes/kubernetes_test.go | 8 +-- .../component/discovery/kuma/kuma_test.go | 6 +-- .../component/discovery/linode/linode_test.go | 4 +- .../discovery/marathon/marathon_test.go | 8 +-- .../component/discovery/nerve/nerve_test.go | 6 +-- .../component/discovery/nomad/nomad_test.go | 4 +- .../discovery/openstack/openstack_test.go | 8 +-- .../discovery/ovhcloud/ovhcloud_test.go | 4 +- .../discovery/puppetdb/puppetdb_test.go | 6 +-- .../discovery/relabel/relabel_test.go | 8 +-- .../discovery/scaleway/scaleway_test.go | 6 +-- .../discovery/serverset/serverset_test.go | 4 +- .../component/discovery/triton/triton_test.go | 6 +-- .../component/discovery/uyuni/uyuni_test.go | 4 +- internal/component/faro/receiver/arguments.go | 4 +- .../component/loki/process/process_test.go | 14 ++--- .../process/stages/eventlogmessage_test.go | 4 +- .../loki/process/stages/extensions.go | 10 ++-- .../loki/process/stages/json_test.go | 4 +- .../loki/process/stages/pipeline_test.go | 6 +-- .../component/loki/relabel/relabel_test.go | 16 +++--- .../loki/rules/kubernetes/rules_test.go | 6 +-- .../internal/lokipush/push_api_server_test.go | 6 +-- .../azure_event_hubs/azure_event_hubs_test.go | 8 +-- .../loki/source/docker/docker_test.go | 6 +-- .../component/loki/source/kafka/kafka_test.go | 10 ++-- .../loki/source/kubernetes/kubernetes_test.go | 6 +-- .../loki/source/podlogs/podlogs_test.go | 6 +-- internal/component/loki/write/write_test.go | 16 +++--- .../mimir/rules/kubernetes/rules_test.go | 6 +-- internal/component/otelcol/auth/auth.go | 4 +- .../otelcol/auth/basic/basic_test.go | 4 +- .../otelcol/auth/bearer/bearer_test.go | 4 +- .../component/otelcol/auth/headers/headers.go | 12 ++--- .../otelcol/auth/headers/headers_test.go | 6 +-- .../otelcol/auth/oauth2/oauth2_test.go | 4 +- .../otelcol/auth/sigv4/sigv4_test.go | 4 +- .../component/otelcol/config_filter_test.go | 4 +- internal/component/otelcol/config_retry.go | 8 +-- .../otelcol/connector/host_info/host_info.go | 10 ++-- .../connector/host_info/host_info_test.go | 4 +- .../connector/servicegraph/servicegraph.go | 
10 ++-- .../servicegraph/servicegraph_test.go | 4 +- .../otelcol/connector/spanlogs/spanlogs.go | 6 +-- .../connector/spanlogs/spanlogs_test.go | 6 +-- .../connector/spanmetrics/spanmetrics.go | 10 ++-- .../connector/spanmetrics/spanmetrics_test.go | 8 +-- .../otelcol/connector/spanmetrics/types.go | 16 +++--- .../exporter/loadbalancing/loadbalancing.go | 22 ++++---- .../loadbalancing/loadbalancing_test.go | 6 +-- .../otelcol/exporter/otlp/otlp_test.go | 6 +-- .../exporter/otlphttp/otlphttp_test.go | 6 +-- .../exporter/prometheus/prometheus_test.go | 4 +- .../jaeger_remote_sampling_test.go | 10 ++-- .../processor/attributes/attributes_test.go | 52 +++++++++---------- .../otelcol/processor/batch/batch_test.go | 6 +-- .../otelcol/processor/discovery/discovery.go | 10 ++-- .../processor/discovery/discovery_test.go | 12 ++--- .../otelcol/processor/filter/filter_test.go | 4 +- .../k8sattributes/k8sattributes_test.go | 24 ++++----- .../memorylimiter/memorylimiter_test.go | 4 +- .../probabilistic_sampler.go | 10 ++-- .../probabilistic_sampler_test.go | 10 ++-- .../internal/aws/ec2/config.go | 6 +-- .../internal/aws/ecs/config.go | 6 +-- .../internal/aws/eks/config.go | 6 +-- .../internal/aws/elasticbeanstalk/config.go | 6 +-- .../internal/aws/lambda/config.go | 6 +-- .../internal/azure/aks/config.go | 6 +-- .../internal/azure/config.go | 6 +-- .../internal/consul/config.go | 6 +-- .../internal/docker/config.go | 6 +-- .../resourcedetection/internal/gcp/config.go | 6 +-- .../internal/heroku/config.go | 6 +-- .../internal/k8snode/config.go | 6 +-- .../internal/openshift/config.go | 6 +-- .../internal/system/config.go | 6 +-- .../resourcedetection/resourcedetection.go | 10 ++-- .../resourcedetection_test.go | 4 +- .../otelcol/processor/span/span_test.go | 20 +++---- .../tail_sampling/tail_sampling_test.go | 12 ++--- .../otelcol/processor/tail_sampling/types.go | 6 +-- .../processor/transform/transform_test.go | 4 +- .../otelcol/receiver/jaeger/jaeger_test.go | 14 ++--- .../otelcol/receiver/kafka/kafka_test.go | 8 +-- .../otelcol/receiver/loki/loki_test.go | 4 +- .../receiver/opencensus/opencensus_test.go | 10 ++-- .../otelcol/receiver/otlp/otlp_test.go | 12 ++--- .../receiver/prometheus/prometheus_test.go | 4 +- .../otelcol/receiver/vcenter/vcenter_test.go | 6 +-- .../otelcol/receiver/zipkin/zipkin_test.go | 8 +-- .../exporter/blackbox/blackbox_test.go | 10 ++-- .../exporter/cadvisor/cadvisor_test.go | 4 +- .../prometheus/exporter/cloudwatch/config.go | 6 +-- .../exporter/cloudwatch/config_test.go | 4 +- .../exporter/dnsmasq/dnsmasq_test.go | 6 +-- .../elasticsearch/elasticsearch_test.go | 6 +-- .../prometheus/exporter/gcp/gcp_test.go | 4 +- .../prometheus/exporter/github/github_test.go | 4 +- .../prometheus/exporter/kafka/kafka_test.go | 10 ++-- .../exporter/memcached/memcached_test.go | 10 ++-- .../exporter/mongodb/mongodb_test.go | 6 +-- .../prometheus/exporter/mssql/mssql_test.go | 14 ++--- .../prometheus/exporter/mysql/mysql_test.go | 6 +-- .../exporter/oracledb/oracledb_test.go | 6 +-- .../exporter/postgres/postgres_test.go | 6 +-- .../exporter/process/process_test.go | 6 +-- .../prometheus/exporter/redis/redis_test.go | 8 +-- .../prometheus/exporter/snmp/snmp_test.go | 8 +-- .../exporter/snowflake/snowflake_test.go | 6 +-- .../prometheus/exporter/squid/squid_test.go | 6 +-- .../prometheus/exporter/statsd/statsd_test.go | 8 +-- .../windows/config_default_windows_test.go | 4 +- .../exporter/windows/windows_test.go | 6 +-- .../prometheus/operator/types_test.go | 4 +- 
.../prometheus/relabel/relabel_test.go | 6 +-- .../remotewrite/remote_write_test.go | 4 +- .../prometheus/remotewrite/types_test.go | 4 +- .../prometheus/scrape/scrape_test.go | 10 ++-- .../pyroscope/ebpf/ebpf_linux_test.go | 6 +-- .../component/pyroscope/scrape/scrape_test.go | 6 +-- .../component/pyroscope/write/write_test.go | 6 +-- internal/component/remote/http/http_test.go | 6 +-- .../remote/kubernetes/kubernetes_test.go | 4 +- internal/component/remote/vault/vault_test.go | 6 +-- .../internal/common/convert_appendable.go | 4 +- .../internal/common/convert_logs_receiver.go | 4 +- .../internal/common/convert_targets.go | 4 +- .../converter/internal/common/river_utils.go | 8 +-- .../internal/common/river_utils_test.go | 4 +- internal/flow/logging/options.go | 6 +-- internal/service/http/http_test.go | 4 +- internal/service/http/tls.go | 6 +-- internal/service/remotecfg/remotecfg.go | 8 +-- internal/service/remotecfg/remotecfg_test.go | 4 +- syntax/encoding/riverjson/riverjson_test.go | 4 +- syntax/syntax_test.go | 12 ++--- 156 files changed, 579 insertions(+), 579 deletions(-) diff --git a/internal/component/all/all_test.go b/internal/component/all/all_test.go index a6dbbf34f0..21030d559c 100644 --- a/internal/component/all/all_test.go +++ b/internal/component/all/all_test.go @@ -6,7 +6,7 @@ import ( "testing" "github.com/grafana/agent/internal/component" - river "github.com/grafana/alloy/syntax" + "github.com/grafana/alloy/syntax" "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" ) @@ -16,7 +16,7 @@ import ( // recursively traverses through its Arguments type to guarantee that no two // calls to SetDefault result in pointer reuse. // -// Nested types that also implement river.Defaulter are also checked. +// Nested types that also implement syntax.Defaulter are also checked. func TestSetDefault_NoPointerReuse(t *testing.T) { allComponents := component.AllNames() for _, componentName := range allComponents { @@ -37,10 +37,10 @@ func testNoReusePointer(t *testing.T, reg component.Registration) { args2 = reg.CloneArguments() ) - if args1, ok := args1.(river.Defaulter); ok { + if args1, ok := args1.(syntax.Defaulter); ok { args1.SetToDefault() } - if args2, ok := args2.(river.Defaulter); ok { + if args2, ok := args2.(syntax.Defaulter); ok { args2.SetToDefault() } @@ -60,7 +60,7 @@ func testNoReusePointer(t *testing.T, reg component.Registration) { assert.Fail(t, fmt.Sprintf("Detected SetToDefault pointer reuse at %s", fullPath), - "Types implementing river.Defaulter must not reuse pointers across multiple calls. Doing so leads to default values being changed when unmarshaling configuration files. If you're seeing this error, check the path above and ensure that copies are being made of any pointers in all instances of SetToDefault calls where that field is used.", + "Types implementing syntax.Defaulter must not reuse pointers across multiple calls. Doing so leads to default values being changed when unmarshaling configuration files. If you're seeing this error, check the path above and ensure that copies are being made of any pointers in all instances of SetToDefault calls where that field is used.", ) } } @@ -165,11 +165,11 @@ func pointersMatch(a, b reflect.Value) bool { } // initValue initializes nil pointers. If the nil pointer implements -// river.Defaulter, it is also set to default values. +// syntax.Defaulter, it is also set to default values. 
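(The doc comments in this test describe the failure mode it guards against: a SetToDefault implementation that hands out the same pointer on every call, so unmarshaling one configuration silently mutates the defaults seen by the next. A minimal sketch of that bug, with hypothetical names, not taken from this diff:

    type queueOptions struct{ Capacity int }

    var sharedDefaults = &queueOptions{Capacity: 10}

    type args struct {
    	Queue *queueOptions
    }

    // SetToDefault is the buggy form the test detects: every call assigns
    // the same *queueOptions, so two decoded configurations alias one value.
    func (a *args) SetToDefault() {
    	a.Queue = sharedDefaults // BUG: pointer reuse across calls
    }

    // The fix is to copy before assigning, so each call owns its pointer:
    //
    //	func (a *args) SetToDefault() {
    //		q := *sharedDefaults
    //		a.Queue = &q
    //	}

)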
func initValue(rv reflect.Value) { if rv.Kind() == reflect.Pointer && rv.IsNil() { rv.Set(reflect.New(rv.Type().Elem())) - if defaulter, ok := rv.Interface().(river.Defaulter); ok { + if defaulter, ok := rv.Interface().(syntax.Defaulter); ok { defaulter.SetToDefault() } } diff --git a/internal/component/common/config/types_test.go b/internal/component/common/config/types_test.go index a2f31f8439..4231e6480e 100644 --- a/internal/component/common/config/types_test.go +++ b/internal/component/common/config/types_test.go @@ -3,7 +3,7 @@ package config import ( "testing" - river "github.com/grafana/alloy/syntax" + "github.com/grafana/alloy/syntax" "github.com/stretchr/testify/require" ) @@ -25,7 +25,7 @@ func TestHTTPClientConfigBearerToken(t *testing.T) { ` var httpClientConfig HTTPClientConfig - err := river.Unmarshal([]byte(exampleRiverConfig), &httpClientConfig) + err := syntax.Unmarshal([]byte(exampleRiverConfig), &httpClientConfig) require.NoError(t, err) } @@ -38,7 +38,7 @@ func TestHTTPClientConfigBearerTokenFile(t *testing.T) { ` var httpClientConfig HTTPClientConfig - err := river.Unmarshal([]byte(exampleRiverConfig), &httpClientConfig) + err := syntax.Unmarshal([]byte(exampleRiverConfig), &httpClientConfig) require.NoError(t, err) } @@ -55,7 +55,7 @@ func TestHTTPClientConfigBasicAuthPassword(t *testing.T) { ` var httpClientConfig HTTPClientConfig - err := river.Unmarshal([]byte(exampleRiverConfig), &httpClientConfig) + err := syntax.Unmarshal([]byte(exampleRiverConfig), &httpClientConfig) require.NoError(t, err) } @@ -72,7 +72,7 @@ func TestHTTPClientConfigBasicAuthPasswordFile(t *testing.T) { ` var httpClientConfig HTTPClientConfig - err := river.Unmarshal([]byte(exampleRiverConfig), &httpClientConfig) + err := syntax.Unmarshal([]byte(exampleRiverConfig), &httpClientConfig) require.NoError(t, err) } @@ -89,7 +89,7 @@ func TestHTTPClientConfigAuthorizationCredentials(t *testing.T) { ` var httpClientConfig HTTPClientConfig - err := river.Unmarshal([]byte(exampleRiverConfig), &httpClientConfig) + err := syntax.Unmarshal([]byte(exampleRiverConfig), &httpClientConfig) require.NoError(t, err) } @@ -106,7 +106,7 @@ func TestHTTPClientConfigAuthorizationCredentialsFile(t *testing.T) { ` var httpClientConfig HTTPClientConfig - err := river.Unmarshal([]byte(exampleRiverConfig), &httpClientConfig) + err := syntax.Unmarshal([]byte(exampleRiverConfig), &httpClientConfig) require.NoError(t, err) } @@ -135,7 +135,7 @@ func TestHTTPClientConfigOath2ClientSecret(t *testing.T) { ` var httpClientConfig HTTPClientConfig - err := river.Unmarshal([]byte(exampleRiverConfig), &httpClientConfig) + err := syntax.Unmarshal([]byte(exampleRiverConfig), &httpClientConfig) require.NoError(t, err) } @@ -156,7 +156,7 @@ func TestHTTPClientConfigOath2ClientSecretFile(t *testing.T) { ` var httpClientConfig HTTPClientConfig - err := river.Unmarshal([]byte(exampleRiverConfig), &httpClientConfig) + err := syntax.Unmarshal([]byte(exampleRiverConfig), &httpClientConfig) require.NoError(t, err) } @@ -172,7 +172,7 @@ func TestOath2TLSConvert(t *testing.T) { ` var httpClientConfig HTTPClientConfig - err := river.Unmarshal([]byte(exampleRiverConfig), &httpClientConfig) + err := syntax.Unmarshal([]byte(exampleRiverConfig), &httpClientConfig) require.NoError(t, err) newCfg := httpClientConfig.Convert() require.NotNil(t, newCfg) @@ -227,6 +227,6 @@ func TestHTTPClientBadConfig(t *testing.T) { ` var httpClientConfig HTTPClientConfig - err := river.Unmarshal([]byte(exampleRiverConfig), &httpClientConfig) + err := 
syntax.Unmarshal([]byte(exampleRiverConfig), &httpClientConfig) require.ErrorContains(t, err, "at most one of basic_auth password & password_file must be configured") } diff --git a/internal/component/common/kubernetes/kubernetes_test.go b/internal/component/common/kubernetes/kubernetes_test.go index 596de1f698..7c8efb5d1a 100644 --- a/internal/component/common/kubernetes/kubernetes_test.go +++ b/internal/component/common/kubernetes/kubernetes_test.go @@ -3,7 +3,7 @@ package kubernetes import ( "testing" - river "github.com/grafana/alloy/syntax" + "github.com/grafana/alloy/syntax" "github.com/stretchr/testify/require" ) @@ -13,14 +13,14 @@ func TestUnmarshalRiver(t *testing.T) { proxy_url = "http://0.0.0.0:11111" ` var args ClientArguments - err := river.Unmarshal([]byte(exampleRiverConfig), &args) + err := syntax.Unmarshal([]byte(exampleRiverConfig), &args) require.NoError(t, err) exampleRiverConfig = ` kubeconfig_file = "/etc/k8s/kubeconfig.yaml" ` var args1 ClientArguments - err = river.Unmarshal([]byte(exampleRiverConfig), &args1) + err = syntax.Unmarshal([]byte(exampleRiverConfig), &args1) require.NoError(t, err) } @@ -54,7 +54,7 @@ func TestBadConfigs(t *testing.T) { for _, test := range tests { t.Run(test.name, func(t *testing.T) { var args ClientArguments - err := river.Unmarshal([]byte(test.config), &args) + err := syntax.Unmarshal([]byte(test.config), &args) require.Error(t, err) }) } diff --git a/internal/component/common/net/config_test.go b/internal/component/common/net/config_test.go index 7b79c02451..56a04b43ad 100644 --- a/internal/component/common/net/config_test.go +++ b/internal/component/common/net/config_test.go @@ -7,7 +7,7 @@ import ( dskit "github.com/grafana/dskit/server" "github.com/stretchr/testify/require" - river "github.com/grafana/alloy/syntax" + "github.com/grafana/alloy/syntax" ) // testArguments mimics an arguments type used by a component, applying the defaults to ServerConfig @@ -150,7 +150,7 @@ func TestConfig(t *testing.T) { for name, tc := range cases { t.Run(name, func(t *testing.T) { args := testArguments{} - err := river.Unmarshal([]byte(tc.raw), &args) + err := syntax.Unmarshal([]byte(tc.raw), &args) require.Equal(t, tc.errExpected, err != nil) wConfig := args.Server.convert() tc.assert(t, wConfig) diff --git a/internal/component/common/relabel/relabel_test.go b/internal/component/common/relabel/relabel_test.go index 2ae9672465..3138870f1b 100644 --- a/internal/component/common/relabel/relabel_test.go +++ b/internal/component/common/relabel/relabel_test.go @@ -5,7 +5,7 @@ import ( "github.com/stretchr/testify/require" - river "github.com/grafana/alloy/syntax" + "github.com/grafana/alloy/syntax" ) func TestParseConfig(t *testing.T) { @@ -58,7 +58,7 @@ func TestParseConfig(t *testing.T) { t.Run(tt.name, func(t *testing.T) { t.Parallel() var cfg Config - err := river.Unmarshal([]byte(tt.cfg), &cfg) + err := syntax.Unmarshal([]byte(tt.cfg), &cfg) if tt.expectErr { require.Error(t, err) } else { diff --git a/internal/component/discovery/azure/azure_test.go b/internal/component/discovery/azure/azure_test.go index 6bc8bc9afa..f05f16c0fb 100644 --- a/internal/component/discovery/azure/azure_test.go +++ b/internal/component/discovery/azure/azure_test.go @@ -6,7 +6,7 @@ import ( "time" "github.com/grafana/agent/internal/component/common/config" - river "github.com/grafana/alloy/syntax" + "github.com/grafana/alloy/syntax" "github.com/prometheus/common/model" "github.com/stretchr/testify/require" "gotest.tools/assert" @@ -29,7 +29,7 @@ func 
TestRiverUnmarshal(t *testing.T) { proxy_url = "http://example:8080"` var args Arguments - err := river.Unmarshal([]byte(riverCfg), &args) + err := syntax.Unmarshal([]byte(riverCfg), &args) require.NoError(t, err) assert.Equal(t, "AzureTestCloud", args.Environment) @@ -56,7 +56,7 @@ func TestRiverUnmarshal_OAuthRequiredFields(t *testing.T) { client_id = "clientid" }` var args Arguments - err := river.Unmarshal([]byte(riverCfg), &args) + err := syntax.Unmarshal([]byte(riverCfg), &args) require.Error(t, err) } @@ -69,7 +69,7 @@ func TestValidate(t *testing.T) { resource_group = "test"` var args Arguments - err := river.Unmarshal([]byte(noAuth), &args) + err := syntax.Unmarshal([]byte(noAuth), &args) require.ErrorContains(t, err, "exactly one of oauth or managed_identity must be specified") bothAuth := ` @@ -87,7 +87,7 @@ func TestValidate(t *testing.T) { client_id = "clientid" }` var args2 Arguments - err = river.Unmarshal([]byte(bothAuth), &args2) + err = syntax.Unmarshal([]byte(bothAuth), &args2) require.ErrorContains(t, err, "exactly one of oauth or managed_identity must be specified") invalidTLS := ` @@ -104,7 +104,7 @@ func TestValidate(t *testing.T) { cert_pem = "certpem" }` var args3 Arguments - err = river.Unmarshal([]byte(invalidTLS), &args3) + err = syntax.Unmarshal([]byte(invalidTLS), &args3) require.ErrorContains(t, err, "at most one of cert_pem and cert_file must be configured") } diff --git a/internal/component/discovery/consul/consul_test.go b/internal/component/discovery/consul/consul_test.go index a265c6d51e..baef1eba47 100644 --- a/internal/component/discovery/consul/consul_test.go +++ b/internal/component/discovery/consul/consul_test.go @@ -3,7 +3,7 @@ package consul import ( "testing" - river "github.com/grafana/alloy/syntax" + "github.com/grafana/alloy/syntax" "github.com/stretchr/testify/require" ) @@ -17,7 +17,7 @@ func TestRiverConfig(t *testing.T) { ` var args Arguments - err := river.Unmarshal([]byte(exampleRiverConfig), &args) + err := syntax.Unmarshal([]byte(exampleRiverConfig), &args) require.NoError(t, err) } @@ -34,6 +34,6 @@ func TestBadRiverConfig(t *testing.T) { // Make sure the squashed HTTPClientConfig Validate function is being utilized correctly var args Arguments - err := river.Unmarshal([]byte(exampleRiverConfig), &args) + err := syntax.Unmarshal([]byte(exampleRiverConfig), &args) require.ErrorContains(t, err, "at most one of basic_auth password & password_file must be configured") } diff --git a/internal/component/discovery/consulagent/consulagent_test.go b/internal/component/discovery/consulagent/consulagent_test.go index 4648059ec3..9415faf036 100644 --- a/internal/component/discovery/consulagent/consulagent_test.go +++ b/internal/component/discovery/consulagent/consulagent_test.go @@ -4,7 +4,7 @@ import ( "testing" "time" - river "github.com/grafana/alloy/syntax" + "github.com/grafana/alloy/syntax" promcfg "github.com/prometheus/common/config" "github.com/prometheus/common/model" "github.com/stretchr/testify/assert" @@ -29,7 +29,7 @@ func TestConvert(t *testing.T) { ` var args Arguments - err := river.Unmarshal([]byte(exampleRiverConfig), &args) + err := syntax.Unmarshal([]byte(exampleRiverConfig), &args) require.NoError(t, err) converted := args.Convert() @@ -69,7 +69,7 @@ func TestBadTLSRiverConfig(t *testing.T) { // Make sure the TLSConfig Validate function is being utilized correctly. 
var args Arguments - err := river.Unmarshal([]byte(exampleRiverConfig), &args) + err := syntax.Unmarshal([]byte(exampleRiverConfig), &args) require.ErrorContains(t, err, "at most one of ca_pem and ca_file must be configured") } @@ -92,6 +92,6 @@ func TestBadRefreshIntervalRiverConfig(t *testing.T) { // Make sure the Refresh Interval is tested correctly. var args Arguments - err := river.Unmarshal([]byte(exampleRiverConfig), &args) + err := syntax.Unmarshal([]byte(exampleRiverConfig), &args) require.ErrorContains(t, err, "refresh_interval must be greater than 0") } diff --git a/internal/component/discovery/digitalocean/digitalocean_test.go b/internal/component/discovery/digitalocean/digitalocean_test.go index 8d3bff8daa..18a628fc3e 100644 --- a/internal/component/discovery/digitalocean/digitalocean_test.go +++ b/internal/component/discovery/digitalocean/digitalocean_test.go @@ -6,7 +6,7 @@ import ( "time" "github.com/grafana/agent/internal/component/common/config" - river "github.com/grafana/alloy/syntax" + "github.com/grafana/alloy/syntax" prom_common_config "github.com/prometheus/common/config" "github.com/prometheus/common/model" "github.com/stretchr/testify/require" @@ -21,7 +21,7 @@ func TestRiverUnmarshal(t *testing.T) { ` var args Arguments - err := river.Unmarshal([]byte(exampleRiverConfig), &args) + err := syntax.Unmarshal([]byte(exampleRiverConfig), &args) require.NoError(t, err) assert.Equal(t, 5*time.Minute, args.RefreshInterval) @@ -36,7 +36,7 @@ func TestRiverUnmarshal(t *testing.T) { enable_http2 = false bearer_token = "token" ` - err = river.Unmarshal([]byte(fullerExampleRiverConfig), &args) + err = syntax.Unmarshal([]byte(fullerExampleRiverConfig), &args) require.NoError(t, err) assert.Equal(t, 3*time.Minute, args.RefreshInterval) assert.Equal(t, 9119, args.Port) @@ -54,7 +54,7 @@ func TestBadRiverConfig(t *testing.T) { ` var args Arguments - err := river.Unmarshal([]byte(badConfigTooManyBearerTokens), &args) + err := syntax.Unmarshal([]byte(badConfigTooManyBearerTokens), &args) require.ErrorContains(t, err, "exactly one of bearer_token or bearer_token_file must be specified") var badConfigMissingAuth = ` @@ -62,7 +62,7 @@ func TestBadRiverConfig(t *testing.T) { port = 8181 ` var args2 Arguments - err = river.Unmarshal([]byte(badConfigMissingAuth), &args2) + err = syntax.Unmarshal([]byte(badConfigMissingAuth), &args2) require.ErrorContains(t, err, "exactly one of bearer_token or bearer_token_file must be specified") } diff --git a/internal/component/discovery/dns/dns_test.go b/internal/component/discovery/dns/dns_test.go index 7b9264ebdf..da0d3e4411 100644 --- a/internal/component/discovery/dns/dns_test.go +++ b/internal/component/discovery/dns/dns_test.go @@ -5,7 +5,7 @@ import ( "testing" "time" - river "github.com/grafana/alloy/syntax" + "github.com/grafana/alloy/syntax" "github.com/prometheus/common/model" "github.com/stretchr/testify/require" "gotest.tools/assert" @@ -20,7 +20,7 @@ func TestRiverUnmarshal(t *testing.T) { ` var args Arguments - err := river.Unmarshal([]byte(exampleRiverConfig), &args) + err := syntax.Unmarshal([]byte(exampleRiverConfig), &args) require.NoError(t, err) assert.Equal(t, 5*time.Minute, args.RefreshInterval) @@ -57,7 +57,7 @@ func TestBadRiverConfig(t *testing.T) { cfg := tst.Config t.Run(tst.Desc, func(t *testing.T) { var args Arguments - err := river.Unmarshal([]byte(cfg), &args) + err := syntax.Unmarshal([]byte(cfg), &args) require.Error(t, err) }) } diff --git a/internal/component/discovery/docker/docker_test.go 
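(The "bad config" tests in these files all lean on the same decode-then-validate contract: syntax.Unmarshal decodes the configuration into the arguments type and then surfaces the error returned by its Validate method. A minimal sketch with hypothetical names, assuming the syntax package's Validator interface is satisfied by defining Validate() error:

    package example

    import (
    	"errors"
    	"time"

    	"github.com/grafana/alloy/syntax"
    )

    type serverArgs struct {
    	RefreshInterval time.Duration `river:"refresh_interval,attr,optional"`
    }

    // Validate runs after decoding; the error it returns is what
    // require.ErrorContains inspects in the tests above.
    func (a *serverArgs) Validate() error {
    	if a.RefreshInterval <= 0 {
    		return errors.New("refresh_interval must be greater than 0")
    	}
    	return nil
    }

    func decode(cfg string) error {
    	var args serverArgs
    	return syntax.Unmarshal([]byte(cfg), &args)
    }

)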
b/internal/component/discovery/docker/docker_test.go index 542844ecb6..ce927fcade 100644 --- a/internal/component/discovery/docker/docker_test.go +++ b/internal/component/discovery/docker/docker_test.go @@ -3,7 +3,7 @@ package docker import ( "testing" - river "github.com/grafana/alloy/syntax" + "github.com/grafana/alloy/syntax" "github.com/stretchr/testify/require" ) @@ -13,7 +13,7 @@ func TestRiverConfig(t *testing.T) { ` var args Arguments - err := river.Unmarshal([]byte(exampleRiverConfig), &args) + err := syntax.Unmarshal([]byte(exampleRiverConfig), &args) require.NoError(t, err) } @@ -26,6 +26,6 @@ func TestBadRiverConfig(t *testing.T) { // Make sure the squashed HTTPClientConfig Validate function is being utilized correctly var args Arguments - err := river.Unmarshal([]byte(exampleRiverConfig), &args) + err := syntax.Unmarshal([]byte(exampleRiverConfig), &args) require.ErrorContains(t, err, "at most one of basic_auth, authorization, oauth2, bearer_token & bearer_token_file must be configured") } diff --git a/internal/component/discovery/dockerswarm/dockerswarm_test.go b/internal/component/discovery/dockerswarm/dockerswarm_test.go index de83ac9819..d36d2198e7 100644 --- a/internal/component/discovery/dockerswarm/dockerswarm_test.go +++ b/internal/component/discovery/dockerswarm/dockerswarm_test.go @@ -5,7 +5,7 @@ import ( "time" "github.com/grafana/agent/internal/component/common/config" - river "github.com/grafana/alloy/syntax" + "github.com/grafana/alloy/syntax" "github.com/grafana/alloy/syntax/alloytypes" promConfig "github.com/prometheus/common/config" "github.com/prometheus/common/model" @@ -34,7 +34,7 @@ func TestRiverUnmarshal(t *testing.T) { ` var args Arguments - err := river.Unmarshal([]byte(riverCfg), &args) + err := syntax.Unmarshal([]byte(riverCfg), &args) require.NoError(t, err) require.ElementsMatch(t, []Filter{{"n1", []string{"v11", "v12"}}, {"n2", []string{"v21"}}}, args.Filters) assert.Equal(t, "unix:///var/run/docker.sock", args.Host) diff --git a/internal/component/discovery/eureka/eureka_test.go b/internal/component/discovery/eureka/eureka_test.go index 5bd367f08f..618150e39a 100644 --- a/internal/component/discovery/eureka/eureka_test.go +++ b/internal/component/discovery/eureka/eureka_test.go @@ -5,7 +5,7 @@ import ( "time" "github.com/grafana/agent/internal/component/common/config" - river "github.com/grafana/alloy/syntax" + "github.com/grafana/alloy/syntax" "github.com/grafana/alloy/syntax/alloytypes" promcfg "github.com/prometheus/common/config" "github.com/prometheus/common/model" @@ -22,7 +22,7 @@ func TestUnmarshal(t *testing.T) { password = "examplepassword" }` var args Arguments - err := river.Unmarshal([]byte(exampleCfg), &args) + err := syntax.Unmarshal([]byte(exampleCfg), &args) require.NoError(t, err) require.Equal(t, "http://localhost:8080/eureka/v1", args.Server) @@ -40,7 +40,7 @@ func TestValidate(t *testing.T) { }` var args Arguments - err := river.Unmarshal([]byte(noServer), &args) + err := syntax.Unmarshal([]byte(noServer), &args) require.Error(t, err) emptyServer := ` @@ -50,7 +50,7 @@ func TestValidate(t *testing.T) { username = "exampleuser" password = "examplepassword" }` - err = river.Unmarshal([]byte(emptyServer), &args) + err = syntax.Unmarshal([]byte(emptyServer), &args) require.Error(t, err) invalidServer := ` @@ -60,7 +60,7 @@ func TestValidate(t *testing.T) { username = "exampleuser" password = "examplepassword" }` - err = river.Unmarshal([]byte(invalidServer), &args) + err = syntax.Unmarshal([]byte(invalidServer), &args) 
 	require.Error(t, err)
 }
diff --git a/internal/component/discovery/file/file_test.go b/internal/component/discovery/file/file_test.go
index 9594936603..eb126ce9dd 100644
--- a/internal/component/discovery/file/file_test.go
+++ b/internal/component/discovery/file/file_test.go
@@ -4,7 +4,7 @@ import (
 	"testing"
 	"time"
 
-	river "github.com/grafana/alloy/syntax"
+	"github.com/grafana/alloy/syntax"
 	"github.com/prometheus/common/model"
 	"github.com/stretchr/testify/require"
 )
@@ -15,7 +15,7 @@ func TestUnmarshal(t *testing.T) {
 		files = ["file1", "file2"]`
 
 	var args Arguments
-	err := river.Unmarshal([]byte(cfg), &args)
+	err := syntax.Unmarshal([]byte(cfg), &args)
 	require.NoError(t, err)
 	require.Equal(t, 2, len(args.Files))
 	require.Equal(t, 10*time.Minute, args.RefreshInterval)
@@ -25,7 +25,7 @@ func TestUnmarshal_Defaults(t *testing.T) {
 	cfg := `files = ["file1"]`
 
 	var args Arguments
-	err := river.Unmarshal([]byte(cfg), &args)
+	err := syntax.Unmarshal([]byte(cfg), &args)
 	require.NoError(t, err)
 	require.Equal(t, 1, len(args.Files))
 	require.Equal(t, 5*time.Minute, args.RefreshInterval)
diff --git a/internal/component/discovery/gce/gce_test.go b/internal/component/discovery/gce/gce_test.go
index 018dbb6af7..6d646794c3 100644
--- a/internal/component/discovery/gce/gce_test.go
+++ b/internal/component/discovery/gce/gce_test.go
@@ -4,7 +4,7 @@ import (
 	"testing"
 	"time"
 
-	river "github.com/grafana/alloy/syntax"
+	"github.com/grafana/alloy/syntax"
 	"github.com/stretchr/testify/require"
 )
 
@@ -19,7 +19,7 @@ func TestUnmarshalRiver(t *testing.T) {
 	`
 
 	var args Arguments
-	err := river.Unmarshal([]byte(riverConfig), &args)
+	err := syntax.Unmarshal([]byte(riverConfig), &args)
 	require.NoError(t, err)
 }
 
@@ -32,7 +32,7 @@ func TestUnmarshalRiverInvalid(t *testing.T) {
 	`
 
 	var args Arguments
-	err := river.Unmarshal([]byte(riverConfig), &args)
+	err := syntax.Unmarshal([]byte(riverConfig), &args)
 
 	// Validate that project and zone are required.
 	require.Error(t, err)
diff --git a/internal/component/discovery/hetzner/hetzner_test.go b/internal/component/discovery/hetzner/hetzner_test.go
index e4087ea22b..6b11bcf8d6 100644
--- a/internal/component/discovery/hetzner/hetzner_test.go
+++ b/internal/component/discovery/hetzner/hetzner_test.go
@@ -4,7 +4,7 @@ import (
 	"testing"
 	"time"
 
-	river "github.com/grafana/alloy/syntax"
+	"github.com/grafana/alloy/syntax"
 	"github.com/prometheus/common/model"
 	"github.com/stretchr/testify/require"
 	"gotest.tools/assert"
@@ -17,7 +17,7 @@ func TestRiverUnmarshal(t *testing.T) {
 		role = "robot"`
 
 	var args Arguments
-	err := river.Unmarshal([]byte(riverCfg), &args)
+	err := syntax.Unmarshal([]byte(riverCfg), &args)
 	require.NoError(t, err)
 
 	assert.Equal(t, 8080, args.Port)
@@ -30,7 +30,7 @@ func TestValidate(t *testing.T) {
 		role = "test"`
 
 	var args Arguments
-	err := river.Unmarshal([]byte(wrongRole), &args)
+	err := syntax.Unmarshal([]byte(wrongRole), &args)
 	require.ErrorContains(t, err, "unknown role test, must be one of robot or hcloud")
 }
diff --git a/internal/component/discovery/http/http_test.go b/internal/component/discovery/http/http_test.go
index 6c371e6897..6c81591b9b 100644
--- a/internal/component/discovery/http/http_test.go
+++ b/internal/component/discovery/http/http_test.go
@@ -12,7 +12,7 @@ import (
 	"github.com/grafana/agent/internal/component"
 	"github.com/grafana/agent/internal/component/common/config"
 	"github.com/grafana/agent/internal/component/discovery"
-	river "github.com/grafana/alloy/syntax"
+	"github.com/grafana/alloy/syntax"
 	"github.com/prometheus/common/model"
 	"github.com/stretchr/testify/require"
 	"go.uber.org/atomic"
@@ -29,7 +29,7 @@ func TestRiverConfig(t *testing.T) {
 	}
 	`
 	var args Arguments
-	err := river.Unmarshal([]byte(exampleRiverConfig), &args)
+	err := syntax.Unmarshal([]byte(exampleRiverConfig), &args)
 	require.NoError(t, err)
 	assert.Equal(t, args.HTTPClientConfig.BasicAuth.Username, "123")
 }
diff --git a/internal/component/discovery/ionos/ionos_test.go b/internal/component/discovery/ionos/ionos_test.go
index b732111896..4129eb7112 100644
--- a/internal/component/discovery/ionos/ionos_test.go
+++ b/internal/component/discovery/ionos/ionos_test.go
@@ -5,7 +5,7 @@ import (
 	"time"
 
 	"github.com/grafana/agent/internal/component/common/config"
-	river "github.com/grafana/alloy/syntax"
+	"github.com/grafana/alloy/syntax"
 	"github.com/grafana/alloy/syntax/alloytypes"
 	promConfig "github.com/prometheus/common/config"
 	"github.com/prometheus/common/model"
@@ -25,7 +25,7 @@ func TestRiverUnmarshal(t *testing.T) {
 	`
 
 	var args Arguments
-	err := river.Unmarshal([]byte(riverCfg), &args)
+	err := syntax.Unmarshal([]byte(riverCfg), &args)
 	require.NoError(t, err)
 
 	assert.Equal(t, "datacenter_id", args.DatacenterID)
 	assert.Equal(t, 20*time.Second, args.RefreshInterval)
diff --git a/internal/component/discovery/kubelet/kubelet_test.go b/internal/component/discovery/kubelet/kubelet_test.go
index a117a6b962..cec0e2b05d 100644
--- a/internal/component/discovery/kubelet/kubelet_test.go
+++ b/internal/component/discovery/kubelet/kubelet_test.go
@@ -9,7 +9,7 @@ import (
 	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
 
 	"github.com/grafana/agent/internal/component/common/config"
-	river "github.com/grafana/alloy/syntax"
+	"github.com/grafana/alloy/syntax"
 	"github.com/stretchr/testify/require"
 )
 
@@ -19,7 +19,7 @@ func TestRiverConfig(t *testing.T) {
 	`
 
 	var args Arguments
-	err := river.Unmarshal([]byte(exampleRiverConfig), &args)
+	err := syntax.Unmarshal([]byte(exampleRiverConfig), &args)
 	require.NoError(t, err)
 }
 
@@ -31,12 +31,12 @@ func TestBadRiverConfig(t *testing.T) {
 
 	// Make sure the squashed HTTPClientConfig Validate function is being utilized correctly
 	var args Arguments
-	err := river.Unmarshal([]byte(exampleRiverConfig), &args)
+	err := syntax.Unmarshal([]byte(exampleRiverConfig), &args)
 	require.ErrorContains(t, err, "at most one of basic_auth, authorization, oauth2, bearer_token & bearer_token_file must be configured")
 
 	// Make sure that URL defaults to https://localhost:10250
 	var args2 Arguments
-	err = river.Unmarshal([]byte{}, &args2)
+	err = syntax.Unmarshal([]byte{}, &args2)
 	require.NoError(t, err)
 	require.Equal(t, args2.URL.String(), "https://localhost:10250")
 }
diff --git a/internal/component/discovery/kubernetes/kubernetes_test.go b/internal/component/discovery/kubernetes/kubernetes_test.go
index 7d70e5a16f..69779f42e3 100644
--- a/internal/component/discovery/kubernetes/kubernetes_test.go
+++ b/internal/component/discovery/kubernetes/kubernetes_test.go
@@ -3,7 +3,7 @@ package kubernetes
 import (
 	"testing"
 
-	river "github.com/grafana/alloy/syntax"
+	"github.com/grafana/alloy/syntax"
 	"github.com/stretchr/testify/require"
 )
 
@@ -14,7 +14,7 @@ func TestRiverConfig(t *testing.T) {
 	`
 
 	var args Arguments
-	err := river.Unmarshal([]byte(exampleRiverConfig), &args)
+	err := syntax.Unmarshal([]byte(exampleRiverConfig), &args)
 	require.NoError(t, err)
 }
 
@@ -30,7 +30,7 @@ func TestBadRiverConfig(t *testing.T) {
 
 	// Make sure the squashed HTTPClientConfig Validate function is being utilized correctly
 	var args Arguments
-	err := river.Unmarshal([]byte(exampleRiverConfig), &args)
+	err := syntax.Unmarshal([]byte(exampleRiverConfig), &args)
 	require.ErrorContains(t, err, "at most one of basic_auth, authorization, oauth2, bearer_token & bearer_token_file must be configured")
 }
 
@@ -43,6 +43,6 @@ func TestAttachMetadata(t *testing.T) {
 	`
 
 	var args Arguments
-	err := river.Unmarshal([]byte(exampleRiverConfig), &args)
+	err := syntax.Unmarshal([]byte(exampleRiverConfig), &args)
 	require.NoError(t, err)
 }
diff --git a/internal/component/discovery/kuma/kuma_test.go b/internal/component/discovery/kuma/kuma_test.go
index fed1de55ca..3a4942c71a 100644
--- a/internal/component/discovery/kuma/kuma_test.go
+++ b/internal/component/discovery/kuma/kuma_test.go
@@ -5,7 +5,7 @@ import (
 	"time"
 
 	"github.com/grafana/agent/internal/component/common/config"
-	river "github.com/grafana/alloy/syntax"
+	"github.com/grafana/alloy/syntax"
 	promConfig "github.com/prometheus/common/config"
 	"github.com/prometheus/common/model"
 	"github.com/stretchr/testify/require"
@@ -20,7 +20,7 @@ func TestRiverConfig(t *testing.T) {
 	`
 
 	var args Arguments
-	err := river.Unmarshal([]byte(exampleRiverConfig), &args)
+	err := syntax.Unmarshal([]byte(exampleRiverConfig), &args)
 	require.NoError(t, err)
 }
 
@@ -35,7 +35,7 @@ func TestBadRiverConfig(t *testing.T) {
 
 	// Make sure the TLSConfig Validate function is being utilized correctly
 	var args Arguments
-	err := river.Unmarshal([]byte(exampleRiverConfig), &args)
+	err := syntax.Unmarshal([]byte(exampleRiverConfig), &args)
 	require.ErrorContains(t, err, "at most one of ca_pem and ca_file must be configured")
 }
diff --git a/internal/component/discovery/linode/linode_test.go b/internal/component/discovery/linode/linode_test.go
index fc7daf7852..9c7361e623 100644
--- a/internal/component/discovery/linode/linode_test.go
+++ b/internal/component/discovery/linode/linode_test.go
@@ -5,7 +5,7 @@ import (
 	"time"
 
 	"github.com/grafana/agent/internal/component/common/config"
-	river "github.com/grafana/alloy/syntax"
+	"github.com/grafana/alloy/syntax"
 	promconfig "github.com/prometheus/common/config"
 	"github.com/prometheus/common/model"
 	"github.com/stretchr/testify/require"
@@ -22,7 +22,7 @@ func TestRiverConfig(t *testing.T) {
 	}
 	`
 	var args Arguments
-	err := river.Unmarshal([]byte(exampleRiverConfig), &args)
+	err := syntax.Unmarshal([]byte(exampleRiverConfig), &args)
 	require.NoError(t, err)
 }
 
diff --git a/internal/component/discovery/marathon/marathon_test.go b/internal/component/discovery/marathon/marathon_test.go
index 9ba2605a7b..bade375386 100644
--- a/internal/component/discovery/marathon/marathon_test.go
+++ b/internal/component/discovery/marathon/marathon_test.go
@@ -5,7 +5,7 @@ import (
 	"time"
 
 	"github.com/grafana/agent/internal/component/common/config"
-	river "github.com/grafana/alloy/syntax"
+	"github.com/grafana/alloy/syntax"
 	"github.com/grafana/alloy/syntax/alloytypes"
 	promConfig "github.com/prometheus/common/config"
 	"github.com/prometheus/common/model"
@@ -21,7 +21,7 @@ func TestRiverUnmarshalWithAuthToken(t *testing.T) {
 	`
 
 	var args Arguments
-	err := river.Unmarshal([]byte(riverCfg), &args)
+	err := syntax.Unmarshal([]byte(riverCfg), &args)
 	require.NoError(t, err)
 
 	require.ElementsMatch(t, []string{"serv1", "serv2"}, args.Servers)
@@ -37,7 +37,7 @@ func TestRiverUnmarshalWithAuthTokenFile(t *testing.T) {
 	`
 
 	var args Arguments
-	err := river.Unmarshal([]byte(riverCfg), &args)
+	err := syntax.Unmarshal([]byte(riverCfg), &args)
 	require.NoError(t, err)
 
 	require.ElementsMatch(t, []string{"serv1", "serv2"}, args.Servers)
@@ -56,7 +56,7 @@ func TestRiverUnmarshalWithBasicAuth(t *testing.T) {
 	`
 
 	var args Arguments
-	err := river.Unmarshal([]byte(riverCfg), &args)
+	err := syntax.Unmarshal([]byte(riverCfg), &args)
 	require.NoError(t, err)
 
 	require.ElementsMatch(t, []string{"serv1", "serv2"}, args.Servers)
diff --git a/internal/component/discovery/nerve/nerve_test.go b/internal/component/discovery/nerve/nerve_test.go
index 25ca6c235f..269ca5b057 100644
--- a/internal/component/discovery/nerve/nerve_test.go
+++ b/internal/component/discovery/nerve/nerve_test.go
@@ -3,7 +3,7 @@ package nerve
 import (
 	"testing"
 
-	river "github.com/grafana/alloy/syntax"
+	"github.com/grafana/alloy/syntax"
 	"github.com/stretchr/testify/require"
 )
 
@@ -15,7 +15,7 @@ func TestRiverConfig(t *testing.T) {
 	`
 
 	var args Arguments
-	err := river.Unmarshal([]byte(exampleRiverConfig), &args)
+	err := syntax.Unmarshal([]byte(exampleRiverConfig), &args)
 	require.NoError(t, err)
 }
 
@@ -31,5 +31,5 @@ func TestBadRiverConfig(t *testing.T) {
 		timeout = "0s"
 	`
 
-	require.ErrorContains(t, river.Unmarshal([]byte(riverConfig), &args), "timeout must be greater than 0")
+	require.ErrorContains(t, syntax.Unmarshal([]byte(riverConfig), &args), "timeout must be greater than 0")
 }
diff --git a/internal/component/discovery/nomad/nomad_test.go b/internal/component/discovery/nomad/nomad_test.go
index 880ac7801f..24597cce83 100644
--- a/internal/component/discovery/nomad/nomad_test.go
+++ b/internal/component/discovery/nomad/nomad_test.go
@@ -4,7 +4,7 @@ import (
 	"testing"
 	"time"
 
-	river "github.com/grafana/alloy/syntax"
+	"github.com/grafana/alloy/syntax"
 	"github.com/prometheus/common/model"
 	"github.com/stretchr/testify/require"
 	"gotest.tools/assert"
@@ -23,7 +23,7 @@ func TestRiverUnmarshal(t *testing.T) {
 		proxy_url = "http://example:8080"`
 
 	var args Arguments
-	err := river.Unmarshal([]byte(riverCfg), &args)
+	err := syntax.Unmarshal([]byte(riverCfg), &args)
 	require.NoError(t, err)
 
 	assert.Equal(t, false, args.AllowStale)
diff --git a/internal/component/discovery/openstack/openstack_test.go b/internal/component/discovery/openstack/openstack_test.go
index 9b42836d46..1fec5b50d7 100644
--- a/internal/component/discovery/openstack/openstack_test.go
+++ b/internal/component/discovery/openstack/openstack_test.go
@@ -5,7 +5,7 @@ import (
 	"time"
 
 	"github.com/grafana/agent/internal/component/common/config"
-	river "github.com/grafana/alloy/syntax"
+	"github.com/grafana/alloy/syntax"
 	promcfg "github.com/prometheus/common/config"
 	"github.com/prometheus/common/model"
 	"github.com/prometheus/prometheus/discovery/openstack"
@@ -39,7 +39,7 @@ func TestUnmarshal(t *testing.T) {
 	}
 	`
 	var args Arguments
-	err := river.Unmarshal([]byte(cfg), &args)
+	err := syntax.Unmarshal([]byte(cfg), &args)
 	require.NoError(t, err)
 }
 
@@ -50,7 +50,7 @@ func TestValidate(t *testing.T) {
 		availability = "private"`
 
 	var args Arguments
-	err := river.Unmarshal([]byte(wrongAvailability), &args)
+	err := syntax.Unmarshal([]byte(wrongAvailability), &args)
 	require.ErrorContains(t, err, "unknown availability private, must be one of admin, internal or public")
 
 	wrongRole := `
@@ -59,7 +59,7 @@ func TestValidate(t *testing.T) {
 		availability = "public"`
 
 	var args2 Arguments
-	err = river.Unmarshal([]byte(wrongRole), &args2)
+	err = syntax.Unmarshal([]byte(wrongRole), &args2)
 	require.ErrorContains(t, err, "unknown availability private, must be one of instance or hypervisor")
 }
diff --git a/internal/component/discovery/ovhcloud/ovhcloud_test.go b/internal/component/discovery/ovhcloud/ovhcloud_test.go
index 3d644bb6eb..064c5a5e01 100644
--- a/internal/component/discovery/ovhcloud/ovhcloud_test.go
+++ b/internal/component/discovery/ovhcloud/ovhcloud_test.go
@@ -5,7 +5,7 @@ import (
 	"time"
 
 	"github.com/grafana/agent/internal/component/discovery/ovhcloud"
-	river "github.com/grafana/alloy/syntax"
+	"github.com/grafana/alloy/syntax"
 	"github.com/prometheus/common/model"
 	prom_ovh "github.com/prometheus/prometheus/discovery/ovhcloud"
 	"github.com/stretchr/testify/require"
@@ -119,7 +119,7 @@ func TestUnmarshal(t *testing.T) {
 	for _, tc := range tests {
 		t.Run(tc.testName, func(t *testing.T) {
 			var args ovhcloud.Arguments
-			err := river.Unmarshal([]byte(tc.cfg), &args)
+			err := syntax.Unmarshal([]byte(tc.cfg), &args)
 			if tc.errorMsg != "" {
 				require.ErrorContains(t, err, tc.errorMsg)
 				return
diff --git a/internal/component/discovery/puppetdb/puppetdb_test.go b/internal/component/discovery/puppetdb/puppetdb_test.go
index 8151dea5e2..e40f51fa55 100644
--- a/internal/component/discovery/puppetdb/puppetdb_test.go
+++ b/internal/component/discovery/puppetdb/puppetdb_test.go
@@ -4,7 +4,7 @@ import (
 	"testing"
 	"time"
 
-	river "github.com/grafana/alloy/syntax"
+	"github.com/grafana/alloy/syntax"
 	"github.com/prometheus/common/model"
 	"github.com/stretchr/testify/require"
 	"gotest.tools/assert"
@@ -24,7 +24,7 @@ basic_auth {
 
 func TestRiverConfig(t *testing.T) {
 	var args Arguments
-	err := river.Unmarshal([]byte(exampleRiverConfig), &args)
+	err := syntax.Unmarshal([]byte(exampleRiverConfig), &args)
 	require.NoError(t, err)
 	assert.Equal(t, args.HTTPClientConfig.BasicAuth.Username, "123")
 	assert.Equal(t, args.RefreshInterval, time.Minute)
@@ -36,7 +36,7 @@ func TestRiverConfig(t *testing.T) {
 
 func TestConvert(t *testing.T) {
 	var args Arguments
-	err := river.Unmarshal([]byte(exampleRiverConfig), &args)
+	err := syntax.Unmarshal([]byte(exampleRiverConfig), &args)
 	require.NoError(t, err)
 
 	sd := args.Convert()
diff --git a/internal/component/discovery/relabel/relabel_test.go b/internal/component/discovery/relabel/relabel_test.go
index ace24bd895..eb6add2ca9 100644
--- a/internal/component/discovery/relabel/relabel_test.go
+++ b/internal/component/discovery/relabel/relabel_test.go
@@ -8,7 +8,7 @@ import (
 	"github.com/grafana/agent/internal/component/discovery"
 	"github.com/grafana/agent/internal/component/discovery/relabel"
 	"github.com/grafana/agent/internal/flow/componenttest"
-	river "github.com/grafana/alloy/syntax"
+	"github.com/grafana/alloy/syntax"
 	"github.com/stretchr/testify/require"
 )
 
@@ -60,7 +60,7 @@ rule {
 	}
 
 	var args relabel.Arguments
-	require.NoError(t, river.Unmarshal([]byte(riverArguments), &args))
+	require.NoError(t, syntax.Unmarshal([]byte(riverArguments), &args))
 
 	tc, err := componenttest.NewControllerFromID(nil, "discovery.relabel")
 	require.NoError(t, err)
@@ -84,7 +84,7 @@ rule {
 	regex = "up"
 }`
 	var args relabel.Arguments
-	require.NoError(t, river.Unmarshal([]byte(originalCfg), &args))
+	require.NoError(t, syntax.Unmarshal([]byte(originalCfg), &args))
 
 	tc, err := componenttest.NewControllerFromID(nil, "discovery.relabel")
 	require.NoError(t, err)
@@ -108,7 +108,7 @@ rule {
 	source_labels = ["__name__"]
 	regex = "up"
 }`
-	require.NoError(t, river.Unmarshal([]byte(updatedCfg), &args))
+	require.NoError(t, syntax.Unmarshal([]byte(updatedCfg), &args))
 	require.NoError(t, tc.Update(args))
 
 	exports = tc.Exports().(relabel.Exports)
diff --git a/internal/component/discovery/scaleway/scaleway_test.go b/internal/component/discovery/scaleway/scaleway_test.go
index 85683dd50c..c33510bafc 100644
--- a/internal/component/discovery/scaleway/scaleway_test.go
+++ b/internal/component/discovery/scaleway/scaleway_test.go
@@ -3,7 +3,7 @@ package scaleway
 import (
 	"testing"
 
-	river "github.com/grafana/alloy/syntax"
+	"github.com/grafana/alloy/syntax"
 	"github.com/stretchr/testify/require"
 )
 
@@ -40,7 +40,7 @@ func Test(t *testing.T) {
 	for _, tc := range tt {
 		t.Run(tc.name, func(t *testing.T) {
 			var args Arguments
-			err := river.Unmarshal([]byte(tc.config), &args)
+			err := syntax.Unmarshal([]byte(tc.config), &args)
 			require.NoError(t, err)
 
 			// Assert that args.Convert() doesn't panic.
@@ -67,6 +67,6 @@ func TestUnsafeCast(t *testing.T) {
 		secret_key = "00000000-0000-0000-0000-000000000000"
 	`
 	var args Arguments
-	err := river.Unmarshal([]byte(input), &args)
+	err := syntax.Unmarshal([]byte(input), &args)
 	require.ErrorContains(t, err, "invalid project ID format")
 }
diff --git a/internal/component/discovery/serverset/serverset_test.go b/internal/component/discovery/serverset/serverset_test.go
index 9d7722a4f8..c7bd2b26fc 100644
--- a/internal/component/discovery/serverset/serverset_test.go
+++ b/internal/component/discovery/serverset/serverset_test.go
@@ -4,7 +4,7 @@ import (
 	"testing"
 	"time"
 
-	river "github.com/grafana/alloy/syntax"
+	"github.com/grafana/alloy/syntax"
 	"github.com/stretchr/testify/require"
 )
 
@@ -75,7 +75,7 @@ func Test(t *testing.T) {
 	for _, tc := range testCases {
 		t.Run(tc.name, func(t *testing.T) {
 			var args Arguments
-			err := river.Unmarshal([]byte(tc.config), &args)
+			err := syntax.Unmarshal([]byte(tc.config), &args)
 			tc.assertions(t, args, err)
 		})
 	}
diff --git a/internal/component/discovery/triton/triton_test.go b/internal/component/discovery/triton/triton_test.go
index 6acff0c3b8..a4339caf0a 100644
--- a/internal/component/discovery/triton/triton_test.go
+++ b/internal/component/discovery/triton/triton_test.go
@@ -3,7 +3,7 @@ package triton
 import (
 	"testing"
 
-	river "github.com/grafana/alloy/syntax"
+	"github.com/grafana/alloy/syntax"
 	"github.com/stretchr/testify/require"
 )
 
@@ -19,7 +19,7 @@ func TestRiverConfig(t *testing.T) {
 	`
 
 	var args Arguments
-	err := river.Unmarshal([]byte(exampleRiverConfig), &args)
+	err := syntax.Unmarshal([]byte(exampleRiverConfig), &args)
 	require.NoError(t, err)
 }
 
@@ -37,6 +37,6 @@ func TestBadRiverConfig(t *testing.T) {
 
 	// Make sure the TLSConfig Validate function is being utilized correctly
 	var args Arguments
-	err := river.Unmarshal([]byte(exampleRiverConfig), &args)
+	err := syntax.Unmarshal([]byte(exampleRiverConfig), &args)
 	require.ErrorContains(t, err, "at most one of ca_pem and ca_file must be configured")
 }
diff --git a/internal/component/discovery/uyuni/uyuni_test.go b/internal/component/discovery/uyuni/uyuni_test.go
index 45d3e93772..e5b1536430 100644
--- a/internal/component/discovery/uyuni/uyuni_test.go
+++ b/internal/component/discovery/uyuni/uyuni_test.go
@@ -5,7 +5,7 @@ import (
 	"time"
 
 	"github.com/grafana/agent/internal/component/common/config"
-	river "github.com/grafana/alloy/syntax"
+	"github.com/grafana/alloy/syntax"
 	promcfg "github.com/prometheus/common/config"
 	"github.com/prometheus/common/model"
 	"github.com/stretchr/testify/require"
@@ -24,7 +24,7 @@ func TestUnmarshal(t *testing.T) {
 	}
 	`
 	var args Arguments
-	err := river.Unmarshal([]byte(cfg), &args)
+	err := syntax.Unmarshal([]byte(cfg), &args)
 	require.NoError(t, err)
 }
diff --git a/internal/component/faro/receiver/arguments.go b/internal/component/faro/receiver/arguments.go
index 2bc233b363..d9b006d26f 100644
--- a/internal/component/faro/receiver/arguments.go
+++ b/internal/component/faro/receiver/arguments.go
@@ -6,7 +6,7 @@ import (
 	"github.com/alecthomas/units"
 	"github.com/grafana/agent/internal/component/common/loki"
 	"github.com/grafana/agent/internal/component/otelcol"
-	river "github.com/grafana/alloy/syntax"
+	"github.com/grafana/alloy/syntax"
 	"github.com/grafana/alloy/syntax/alloytypes"
 )
 
@@ -19,7 +19,7 @@ type Arguments struct {
 	Output OutputArguments `river:"output,block"`
 }
 
-var _ river.Defaulter = (*Arguments)(nil)
+var _ syntax.Defaulter = (*Arguments)(nil)
 
 // SetToDefault applies default settings.
 func (args *Arguments) SetToDefault() {
diff --git a/internal/component/loki/process/process_test.go b/internal/component/loki/process/process_test.go
index bdefb8e7fe..5178cc92c2 100644
--- a/internal/component/loki/process/process_test.go
+++ b/internal/component/loki/process/process_test.go
@@ -15,7 +15,7 @@ import (
 	lsf "github.com/grafana/agent/internal/component/loki/source/file"
 	"github.com/grafana/agent/internal/flow/componenttest"
 	"github.com/grafana/agent/internal/util"
-	river "github.com/grafana/alloy/syntax"
+	"github.com/grafana/alloy/syntax"
 	"github.com/grafana/loki/pkg/logproto"
 	"github.com/prometheus/client_golang/prometheus"
 	"github.com/prometheus/common/model"
@@ -67,7 +67,7 @@ func TestJSONLabelsStage(t *testing.T) {
 		Stages []stages.StageConfig `river:"stage,enum"`
 	}
 	var stagesCfg cfg
-	err := river.Unmarshal([]byte(stg), &stagesCfg)
+	err := syntax.Unmarshal([]byte(stg), &stagesCfg)
 	require.NoError(t, err)
 
 	ch1, ch2 := loki.NewLogsReceiver(), loki.NewLogsReceiver()
@@ -154,7 +154,7 @@ stage.label_keep {
 		Stages []stages.StageConfig `river:"stage,enum"`
 	}
 	var stagesCfg cfg
-	err := river.Unmarshal([]byte(stg), &stagesCfg)
+	err := syntax.Unmarshal([]byte(stg), &stagesCfg)
 	require.NoError(t, err)
 
 	ch1, ch2 := loki.NewLogsReceiver(), loki.NewLogsReceiver()
@@ -249,7 +249,7 @@ stage.labels {
 		Stages []stages.StageConfig `river:"stage,enum"`
 	}
 	var stagesCfg cfg
-	err := river.Unmarshal([]byte(stg), &stagesCfg)
+	err := syntax.Unmarshal([]byte(stg), &stagesCfg)
 	require.NoError(t, err)
 
 	ch1, ch2 := loki.NewLogsReceiver(), loki.NewLogsReceiver()
@@ -328,8 +328,8 @@ stage.static_labels {
 	ch1, ch2 := loki.NewLogsReceiver(), loki.NewLogsReceiver()
 
 	var args1, args2 Arguments
-	require.NoError(t, river.Unmarshal([]byte(stg1), &args1))
-	require.NoError(t, river.Unmarshal([]byte(stg2), &args2))
+	require.NoError(t, syntax.Unmarshal([]byte(stg1), &args1))
+	require.NoError(t, syntax.Unmarshal([]byte(stg2), &args2))
 
 	args1.ForwardTo = []loki.LogsReceiver{ch1}
 	args2.ForwardTo = []loki.LogsReceiver{ch2}
@@ -415,7 +415,7 @@ func TestDeadlockWithFrequentUpdates(t *testing.T) {
 		Stages []stages.StageConfig `river:"stage,enum"`
 	}
 	var stagesCfg cfg
-	err := river.Unmarshal([]byte(stg), &stagesCfg)
+	err := syntax.Unmarshal([]byte(stg), &stagesCfg)
 	require.NoError(t, err)
 
 	ch1, ch2 := loki.NewLogsReceiver(), loki.NewLogsReceiver()
diff --git a/internal/component/loki/process/stages/eventlogmessage_test.go b/internal/component/loki/process/stages/eventlogmessage_test.go
index 4ba93e6f01..2ec374d958 100644
--- a/internal/component/loki/process/stages/eventlogmessage_test.go
+++ b/internal/component/loki/process/stages/eventlogmessage_test.go
@@ -8,7 +8,7 @@ import (
 	"testing"
 	"time"
 
-	river "github.com/grafana/alloy/syntax"
+	"github.com/grafana/alloy/syntax"
 	util_log "github.com/grafana/loki/pkg/util/log"
 	"github.com/prometheus/client_golang/prometheus"
 	"github.com/stretchr/testify/assert"
@@ -146,7 +146,7 @@ func TestEventLogMessageConfig_validate(t *testing.T) {
 		tt := tt
 		t.Run(tName, func(t *testing.T) {
 			var config Configs
-			err := river.Unmarshal([]byte(tt.config), &config)
+			err := syntax.Unmarshal([]byte(tt.config), &config)
 			if err == nil {
 				require.Len(t, config.Stages, 1)
 				err = config.Stages[0].EventLogMessageConfig.Validate()
diff --git a/internal/component/loki/process/stages/extensions.go b/internal/component/loki/process/stages/extensions.go
index cc802be61d..61071b8825 100644
--- a/internal/component/loki/process/stages/extensions.go
+++ b/internal/component/loki/process/stages/extensions.go
@@ -6,7 +6,7 @@ import (
 
 	"github.com/go-kit/log"
 	"github.com/grafana/agent/internal/flow/logging/level"
-	river "github.com/grafana/alloy/syntax"
+	"github.com/grafana/alloy/syntax"
 	"github.com/prometheus/client_golang/prometheus"
 	"github.com/prometheus/common/model"
 )
@@ -28,8 +28,8 @@ type CRIConfig struct {
 }
 
 var (
-	_ river.Defaulter = (*CRIConfig)(nil)
-	_ river.Validator = (*CRIConfig)(nil)
+	_ syntax.Defaulter = (*CRIConfig)(nil)
+	_ syntax.Validator = (*CRIConfig)(nil)
 )
 
 // DefaultCRIConfig contains the default CRIConfig values.
@@ -39,12 +39,12 @@ var DefaultCRIConfig = CRIConfig{
 	MaxPartialLineSizeTruncate: false,
 }
 
-// SetToDefault implements river.Defaulter.
+// SetToDefault implements syntax.Defaulter.
 func (args *CRIConfig) SetToDefault() {
 	*args = DefaultCRIConfig
 }
 
-// Validate implements river.Validator.
+// Validate implements syntax.Validator.
 func (args *CRIConfig) Validate() error {
 	if args.MaxPartialLines <= 0 {
 		return fmt.Errorf("max_partial_lines must be greater than 0")
diff --git a/internal/component/loki/process/stages/json_test.go b/internal/component/loki/process/stages/json_test.go
index c60db76157..558cea0b42 100644
--- a/internal/component/loki/process/stages/json_test.go
+++ b/internal/component/loki/process/stages/json_test.go
@@ -11,7 +11,7 @@ import (
 	"github.com/stretchr/testify/assert"
 
 	"github.com/grafana/agent/internal/util"
-	river "github.com/grafana/alloy/syntax"
+	"github.com/grafana/alloy/syntax"
 )
 
 var testJSONRiverSingleStageWithoutSource = `
@@ -100,7 +100,7 @@ func TestRiver(t *testing.T) {
 	// testing that we can use river data into the config structure.
 	var got JSONConfig
-	err := river.Unmarshal([]byte(jsonCfg), &got)
+	err := syntax.Unmarshal([]byte(jsonCfg), &got)
 	assert.NoError(t, err, "error while un-marshalling config: %s", err)
 
 	want := JSONConfig{
diff --git a/internal/component/loki/process/stages/pipeline_test.go b/internal/component/loki/process/stages/pipeline_test.go
index 025e7826d0..d422cf2cc3 100644
--- a/internal/component/loki/process/stages/pipeline_test.go
+++ b/internal/component/loki/process/stages/pipeline_test.go
@@ -11,7 +11,7 @@ import (
 	"github.com/go-kit/log"
 	"github.com/grafana/agent/internal/component/common/loki"
 	"github.com/grafana/agent/internal/flow/logging/level"
-	river "github.com/grafana/alloy/syntax"
+	"github.com/grafana/alloy/syntax"
 	"github.com/grafana/loki/pkg/logproto"
 	util_log "github.com/grafana/loki/pkg/util/log"
 	"github.com/prometheus/client_golang/prometheus"
@@ -45,7 +45,7 @@ func processEntries(s Stage, entries ...Entry) []Entry {
 
 func loadConfig(yml string) []StageConfig {
 	var config Configs
-	err := river.Unmarshal([]byte(yml), &config)
+	err := syntax.Unmarshal([]byte(yml), &config)
 	if err != nil {
 		panic(err)
 	}
@@ -209,7 +209,7 @@ func TestPipeline_Process(t *testing.T) {
 		t.Run(tName, func(t *testing.T) {
 			var config Configs
-			err := river.Unmarshal([]byte(tt.config), &config)
+			err := syntax.Unmarshal([]byte(tt.config), &config)
 			require.NoError(t, err)
 
 			p, err := NewPipeline(util_log.Logger, loadConfig(tt.config), nil, prometheus.DefaultRegisterer)
diff --git a/internal/component/loki/relabel/relabel_test.go b/internal/component/loki/relabel/relabel_test.go
index 73cc06f640..54a3c2f079 100644
--- a/internal/component/loki/relabel/relabel_test.go
+++ b/internal/component/loki/relabel/relabel_test.go
@@ -14,7 +14,7 @@ import (
 	lsf "github.com/grafana/agent/internal/component/loki/source/file"
 	"github.com/grafana/agent/internal/flow/componenttest"
 	"github.com/grafana/agent/internal/util"
-	river "github.com/grafana/alloy/syntax"
+	"github.com/grafana/alloy/syntax"
 	"github.com/grafana/loki/pkg/logproto"
 	"github.com/prometheus/client_golang/prometheus"
 	"github.com/prometheus/common/model"
@@ -48,7 +48,7 @@ func TestRelabeling(t *testing.T) {
 		Rcs []*flow_relabel.Config `river:"rule,block,optional"`
 	}
 	var relabelConfigs cfg
-	err := river.Unmarshal([]byte(rc), &relabelConfigs)
+	err := syntax.Unmarshal([]byte(rc), &relabelConfigs)
 	require.NoError(t, err)
 
 	ch1, ch2 := loki.NewLogsReceiver(), loki.NewLogsReceiver()
@@ -111,7 +111,7 @@ func BenchmarkRelabelComponent(b *testing.B) {
 		Rcs []*flow_relabel.Config `river:"rule,block,optional"`
 	}
 	var relabelConfigs cfg
-	_ = river.Unmarshal([]byte(rc), &relabelConfigs)
+	_ = syntax.Unmarshal([]byte(rc), &relabelConfigs)
 	ch1 := loki.NewLogsReceiver()
 
 	// Create and run the component, so that it relabels and forwards logs.
@@ -157,7 +157,7 @@ func TestCache(t *testing.T) {
 		Rcs []*flow_relabel.Config `river:"rule,block,optional"`
 	}
 	var relabelConfigs cfg
-	err := river.Unmarshal([]byte(rc), &relabelConfigs)
+	err := syntax.Unmarshal([]byte(rc), &relabelConfigs)
 	require.NoError(t, err)
 
 	ch1 := loki.NewLogsReceiver()
@@ -309,8 +309,8 @@ rule {
 	ch1, ch2 := loki.NewLogsReceiver(), loki.NewLogsReceiver()
 
 	var args1, args2 Arguments
-	require.NoError(t, river.Unmarshal([]byte(stg1), &args1))
-	require.NoError(t, river.Unmarshal([]byte(stg2), &args2))
+	require.NoError(t, syntax.Unmarshal([]byte(stg1), &args1))
+	require.NoError(t, syntax.Unmarshal([]byte(stg2), &args2))
 
 	args1.ForwardTo = []loki.LogsReceiver{ch1}
 	args2.ForwardTo = []loki.LogsReceiver{ch2}
@@ -387,7 +387,7 @@ func TestRuleGetter(t *testing.T) {
 	}
 	forward_to = []`
 
 	var args Arguments
-	require.NoError(t, river.Unmarshal([]byte(originalCfg), &args))
+	require.NoError(t, syntax.Unmarshal([]byte(originalCfg), &args))
 
 	// Set up and start the component.
 	tc, err := componenttest.NewControllerFromID(util.TestLogger(t), "loki.relabel")
@@ -409,7 +409,7 @@ func TestRuleGetter(t *testing.T) {
 		regex = "up"
 	}
 	forward_to = []`
-	require.NoError(t, river.Unmarshal([]byte(updatedCfg), &args))
+	require.NoError(t, syntax.Unmarshal([]byte(updatedCfg), &args))
 	require.NoError(t, tc.Update(args))
 
 	exports = tc.Exports().(Exports)
diff --git a/internal/component/loki/rules/kubernetes/rules_test.go b/internal/component/loki/rules/kubernetes/rules_test.go
index 5339a22253..196759512a 100644
--- a/internal/component/loki/rules/kubernetes/rules_test.go
+++ b/internal/component/loki/rules/kubernetes/rules_test.go
@@ -3,7 +3,7 @@ package rules
 import (
 	"testing"
 
-	river "github.com/grafana/alloy/syntax"
+	"github.com/grafana/alloy/syntax"
 	"github.com/stretchr/testify/require"
 )
 
@@ -17,7 +17,7 @@ func TestRiverConfig(t *testing.T) {
 	`
 
 	var args Arguments
-	err := river.Unmarshal([]byte(exampleRiverConfig), &args)
+	err := syntax.Unmarshal([]byte(exampleRiverConfig), &args)
 	require.NoError(t, err)
 }
 
@@ -30,6 +30,6 @@ func TestBadRiverConfig(t *testing.T) {
 
 	// Make sure the squashed HTTPClientConfig Validate function is being utilized correctly
 	var args Arguments
-	err := river.Unmarshal([]byte(exampleRiverConfig), &args)
+	err := syntax.Unmarshal([]byte(exampleRiverConfig), &args)
 	require.ErrorContains(t, err, "at most one of basic_auth, authorization, oauth2, bearer_token & bearer_token_file must be configured")
 }
diff --git a/internal/component/loki/source/api/internal/lokipush/push_api_server_test.go b/internal/component/loki/source/api/internal/lokipush/push_api_server_test.go
index 0fc61f42f4..5c11ffffee 100644
--- a/internal/component/loki/source/api/internal/lokipush/push_api_server_test.go
+++ b/internal/component/loki/source/api/internal/lokipush/push_api_server_test.go
@@ -21,7 +21,7 @@ import (
 	"github.com/grafana/agent/internal/component/common/loki/client/fake"
 	fnet "github.com/grafana/agent/internal/component/common/net"
 	frelabel "github.com/grafana/agent/internal/component/common/relabel"
-	river "github.com/grafana/alloy/syntax"
+	"github.com/grafana/alloy/syntax"
 	"github.com/grafana/dskit/flagext"
 	"github.com/grafana/loki/pkg/logproto"
 	"github.com/grafana/loki/pkg/push"
@@ -49,7 +49,7 @@ func TestLokiPushTarget(t *testing.T) {
 		action = "labeldrop"
 		regex = "dropme"
 	`
-	err := river.Unmarshal([]byte(relabelStr), &relabelRule)
+	err := syntax.Unmarshal([]byte(relabelStr), &relabelRule)
 	require.NoError(t, err)
 	pt.SetRelabelRules(frelabel.Rules{&relabelRule})
 
@@ -137,7 +137,7 @@ func TestLokiPushTargetForRedirect(t *testing.T) {
 		action = "labeldrop"
 		regex = "dropme"
 	`
-	err := river.Unmarshal([]byte(relabelStr), &relabelRule)
+	err := syntax.Unmarshal([]byte(relabelStr), &relabelRule)
 	require.NoError(t, err)
 	pt.SetRelabelRules(frelabel.Rules{&relabelRule})
 
diff --git a/internal/component/loki/source/azure_event_hubs/azure_event_hubs_test.go b/internal/component/loki/source/azure_event_hubs/azure_event_hubs_test.go
index 69393115a7..efb6cd94b6 100644
--- a/internal/component/loki/source/azure_event_hubs/azure_event_hubs_test.go
+++ b/internal/component/loki/source/azure_event_hubs/azure_event_hubs_test.go
@@ -3,7 +3,7 @@ package azure_event_hubs
 import (
 	"testing"
 
-	river "github.com/grafana/alloy/syntax"
+	"github.com/grafana/alloy/syntax"
 	"github.com/stretchr/testify/require"
 )
 
@@ -20,7 +20,7 @@ func TestRiverConfigOAuth(t *testing.T) {
 	`
 
 	var args Arguments
-	err := river.Unmarshal([]byte(exampleRiverConfig), &args)
+	err := syntax.Unmarshal([]byte(exampleRiverConfig), &args)
 	require.NoError(t, err)
 }
 
@@ -38,7 +38,7 @@ func TestRiverConfigConnectionString(t *testing.T) {
 	`
 
 	var args Arguments
-	err := river.Unmarshal([]byte(exampleRiverConfig), &args)
+	err := syntax.Unmarshal([]byte(exampleRiverConfig), &args)
 	require.NoError(t, err)
 }
 
@@ -57,6 +57,6 @@ func TestRiverConfigValidateAssignor(t *testing.T) {
 	`
 
 	var args Arguments
-	err := river.Unmarshal([]byte(exampleRiverConfig), &args)
+	err := syntax.Unmarshal([]byte(exampleRiverConfig), &args)
 	require.EqualError(t, err, "assignor value invalid-value is invalid, must be one of: [sticky roundrobin range]")
 }
diff --git a/internal/component/loki/source/docker/docker_test.go b/internal/component/loki/source/docker/docker_test.go
index 7457884db3..f83db37cfa 100644
--- a/internal/component/loki/source/docker/docker_test.go
+++ b/internal/component/loki/source/docker/docker_test.go
@@ -10,7 +10,7 @@ import (
 	"github.com/grafana/agent/internal/component"
 	"github.com/grafana/agent/internal/flow/componenttest"
 	"github.com/grafana/agent/internal/util"
-	river "github.com/grafana/alloy/syntax"
+	"github.com/grafana/alloy/syntax"
 	"github.com/prometheus/client_golang/prometheus"
 	"github.com/stretchr/testify/require"
 )
@@ -24,7 +24,7 @@ func Test(t *testing.T) {
 	`
 
 	var args Arguments
-	err := river.Unmarshal([]byte(cfg), &args)
+	err := syntax.Unmarshal([]byte(cfg), &args)
 	require.NoError(t, err)
 
 	ctrl, err := componenttest.NewControllerFromID(util.TestLogger(t), "loki.source.docker")
@@ -50,7 +50,7 @@ func TestDuplicateTargets(t *testing.T) {
 	`
 
 	var args Arguments
-	err := river.Unmarshal([]byte(cfg), &args)
+	err := syntax.Unmarshal([]byte(cfg), &args)
 	require.NoError(t, err)
 
 	ctrl, err := componenttest.NewControllerFromID(util.TestLogger(t), "loki.source.docker")
diff --git a/internal/component/loki/source/kafka/kafka_test.go b/internal/component/loki/source/kafka/kafka_test.go
index db7386ebf8..0efd59bd51 100644
--- a/internal/component/loki/source/kafka/kafka_test.go
+++ b/internal/component/loki/source/kafka/kafka_test.go
@@ -3,7 +3,7 @@ package kafka
 import (
 	"testing"
 
-	river "github.com/grafana/alloy/syntax"
+	"github.com/grafana/alloy/syntax"
 	"github.com/stretchr/testify/require"
 )
 
@@ -17,7 +17,7 @@ func TestRiverConfig(t *testing.T) {
 	`
 
 	var args Arguments
-	err := river.Unmarshal([]byte(exampleRiverConfig), &args)
+	err := syntax.Unmarshal([]byte(exampleRiverConfig), &args)
 	require.NoError(t, err)
 }
 
@@ -37,7 +37,7 @@ func TestTLSRiverConfig(t *testing.T) {
 	`
 
 	var args Arguments
-	err := river.Unmarshal([]byte(exampleRiverConfig), &args)
+	err := syntax.Unmarshal([]byte(exampleRiverConfig), &args)
 	require.NoError(t, err)
 }
 
@@ -57,7 +57,7 @@ func TestSASLRiverConfig(t *testing.T) {
 	`
 
 	var args Arguments
-	err := river.Unmarshal([]byte(exampleRiverConfig), &args)
+	err := syntax.Unmarshal([]byte(exampleRiverConfig), &args)
 	require.NoError(t, err)
 }
 
@@ -81,6 +81,6 @@ func TestSASLOAuthRiverConfig(t *testing.T) {
 	`
 
 	var args Arguments
-	err := river.Unmarshal([]byte(exampleRiverConfig), &args)
+	err := syntax.Unmarshal([]byte(exampleRiverConfig), &args)
 	require.NoError(t, err)
 }
diff --git a/internal/component/loki/source/kubernetes/kubernetes_test.go b/internal/component/loki/source/kubernetes/kubernetes_test.go
index 6896a26b3d..d709823cd0 100644
--- a/internal/component/loki/source/kubernetes/kubernetes_test.go
+++ b/internal/component/loki/source/kubernetes/kubernetes_test.go
@@ -3,7 +3,7 @@ package kubernetes
 import (
 	"testing"
 
-	river "github.com/grafana/alloy/syntax"
+	"github.com/grafana/alloy/syntax"
 	"github.com/stretchr/testify/require"
 )
 
@@ -20,7 +20,7 @@ func TestRiverConfig(t *testing.T) {
 	`
 
 	var args Arguments
-	err := river.Unmarshal([]byte(exampleRiverConfig), &args)
+	err := syntax.Unmarshal([]byte(exampleRiverConfig), &args)
 	require.NoError(t, err)
 }
 
@@ -40,6 +40,6 @@ func TestBadRiverConfig(t *testing.T) {
 
 	// Make sure the squashed HTTPClientConfig Validate function is being utilized correctly
 	var args Arguments
-	err := river.Unmarshal([]byte(exampleRiverConfig), &args)
+	err := syntax.Unmarshal([]byte(exampleRiverConfig), &args)
 	require.ErrorContains(t, err, "at most one of basic_auth, authorization, oauth2, bearer_token & bearer_token_file must be configured")
 }
diff --git a/internal/component/loki/source/podlogs/podlogs_test.go b/internal/component/loki/source/podlogs/podlogs_test.go
index 0b616fb429..b76b13b211 100644
--- a/internal/component/loki/source/podlogs/podlogs_test.go
+++ b/internal/component/loki/source/podlogs/podlogs_test.go
@@ -3,7 +3,7 @@ package podlogs
 import (
 	"testing"
 
-	river "github.com/grafana/alloy/syntax"
+	"github.com/grafana/alloy/syntax"
 	"github.com/stretchr/testify/require"
 )
 
@@ -16,7 +16,7 @@ func TestRiverConfig(t *testing.T) {
 	`
 
 	var args Arguments
-	err := river.Unmarshal([]byte(exampleRiverConfig), &args)
+	err := syntax.Unmarshal([]byte(exampleRiverConfig), &args)
 	require.NoError(t, err)
 }
 
@@ -32,6 +32,6 @@ func TestBadRiverConfig(t *testing.T) {
 
 	// Make sure the squashed HTTPClientConfig Validate function is being utilized correctly
 	var args Arguments
-	err := river.Unmarshal([]byte(exampleRiverConfig), &args)
+	err := syntax.Unmarshal([]byte(exampleRiverConfig), &args)
 	require.ErrorContains(t, err, "at most one of basic_auth, authorization, oauth2, bearer_token & bearer_token_file must be configured")
 }
diff --git a/internal/component/loki/write/write_test.go b/internal/component/loki/write/write_test.go
index f71d3e9df4..8a4a366e71 100644
--- a/internal/component/loki/write/write_test.go
+++ b/internal/component/loki/write/write_test.go
@@ -16,7 +16,7 @@ import (
 	lsf "github.com/grafana/agent/internal/component/loki/source/file"
 	"github.com/grafana/agent/internal/flow/componenttest"
 	"github.com/grafana/agent/internal/util"
-	river "github.com/grafana/alloy/syntax"
+	"github.com/grafana/alloy/syntax"
 	"github.com/prometheus/common/model"
 	"github.com/stretchr/testify/require"
 	"go.uber.org/atomic"
@@ -35,7 +35,7 @@ func TestRiverConfig(t *testing.T) {
 	`
 
 	var args Arguments
-	err := river.Unmarshal([]byte(exampleRiverConfig), &args)
+	err := syntax.Unmarshal([]byte(exampleRiverConfig), &args)
 	require.NoError(t, err)
 }
 
@@ -52,7 +52,7 @@ func TestBadRiverConfig(t *testing.T) {
 
 	// Make sure the squashed HTTPClientConfig Validate function is being utilized correctly
 	var args Arguments
-	err := river.Unmarshal([]byte(exampleRiverConfig), &args)
+	err := syntax.Unmarshal([]byte(exampleRiverConfig), &args)
 	require.ErrorContains(t, err, "at most one of basic_auth, authorization, oauth2, bearer_token & bearer_token_file must be configured")
 }
 
@@ -112,7 +112,7 @@ func TestUnmarshallWalAttrributes(t *testing.T) {
 	} {
 		t.Run(name, func(t *testing.T) {
 			cfg := WalArguments{}
-			err := river.Unmarshal([]byte(tc.raw), &cfg)
+			err := syntax.Unmarshal([]byte(tc.raw), &cfg)
 			if tc.errorExpected {
 				require.Error(t, err)
 				return
@@ -161,7 +161,7 @@ func testSingleEndpoint(t *testing.T, alterConfig func(arguments *Arguments)) {
 	}
 	`, srv.URL)
 
 	var args Arguments
-	require.NoError(t, river.Unmarshal([]byte(cfg), &args))
+	require.NoError(t, syntax.Unmarshal([]byte(cfg), &args))
 
 	alterConfig(&args)
@@ -242,8 +242,8 @@ func testMultipleEndpoint(t *testing.T, alterArgs func(arguments *Arguments)) {
 		external_labels = { "lbl" = "bar" }
 	`, srv2.URL)
 
 	var args1, args2 Arguments
-	require.NoError(t, river.Unmarshal([]byte(cfg1), &args1))
-	require.NoError(t, river.Unmarshal([]byte(cfg2), &args2))
+	require.NoError(t, syntax.Unmarshal([]byte(cfg1), &args1))
+	require.NoError(t, syntax.Unmarshal([]byte(cfg2), &args2))
 
 	alterArgs(&args1)
 	alterArgs(&args2)
@@ -371,7 +371,7 @@ func benchSingleEndpoint(b *testing.B, tc testCase, alterConfig func(arguments *
 	}
 	`, srv.URL)
 
 	var args Arguments
-	require.NoError(b, river.Unmarshal([]byte(cfg), &args))
+	require.NoError(b, syntax.Unmarshal([]byte(cfg), &args))
 
 	alterConfig(&args)
 
diff --git a/internal/component/mimir/rules/kubernetes/rules_test.go b/internal/component/mimir/rules/kubernetes/rules_test.go
index 5339a22253..196759512a 100644
--- a/internal/component/mimir/rules/kubernetes/rules_test.go
+++ b/internal/component/mimir/rules/kubernetes/rules_test.go
@@ -3,7 +3,7 @@ package rules
 import (
 	"testing"
 
-	river "github.com/grafana/alloy/syntax"
+	"github.com/grafana/alloy/syntax"
 	"github.com/stretchr/testify/require"
 )
 
@@ -17,7 +17,7 @@ func TestRiverConfig(t *testing.T) {
 	`
 
 	var args Arguments
-	err := river.Unmarshal([]byte(exampleRiverConfig), &args)
+	err := syntax.Unmarshal([]byte(exampleRiverConfig), &args)
 	require.NoError(t, err)
 }
 
@@ -30,6 +30,6 @@ func TestBadRiverConfig(t *testing.T) {
 
 	// Make sure the squashed HTTPClientConfig Validate function is being utilized correctly
 	var args Arguments
-	err := river.Unmarshal([]byte(exampleRiverConfig), &args)
+	err := syntax.Unmarshal([]byte(exampleRiverConfig), &args)
 	require.ErrorContains(t, err, "at most one of basic_auth, authorization, oauth2, bearer_token & bearer_token_file must be configured")
 }
diff --git a/internal/component/otelcol/auth/auth.go b/internal/component/otelcol/auth/auth.go
index 2ae132438b..e608546d11 100644
--- a/internal/component/otelcol/auth/auth.go
+++ b/internal/component/otelcol/auth/auth.go
@@ -14,7 +14,7 @@ import (
 	"github.com/grafana/agent/internal/component/otelcol/internal/lazycollector"
 	"github.com/grafana/agent/internal/component/otelcol/internal/scheduler"
 	"github.com/grafana/agent/internal/util/zapadapter"
-	river "github.com/grafana/alloy/syntax"
+	"github.com/grafana/alloy/syntax"
 	"github.com/prometheus/client_golang/prometheus"
 	otelcomponent "go.opentelemetry.io/collector/component"
 	otelextension "go.opentelemetry.io/collector/extension"
@@ -54,7 +54,7 @@ type Handler struct {
 	Extension otelextension.Extension
 }
 
-var _ river.Capsule = Handler{}
+var _ syntax.Capsule = Handler{}
 
 // RiverCapsule marks Handler as a capsule type.
 func (Handler) RiverCapsule() {}
diff --git a/internal/component/otelcol/auth/basic/basic_test.go b/internal/component/otelcol/auth/basic/basic_test.go
index 5a60be95a6..aaad0abaa6 100644
--- a/internal/component/otelcol/auth/basic/basic_test.go
+++ b/internal/component/otelcol/auth/basic/basic_test.go
@@ -11,7 +11,7 @@ import (
 	"github.com/grafana/agent/internal/component/otelcol/auth/basic"
 	"github.com/grafana/agent/internal/flow/componenttest"
 	"github.com/grafana/agent/internal/util"
-	river "github.com/grafana/alloy/syntax"
+	"github.com/grafana/alloy/syntax"
 	"github.com/stretchr/testify/assert"
 	"github.com/stretchr/testify/require"
 	extauth "go.opentelemetry.io/collector/extension/auth"
@@ -47,7 +47,7 @@ func Test(t *testing.T) {
 		password = "bar"
 	`
 	var args basic.Arguments
-	require.NoError(t, river.Unmarshal([]byte(cfg), &args))
+	require.NoError(t, syntax.Unmarshal([]byte(cfg), &args))
 
 	go func() {
 		err := ctrl.Run(ctx, args)
diff --git a/internal/component/otelcol/auth/bearer/bearer_test.go b/internal/component/otelcol/auth/bearer/bearer_test.go
index e56b6c37de..7168980b63 100644
--- a/internal/component/otelcol/auth/bearer/bearer_test.go
+++ b/internal/component/otelcol/auth/bearer/bearer_test.go
@@ -11,7 +11,7 @@ import (
 	"github.com/grafana/agent/internal/component/otelcol/auth/bearer"
 	"github.com/grafana/agent/internal/flow/componenttest"
 	"github.com/grafana/agent/internal/util"
-	river "github.com/grafana/alloy/syntax"
+	"github.com/grafana/alloy/syntax"
 	"github.com/stretchr/testify/assert"
 	"github.com/stretchr/testify/require"
 	extauth "go.opentelemetry.io/collector/extension/auth"
@@ -82,7 +82,7 @@ func Test(t *testing.T) {
 			require.NoError(t, err)
 
 			var args bearer.Arguments
-			require.NoError(t, river.Unmarshal([]byte(tt.riverConfig), &args))
+			require.NoError(t, syntax.Unmarshal([]byte(tt.riverConfig), &args))
 
 			go func() {
 				err := ctrl.Run(ctx, args)
diff --git a/internal/component/otelcol/auth/headers/headers.go b/internal/component/otelcol/auth/headers/headers.go
index b216949721..4eb48e5aa1 100644
--- a/internal/component/otelcol/auth/headers/headers.go
+++ b/internal/component/otelcol/auth/headers/headers.go
@@ -9,7 +9,7 @@ import (
 	"github.com/grafana/agent/internal/component"
 	"github.com/grafana/agent/internal/component/otelcol/auth"
 	"github.com/grafana/agent/internal/featuregate"
-	river "github.com/grafana/alloy/syntax"
+	"github.com/grafana/alloy/syntax"
 	"github.com/grafana/alloy/syntax/alloytypes"
 	"github.com/open-telemetry/opentelemetry-collector-contrib/extension/headerssetterextension"
 	otelcomponent "go.opentelemetry.io/collector/component"
@@ -85,11 +85,11 @@ const (
 )
 
 var (
-	_ river.Validator = (*Action)(nil)
+	_ syntax.Validator = (*Action)(nil)
 	_ encoding.TextUnmarshaler = (*Action)(nil)
 )
 
-// Validate implements river.Validator.
+// Validate implements syntax.Validator.
 func (a *Action) Validate() error {
 	switch *a {
 	case ActionInsert, ActionUpdate, ActionUpsert, ActionDelete:
@@ -138,18 +138,18 @@ type Header struct {
 	Action Action `river:"action,attr,optional"`
 }
 
-var _ river.Defaulter = &Header{}
+var _ syntax.Defaulter = &Header{}
 
 var DefaultHeader = Header{
 	Action: ActionUpsert,
 }
 
-// SetToDefault implements river.Defaulter.
+// SetToDefault implements syntax.Defaulter.
 func (h *Header) SetToDefault() {
 	*h = DefaultHeader
 }
 
-// Validate implements river.Validator.
+// Validate implements syntax.Validator.
 func (h *Header) Validate() error {
 	err := h.Action.Validate()
 	if err != nil {
diff --git a/internal/component/otelcol/auth/headers/headers_test.go b/internal/component/otelcol/auth/headers/headers_test.go
index 2d39864354..0c9a586491 100644
--- a/internal/component/otelcol/auth/headers/headers_test.go
+++ b/internal/component/otelcol/auth/headers/headers_test.go
@@ -11,7 +11,7 @@ import (
 	"github.com/grafana/agent/internal/component/otelcol/auth/headers"
 	"github.com/grafana/agent/internal/flow/componenttest"
 	"github.com/grafana/agent/internal/util"
-	river "github.com/grafana/alloy/syntax"
+	"github.com/grafana/alloy/syntax"
 	"github.com/open-telemetry/opentelemetry-collector-contrib/extension/headerssetterextension"
 	"github.com/stretchr/testify/assert"
 	"github.com/stretchr/testify/require"
@@ -48,7 +48,7 @@ func Test(t *testing.T) {
 	}
 	`
 	var args headers.Arguments
-	require.NoError(t, river.Unmarshal([]byte(cfg), &args))
+	require.NoError(t, syntax.Unmarshal([]byte(cfg), &args))
 
 	go func() {
 		err := ctrl.Run(ctx, args)
@@ -161,7 +161,7 @@ func TestArguments_UnmarshalRiver(t *testing.T) {
 
 	for _, tc := range tests {
 		var args headers.Arguments
-		err := river.Unmarshal([]byte(tc.cfg), &args)
+		err := syntax.Unmarshal([]byte(tc.cfg), &args)
 
 		if tc.expectUnmarshalError {
 			require.Error(t, err)
diff --git a/internal/component/otelcol/auth/oauth2/oauth2_test.go b/internal/component/otelcol/auth/oauth2/oauth2_test.go
index 3f12c762bd..7de2719d73 100644
--- a/internal/component/otelcol/auth/oauth2/oauth2_test.go
+++ b/internal/component/otelcol/auth/oauth2/oauth2_test.go
@@ -12,7 +12,7 @@ import (
 	"github.com/grafana/agent/internal/component/otelcol/auth/oauth2"
 	"github.com/grafana/agent/internal/flow/componenttest"
 	"github.com/grafana/agent/internal/util"
-	river "github.com/grafana/alloy/syntax"
+	"github.com/grafana/alloy/syntax"
 	"github.com/stretchr/testify/require"
 	extauth "go.opentelemetry.io/collector/extension/auth"
 	"gotest.tools/assert"
@@ -99,7 +99,7 @@ func Test(t *testing.T) {
 			cfg := tt.configBuilder(srvProvidingTokens.URL)
 
 			var args oauth2.Arguments
-			require.NoError(t, river.Unmarshal([]byte(cfg), &args))
+			require.NoError(t, syntax.Unmarshal([]byte(cfg), &args))
 
 			go func() {
 				err := ctrl.Run(ctx, args)
diff --git a/internal/component/otelcol/auth/sigv4/sigv4_test.go b/internal/component/otelcol/auth/sigv4/sigv4_test.go
index 24ed7d6a9c..a7db4e7ffe 100644
--- a/internal/component/otelcol/auth/sigv4/sigv4_test.go
+++ b/internal/component/otelcol/auth/sigv4/sigv4_test.go
@@ -13,7 +13,7 @@ import (
 	"github.com/grafana/agent/internal/component/otelcol/auth/sigv4"
 	"github.com/grafana/agent/internal/flow/componenttest"
 	"github.com/grafana/agent/internal/util"
-	river "github.com/grafana/alloy/syntax"
+	"github.com/grafana/alloy/syntax"
 	"github.com/stretchr/testify/assert"
 	"github.com/stretchr/testify/require"
 	extauth "go.opentelemetry.io/collector/extension/auth"
@@ -160,7 +160,7 @@ func Test(t *testing.T) {
 			cfg := tt.riverConfig
 			t.Logf("River configuration: %s", cfg)
 			var args sigv4.Arguments
-			require.NoError(t, river.Unmarshal([]byte(cfg), &args))
+			require.NoError(t, syntax.Unmarshal([]byte(cfg), &args))
 
 			go func() {
 				err := ctrl.Run(ctx, args)
diff --git a/internal/component/otelcol/config_filter_test.go b/internal/component/otelcol/config_filter_test.go
index 97409cef6d..b55e02227b 100644
--- a/internal/component/otelcol/config_filter_test.go
+++ b/internal/component/otelcol/config_filter_test.go
@@ -7,7 +7,7 @@ import (
 
 	"github.com/grafana/agent/internal/component/otelcol"
 
-	river "github.com/grafana/alloy/syntax"
+	"github.com/grafana/alloy/syntax"
 	"github.com/stretchr/testify/require"
 	"go.opentelemetry.io/collector/pdata/plog"
 )
@@ -259,7 +259,7 @@ func TestUnmarshalSeverityLevel(t *testing.T) {
 		t.Run(tt.name, func(t *testing.T) {
 			t.Parallel()
 			var sl otelcol.LogSeverityNumberMatchProperties
-			err := river.Unmarshal([]byte(tt.cfg), &sl)
+			err := syntax.Unmarshal([]byte(tt.cfg), &sl)
 			if tt.expectErr {
 				require.Error(t, err)
 			} else {
diff --git a/internal/component/otelcol/config_retry.go b/internal/component/otelcol/config_retry.go
index 7c94dba3d5..df2fb07720 100644
--- a/internal/component/otelcol/config_retry.go
+++ b/internal/component/otelcol/config_retry.go
@@ -4,7 +4,7 @@ import (
 	"fmt"
 	"time"
 
-	river "github.com/grafana/alloy/syntax"
+	"github.com/grafana/alloy/syntax"
 	otelexporterhelper "go.opentelemetry.io/collector/exporter/exporterhelper"
 )
 
@@ -20,11 +20,11 @@ type RetryArguments struct {
 }
 
 var (
-	_ river.Defaulter = (*RetryArguments)(nil)
-	_ river.Validator = (*RetryArguments)(nil)
+	_ syntax.Defaulter = (*RetryArguments)(nil)
+	_ syntax.Validator = (*RetryArguments)(nil)
 )
 
-// SetToDefault implements river.Defaulter.
+// SetToDefault implements syntax.Defaulter.
 func (args *RetryArguments) SetToDefault() {
 	*args = RetryArguments{
 		Enabled: true,
diff --git a/internal/component/otelcol/connector/host_info/host_info.go b/internal/component/otelcol/connector/host_info/host_info.go
index 0468830fe7..5a32e297d4 100644
--- a/internal/component/otelcol/connector/host_info/host_info.go
+++ b/internal/component/otelcol/connector/host_info/host_info.go
@@ -9,7 +9,7 @@ import (
 	"github.com/grafana/agent/internal/component/otelcol"
 	"github.com/grafana/agent/internal/component/otelcol/connector"
 	"github.com/grafana/agent/internal/featuregate"
-	river "github.com/grafana/alloy/syntax"
+	"github.com/grafana/alloy/syntax"
 	otelcomponent "go.opentelemetry.io/collector/component"
 	otelextension "go.opentelemetry.io/collector/extension"
 )
@@ -38,12 +38,12 @@ type Arguments struct {
 }
 
 var (
-	_ river.Validator = (*Arguments)(nil)
-	_ river.Defaulter = (*Arguments)(nil)
+	_ syntax.Validator = (*Arguments)(nil)
+	_ syntax.Defaulter = (*Arguments)(nil)
 	_ connector.Arguments = (*Arguments)(nil)
 )
 
-// SetToDefault implements river.Defaulter.
+// SetToDefault implements syntax.Defaulter.
 func (args *Arguments) SetToDefault() {
 	*args = Arguments{
 		HostIdentifiers: []string{"host.id"},
@@ -51,7 +51,7 @@ func (args *Arguments) SetToDefault() {
 	}
 }
 
-// Validate implements river.Validator.
+// Validate implements syntax.Validator.
 func (args *Arguments) Validate() error {
 	if len(args.HostIdentifiers) == 0 {
 		return fmt.Errorf("host_identifiers must not be empty")
diff --git a/internal/component/otelcol/connector/host_info/host_info_test.go b/internal/component/otelcol/connector/host_info/host_info_test.go
index b7db269e6d..6e3ad58011 100644
--- a/internal/component/otelcol/connector/host_info/host_info_test.go
+++ b/internal/component/otelcol/connector/host_info/host_info_test.go
@@ -4,7 +4,7 @@ import (
 	"testing"
 	"time"
 
-	river "github.com/grafana/alloy/syntax"
+	"github.com/grafana/alloy/syntax"
 	"github.com/stretchr/testify/require"
 )
 
@@ -58,7 +58,7 @@ func TestArguments_UnmarshalRiver(t *testing.T) {
 	for _, tc := range tests {
 		t.Run(tc.testName, func(t *testing.T) {
 			var args Arguments
-			err := river.Unmarshal([]byte(tc.cfg), &args)
+			err := syntax.Unmarshal([]byte(tc.cfg), &args)
 			if tc.errorMsg != "" {
 				require.ErrorContains(t, err, tc.errorMsg)
 				return
diff --git a/internal/component/otelcol/connector/servicegraph/servicegraph.go b/internal/component/otelcol/connector/servicegraph/servicegraph.go
index 96705966fb..30f43c6dd9 100644
--- a/internal/component/otelcol/connector/servicegraph/servicegraph.go
+++ b/internal/component/otelcol/connector/servicegraph/servicegraph.go
@@ -8,7 +8,7 @@ import (
 	"github.com/grafana/agent/internal/component/otelcol"
 	"github.com/grafana/agent/internal/component/otelcol/connector"
 	"github.com/grafana/agent/internal/featuregate"
-	river "github.com/grafana/alloy/syntax"
+	"github.com/grafana/alloy/syntax"
 	"github.com/open-telemetry/opentelemetry-collector-contrib/connector/servicegraphconnector"
 	"github.com/open-telemetry/opentelemetry-collector-contrib/processor/servicegraphprocessor"
 	otelcomponent "go.opentelemetry.io/collector/component"
@@ -73,11 +73,11 @@ func (sc *StoreConfig) SetToDefault() {
 }
 
 var (
-	_ river.Validator = (*Arguments)(nil)
-	_ river.Defaulter = (*Arguments)(nil)
+	_ syntax.Validator = (*Arguments)(nil)
+	_ syntax.Defaulter = (*Arguments)(nil)
 )
 
-// SetToDefault implements river.Defaulter.
+// SetToDefault implements syntax.Defaulter.
 func (args *Arguments) SetToDefault() {
 	*args = Arguments{
 		LatencyHistogramBuckets: []time.Duration{
@@ -117,7 +117,7 @@ func (args *Arguments) SetToDefault() {
 	args.Store.SetToDefault()
 }
 
-// Validate implements river.Validator.
+// Validate implements syntax.Validator.
 func (args *Arguments) Validate() error {
 	if args.CacheLoop <= 0 {
 		return fmt.Errorf("cache_loop must be greater than 0")
diff --git a/internal/component/otelcol/connector/servicegraph/servicegraph_test.go b/internal/component/otelcol/connector/servicegraph/servicegraph_test.go
index adcc03c3a5..390c07bd2f 100644
--- a/internal/component/otelcol/connector/servicegraph/servicegraph_test.go
+++ b/internal/component/otelcol/connector/servicegraph/servicegraph_test.go
@@ -5,7 +5,7 @@ import (
 	"time"
 
 	"github.com/grafana/agent/internal/component/otelcol/connector/servicegraph"
-	river "github.com/grafana/alloy/syntax"
+	"github.com/grafana/alloy/syntax"
 	"github.com/open-telemetry/opentelemetry-collector-contrib/processor/servicegraphprocessor"
 	"github.com/stretchr/testify/require"
 )
@@ -137,7 +137,7 @@ func TestArguments_UnmarshalRiver(t *testing.T) {
 	for _, tc := range tests {
 		t.Run(tc.testName, func(t *testing.T) {
 			var args servicegraph.Arguments
-			err := river.Unmarshal([]byte(tc.cfg), &args)
+			err := syntax.Unmarshal([]byte(tc.cfg), &args)
 			if tc.errorMsg != "" {
 				require.ErrorContains(t, err, tc.errorMsg)
 				return
diff --git a/internal/component/otelcol/connector/spanlogs/spanlogs.go b/internal/component/otelcol/connector/spanlogs/spanlogs.go
index 5ccf94e6a1..a6622090b9 100644
--- a/internal/component/otelcol/connector/spanlogs/spanlogs.go
+++ b/internal/component/otelcol/connector/spanlogs/spanlogs.go
@@ -11,7 +11,7 @@ import (
 	"github.com/grafana/agent/internal/component/otelcol/internal/lazyconsumer"
 	"github.com/grafana/agent/internal/featuregate"
 	"github.com/grafana/agent/internal/flow/logging/level"
-	river "github.com/grafana/alloy/syntax"
+	"github.com/grafana/alloy/syntax"
 )
 
 func init() {
@@ -51,7 +51,7 @@ type OverrideConfig struct {
 }
 
 var (
-	_ river.Defaulter = (*Arguments)(nil)
+	_ syntax.Defaulter = (*Arguments)(nil)
 )
 
 // DefaultArguments holds default settings for Arguments.
@@ -66,7 +66,7 @@ var DefaultArguments = Arguments{
 	},
 }
 
-// SetToDefault implements river.Defaulter.
+// SetToDefault implements syntax.Defaulter.
 func (args *Arguments) SetToDefault() {
 	*args = DefaultArguments
 }
diff --git a/internal/component/otelcol/connector/spanlogs/spanlogs_test.go b/internal/component/otelcol/connector/spanlogs/spanlogs_test.go
index 5b1033a84a..093362aeeb 100644
--- a/internal/component/otelcol/connector/spanlogs/spanlogs_test.go
+++ b/internal/component/otelcol/connector/spanlogs/spanlogs_test.go
@@ -9,7 +9,7 @@ import (
 	"github.com/grafana/agent/internal/component/otelcol/processor/processortest"
 	"github.com/grafana/agent/internal/flow/componenttest"
 	"github.com/grafana/agent/internal/util"
-	river "github.com/grafana/alloy/syntax"
+	"github.com/grafana/alloy/syntax"
 	"github.com/stretchr/testify/require"
 )
 
@@ -25,7 +25,7 @@ func testRunProcessorWithContext(ctx context.Context, t *testing.T, processorCon
 	require.NoError(t, err)
 
 	var args spanlogs.Arguments
-	require.NoError(t, river.Unmarshal([]byte(processorConfig), &args))
+	require.NoError(t, syntax.Unmarshal([]byte(processorConfig), &args))
 
 	// Override the arguments so signals get forwarded to the test channel.
 	args.Output = testSignal.MakeOutput()
@@ -700,7 +700,7 @@ func Test_ComponentIO(t *testing.T) {
 	for _, tt := range tests {
 		t.Run(tt.testName, func(t *testing.T) {
 			var args spanlogs.Arguments
-			require.NoError(t, river.Unmarshal([]byte(tt.cfg), &args))
+			require.NoError(t, syntax.Unmarshal([]byte(tt.cfg), &args))
 			require.EqualValues(t, tt.expectedUnmarshaledCfg, args)
 
 			testRunProcessor(t, tt.cfg, processortest.NewTraceToLogSignal(tt.inputTraceJson, tt.expectedOutputLogJson))
diff --git a/internal/component/otelcol/connector/spanmetrics/spanmetrics.go b/internal/component/otelcol/connector/spanmetrics/spanmetrics.go
index 54e8fd78be..1c3b8f313e 100644
--- a/internal/component/otelcol/connector/spanmetrics/spanmetrics.go
+++ b/internal/component/otelcol/connector/spanmetrics/spanmetrics.go
@@ -9,7 +9,7 @@ import (
 	"github.com/grafana/agent/internal/component/otelcol"
 	"github.com/grafana/agent/internal/component/otelcol/connector"
 	"github.com/grafana/agent/internal/featuregate"
-	river "github.com/grafana/alloy/syntax"
+	"github.com/grafana/alloy/syntax"
 	"github.com/open-telemetry/opentelemetry-collector-contrib/connector/spanmetricsconnector"
 	otelcomponent "go.opentelemetry.io/collector/component"
 	otelextension "go.opentelemetry.io/collector/extension"
@@ -63,8 +63,8 @@ type Arguments struct {
 }
 
 var (
-	_ river.Validator = (*Arguments)(nil)
-	_ river.Defaulter = (*Arguments)(nil)
+	_ syntax.Validator = (*Arguments)(nil)
+	_ syntax.Defaulter = (*Arguments)(nil)
 	_ connector.Arguments = (*Arguments)(nil)
 )
 
@@ -80,12 +80,12 @@ var DefaultArguments = Arguments{
 	MetricsFlushInterval: 15 * time.Second,
 }
 
-// SetToDefault implements river.Defaulter.
+// SetToDefault implements syntax.Defaulter.
 func (args *Arguments) SetToDefault() {
 	*args = DefaultArguments
 }
 
-// Validate implements river.Validator.
+// Validate implements syntax.Validator.
 func (args *Arguments) Validate() error {
 	if args.DimensionsCacheSize <= 0 {
 		return fmt.Errorf(
diff --git a/internal/component/otelcol/connector/spanmetrics/spanmetrics_test.go b/internal/component/otelcol/connector/spanmetrics/spanmetrics_test.go
index 4e8c20b158..5a99a51de0 100644
--- a/internal/component/otelcol/connector/spanmetrics/spanmetrics_test.go
+++ b/internal/component/otelcol/connector/spanmetrics/spanmetrics_test.go
@@ -9,7 +9,7 @@ import (
 	"github.com/grafana/agent/internal/component/otelcol/processor/processortest"
 	"github.com/grafana/agent/internal/flow/componenttest"
 	"github.com/grafana/agent/internal/util"
-	river "github.com/grafana/alloy/syntax"
+	"github.com/grafana/alloy/syntax"
 	"github.com/open-telemetry/opentelemetry-collector-contrib/connector/spanmetricsconnector"
 	"github.com/stretchr/testify/require"
 )
@@ -316,7 +316,7 @@ func TestArguments_UnmarshalRiver(t *testing.T) {
 	for _, tc := range tests {
 		t.Run(tc.testName, func(t *testing.T) {
 			var args spanmetrics.Arguments
-			err := river.Unmarshal([]byte(tc.cfg), &args)
+			err := syntax.Unmarshal([]byte(tc.cfg), &args)
 			if tc.errorMsg != "" {
 				require.ErrorContains(t, err, tc.errorMsg)
 				return
@@ -348,7 +348,7 @@ func testRunProcessorWithContext(ctx context.Context, t *testing.T, processorCon
 	require.NoError(t, err)
 
 	var args spanmetrics.Arguments
-	require.NoError(t, river.Unmarshal([]byte(processorConfig), &args))
+	require.NoError(t, syntax.Unmarshal([]byte(processorConfig), &args))
 
 	// Override the arguments so signals get forwarded to the test channel.
 	args.Output = testSignal.MakeOutput()
@@ -743,7 +743,7 @@ func Test_ComponentIO(t *testing.T) {
 	for _, tt := range tests {
 		t.Run(tt.testName, func(t *testing.T) {
 			var args spanmetrics.Arguments
-			require.NoError(t, river.Unmarshal([]byte(tt.cfg), &args))
+			require.NoError(t, syntax.Unmarshal([]byte(tt.cfg), &args))
 
 			testRunProcessor(t, tt.cfg, processortest.NewTraceToMetricSignal(tt.inputTraceJson, tt.expectedOutputLogJson))
 		})
diff --git a/internal/component/otelcol/connector/spanmetrics/types.go b/internal/component/otelcol/connector/spanmetrics/types.go
index 8c88c19d8c..a604efaab8 100644
--- a/internal/component/otelcol/connector/spanmetrics/types.go
+++ b/internal/component/otelcol/connector/spanmetrics/types.go
@@ -5,7 +5,7 @@ import (
 	"strings"
 	"time"
 
-	river "github.com/grafana/alloy/syntax"
+	"github.com/grafana/alloy/syntax"
 	"github.com/mitchellh/mapstructure"
 	"github.com/open-telemetry/opentelemetry-collector-contrib/connector/spanmetricsconnector"
 )
@@ -63,8 +63,8 @@ type HistogramConfig struct {
 }
 
 var (
-	_ river.Defaulter = (*HistogramConfig)(nil)
-	_ river.Validator = (*HistogramConfig)(nil)
+	_ syntax.Defaulter = (*HistogramConfig)(nil)
+	_ syntax.Validator = (*HistogramConfig)(nil)
 )
 
 var DefaultHistogramConfig = HistogramConfig{
@@ -137,16 +137,16 @@ type ExponentialHistogramConfig struct {
 }
 
 var (
-	_ river.Defaulter = (*ExponentialHistogramConfig)(nil)
-	_ river.Validator = (*ExponentialHistogramConfig)(nil)
+	_ syntax.Defaulter = (*ExponentialHistogramConfig)(nil)
+	_ syntax.Validator = (*ExponentialHistogramConfig)(nil)
 )
 
-// SetToDefault implements river.Defaulter.
+// SetToDefault implements syntax.Defaulter.
 func (ehc *ExponentialHistogramConfig) SetToDefault() {
 	ehc.MaxSize = 160
 }
 
-// Validate implements river.Validator.
+// Validate implements syntax.Validator.
 func (ehc *ExponentialHistogramConfig) Validate() error {
 	if ehc.MaxSize <= 0 {
 		return fmt.Errorf("max_size must be greater than 0")
@@ -167,7 +167,7 @@ type ExplicitHistogramConfig struct {
 }
 
 var (
-	_ river.Defaulter = (*ExplicitHistogramConfig)(nil)
+	_ syntax.Defaulter = (*ExplicitHistogramConfig)(nil)
 )
 
 func (hc *ExplicitHistogramConfig) SetToDefault() {
diff --git a/internal/component/otelcol/exporter/loadbalancing/loadbalancing.go b/internal/component/otelcol/exporter/loadbalancing/loadbalancing.go
index 51f1bc2a33..ab9536ea5b 100644
--- a/internal/component/otelcol/exporter/loadbalancing/loadbalancing.go
+++ b/internal/component/otelcol/exporter/loadbalancing/loadbalancing.go
@@ -11,7 +11,7 @@ import (
 	"github.com/grafana/agent/internal/component/otelcol/auth"
 	"github.com/grafana/agent/internal/component/otelcol/exporter"
 	"github.com/grafana/agent/internal/featuregate"
-	river "github.com/grafana/alloy/syntax"
+	"github.com/grafana/alloy/syntax"
 	"github.com/open-telemetry/opentelemetry-collector-contrib/exporter/loadbalancingexporter"
 	otelcomponent "go.opentelemetry.io/collector/component"
 	otelconfigauth "go.opentelemetry.io/collector/config/configauth"
@@ -51,11 +51,11 @@ type Arguments struct {
 
 var (
 	_ exporter.Arguments = Arguments{}
-	_ river.Defaulter = &Arguments{}
-	_ river.Validator = &Arguments{}
+	_ syntax.Defaulter = &Arguments{}
+	_ syntax.Validator = &Arguments{}
 )
 
-// SetToDefault implements river.Defaulter.
+// SetToDefault implements syntax.Defaulter.
 func (args *Arguments) SetToDefault() {
 	*args = Arguments{
 		RoutingKey: "traceID",
@@ -64,7 +64,7 @@ func (args *Arguments) SetToDefault() {
 	args.Protocol.OTLP.SetToDefault()
 }
 
-// Validate implements river.Validator.
+// Validate implements syntax.Validator.
 func (args *Arguments) Validate() error {
 	//TODO(ptodev): Add support for "resource" and "metric" routing keys later.
 	// The reason we can't add them yet is that otelcol.exporter.loadbalancing
@@ -177,7 +177,7 @@ type DNSResolver struct {
 	Timeout time.Duration `river:"timeout,attr,optional"`
 }
 
-var _ river.Defaulter = &DNSResolver{}
+var _ syntax.Defaulter = &DNSResolver{}
 
 // DefaultDNSResolver holds default values for DNSResolver.
 var DefaultDNSResolver = DNSResolver{
@@ -186,7 +186,7 @@ var DefaultDNSResolver = DNSResolver{
 	Timeout: 1 * time.Second,
 }
 
-// SetToDefault implements river.Defaulter.
+// SetToDefault implements syntax.Defaulter.
 func (args *DNSResolver) SetToDefault() {
 	*args = DefaultDNSResolver
 }
@@ -206,9 +206,9 @@ type KubernetesResolver struct {
 	Ports []int32 `river:"ports,attr,optional"`
 }
 
-var _ river.Defaulter = &KubernetesResolver{}
+var _ syntax.Defaulter = &KubernetesResolver{}
 
-// SetToDefault implements river.Defaulter.
+// SetToDefault implements syntax.Defaulter.
 func (args *KubernetesResolver) SetToDefault() {
 	if args == nil {
 		args = &KubernetesResolver{}
@@ -257,7 +257,7 @@ type GRPCClientArguments struct {
 	Auth *auth.Handler `river:"auth,attr,optional"`
 }
 
-var _ river.Defaulter = &GRPCClientArguments{}
+var _ syntax.Defaulter = &GRPCClientArguments{}
 
 // Convert converts args into the upstream type.
 func (args *GRPCClientArguments) Convert() *otelconfiggrpc.GRPCClientSettings {
@@ -307,7 +307,7 @@ func (args *GRPCClientArguments) Extensions() map[otelcomponent.ID]otelextension
 	return m
 }
 
-// SetToDefault implements river.Defaulter.
+// SetToDefault implements syntax.Defaulter.
 func (args *GRPCClientArguments) SetToDefault() {
 	*args = GRPCClientArguments{
 		Headers: map[string]string{},
diff --git a/internal/component/otelcol/exporter/loadbalancing/loadbalancing_test.go b/internal/component/otelcol/exporter/loadbalancing/loadbalancing_test.go
index aaee9ed9f1..9b04ba1b23 100644
--- a/internal/component/otelcol/exporter/loadbalancing/loadbalancing_test.go
+++ b/internal/component/otelcol/exporter/loadbalancing/loadbalancing_test.go
@@ -6,7 +6,7 @@ import (
 
 	"github.com/grafana/agent/internal/component/otelcol"
 	"github.com/grafana/agent/internal/component/otelcol/exporter/loadbalancing"
-	river "github.com/grafana/alloy/syntax"
+	"github.com/grafana/alloy/syntax"
 	"github.com/open-telemetry/opentelemetry-collector-contrib/exporter/loadbalancingexporter"
 	"github.com/stretchr/testify/require"
 	"go.opentelemetry.io/collector/config/configgrpc"
@@ -258,7 +258,7 @@ func TestConfigConversion(t *testing.T) {
 	for _, tc := range tests {
 		t.Run(tc.testName, func(t *testing.T) {
 			var args loadbalancing.Arguments
-			require.NoError(t, river.Unmarshal([]byte(tc.agentCfg), &args))
+			require.NoError(t, syntax.Unmarshal([]byte(tc.agentCfg), &args))
 			actual, err := args.Convert()
 			require.NoError(t, err)
 			require.Equal(t, &tc.expected, actual.(*loadbalancingexporter.Config))
@@ -337,7 +337,7 @@ func TestDebugMetricsConfig(t *testing.T) {
 	for _, tc := range tests {
 		t.Run(tc.testName, func(t *testing.T) {
 			var args loadbalancing.Arguments
-			require.NoError(t, river.Unmarshal([]byte(tc.agentCfg), &args))
+			require.NoError(t, syntax.Unmarshal([]byte(tc.agentCfg), &args))
 			_, err := args.Convert()
 			require.NoError(t, err)
 
diff --git a/internal/component/otelcol/exporter/otlp/otlp_test.go b/internal/component/otelcol/exporter/otlp/otlp_test.go
index 48add799d4..88fa01e7b1 100644
--- a/internal/component/otelcol/exporter/otlp/otlp_test.go
+++
b/internal/component/otelcol/exporter/otlp/otlp_test.go @@ -12,7 +12,7 @@ import ( "github.com/grafana/agent/internal/flow/componenttest" "github.com/grafana/agent/internal/flow/logging/level" "github.com/grafana/agent/internal/util" - river "github.com/grafana/alloy/syntax" + "github.com/grafana/alloy/syntax" "github.com/grafana/dskit/backoff" "github.com/stretchr/testify/require" "go.opentelemetry.io/collector/pdata/ptrace" @@ -51,7 +51,7 @@ func Test(t *testing.T) { } `, tracesServer) var args otlp.Arguments - require.NoError(t, river.Unmarshal([]byte(cfg), &args)) + require.NoError(t, syntax.Unmarshal([]byte(cfg), &args)) require.Equal(t, args.DebugMetricsConfig().DisableHighCardinalityMetrics, true) go func() { @@ -194,7 +194,7 @@ func TestDebugMetricsConfig(t *testing.T) { for _, tc := range tests { t.Run(tc.testName, func(t *testing.T) { var args otlp.Arguments - require.NoError(t, river.Unmarshal([]byte(tc.agentCfg), &args)) + require.NoError(t, syntax.Unmarshal([]byte(tc.agentCfg), &args)) _, err := args.Convert() require.NoError(t, err) diff --git a/internal/component/otelcol/exporter/otlphttp/otlphttp_test.go b/internal/component/otelcol/exporter/otlphttp/otlphttp_test.go index 19a58725c2..1e49d8e63e 100644 --- a/internal/component/otelcol/exporter/otlphttp/otlphttp_test.go +++ b/internal/component/otelcol/exporter/otlphttp/otlphttp_test.go @@ -13,7 +13,7 @@ import ( "github.com/grafana/agent/internal/flow/componenttest" "github.com/grafana/agent/internal/flow/logging/level" "github.com/grafana/agent/internal/util" - river "github.com/grafana/alloy/syntax" + "github.com/grafana/alloy/syntax" "github.com/grafana/dskit/backoff" "github.com/stretchr/testify/require" "go.opentelemetry.io/collector/pdata/ptrace" @@ -55,7 +55,7 @@ func Test(t *testing.T) { } `, srv.URL) var args otlphttp.Arguments - require.NoError(t, river.Unmarshal([]byte(cfg), &args)) + require.NoError(t, syntax.Unmarshal([]byte(cfg), &args)) go func() { err := ctrl.Run(ctx, args) @@ -165,7 +165,7 @@ func TestDebugMetricsConfig(t *testing.T) { for _, tc := range tests { t.Run(tc.testName, func(t *testing.T) { var args otlphttp.Arguments - require.NoError(t, river.Unmarshal([]byte(tc.agentCfg), &args)) + require.NoError(t, syntax.Unmarshal([]byte(tc.agentCfg), &args)) _, err := args.Convert() require.NoError(t, err) diff --git a/internal/component/otelcol/exporter/prometheus/prometheus_test.go b/internal/component/otelcol/exporter/prometheus/prometheus_test.go index 8f78b0833f..f02b18f23b 100644 --- a/internal/component/otelcol/exporter/prometheus/prometheus_test.go +++ b/internal/component/otelcol/exporter/prometheus/prometheus_test.go @@ -5,7 +5,7 @@ import ( "time" "github.com/grafana/agent/internal/component/otelcol/exporter/prometheus" - river "github.com/grafana/alloy/syntax" + "github.com/grafana/alloy/syntax" "github.com/prometheus/prometheus/storage" "github.com/stretchr/testify/require" ) @@ -66,7 +66,7 @@ func TestArguments_UnmarshalRiver(t *testing.T) { for _, tc := range tests { t.Run(tc.testName, func(t *testing.T) { var args prometheus.Arguments - err := river.Unmarshal([]byte(tc.cfg), &args) + err := syntax.Unmarshal([]byte(tc.cfg), &args) if tc.errorMsg != "" { require.EqualError(t, err, tc.errorMsg) return diff --git a/internal/component/otelcol/extension/jaeger_remote_sampling/jaeger_remote_sampling_test.go b/internal/component/otelcol/extension/jaeger_remote_sampling/jaeger_remote_sampling_test.go index dab1d1e093..159ba0ccf6 100644 --- 
a/internal/component/otelcol/extension/jaeger_remote_sampling/jaeger_remote_sampling_test.go +++ b/internal/component/otelcol/extension/jaeger_remote_sampling/jaeger_remote_sampling_test.go @@ -14,7 +14,7 @@ import ( "github.com/grafana/agent/internal/component/otelcol/extension/jaeger_remote_sampling" "github.com/grafana/agent/internal/flow/componenttest" "github.com/grafana/agent/internal/util" - river "github.com/grafana/alloy/syntax" + "github.com/grafana/alloy/syntax" "github.com/phayes/freeport" "github.com/stretchr/testify/require" ) @@ -100,7 +100,7 @@ func startJaegerRemoteSamplingServer(t *testing.T, cfg string, listenAddr string require.NoError(t, err) var args jaeger_remote_sampling.Arguments - require.NoError(t, river.Unmarshal([]byte(cfg), &args)) + require.NoError(t, syntax.Unmarshal([]byte(cfg), &args)) go func() { err := ctrl.Run(ctx, args) @@ -134,7 +134,7 @@ func TestUnmarshalFailsWithNoServerConfig(t *testing.T) { ` var args jaeger_remote_sampling.Arguments - err := river.Unmarshal([]byte(cfg), &args) + err := syntax.Unmarshal([]byte(cfg), &args) require.ErrorContains(t, err, "http or grpc must be configured to serve the sampling document") } @@ -230,7 +230,7 @@ func TestUnmarshalUsesDefaults(t *testing.T) { for _, tc := range tcs { var args jaeger_remote_sampling.Arguments - err := river.Unmarshal([]byte(tc.cfg), &args) + err := syntax.Unmarshal([]byte(tc.cfg), &args) require.NoError(t, err) require.Equal(t, tc.expected, args) } @@ -264,7 +264,7 @@ func TestUnmarshalRequiresExactlyOneSource(t *testing.T) { for _, tc := range tcs { var args jaeger_remote_sampling.Arguments - err := river.Unmarshal([]byte(tc.cfg), &args) + err := syntax.Unmarshal([]byte(tc.cfg), &args) require.EqualError(t, err, tc.expectedError) } } diff --git a/internal/component/otelcol/processor/attributes/attributes_test.go b/internal/component/otelcol/processor/attributes/attributes_test.go index 69731121f5..3a488c45f1 100644 --- a/internal/component/otelcol/processor/attributes/attributes_test.go +++ b/internal/component/otelcol/processor/attributes/attributes_test.go @@ -11,7 +11,7 @@ import ( "github.com/grafana/agent/internal/component/otelcol/processor/processortest" "github.com/grafana/agent/internal/flow/componenttest" "github.com/grafana/agent/internal/util" - river "github.com/grafana/alloy/syntax" + "github.com/grafana/alloy/syntax" "github.com/mitchellh/mapstructure" "github.com/open-telemetry/opentelemetry-collector-contrib/processor/attributesprocessor" "github.com/stretchr/testify/require" @@ -84,7 +84,7 @@ func TestSeverityLevelMatchesOtel(t *testing.T) { t.Run(tt.name, func(t *testing.T) { t.Parallel() var matchProperties otelcol.MatchProperties - err := river.Unmarshal([]byte(tt.cfg), &matchProperties) + err := syntax.Unmarshal([]byte(tt.cfg), &matchProperties) require.NoError(t, err) @@ -118,7 +118,7 @@ func testRunProcessorWithContext(ctx context.Context, t *testing.T, processorCon require.NoError(t, err) var args attributes.Arguments - require.NoError(t, river.Unmarshal([]byte(processorConfig), &args)) + require.NoError(t, syntax.Unmarshal([]byte(processorConfig), &args)) // Override the arguments so signals get forwarded to the test channel. 
args.Output = testSignal.MakeOutput() @@ -152,7 +152,7 @@ func Test_Insert(t *testing.T) { } ` var args attributes.Arguments - require.NoError(t, river.Unmarshal([]byte(cfg), &args)) + require.NoError(t, syntax.Unmarshal([]byte(cfg), &args)) convertedArgs, err := args.Convert() require.NoError(t, err) @@ -222,7 +222,7 @@ func Test_RegexExtract(t *testing.T) { } ` var args attributes.Arguments - require.NoError(t, river.Unmarshal([]byte(cfg), &args)) + require.NoError(t, syntax.Unmarshal([]byte(cfg), &args)) convertedArgs, err := args.Convert() require.NoError(t, err) @@ -290,7 +290,7 @@ func Test_Update(t *testing.T) { } ` var args attributes.Arguments - require.NoError(t, river.Unmarshal([]byte(cfg), &args)) + require.NoError(t, syntax.Unmarshal([]byte(cfg), &args)) convertedArgs, err := args.Convert() require.NoError(t, err) @@ -371,7 +371,7 @@ func Test_Upsert(t *testing.T) { } ` var args attributes.Arguments - require.NoError(t, river.Unmarshal([]byte(cfg), &args)) + require.NoError(t, syntax.Unmarshal([]byte(cfg), &args)) convertedArgs, err := args.Convert() require.NoError(t, err) @@ -442,7 +442,7 @@ func Test_Delete(t *testing.T) { } ` var args attributes.Arguments - require.NoError(t, river.Unmarshal([]byte(cfg), &args)) + require.NoError(t, syntax.Unmarshal([]byte(cfg), &args)) convertedArgs, err := args.Convert() require.NoError(t, err) @@ -507,7 +507,7 @@ func Test_Hash(t *testing.T) { } ` var args attributes.Arguments - require.NoError(t, river.Unmarshal([]byte(cfg), &args)) + require.NoError(t, syntax.Unmarshal([]byte(cfg), &args)) convertedArgs, err := args.Convert() require.NoError(t, err) @@ -577,7 +577,7 @@ func Test_Convert(t *testing.T) { } ` var args attributes.Arguments - require.NoError(t, river.Unmarshal([]byte(cfg), &args)) + require.NoError(t, syntax.Unmarshal([]byte(cfg), &args)) convertedArgs, err := args.Convert() require.NoError(t, err) @@ -646,7 +646,7 @@ func Test_ExcludeMulti(t *testing.T) { } ` var args attributes.Arguments - require.NoError(t, river.Unmarshal([]byte(cfg), &args)) + require.NoError(t, syntax.Unmarshal([]byte(cfg), &args)) convertedArgs, err := args.Convert() require.NoError(t, err) @@ -874,7 +874,7 @@ func Test_ExcludeResources(t *testing.T) { } ` var args attributes.Arguments - require.NoError(t, river.Unmarshal([]byte(cfg), &args)) + require.NoError(t, syntax.Unmarshal([]byte(cfg), &args)) convertedArgs, err := args.Convert() require.NoError(t, err) @@ -1036,7 +1036,7 @@ func Test_ExcludeLibrary(t *testing.T) { } ` var args attributes.Arguments - require.NoError(t, river.Unmarshal([]byte(cfg), &args)) + require.NoError(t, syntax.Unmarshal([]byte(cfg), &args)) convertedArgs, err := args.Convert() require.NoError(t, err) @@ -1212,7 +1212,7 @@ func Test_ExcludeLibraryAnyVersion(t *testing.T) { } ` var args attributes.Arguments - require.NoError(t, river.Unmarshal([]byte(cfg), &args)) + require.NoError(t, syntax.Unmarshal([]byte(cfg), &args)) convertedArgs, err := args.Convert() require.NoError(t, err) @@ -1389,7 +1389,7 @@ func Test_ExcludeLibraryBlankVersion(t *testing.T) { } ` var args attributes.Arguments - require.NoError(t, river.Unmarshal([]byte(cfg), &args)) + require.NoError(t, syntax.Unmarshal([]byte(cfg), &args)) convertedArgs, err := args.Convert() require.NoError(t, err) @@ -1621,7 +1621,7 @@ func Test_ExcludeServices(t *testing.T) { } ` var args attributes.Arguments - require.NoError(t, river.Unmarshal([]byte(cfg), &args)) + require.NoError(t, syntax.Unmarshal([]byte(cfg), &args)) convertedArgs, err := args.Convert() 
require.NoError(t, err) @@ -1844,7 +1844,7 @@ func Test_SelectiveProcessing(t *testing.T) { } ` var args attributes.Arguments - require.NoError(t, river.Unmarshal([]byte(cfg), &args)) + require.NoError(t, syntax.Unmarshal([]byte(cfg), &args)) convertedArgs, err := args.Convert() require.NoError(t, err) @@ -2011,7 +2011,7 @@ func Test_Complex(t *testing.T) { } ` var args attributes.Arguments - require.NoError(t, river.Unmarshal([]byte(cfg), &args)) + require.NoError(t, syntax.Unmarshal([]byte(cfg), &args)) convertedArgs, err := args.Convert() require.NoError(t, err) @@ -2129,7 +2129,7 @@ func Test_ExampleActions(t *testing.T) { } ` var args attributes.Arguments - require.NoError(t, river.Unmarshal([]byte(cfg), &args)) + require.NoError(t, syntax.Unmarshal([]byte(cfg), &args)) convertedArgs, err := args.Convert() require.NoError(t, err) @@ -2238,7 +2238,7 @@ func Test_Regexp(t *testing.T) { } ` var args attributes.Arguments - require.NoError(t, river.Unmarshal([]byte(cfg), &args)) + require.NoError(t, syntax.Unmarshal([]byte(cfg), &args)) convertedArgs, err := args.Convert() require.NoError(t, err) @@ -2370,7 +2370,7 @@ func Test_Regexp2(t *testing.T) { } ` var args attributes.Arguments - require.NoError(t, river.Unmarshal([]byte(cfg), &args)) + require.NoError(t, syntax.Unmarshal([]byte(cfg), &args)) convertedArgs, err := args.Convert() require.NoError(t, err) @@ -2462,7 +2462,7 @@ func Test_LogBodyRegexp(t *testing.T) { } ` var args attributes.Arguments - require.NoError(t, river.Unmarshal([]byte(cfg), &args)) + require.NoError(t, syntax.Unmarshal([]byte(cfg), &args)) convertedArgs, err := args.Convert() require.NoError(t, err) @@ -2584,7 +2584,7 @@ func Test_LogSeverityTextsRegexp(t *testing.T) { } ` var args attributes.Arguments - require.NoError(t, river.Unmarshal([]byte(cfg), &args)) + require.NoError(t, syntax.Unmarshal([]byte(cfg), &args)) convertedArgs, err := args.Convert() require.NoError(t, err) @@ -2709,7 +2709,7 @@ func Test_LogSeverity(t *testing.T) { } ` var args attributes.Arguments - require.NoError(t, river.Unmarshal([]byte(cfg), &args)) + require.NoError(t, syntax.Unmarshal([]byte(cfg), &args)) convertedArgs, err := args.Convert() require.NoError(t, err) @@ -2829,7 +2829,7 @@ func Test_FromContext(t *testing.T) { } ` var args attributes.Arguments - require.NoError(t, river.Unmarshal([]byte(cfg), &args)) + require.NoError(t, syntax.Unmarshal([]byte(cfg), &args)) convertedArgs, err := args.Convert() require.NoError(t, err) @@ -2930,7 +2930,7 @@ func Test_MetricNames(t *testing.T) { } ` var args attributes.Arguments - require.NoError(t, river.Unmarshal([]byte(cfg), &args)) + require.NoError(t, syntax.Unmarshal([]byte(cfg), &args)) convertedArgs, err := args.Convert() require.NoError(t, err) diff --git a/internal/component/otelcol/processor/batch/batch_test.go b/internal/component/otelcol/processor/batch/batch_test.go index bb14a136ba..10b7add2d8 100644 --- a/internal/component/otelcol/processor/batch/batch_test.go +++ b/internal/component/otelcol/processor/batch/batch_test.go @@ -11,7 +11,7 @@ import ( "github.com/grafana/agent/internal/flow/componenttest" "github.com/grafana/agent/internal/flow/logging/level" "github.com/grafana/agent/internal/util" - river "github.com/grafana/alloy/syntax" + "github.com/grafana/alloy/syntax" "github.com/grafana/dskit/backoff" "github.com/stretchr/testify/require" "go.opentelemetry.io/collector/pdata/ptrace" @@ -35,7 +35,7 @@ func Test(t *testing.T) { } ` var args batch.Arguments - require.NoError(t, river.Unmarshal([]byte(cfg), &args)) + 
require.NoError(t, syntax.Unmarshal([]byte(cfg), &args)) // Override our arguments so traces get forwarded to traceCh. traceCh := make(chan ptrace.Traces) @@ -171,7 +171,7 @@ func TestArguments_UnmarshalRiver(t *testing.T) { for _, tc := range tests { var args batch.Arguments - err := river.Unmarshal([]byte(tc.cfg), &args) + err := syntax.Unmarshal([]byte(tc.cfg), &args) require.NoError(t, err) ext, err := args.Convert() diff --git a/internal/component/otelcol/processor/discovery/discovery.go b/internal/component/otelcol/processor/discovery/discovery.go index 250156dee9..2607ada090 100644 --- a/internal/component/otelcol/processor/discovery/discovery.go +++ b/internal/component/otelcol/processor/discovery/discovery.go @@ -14,7 +14,7 @@ import ( "github.com/grafana/agent/internal/featuregate" "github.com/grafana/agent/internal/flow/logging/level" promsdconsumer "github.com/grafana/agent/internal/static/traces/promsdprocessor/consumer" - river "github.com/grafana/alloy/syntax" + "github.com/grafana/alloy/syntax" ) func init() { @@ -41,11 +41,11 @@ type Arguments struct { } var ( - _ river.Defaulter = (*Arguments)(nil) - _ river.Validator = (*Arguments)(nil) + _ syntax.Defaulter = (*Arguments)(nil) + _ syntax.Validator = (*Arguments)(nil) ) -// SetToDefault implements river.Defaulter. +// SetToDefault implements syntax.Defaulter. func (args *Arguments) SetToDefault() { *args = Arguments{ OperationType: promsdconsumer.OperationTypeUpsert, @@ -59,7 +59,7 @@ func (args *Arguments) SetToDefault() { } } -// Validate implements river.Validator. +// Validate implements syntax.Validator. func (args *Arguments) Validate() error { err := promsdconsumer.ValidateOperationType(args.OperationType) if err != nil { diff --git a/internal/component/otelcol/processor/discovery/discovery_test.go b/internal/component/otelcol/processor/discovery/discovery_test.go index a33d4c1573..ab0aa96658 100644 --- a/internal/component/otelcol/processor/discovery/discovery_test.go +++ b/internal/component/otelcol/processor/discovery/discovery_test.go @@ -10,7 +10,7 @@ import ( "github.com/grafana/agent/internal/flow/componenttest" promsdconsumer "github.com/grafana/agent/internal/static/traces/promsdprocessor/consumer" "github.com/grafana/agent/internal/util" - river "github.com/grafana/alloy/syntax" + "github.com/grafana/alloy/syntax" "github.com/stretchr/testify/require" semconv "go.opentelemetry.io/collector/semconv/v1.5.0" ) @@ -27,7 +27,7 @@ func testRunProcessorWithContext(ctx context.Context, t *testing.T, processorCon require.NoError(t, err) var args discovery.Arguments - require.NoError(t, river.Unmarshal([]byte(processorConfig), &args)) + require.NoError(t, syntax.Unmarshal([]byte(processorConfig), &args)) // Override the arguments so signals get forwarded to the test channel. 
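[Note] The `var _ syntax.Defaulter = (*Arguments)(nil)` lines this patch touches are compile-time interface assertions: a typed nil pointer is assigned to an interface-typed blank identifier, so the build fails if the type ever stops satisfying `syntax.Defaulter` or `syntax.Validator`. A minimal sketch of the full pattern, with a hypothetical field and limit:

    package example

    import (
        "fmt"

        "github.com/grafana/alloy/syntax"
    )

    // Arguments is a hypothetical component configuration.
    type Arguments struct {
        CacheSize int `river:"cache_size,attr,optional"`
    }

    // Compile-time assertions: the compiler verifies both methods exist.
    var (
        _ syntax.Defaulter = (*Arguments)(nil)
        _ syntax.Validator = (*Arguments)(nil)
    )

    // SetToDefault implements syntax.Defaulter.
    func (args *Arguments) SetToDefault() {
        *args = Arguments{CacheSize: 1000}
    }

    // Validate implements syntax.Validator.
    func (args *Arguments) Validate() error {
        if args.CacheSize <= 0 {
            return fmt.Errorf("cache_size must be greater than 0")
        }
        return nil
    }
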
args.Output = testSignal.MakeOutput() @@ -55,7 +55,7 @@ func Test_DefaultConfig(t *testing.T) { } ` var args discovery.Arguments - require.NoError(t, river.Unmarshal([]byte(cfg), &args)) + require.NoError(t, syntax.Unmarshal([]byte(cfg), &args)) var defaultArgs discovery.Arguments defaultArgs.SetToDefault() @@ -157,7 +157,7 @@ func Test_Insert(t *testing.T) { } ` var args discovery.Arguments - require.NoError(t, river.Unmarshal([]byte(cfg), &args)) + require.NoError(t, syntax.Unmarshal([]byte(cfg), &args)) var defaultArgs discovery.Arguments defaultArgs.SetToDefault() @@ -280,7 +280,7 @@ func Test_Update(t *testing.T) { } ` var args discovery.Arguments - require.NoError(t, river.Unmarshal([]byte(cfg), &args)) + require.NoError(t, syntax.Unmarshal([]byte(cfg), &args)) var defaultArgs discovery.Arguments defaultArgs.SetToDefault() @@ -404,7 +404,7 @@ func Test_PodAssociationLabels(t *testing.T) { `, podAssociationLabel) var args discovery.Arguments - require.NoError(t, river.Unmarshal([]byte(cfg), &args)) + require.NoError(t, syntax.Unmarshal([]byte(cfg), &args)) require.Equal(t, args.OperationType, promsdconsumer.OperationTypeInsert) require.Equal(t, args.PodAssociations, []string{podAssociationLabel}) diff --git a/internal/component/otelcol/processor/filter/filter_test.go b/internal/component/otelcol/processor/filter/filter_test.go index 58eb9861ed..9589e646cb 100644 --- a/internal/component/otelcol/processor/filter/filter_test.go +++ b/internal/component/otelcol/processor/filter/filter_test.go @@ -4,7 +4,7 @@ import ( "testing" "github.com/grafana/agent/internal/component/otelcol/processor/filter" - river "github.com/grafana/alloy/syntax" + "github.com/grafana/alloy/syntax" "github.com/mitchellh/mapstructure" "github.com/open-telemetry/opentelemetry-collector-contrib/processor/filterprocessor" "github.com/stretchr/testify/require" @@ -161,7 +161,7 @@ func TestArguments_UnmarshalRiver(t *testing.T) { for _, tc := range tests { t.Run(tc.testName, func(t *testing.T) { var args filter.Arguments - err := river.Unmarshal([]byte(tc.cfg), &args) + err := syntax.Unmarshal([]byte(tc.cfg), &args) if tc.errMsg != "" { require.ErrorContains(t, err, tc.errMsg) return diff --git a/internal/component/otelcol/processor/k8sattributes/k8sattributes_test.go b/internal/component/otelcol/processor/k8sattributes/k8sattributes_test.go index cefcecc6dd..a10d18abf7 100644 --- a/internal/component/otelcol/processor/k8sattributes/k8sattributes_test.go +++ b/internal/component/otelcol/processor/k8sattributes/k8sattributes_test.go @@ -4,7 +4,7 @@ import ( "testing" "github.com/grafana/agent/internal/component/otelcol/processor/k8sattributes" - river "github.com/grafana/alloy/syntax" + "github.com/grafana/alloy/syntax" "github.com/open-telemetry/opentelemetry-collector-contrib/processor/k8sattributesprocessor" "github.com/stretchr/testify/require" ) @@ -32,7 +32,7 @@ func Test_Extract(t *testing.T) { } ` var args k8sattributes.Arguments - require.NoError(t, river.Unmarshal([]byte(cfg), &args)) + require.NoError(t, syntax.Unmarshal([]byte(cfg), &args)) convertedArgs, err := args.Convert() require.NoError(t, err) @@ -70,7 +70,7 @@ func Test_ExtractAnnotations(t *testing.T) { } ` var args k8sattributes.Arguments - require.NoError(t, river.Unmarshal([]byte(cfg), &args)) + require.NoError(t, syntax.Unmarshal([]byte(cfg), &args)) convertedArgs, err := args.Convert() require.NoError(t, err) @@ -99,7 +99,7 @@ func Test_FilterNodeEnvironmentVariable(t *testing.T) { var args k8sattributes.Arguments testHostname := 
"test-hostname" t.Setenv("K8S_ATTRIBUTES_TEST_HOSTNAME", testHostname) - require.NoError(t, river.Unmarshal([]byte(cfg), &args)) + require.NoError(t, syntax.Unmarshal([]byte(cfg), &args)) convertedArgs, err := args.Convert() require.NoError(t, err) @@ -120,7 +120,7 @@ func Test_FilterNamespace(t *testing.T) { } ` var args k8sattributes.Arguments - require.NoError(t, river.Unmarshal([]byte(cfg), &args)) + require.NoError(t, syntax.Unmarshal([]byte(cfg), &args)) convertedArgs, err := args.Convert() require.NoError(t, err) @@ -158,7 +158,7 @@ func Test_FilterOps(t *testing.T) { } ` var args k8sattributes.Arguments - require.NoError(t, river.Unmarshal([]byte(cfg), &args)) + require.NoError(t, syntax.Unmarshal([]byte(cfg), &args)) convertedArgs, err := args.Convert() require.NoError(t, err) @@ -190,7 +190,7 @@ func Test_DefaultToServiceAccountAuth(t *testing.T) { } ` var args k8sattributes.Arguments - require.NoError(t, river.Unmarshal([]byte(cfg), &args)) + require.NoError(t, syntax.Unmarshal([]byte(cfg), &args)) convertedArgs, err := args.Convert() require.NoError(t, err) @@ -225,7 +225,7 @@ func Test_PodAssociation(t *testing.T) { } ` var args k8sattributes.Arguments - require.NoError(t, river.Unmarshal([]byte(cfg), &args)) + require.NoError(t, syntax.Unmarshal([]byte(cfg), &args)) convertedArgs, err := args.Convert() require.NoError(t, err) @@ -272,7 +272,7 @@ func Test_PodAssociationPair(t *testing.T) { } ` var args k8sattributes.Arguments - require.NoError(t, river.Unmarshal([]byte(cfg), &args)) + require.NoError(t, syntax.Unmarshal([]byte(cfg), &args)) convertedArgs, err := args.Convert() require.NoError(t, err) @@ -303,7 +303,7 @@ func Test_Passthrough(t *testing.T) { } ` var args k8sattributes.Arguments - require.NoError(t, river.Unmarshal([]byte(cfg), &args)) + require.NoError(t, syntax.Unmarshal([]byte(cfg), &args)) convertedArgs, err := args.Convert() require.NoError(t, err) @@ -321,7 +321,7 @@ func Test_Exclude(t *testing.T) { } ` var args k8sattributes.Arguments - require.NoError(t, river.Unmarshal([]byte(cfg), &args)) + require.NoError(t, syntax.Unmarshal([]byte(cfg), &args)) convertedArgs, err := args.Convert() require.NoError(t, err) @@ -345,7 +345,7 @@ func Test_Exclude(t *testing.T) { } ` var args k8sattributes.Arguments - require.NoError(t, river.Unmarshal([]byte(cfg), &args)) + require.NoError(t, syntax.Unmarshal([]byte(cfg), &args)) convertedArgs, err := args.Convert() require.NoError(t, err) diff --git a/internal/component/otelcol/processor/memorylimiter/memorylimiter_test.go b/internal/component/otelcol/processor/memorylimiter/memorylimiter_test.go index 03342d47f0..d95ec2c483 100644 --- a/internal/component/otelcol/processor/memorylimiter/memorylimiter_test.go +++ b/internal/component/otelcol/processor/memorylimiter/memorylimiter_test.go @@ -11,7 +11,7 @@ import ( "github.com/grafana/agent/internal/flow/componenttest" "github.com/grafana/agent/internal/flow/logging/level" "github.com/grafana/agent/internal/util" - river "github.com/grafana/alloy/syntax" + "github.com/grafana/alloy/syntax" "github.com/grafana/dskit/backoff" "github.com/stretchr/testify/require" "go.opentelemetry.io/collector/pdata/ptrace" @@ -36,7 +36,7 @@ func Test(t *testing.T) { } ` var args memorylimiter.Arguments - require.NoError(t, river.Unmarshal([]byte(cfg), &args)) + require.NoError(t, syntax.Unmarshal([]byte(cfg), &args)) // Override our arguments so traces get forwarded to traceCh. 
traceCh := make(chan ptrace.Traces) diff --git a/internal/component/otelcol/processor/probabilistic_sampler/probabilistic_sampler.go b/internal/component/otelcol/processor/probabilistic_sampler/probabilistic_sampler.go index a3096e8c8b..ca33f9792f 100644 --- a/internal/component/otelcol/processor/probabilistic_sampler/probabilistic_sampler.go +++ b/internal/component/otelcol/processor/probabilistic_sampler/probabilistic_sampler.go @@ -6,7 +6,7 @@ import ( "github.com/grafana/agent/internal/component/otelcol" "github.com/grafana/agent/internal/component/otelcol/processor" "github.com/grafana/agent/internal/featuregate" - river "github.com/grafana/alloy/syntax" + "github.com/grafana/alloy/syntax" "github.com/open-telemetry/opentelemetry-collector-contrib/processor/probabilisticsamplerprocessor" otelcomponent "go.opentelemetry.io/collector/component" otelextension "go.opentelemetry.io/collector/extension" @@ -40,8 +40,8 @@ type Arguments struct { var ( _ processor.Arguments = Arguments{} - _ river.Validator = (*Arguments)(nil) - _ river.Defaulter = (*Arguments)(nil) + _ syntax.Validator = (*Arguments)(nil) + _ syntax.Defaulter = (*Arguments)(nil) ) // DefaultArguments holds default settings for Arguments. @@ -49,12 +49,12 @@ var DefaultArguments = Arguments{ AttributeSource: "traceID", } -// SetToDefault implements river.Defaulter. +// SetToDefault implements syntax.Defaulter. func (args *Arguments) SetToDefault() { *args = DefaultArguments } -// Validate implements river.Validator. +// Validate implements syntax.Validator. func (args *Arguments) Validate() error { cfg, err := args.Convert() if err != nil { diff --git a/internal/component/otelcol/processor/probabilistic_sampler/probabilistic_sampler_test.go b/internal/component/otelcol/processor/probabilistic_sampler/probabilistic_sampler_test.go index 0f67443034..bf0192a1d4 100644 --- a/internal/component/otelcol/processor/probabilistic_sampler/probabilistic_sampler_test.go +++ b/internal/component/otelcol/processor/probabilistic_sampler/probabilistic_sampler_test.go @@ -8,7 +8,7 @@ import ( "github.com/grafana/agent/internal/component/otelcol/processor/processortest" "github.com/grafana/agent/internal/flow/componenttest" "github.com/grafana/agent/internal/util" - river "github.com/grafana/alloy/syntax" + "github.com/grafana/alloy/syntax" "github.com/open-telemetry/opentelemetry-collector-contrib/processor/probabilisticsamplerprocessor" "github.com/stretchr/testify/require" ) @@ -73,7 +73,7 @@ func TestArguments_UnmarshalRiver(t *testing.T) { for _, tc := range tests { t.Run(tc.testName, func(t *testing.T) { var args probabilisticsampler.Arguments - err := river.Unmarshal([]byte(tc.cfg), &args) + err := syntax.Unmarshal([]byte(tc.cfg), &args) if tc.errorMsg != "" { require.EqualError(t, err, tc.errorMsg) return @@ -101,7 +101,7 @@ func testRunProcessorWithContext(ctx context.Context, t *testing.T, processorCon require.NoError(t, err) var args probabilisticsampler.Arguments - require.NoError(t, river.Unmarshal([]byte(processorConfig), &args)) + require.NoError(t, syntax.Unmarshal([]byte(processorConfig), &args)) // Override the arguments so signals get forwarded to the test channel. 
args.Output = testSignal.MakeOutput() @@ -126,7 +126,7 @@ func TestLogProcessing(t *testing.T) { } ` var args probabilisticsampler.Arguments - require.NoError(t, river.Unmarshal([]byte(cfg), &args)) + require.NoError(t, syntax.Unmarshal([]byte(cfg), &args)) var inputLogs = `{ "resourceLogs": [{ @@ -171,7 +171,7 @@ func TestTraceProcessing(t *testing.T) { ` var args probabilisticsampler.Arguments - require.NoError(t, river.Unmarshal([]byte(cfg), &args)) + require.NoError(t, syntax.Unmarshal([]byte(cfg), &args)) var inputTraces = `{ "resourceSpans": [{ diff --git a/internal/component/otelcol/processor/resourcedetection/internal/aws/ec2/config.go b/internal/component/otelcol/processor/resourcedetection/internal/aws/ec2/config.go index e4fb57a449..41ccdf00b9 100644 --- a/internal/component/otelcol/processor/resourcedetection/internal/aws/ec2/config.go +++ b/internal/component/otelcol/processor/resourcedetection/internal/aws/ec2/config.go @@ -2,7 +2,7 @@ package ec2 import ( rac "github.com/grafana/agent/internal/component/otelcol/processor/resourcedetection/internal/resource_attribute_config" - river "github.com/grafana/alloy/syntax" + "github.com/grafana/alloy/syntax" ) const Name = "ec2" @@ -30,9 +30,9 @@ var DefaultArguments = Config{ }, } -var _ river.Defaulter = (*Config)(nil) +var _ syntax.Defaulter = (*Config)(nil) -// SetToDefault implements river.Defaulter. +// SetToDefault implements syntax.Defaulter. func (args *Config) SetToDefault() { *args = DefaultArguments } diff --git a/internal/component/otelcol/processor/resourcedetection/internal/aws/ecs/config.go b/internal/component/otelcol/processor/resourcedetection/internal/aws/ecs/config.go index 07f2dfb6e4..101941f223 100644 --- a/internal/component/otelcol/processor/resourcedetection/internal/aws/ecs/config.go +++ b/internal/component/otelcol/processor/resourcedetection/internal/aws/ecs/config.go @@ -2,7 +2,7 @@ package ecs import ( rac "github.com/grafana/agent/internal/component/otelcol/processor/resourcedetection/internal/resource_attribute_config" - river "github.com/grafana/alloy/syntax" + "github.com/grafana/alloy/syntax" ) const Name = "ecs" @@ -31,9 +31,9 @@ var DefaultArguments = Config{ }, } -var _ river.Defaulter = (*Config)(nil) +var _ syntax.Defaulter = (*Config)(nil) -// SetToDefault implements river.Defaulter. +// SetToDefault implements syntax.Defaulter. func (args *Config) SetToDefault() { *args = DefaultArguments } diff --git a/internal/component/otelcol/processor/resourcedetection/internal/aws/eks/config.go b/internal/component/otelcol/processor/resourcedetection/internal/aws/eks/config.go index b357b793ca..e3bc1c962d 100644 --- a/internal/component/otelcol/processor/resourcedetection/internal/aws/eks/config.go +++ b/internal/component/otelcol/processor/resourcedetection/internal/aws/eks/config.go @@ -2,7 +2,7 @@ package eks import ( rac "github.com/grafana/agent/internal/component/otelcol/processor/resourcedetection/internal/resource_attribute_config" - river "github.com/grafana/alloy/syntax" + "github.com/grafana/alloy/syntax" ) const Name = "eks" @@ -19,9 +19,9 @@ var DefaultArguments = Config{ }, } -var _ river.Defaulter = (*Config)(nil) +var _ syntax.Defaulter = (*Config)(nil) -// SetToDefault implements river.Defaulter. +// SetToDefault implements syntax.Defaulter. 
func (args *Config) SetToDefault() { *args = DefaultArguments } diff --git a/internal/component/otelcol/processor/resourcedetection/internal/aws/elasticbeanstalk/config.go b/internal/component/otelcol/processor/resourcedetection/internal/aws/elasticbeanstalk/config.go index 8fa5ba5b18..2593f17c6f 100644 --- a/internal/component/otelcol/processor/resourcedetection/internal/aws/elasticbeanstalk/config.go +++ b/internal/component/otelcol/processor/resourcedetection/internal/aws/elasticbeanstalk/config.go @@ -2,7 +2,7 @@ package elasticbeanstalk import ( rac "github.com/grafana/agent/internal/component/otelcol/processor/resourcedetection/internal/resource_attribute_config" - river "github.com/grafana/alloy/syntax" + "github.com/grafana/alloy/syntax" ) const Name = "elasticbeanstalk" @@ -22,9 +22,9 @@ var DefaultArguments = Config{ }, } -var _ river.Defaulter = (*Config)(nil) +var _ syntax.Defaulter = (*Config)(nil) -// SetToDefault implements river.Defaulter. +// SetToDefault implements syntax.Defaulter. func (args *Config) SetToDefault() { *args = DefaultArguments } diff --git a/internal/component/otelcol/processor/resourcedetection/internal/aws/lambda/config.go b/internal/component/otelcol/processor/resourcedetection/internal/aws/lambda/config.go index 5b6654d5cd..b10990c6c3 100644 --- a/internal/component/otelcol/processor/resourcedetection/internal/aws/lambda/config.go +++ b/internal/component/otelcol/processor/resourcedetection/internal/aws/lambda/config.go @@ -2,7 +2,7 @@ package lambda import ( rac "github.com/grafana/agent/internal/component/otelcol/processor/resourcedetection/internal/resource_attribute_config" - river "github.com/grafana/alloy/syntax" + "github.com/grafana/alloy/syntax" ) const Name = "lambda" @@ -26,9 +26,9 @@ var DefaultArguments = Config{ }, } -var _ river.Defaulter = (*Config)(nil) +var _ syntax.Defaulter = (*Config)(nil) -// SetToDefault implements river.Defaulter. +// SetToDefault implements syntax.Defaulter. func (args *Config) SetToDefault() { *args = DefaultArguments } diff --git a/internal/component/otelcol/processor/resourcedetection/internal/azure/aks/config.go b/internal/component/otelcol/processor/resourcedetection/internal/azure/aks/config.go index 7592436521..a38838e3d0 100644 --- a/internal/component/otelcol/processor/resourcedetection/internal/azure/aks/config.go +++ b/internal/component/otelcol/processor/resourcedetection/internal/azure/aks/config.go @@ -2,7 +2,7 @@ package aks import ( rac "github.com/grafana/agent/internal/component/otelcol/processor/resourcedetection/internal/resource_attribute_config" - river "github.com/grafana/alloy/syntax" + "github.com/grafana/alloy/syntax" ) const Name = "aks" @@ -19,9 +19,9 @@ var DefaultArguments = Config{ }, } -var _ river.Defaulter = (*Config)(nil) +var _ syntax.Defaulter = (*Config)(nil) -// SetToDefault implements river.Defaulter. +// SetToDefault implements syntax.Defaulter. 
func (args *Config) SetToDefault() { *args = DefaultArguments } diff --git a/internal/component/otelcol/processor/resourcedetection/internal/azure/config.go b/internal/component/otelcol/processor/resourcedetection/internal/azure/config.go index dcfcfb7a03..30a40298b4 100644 --- a/internal/component/otelcol/processor/resourcedetection/internal/azure/config.go +++ b/internal/component/otelcol/processor/resourcedetection/internal/azure/config.go @@ -2,7 +2,7 @@ package azure import ( rac "github.com/grafana/agent/internal/component/otelcol/processor/resourcedetection/internal/resource_attribute_config" - river "github.com/grafana/alloy/syntax" + "github.com/grafana/alloy/syntax" ) const Name = "azure" @@ -27,9 +27,9 @@ var DefaultArguments = Config{ }, } -var _ river.Defaulter = (*Config)(nil) +var _ syntax.Defaulter = (*Config)(nil) -// SetToDefault implements river.Defaulter. +// SetToDefault implements syntax.Defaulter. func (args *Config) SetToDefault() { *args = DefaultArguments } diff --git a/internal/component/otelcol/processor/resourcedetection/internal/consul/config.go b/internal/component/otelcol/processor/resourcedetection/internal/consul/config.go index 70d88b7e77..f35f8f8002 100644 --- a/internal/component/otelcol/processor/resourcedetection/internal/consul/config.go +++ b/internal/component/otelcol/processor/resourcedetection/internal/consul/config.go @@ -2,7 +2,7 @@ package consul import ( rac "github.com/grafana/agent/internal/component/otelcol/processor/resourcedetection/internal/resource_attribute_config" - river "github.com/grafana/alloy/syntax" + "github.com/grafana/alloy/syntax" "github.com/grafana/alloy/syntax/alloytypes" "go.opentelemetry.io/collector/config/configopaque" ) @@ -51,9 +51,9 @@ var DefaultArguments = Config{ }, } -var _ river.Defaulter = (*Config)(nil) +var _ syntax.Defaulter = (*Config)(nil) -// SetToDefault implements river.Defaulter. +// SetToDefault implements syntax.Defaulter. func (args *Config) SetToDefault() { *args = DefaultArguments } diff --git a/internal/component/otelcol/processor/resourcedetection/internal/docker/config.go b/internal/component/otelcol/processor/resourcedetection/internal/docker/config.go index 9f3f15c21d..a0b2828fe0 100644 --- a/internal/component/otelcol/processor/resourcedetection/internal/docker/config.go +++ b/internal/component/otelcol/processor/resourcedetection/internal/docker/config.go @@ -2,7 +2,7 @@ package docker import ( rac "github.com/grafana/agent/internal/component/otelcol/processor/resourcedetection/internal/resource_attribute_config" - river "github.com/grafana/alloy/syntax" + "github.com/grafana/alloy/syntax" ) const Name = "docker" @@ -19,9 +19,9 @@ var DefaultArguments = Config{ }, } -var _ river.Defaulter = (*Config)(nil) +var _ syntax.Defaulter = (*Config)(nil) -// SetToDefault implements river.Defaulter. +// SetToDefault implements syntax.Defaulter. 
func (args *Config) SetToDefault() { *args = DefaultArguments } diff --git a/internal/component/otelcol/processor/resourcedetection/internal/gcp/config.go b/internal/component/otelcol/processor/resourcedetection/internal/gcp/config.go index 7b86d99a29..85ebffb55e 100644 --- a/internal/component/otelcol/processor/resourcedetection/internal/gcp/config.go +++ b/internal/component/otelcol/processor/resourcedetection/internal/gcp/config.go @@ -2,7 +2,7 @@ package gcp import ( rac "github.com/grafana/agent/internal/component/otelcol/processor/resourcedetection/internal/resource_attribute_config" - river "github.com/grafana/alloy/syntax" + "github.com/grafana/alloy/syntax" ) const Name = "gcp" @@ -34,9 +34,9 @@ var DefaultArguments = Config{ }, } -var _ river.Defaulter = (*Config)(nil) +var _ syntax.Defaulter = (*Config)(nil) -// SetToDefault implements river.Defaulter. +// SetToDefault implements syntax.Defaulter. func (args *Config) SetToDefault() { *args = DefaultArguments } diff --git a/internal/component/otelcol/processor/resourcedetection/internal/heroku/config.go b/internal/component/otelcol/processor/resourcedetection/internal/heroku/config.go index 4e66e85f38..a5e36ff14b 100644 --- a/internal/component/otelcol/processor/resourcedetection/internal/heroku/config.go +++ b/internal/component/otelcol/processor/resourcedetection/internal/heroku/config.go @@ -2,7 +2,7 @@ package heroku import ( rac "github.com/grafana/agent/internal/component/otelcol/processor/resourcedetection/internal/resource_attribute_config" - river "github.com/grafana/alloy/syntax" + "github.com/grafana/alloy/syntax" ) const Name = "heroku" @@ -25,9 +25,9 @@ var DefaultArguments = Config{ }, } -var _ river.Defaulter = (*Config)(nil) +var _ syntax.Defaulter = (*Config)(nil) -// SetToDefault implements river.Defaulter. +// SetToDefault implements syntax.Defaulter. func (args *Config) SetToDefault() { *args = DefaultArguments } diff --git a/internal/component/otelcol/processor/resourcedetection/internal/k8snode/config.go b/internal/component/otelcol/processor/resourcedetection/internal/k8snode/config.go index 4b53b48ced..c0922acdfd 100644 --- a/internal/component/otelcol/processor/resourcedetection/internal/k8snode/config.go +++ b/internal/component/otelcol/processor/resourcedetection/internal/k8snode/config.go @@ -3,7 +3,7 @@ package k8snode import ( "github.com/grafana/agent/internal/component/otelcol" rac "github.com/grafana/agent/internal/component/otelcol/processor/resourcedetection/internal/resource_attribute_config" - river "github.com/grafana/alloy/syntax" + "github.com/grafana/alloy/syntax" ) const Name = "kubernetes_node" @@ -44,9 +44,9 @@ var DefaultArguments = Config{ }, } -var _ river.Defaulter = (*Config)(nil) +var _ syntax.Defaulter = (*Config)(nil) -// SetToDefault implements river.Defaulter. +// SetToDefault implements syntax.Defaulter. 
func (c *Config) SetToDefault() { *c = DefaultArguments } diff --git a/internal/component/otelcol/processor/resourcedetection/internal/openshift/config.go b/internal/component/otelcol/processor/resourcedetection/internal/openshift/config.go index 1ea7173cf1..4c53dcc74e 100644 --- a/internal/component/otelcol/processor/resourcedetection/internal/openshift/config.go +++ b/internal/component/otelcol/processor/resourcedetection/internal/openshift/config.go @@ -3,7 +3,7 @@ package openshift import ( "github.com/grafana/agent/internal/component/otelcol" rac "github.com/grafana/agent/internal/component/otelcol/processor/resourcedetection/internal/resource_attribute_config" - river "github.com/grafana/alloy/syntax" + "github.com/grafana/alloy/syntax" ) const Name = "openshift" @@ -34,9 +34,9 @@ var DefaultArguments = Config{ }, } -var _ river.Defaulter = (*Config)(nil) +var _ syntax.Defaulter = (*Config)(nil) -// SetToDefault implements river.Defaulter. +// SetToDefault implements syntax.Defaulter. func (args *Config) SetToDefault() { *args = DefaultArguments } diff --git a/internal/component/otelcol/processor/resourcedetection/internal/system/config.go b/internal/component/otelcol/processor/resourcedetection/internal/system/config.go index 091cd5e7ac..4ab51788a1 100644 --- a/internal/component/otelcol/processor/resourcedetection/internal/system/config.go +++ b/internal/component/otelcol/processor/resourcedetection/internal/system/config.go @@ -4,7 +4,7 @@ import ( "fmt" rac "github.com/grafana/agent/internal/component/otelcol/processor/resourcedetection/internal/resource_attribute_config" - river "github.com/grafana/alloy/syntax" + "github.com/grafana/alloy/syntax" ) const Name = "system" @@ -19,9 +19,9 @@ type Config struct { ResourceAttributes ResourceAttributesConfig `river:"resource_attributes,block,optional"` } -var _ river.Defaulter = (*Config)(nil) +var _ syntax.Defaulter = (*Config)(nil) -// SetToDefault implements river.Defaulter. +// SetToDefault implements syntax.Defaulter. func (c *Config) SetToDefault() { *c = Config{ HostnameSources: []string{"dns", "os"}, diff --git a/internal/component/otelcol/processor/resourcedetection/resourcedetection.go b/internal/component/otelcol/processor/resourcedetection/resourcedetection.go index 40b366db73..8a6a8a8f61 100644 --- a/internal/component/otelcol/processor/resourcedetection/resourcedetection.go +++ b/internal/component/otelcol/processor/resourcedetection/resourcedetection.go @@ -22,7 +22,7 @@ import ( "github.com/grafana/agent/internal/component/otelcol/processor/resourcedetection/internal/openshift" "github.com/grafana/agent/internal/component/otelcol/processor/resourcedetection/internal/system" "github.com/grafana/agent/internal/featuregate" - river "github.com/grafana/alloy/syntax" + "github.com/grafana/alloy/syntax" "github.com/mitchellh/mapstructure" "github.com/open-telemetry/opentelemetry-collector-contrib/processor/resourcedetectionprocessor" otelcomponent "go.opentelemetry.io/collector/component" @@ -137,11 +137,11 @@ func (dc *DetectorConfig) SetToDefault() { var ( _ processor.Arguments = Arguments{} - _ river.Validator = (*Arguments)(nil) - _ river.Defaulter = (*Arguments)(nil) + _ syntax.Validator = (*Arguments)(nil) + _ syntax.Defaulter = (*Arguments)(nil) ) -// SetToDefault implements river.Defaulter. +// SetToDefault implements syntax.Defaulter. 
func (args *Arguments) SetToDefault() { *args = Arguments{ Detectors: []string{"env"}, @@ -151,7 +151,7 @@ func (args *Arguments) SetToDefault() { args.DetectorConfig.SetToDefault() } -// Validate implements river.Validator. +// Validate implements syntax.Validator. func (args *Arguments) Validate() error { if len(args.Detectors) == 0 { return fmt.Errorf("at least one detector must be specified") diff --git a/internal/component/otelcol/processor/resourcedetection/resourcedetection_test.go b/internal/component/otelcol/processor/resourcedetection/resourcedetection_test.go index 64285055f5..8a70f5d96d 100644 --- a/internal/component/otelcol/processor/resourcedetection/resourcedetection_test.go +++ b/internal/component/otelcol/processor/resourcedetection/resourcedetection_test.go @@ -19,7 +19,7 @@ import ( kubernetes_node "github.com/grafana/agent/internal/component/otelcol/processor/resourcedetection/internal/k8snode" "github.com/grafana/agent/internal/component/otelcol/processor/resourcedetection/internal/openshift" "github.com/grafana/agent/internal/component/otelcol/processor/resourcedetection/internal/system" - river "github.com/grafana/alloy/syntax" + "github.com/grafana/alloy/syntax" "github.com/mitchellh/mapstructure" "github.com/open-telemetry/opentelemetry-collector-contrib/processor/resourcedetectionprocessor" "github.com/stretchr/testify/require" @@ -1507,7 +1507,7 @@ func TestArguments_UnmarshalRiver(t *testing.T) { for _, tc := range tests { t.Run(tc.testName, func(t *testing.T) { var args resourcedetection.Arguments - err := river.Unmarshal([]byte(tc.cfg), &args) + err := syntax.Unmarshal([]byte(tc.cfg), &args) if tc.errorMsg != "" { require.ErrorContains(t, err, tc.errorMsg) return diff --git a/internal/component/otelcol/processor/span/span_test.go b/internal/component/otelcol/processor/span/span_test.go index 0b31f0dcc7..6a02063fba 100644 --- a/internal/component/otelcol/processor/span/span_test.go +++ b/internal/component/otelcol/processor/span/span_test.go @@ -8,7 +8,7 @@ import ( "github.com/grafana/agent/internal/component/otelcol/processor/span" "github.com/grafana/agent/internal/flow/componenttest" "github.com/grafana/agent/internal/util" - river "github.com/grafana/alloy/syntax" + "github.com/grafana/alloy/syntax" "github.com/mitchellh/mapstructure" "github.com/open-telemetry/opentelemetry-collector-contrib/processor/spanprocessor" "github.com/stretchr/testify/require" @@ -154,7 +154,7 @@ func TestArguments_UnmarshalRiver(t *testing.T) { for _, tc := range tests { var args span.Arguments - err := river.Unmarshal([]byte(tc.flowCfg), &args) + err := syntax.Unmarshal([]byte(tc.flowCfg), &args) if tc.expectUnmarshalError { require.Error(t, err) @@ -191,7 +191,7 @@ func testRunProcessorWithContext(ctx context.Context, t *testing.T, processorCon require.NoError(t, err) var args span.Arguments - require.NoError(t, river.Unmarshal([]byte(processorConfig), &args)) + require.NoError(t, syntax.Unmarshal([]byte(processorConfig), &args)) // Override the arguments so signals get forwarded to the test channel. 
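[Note] The test pattern repeated in these files leans on the decode pipeline: `syntax.Unmarshal` is expected to call `SetToDefault` on a `syntax.Defaulter` first, decode the config text over those defaults, and then call `Validate` on a `syntax.Validator` — which is why the tests in this patch can assert validation errors straight from `Unmarshal`. A hypothetical test against the `Arguments` sketch shown earlier:

    package example

    import (
        "testing"

        "github.com/grafana/alloy/syntax"
        "github.com/stretchr/testify/require"
    )

    func TestUnmarshalDefaultsAndValidation(t *testing.T) {
        // An empty config keeps the default applied by SetToDefault.
        var args Arguments
        require.NoError(t, syntax.Unmarshal([]byte(``), &args))
        require.Equal(t, 1000, args.CacheSize)

        // An out-of-range value surfaces the Validate error from Unmarshal.
        var bad Arguments
        err := syntax.Unmarshal([]byte(`cache_size = 0`), &bad)
        require.ErrorContains(t, err, "cache_size must be greater than 0")
    }
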
args.Output = testSignal.MakeOutput() @@ -219,7 +219,7 @@ func Test_UpdateSpanNameFromAttributesSuccessfully(t *testing.T) { } ` var args span.Arguments - require.NoError(t, river.Unmarshal([]byte(cfg), &args)) + require.NoError(t, syntax.Unmarshal([]byte(cfg), &args)) convertedArgs, err := args.Convert() require.NoError(t, err) @@ -287,7 +287,7 @@ func Test_UpdateSpanNameFromAttributesUnsuccessfully(t *testing.T) { } ` var args span.Arguments - require.NoError(t, river.Unmarshal([]byte(cfg), &args)) + require.NoError(t, syntax.Unmarshal([]byte(cfg), &args)) convertedArgs, err := args.Convert() require.NoError(t, err) @@ -348,7 +348,7 @@ func Test_UpdateSpanNameFromAttributesNoSeparatorSuccessfully(t *testing.T) { } ` var args span.Arguments - require.NoError(t, river.Unmarshal([]byte(cfg), &args)) + require.NoError(t, syntax.Unmarshal([]byte(cfg), &args)) convertedArgs, err := args.Convert() require.NoError(t, err) @@ -416,7 +416,7 @@ func Test_ToAttributes(t *testing.T) { } ` var args span.Arguments - require.NoError(t, river.Unmarshal([]byte(cfg), &args)) + require.NoError(t, syntax.Unmarshal([]byte(cfg), &args)) convertedArgs, err := args.Convert() require.NoError(t, err) @@ -494,7 +494,7 @@ func Test_IncludeExclude(t *testing.T) { } ` var args span.Arguments - require.NoError(t, river.Unmarshal([]byte(cfg), &args)) + require.NoError(t, syntax.Unmarshal([]byte(cfg), &args)) convertedArgs, err := args.Convert() require.NoError(t, err) @@ -608,7 +608,7 @@ func Test_StatusError(t *testing.T) { } ` var args span.Arguments - require.NoError(t, river.Unmarshal([]byte(cfg), &args)) + require.NoError(t, syntax.Unmarshal([]byte(cfg), &args)) convertedArgs, err := args.Convert() require.NoError(t, err) @@ -668,7 +668,7 @@ func Test_StatusOk(t *testing.T) { } ` var args span.Arguments - require.NoError(t, river.Unmarshal([]byte(cfg), &args)) + require.NoError(t, syntax.Unmarshal([]byte(cfg), &args)) convertedArgs, err := args.Convert() require.NoError(t, err) diff --git a/internal/component/otelcol/processor/tail_sampling/tail_sampling_test.go b/internal/component/otelcol/processor/tail_sampling/tail_sampling_test.go index 729a21c1e5..4ae4e53801 100644 --- a/internal/component/otelcol/processor/tail_sampling/tail_sampling_test.go +++ b/internal/component/otelcol/processor/tail_sampling/tail_sampling_test.go @@ -12,7 +12,7 @@ import ( "github.com/grafana/agent/internal/flow/componenttest" "github.com/grafana/agent/internal/flow/logging/level" "github.com/grafana/agent/internal/util" - river "github.com/grafana/alloy/syntax" + "github.com/grafana/alloy/syntax" "github.com/grafana/dskit/backoff" "github.com/stretchr/testify/require" "go.opentelemetry.io/collector/pdata/ptrace" @@ -33,7 +33,7 @@ func TestBadRiverConfig(t *testing.T) { ` var args Arguments - require.Error(t, river.Unmarshal([]byte(exampleBadRiverConfig), &args), "num_traces must be greater than zero") + require.Error(t, syntax.Unmarshal([]byte(exampleBadRiverConfig), &args), "num_traces must be greater than zero") } func TestBadRiverConfigErrorMode(t *testing.T) { @@ -62,7 +62,7 @@ func TestBadRiverConfigErrorMode(t *testing.T) { ` var args Arguments - require.ErrorContains(t, river.Unmarshal([]byte(exampleBadRiverConfig), &args), "\"\" unknown error mode") + require.ErrorContains(t, syntax.Unmarshal([]byte(exampleBadRiverConfig), &args), "\"\" unknown error mode") } func TestBadOtelConfig(t *testing.T) { @@ -86,7 +86,7 @@ func TestBadOtelConfig(t *testing.T) { require.NoError(t, err) var args Arguments - require.NoError(t, 
river.Unmarshal([]byte(exampleBadOtelConfig), &args)) + require.NoError(t, syntax.Unmarshal([]byte(exampleBadOtelConfig), &args)) // Override our arguments so traces get forwarded to traceCh. traceCh := make(chan ptrace.Traces) @@ -353,7 +353,7 @@ func TestBigConfig(t *testing.T) { require.NoError(t, err) var args Arguments - require.NoError(t, river.Unmarshal([]byte(exampleBigConfig), &args)) + require.NoError(t, syntax.Unmarshal([]byte(exampleBigConfig), &args)) // Override our arguments so traces get forwarded to traceCh. traceCh := make(chan ptrace.Traces) @@ -388,7 +388,7 @@ func TestTraceProcessing(t *testing.T) { require.NoError(t, err) var args Arguments - require.NoError(t, river.Unmarshal([]byte(exampleSmallConfig), &args)) + require.NoError(t, syntax.Unmarshal([]byte(exampleSmallConfig), &args)) // Override our arguments so traces get forwarded to traceCh. traceCh := make(chan ptrace.Traces) diff --git a/internal/component/otelcol/processor/tail_sampling/types.go b/internal/component/otelcol/processor/tail_sampling/types.go index c9c66fdf4c..36c97adca2 100644 --- a/internal/component/otelcol/processor/tail_sampling/types.go +++ b/internal/component/otelcol/processor/tail_sampling/types.go @@ -5,7 +5,7 @@ import ( "fmt" "strings" - river "github.com/grafana/alloy/syntax" + "github.com/grafana/alloy/syntax" "github.com/mitchellh/mapstructure" "github.com/open-telemetry/opentelemetry-collector-contrib/pkg/ottl" tsp "github.com/open-telemetry/opentelemetry-collector-contrib/processor/tailsamplingprocessor" @@ -215,11 +215,11 @@ const ( ) var ( - _ river.Validator = (*ErrorMode)(nil) + _ syntax.Validator = (*ErrorMode)(nil) _ encoding.TextUnmarshaler = (*ErrorMode)(nil) ) -// Validate implements river.Validator. +// Validate implements syntax.Validator. 
func (e *ErrorMode) Validate() error { if e == nil { return nil diff --git a/internal/component/otelcol/processor/transform/transform_test.go b/internal/component/otelcol/processor/transform/transform_test.go index 73a44bce43..291e3492f4 100644 --- a/internal/component/otelcol/processor/transform/transform_test.go +++ b/internal/component/otelcol/processor/transform/transform_test.go @@ -4,7 +4,7 @@ import ( "testing" "github.com/grafana/agent/internal/component/otelcol/processor/transform" - river "github.com/grafana/alloy/syntax" + "github.com/grafana/alloy/syntax" "github.com/mitchellh/mapstructure" "github.com/open-telemetry/opentelemetry-collector-contrib/processor/transformprocessor" "github.com/stretchr/testify/require" @@ -523,7 +523,7 @@ func TestArguments_UnmarshalRiver(t *testing.T) { for _, tc := range tests { t.Run(tc.testName, func(t *testing.T) { var args transform.Arguments - err := river.Unmarshal([]byte(tc.cfg), &args) + err := syntax.Unmarshal([]byte(tc.cfg), &args) if tc.errorMsg != "" { require.ErrorContains(t, err, tc.errorMsg) return diff --git a/internal/component/otelcol/receiver/jaeger/jaeger_test.go b/internal/component/otelcol/receiver/jaeger/jaeger_test.go index f07d319ba2..d8296a210d 100644 --- a/internal/component/otelcol/receiver/jaeger/jaeger_test.go +++ b/internal/component/otelcol/receiver/jaeger/jaeger_test.go @@ -9,7 +9,7 @@ import ( "github.com/grafana/agent/internal/component/otelcol/receiver/jaeger" "github.com/grafana/agent/internal/flow/componenttest" "github.com/grafana/agent/internal/util" - river "github.com/grafana/alloy/syntax" + "github.com/grafana/alloy/syntax" "github.com/phayes/freeport" "github.com/stretchr/testify/require" ) @@ -34,7 +34,7 @@ func Test(t *testing.T) { output { /* no-op */ } `, httpAddr) var args jaeger.Arguments - require.NoError(t, river.Unmarshal([]byte(cfg), &args)) + require.NoError(t, syntax.Unmarshal([]byte(cfg), &args)) go func() { err := ctrl.Run(ctx, args) @@ -57,7 +57,7 @@ func TestArguments_UnmarshalRiver(t *testing.T) { ` var args jaeger.Arguments - require.NoError(t, river.Unmarshal([]byte(in), &args)) + require.NoError(t, syntax.Unmarshal([]byte(in), &args)) defaults := &jaeger.GRPC{} defaults.SetToDefault() @@ -75,7 +75,7 @@ func TestArguments_UnmarshalRiver(t *testing.T) { ` var args jaeger.Arguments - require.NoError(t, river.Unmarshal([]byte(in), &args)) + require.NoError(t, syntax.Unmarshal([]byte(in), &args)) defaults := &jaeger.ThriftHTTP{} defaults.SetToDefault() @@ -93,7 +93,7 @@ func TestArguments_UnmarshalRiver(t *testing.T) { ` var args jaeger.Arguments - require.NoError(t, river.Unmarshal([]byte(in), &args)) + require.NoError(t, syntax.Unmarshal([]byte(in), &args)) defaults := &jaeger.ThriftBinary{} defaults.SetToDefault() @@ -111,7 +111,7 @@ func TestArguments_UnmarshalRiver(t *testing.T) { ` var args jaeger.Arguments - require.NoError(t, river.Unmarshal([]byte(in), &args)) + require.NoError(t, syntax.Unmarshal([]byte(in), &args)) defaults := &jaeger.ThriftCompact{} defaults.SetToDefault() @@ -179,7 +179,7 @@ func TestDebugMetricsConfig(t *testing.T) { for _, tc := range tests { t.Run(tc.testName, func(t *testing.T) { var args jaeger.Arguments - require.NoError(t, river.Unmarshal([]byte(tc.agentCfg), &args)) + require.NoError(t, syntax.Unmarshal([]byte(tc.agentCfg), &args)) _, err := args.Convert() require.NoError(t, err) diff --git a/internal/component/otelcol/receiver/kafka/kafka_test.go b/internal/component/otelcol/receiver/kafka/kafka_test.go index 16d93a773f..4238a19c91 100644 --- 
a/internal/component/otelcol/receiver/kafka/kafka_test.go +++ b/internal/component/otelcol/receiver/kafka/kafka_test.go @@ -6,7 +6,7 @@ import ( "github.com/grafana/agent/internal/component/otelcol" "github.com/grafana/agent/internal/component/otelcol/receiver/kafka" - river "github.com/grafana/alloy/syntax" + "github.com/grafana/alloy/syntax" "github.com/mitchellh/mapstructure" "github.com/open-telemetry/opentelemetry-collector-contrib/exporter/kafkaexporter" "github.com/open-telemetry/opentelemetry-collector-contrib/receiver/kafkareceiver" @@ -116,7 +116,7 @@ func TestArguments_UnmarshalRiver(t *testing.T) { for _, tc := range tests { t.Run(tc.testName, func(t *testing.T) { var args kafka.Arguments - err := river.Unmarshal([]byte(tc.cfg), &args) + err := syntax.Unmarshal([]byte(tc.cfg), &args) require.NoError(t, err) actualPtr, err := args.Convert() @@ -358,7 +358,7 @@ func TestArguments_Auth(t *testing.T) { for _, tc := range tests { t.Run(tc.testName, func(t *testing.T) { var args kafka.Arguments - err := river.Unmarshal([]byte(tc.cfg), &args) + err := syntax.Unmarshal([]byte(tc.cfg), &args) require.NoError(t, err) actualPtr, err := args.Convert() @@ -425,7 +425,7 @@ func TestDebugMetricsConfig(t *testing.T) { for _, tc := range tests { t.Run(tc.testName, func(t *testing.T) { var args kafka.Arguments - require.NoError(t, river.Unmarshal([]byte(tc.agentCfg), &args)) + require.NoError(t, syntax.Unmarshal([]byte(tc.agentCfg), &args)) _, err := args.Convert() require.NoError(t, err) diff --git a/internal/component/otelcol/receiver/loki/loki_test.go b/internal/component/otelcol/receiver/loki/loki_test.go index 9ec47bb48a..23b766f59e 100644 --- a/internal/component/otelcol/receiver/loki/loki_test.go +++ b/internal/component/otelcol/receiver/loki/loki_test.go @@ -10,7 +10,7 @@ import ( "github.com/grafana/agent/internal/component/otelcol/internal/fakeconsumer" "github.com/grafana/agent/internal/flow/componenttest" "github.com/grafana/agent/internal/util" - river "github.com/grafana/alloy/syntax" + "github.com/grafana/alloy/syntax" "github.com/grafana/loki/pkg/logproto" "github.com/prometheus/common/model" "github.com/stretchr/testify/require" @@ -30,7 +30,7 @@ func Test(t *testing.T) { } ` var args Arguments - require.NoError(t, river.Unmarshal([]byte(cfg), &args)) + require.NoError(t, syntax.Unmarshal([]byte(cfg), &args)) // Override our settings so logs get forwarded to logCh. 
logCh := make(chan plog.Logs) diff --git a/internal/component/otelcol/receiver/opencensus/opencensus_test.go b/internal/component/otelcol/receiver/opencensus/opencensus_test.go index b4eb5aa5e0..07e3eae23f 100644 --- a/internal/component/otelcol/receiver/opencensus/opencensus_test.go +++ b/internal/component/otelcol/receiver/opencensus/opencensus_test.go @@ -9,7 +9,7 @@ import ( "github.com/grafana/agent/internal/component/otelcol/receiver/opencensus" "github.com/grafana/agent/internal/flow/componenttest" "github.com/grafana/agent/internal/util" - river "github.com/grafana/alloy/syntax" + "github.com/grafana/alloy/syntax" "github.com/open-telemetry/opentelemetry-collector-contrib/receiver/opencensusreceiver" "github.com/phayes/freeport" "github.com/stretchr/testify/require" @@ -33,7 +33,7 @@ func Test(t *testing.T) { `, httpAddr) var args opencensus.Arguments - require.NoError(t, river.Unmarshal([]byte(cfg), &args)) + require.NoError(t, syntax.Unmarshal([]byte(cfg), &args)) go func() { err := ctrl.Run(ctx, args) @@ -47,7 +47,7 @@ func TestDefaultArguments_UnmarshalRiver(t *testing.T) { in := `output { /* no-op */ }` var args opencensus.Arguments - require.NoError(t, river.Unmarshal([]byte(in), &args)) + require.NoError(t, syntax.Unmarshal([]byte(in), &args)) ext, err := args.Convert() require.NoError(t, err) otelArgs, ok := (ext).(*opencensusreceiver.Config) @@ -74,7 +74,7 @@ func TestArguments_UnmarshalRiver(t *testing.T) { `, httpAddr) var args opencensus.Arguments - require.NoError(t, river.Unmarshal([]byte(in), &args)) + require.NoError(t, syntax.Unmarshal([]byte(in), &args)) args.Convert() ext, err := args.Convert() require.NoError(t, err) @@ -145,7 +145,7 @@ func TestDebugMetricsConfig(t *testing.T) { for _, tc := range tests { t.Run(tc.testName, func(t *testing.T) { var args opencensus.Arguments - require.NoError(t, river.Unmarshal([]byte(tc.agentCfg), &args)) + require.NoError(t, syntax.Unmarshal([]byte(tc.agentCfg), &args)) _, err := args.Convert() require.NoError(t, err) diff --git a/internal/component/otelcol/receiver/otlp/otlp_test.go b/internal/component/otelcol/receiver/otlp/otlp_test.go index fea621cb47..315753b77a 100644 --- a/internal/component/otelcol/receiver/otlp/otlp_test.go +++ b/internal/component/otelcol/receiver/otlp/otlp_test.go @@ -14,7 +14,7 @@ import ( "github.com/grafana/agent/internal/flow/componenttest" "github.com/grafana/agent/internal/flow/logging/level" "github.com/grafana/agent/internal/util" - river "github.com/grafana/alloy/syntax" + "github.com/grafana/alloy/syntax" "github.com/grafana/dskit/backoff" "github.com/phayes/freeport" "github.com/stretchr/testify/require" @@ -46,7 +46,7 @@ func Test(t *testing.T) { require.NoError(t, err) var args otlp.Arguments - require.NoError(t, river.Unmarshal([]byte(cfg), &args)) + require.NoError(t, syntax.Unmarshal([]byte(cfg), &args)) // Override our settings so traces get forwarded to traceCh. 
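(The OTLP receiver test resumes below with the `traceCh` declaration.) The override that the comment refers to is elided from this hunk; based on the `fakeconsumer` helper imported in the loki receiver test above, it plausibly looks like the following sketch. The `ConsumeTracesFunc` field and the shape of `otelcol.ConsumerArguments` are assumptions, not confirmed by this patch:

```go
// Hypothetical sketch: replace the component's Output with a fake consumer
// that forwards every trace batch into traceCh for the test to assert on.
args.Output = &otelcol.ConsumerArguments{
	Traces: []otelcol.Consumer{
		&fakeconsumer.Consumer{
			ConsumeTracesFunc: func(ctx context.Context, td ptrace.Traces) error {
				select {
				case <-ctx.Done():
					return ctx.Err()
				case traceCh <- td:
					return nil
				}
			},
		},
	},
}
```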
traceCh := make(chan ptrace.Traces) @@ -133,7 +133,7 @@ func TestUnmarshalGrpc(t *testing.T) { } ` var args otlp.Arguments - err := river.Unmarshal([]byte(riverCfg), &args) + err := syntax.Unmarshal([]byte(riverCfg), &args) require.NoError(t, err) } @@ -147,7 +147,7 @@ func TestUnmarshalHttp(t *testing.T) { } ` var args otlp.Arguments - err := river.Unmarshal([]byte(riverCfg), &args) + err := syntax.Unmarshal([]byte(riverCfg), &args) require.NoError(t, err) assert.Equal(t, "/v1/logs", args.HTTP.LogsURLPath) assert.Equal(t, "/v1/metrics", args.HTTP.MetricsURLPath) @@ -167,7 +167,7 @@ func TestUnmarshalHttpUrls(t *testing.T) { } ` var args otlp.Arguments - err := river.Unmarshal([]byte(riverCfg), &args) + err := syntax.Unmarshal([]byte(riverCfg), &args) require.NoError(t, err) assert.Equal(t, "custom/logs", args.HTTP.LogsURLPath) assert.Equal(t, "custom/metrics", args.HTTP.MetricsURLPath) @@ -227,7 +227,7 @@ func TestDebugMetricsConfig(t *testing.T) { for _, tc := range tests { t.Run(tc.testName, func(t *testing.T) { var args otlp.Arguments - require.NoError(t, river.Unmarshal([]byte(tc.agentCfg), &args)) + require.NoError(t, syntax.Unmarshal([]byte(tc.agentCfg), &args)) _, err := args.Convert() require.NoError(t, err) diff --git a/internal/component/otelcol/receiver/prometheus/prometheus_test.go b/internal/component/otelcol/receiver/prometheus/prometheus_test.go index 6ba3a43f65..7d1836ad29 100644 --- a/internal/component/otelcol/receiver/prometheus/prometheus_test.go +++ b/internal/component/otelcol/receiver/prometheus/prometheus_test.go @@ -11,7 +11,7 @@ import ( flowprometheus "github.com/grafana/agent/internal/component/prometheus" "github.com/grafana/agent/internal/flow/componenttest" "github.com/grafana/agent/internal/util" - river "github.com/grafana/alloy/syntax" + "github.com/grafana/alloy/syntax" "github.com/prometheus/common/model" "github.com/prometheus/prometheus/model/exemplar" "github.com/prometheus/prometheus/model/labels" @@ -36,7 +36,7 @@ func Test(t *testing.T) { } ` var args prometheus.Arguments - require.NoError(t, river.Unmarshal([]byte(cfg), &args)) + require.NoError(t, syntax.Unmarshal([]byte(cfg), &args)) // Override our settings so metrics get forwarded to metricCh. 
metricCh := make(chan pmetric.Metrics) diff --git a/internal/component/otelcol/receiver/vcenter/vcenter_test.go b/internal/component/otelcol/receiver/vcenter/vcenter_test.go index dc78595718..5e2cd07128 100644 --- a/internal/component/otelcol/receiver/vcenter/vcenter_test.go +++ b/internal/component/otelcol/receiver/vcenter/vcenter_test.go @@ -6,7 +6,7 @@ import ( "github.com/grafana/agent/internal/component/otelcol" "github.com/grafana/agent/internal/component/otelcol/receiver/vcenter" - river "github.com/grafana/alloy/syntax" + "github.com/grafana/alloy/syntax" "github.com/open-telemetry/opentelemetry-collector-contrib/receiver/vcenterreceiver" "github.com/stretchr/testify/require" ) @@ -160,7 +160,7 @@ func TestArguments_UnmarshalRiver(t *testing.T) { ` var args vcenter.Arguments - require.NoError(t, river.Unmarshal([]byte(in), &args)) + require.NoError(t, syntax.Unmarshal([]byte(in), &args)) args.Convert() ext, err := args.Convert() require.NoError(t, err) @@ -285,7 +285,7 @@ func TestDebugMetricsConfig(t *testing.T) { for _, tc := range tests { t.Run(tc.testName, func(t *testing.T) { var args vcenter.Arguments - require.NoError(t, river.Unmarshal([]byte(tc.agentCfg), &args)) + require.NoError(t, syntax.Unmarshal([]byte(tc.agentCfg), &args)) _, err := args.Convert() require.NoError(t, err) diff --git a/internal/component/otelcol/receiver/zipkin/zipkin_test.go b/internal/component/otelcol/receiver/zipkin/zipkin_test.go index 5fec2fa441..29839c46b7 100644 --- a/internal/component/otelcol/receiver/zipkin/zipkin_test.go +++ b/internal/component/otelcol/receiver/zipkin/zipkin_test.go @@ -9,7 +9,7 @@ import ( "github.com/grafana/agent/internal/component/otelcol/receiver/zipkin" "github.com/grafana/agent/internal/flow/componenttest" "github.com/grafana/agent/internal/util" - river "github.com/grafana/alloy/syntax" + "github.com/grafana/alloy/syntax" "github.com/open-telemetry/opentelemetry-collector-contrib/receiver/zipkinreceiver" "github.com/phayes/freeport" "github.com/stretchr/testify/require" @@ -31,7 +31,7 @@ func TestRun(t *testing.T) { `, httpAddr) var args zipkin.Arguments - require.NoError(t, river.Unmarshal([]byte(cfg), &args)) + require.NoError(t, syntax.Unmarshal([]byte(cfg), &args)) go func() { err := ctrl.Run(ctx, args) @@ -60,7 +60,7 @@ func TestArguments_UnmarshalRiver(t *testing.T) { `, httpAddr) var args zipkin.Arguments - require.NoError(t, river.Unmarshal([]byte(in), &args)) + require.NoError(t, syntax.Unmarshal([]byte(in), &args)) require.Equal(t, args.DebugMetricsConfig().DisableHighCardinalityMetrics, true) ext, err := args.Convert() require.NoError(t, err) @@ -132,7 +132,7 @@ func TestDebugMetricsConfig(t *testing.T) { for _, tc := range tests { t.Run(tc.testName, func(t *testing.T) { var args zipkin.Arguments - require.NoError(t, river.Unmarshal([]byte(tc.agentCfg), &args)) + require.NoError(t, syntax.Unmarshal([]byte(tc.agentCfg), &args)) _, err := args.Convert() require.NoError(t, err) diff --git a/internal/component/prometheus/exporter/blackbox/blackbox_test.go b/internal/component/prometheus/exporter/blackbox/blackbox_test.go index 15440fbc23..39ecf1bb79 100644 --- a/internal/component/prometheus/exporter/blackbox/blackbox_test.go +++ b/internal/component/prometheus/exporter/blackbox/blackbox_test.go @@ -6,7 +6,7 @@ import ( "github.com/grafana/agent/internal/component" "github.com/grafana/agent/internal/component/discovery" - river "github.com/grafana/alloy/syntax" + "github.com/grafana/alloy/syntax" blackbox_config 
"github.com/prometheus/blackbox_exporter/config" "github.com/prometheus/common/model" "github.com/stretchr/testify/require" @@ -29,7 +29,7 @@ func TestUnmarshalRiver(t *testing.T) { probe_timeout_offset = "0.5s" ` var args Arguments - err := river.Unmarshal([]byte(riverCfg), &args) + err := syntax.Unmarshal([]byte(riverCfg), &args) require.NoError(t, err) require.Equal(t, "modules.yml", args.ConfigFile) require.Equal(t, 2, len(args.Targets)) @@ -59,7 +59,7 @@ func TestUnmarshalRiverWithInlineConfig(t *testing.T) { probe_timeout_offset = "0.5s" ` var args Arguments - err := river.Unmarshal([]byte(riverCfg), &args) + err := syntax.Unmarshal([]byte(riverCfg), &args) require.NoError(t, err) require.Equal(t, "", args.ConfigFile) var blackboxConfig blackbox_config.Config @@ -94,7 +94,7 @@ func TestUnmarshalRiverWithInlineConfigYaml(t *testing.T) { probe_timeout_offset = "0.5s" ` var args Arguments - err := river.Unmarshal([]byte(riverCfg), &args) + err := syntax.Unmarshal([]byte(riverCfg), &args) require.NoError(t, err) require.Equal(t, "", args.ConfigFile) var blackboxConfig blackbox_config.Config @@ -183,7 +183,7 @@ func TestUnmarshalRiverWithInvalidConfig(t *testing.T) { for _, tt := range tests { t.Run(tt.testname, func(t *testing.T) { var args Arguments - require.EqualError(t, river.Unmarshal([]byte(tt.cfg), &args), tt.expectedError) + require.EqualError(t, syntax.Unmarshal([]byte(tt.cfg), &args), tt.expectedError) }) } } diff --git a/internal/component/prometheus/exporter/cadvisor/cadvisor_test.go b/internal/component/prometheus/exporter/cadvisor/cadvisor_test.go index af53a3a935..fd9c4edaf2 100644 --- a/internal/component/prometheus/exporter/cadvisor/cadvisor_test.go +++ b/internal/component/prometheus/exporter/cadvisor/cadvisor_test.go @@ -5,7 +5,7 @@ import ( "time" "github.com/grafana/agent/internal/static/integrations/cadvisor" - river "github.com/grafana/alloy/syntax" + "github.com/grafana/alloy/syntax" "github.com/stretchr/testify/require" ) @@ -29,7 +29,7 @@ docker_tls_key = "docker_tls_key" docker_tls_ca = "docker_tls_ca" ` var args Arguments - err := river.Unmarshal([]byte(riverCfg), &args) + err := syntax.Unmarshal([]byte(riverCfg), &args) require.NoError(t, err) expected := Arguments{ StoreContainerLabels: true, diff --git a/internal/component/prometheus/exporter/cloudwatch/config.go b/internal/component/prometheus/exporter/cloudwatch/config.go index 4e867374d5..bd751d8ceb 100644 --- a/internal/component/prometheus/exporter/cloudwatch/config.go +++ b/internal/component/prometheus/exporter/cloudwatch/config.go @@ -6,7 +6,7 @@ import ( "time" "github.com/grafana/agent/internal/static/integrations/cloudwatch_exporter" - river "github.com/grafana/alloy/syntax" + "github.com/grafana/alloy/syntax" yaceConf "github.com/nerdswords/yet-another-cloudwatch-exporter/pkg/config" yaceModel "github.com/nerdswords/yet-another-cloudwatch-exporter/pkg/model" ) @@ -98,7 +98,7 @@ type Metric struct { NilToZero *bool `river:"nil_to_zero,attr,optional"` } -// SetToDefault implements river.Defaulter. +// SetToDefault implements syntax.Defaulter. func (a *Arguments) SetToDefault() { *a = defaults } @@ -246,7 +246,7 @@ func toYACEDiscoveryJob(rj DiscoveryJob) *yaceConf.Job { // getHash calculates the MD5 hash of the river representation of the config. 
func getHash(a Arguments) string { - bytes, err := river.Marshal(a) + bytes, err := syntax.Marshal(a) if err != nil { return "" } diff --git a/internal/component/prometheus/exporter/cloudwatch/config_test.go b/internal/component/prometheus/exporter/cloudwatch/config_test.go index ef5611da5a..c66d472612 100644 --- a/internal/component/prometheus/exporter/cloudwatch/config_test.go +++ b/internal/component/prometheus/exporter/cloudwatch/config_test.go @@ -3,7 +3,7 @@ package cloudwatch import ( "testing" - river "github.com/grafana/alloy/syntax" + "github.com/grafana/alloy/syntax" yaceConf "github.com/nerdswords/yet-another-cloudwatch-exporter/pkg/config" yaceModel "github.com/nerdswords/yet-another-cloudwatch-exporter/pkg/model" "github.com/stretchr/testify/require" @@ -457,7 +457,7 @@ func TestCloudwatchComponentConfig(t *testing.T) { } { t.Run(name, func(t *testing.T) { args := Arguments{} - err := river.Unmarshal([]byte(tc.raw), &args) + err := syntax.Unmarshal([]byte(tc.raw), &args) if tc.expectUnmarshallErr { require.Error(t, err) return diff --git a/internal/component/prometheus/exporter/dnsmasq/dnsmasq_test.go b/internal/component/prometheus/exporter/dnsmasq/dnsmasq_test.go index 01d3fe5715..07c863f334 100644 --- a/internal/component/prometheus/exporter/dnsmasq/dnsmasq_test.go +++ b/internal/component/prometheus/exporter/dnsmasq/dnsmasq_test.go @@ -4,7 +4,7 @@ import ( "testing" "github.com/grafana/agent/internal/static/integrations/dnsmasq_exporter" - river "github.com/grafana/alloy/syntax" + "github.com/grafana/alloy/syntax" "github.com/stretchr/testify/assert" ) @@ -15,7 +15,7 @@ func TestUnmarshalRiver(t *testing.T) { expose_leases = true ` var args Arguments - err := river.Unmarshal([]byte(rawCfg), &args) + err := syntax.Unmarshal([]byte(rawCfg), &args) assert.NoError(t, err) expected := Arguments{ @@ -29,7 +29,7 @@ func TestUnmarshalRiver(t *testing.T) { func TestUnmarshalRiverDefaults(t *testing.T) { rawCfg := `` var args Arguments - err := river.Unmarshal([]byte(rawCfg), &args) + err := syntax.Unmarshal([]byte(rawCfg), &args) assert.NoError(t, err) expected := DefaultArguments diff --git a/internal/component/prometheus/exporter/elasticsearch/elasticsearch_test.go b/internal/component/prometheus/exporter/elasticsearch/elasticsearch_test.go index 60708e447f..4983258419 100644 --- a/internal/component/prometheus/exporter/elasticsearch/elasticsearch_test.go +++ b/internal/component/prometheus/exporter/elasticsearch/elasticsearch_test.go @@ -6,7 +6,7 @@ import ( commonCfg "github.com/grafana/agent/internal/component/common/config" "github.com/grafana/agent/internal/static/integrations/elasticsearch_exporter" - river "github.com/grafana/alloy/syntax" + "github.com/grafana/alloy/syntax" "github.com/grafana/alloy/syntax/alloytypes" promCfg "github.com/prometheus/common/config" "github.com/stretchr/testify/require" @@ -37,7 +37,7 @@ func TestRiverUnmarshal(t *testing.T) { ` var args Arguments - err := river.Unmarshal([]byte(riverConfig), &args) + err := syntax.Unmarshal([]byte(riverConfig), &args) require.NoError(t, err) expected := Arguments{ @@ -90,7 +90,7 @@ func TestConvert(t *testing.T) { } ` var args Arguments - err := river.Unmarshal([]byte(riverConfig), &args) + err := syntax.Unmarshal([]byte(riverConfig), &args) require.NoError(t, err) res := args.Convert() diff --git a/internal/component/prometheus/exporter/gcp/gcp_test.go b/internal/component/prometheus/exporter/gcp/gcp_test.go index 020c91422c..aad6a55c7d 100644 --- a/internal/component/prometheus/exporter/gcp/gcp_test.go 
+++ b/internal/component/prometheus/exporter/gcp/gcp_test.go @@ -4,7 +4,7 @@ import ( "testing" "time" - river "github.com/grafana/alloy/syntax" + "github.com/grafana/alloy/syntax" "github.com/stretchr/testify/require" ) @@ -110,7 +110,7 @@ func TestConvertConfig(t *testing.T) { } { t.Run(name, func(t *testing.T) { var args Arguments - err := river.Unmarshal([]byte(tc.riverCfg), &args) + err := syntax.Unmarshal([]byte(tc.riverCfg), &args) if tc.expectedUnmarshalError != "" { require.EqualError(t, err, tc.expectedUnmarshalError) } else { diff --git a/internal/component/prometheus/exporter/github/github_test.go b/internal/component/prometheus/exporter/github/github_test.go index be63c76c95..d22248ce96 100644 --- a/internal/component/prometheus/exporter/github/github_test.go +++ b/internal/component/prometheus/exporter/github/github_test.go @@ -3,7 +3,7 @@ package github import ( "testing" - river "github.com/grafana/alloy/syntax" + "github.com/grafana/alloy/syntax" "github.com/stretchr/testify/require" ) @@ -16,7 +16,7 @@ func TestUnmarshalRiver(t *testing.T) { api_url = "https://some-other-api.github.com" ` var args Arguments - err := river.Unmarshal([]byte(riverCfg), &args) + err := syntax.Unmarshal([]byte(riverCfg), &args) require.NoError(t, err) require.Equal(t, "/etc/github-api-token", args.APITokenFile) require.Equal(t, []string{"grafana/agent"}, args.Repositories) diff --git a/internal/component/prometheus/exporter/kafka/kafka_test.go b/internal/component/prometheus/exporter/kafka/kafka_test.go index 9d7b134b96..287d2a601f 100644 --- a/internal/component/prometheus/exporter/kafka/kafka_test.go +++ b/internal/component/prometheus/exporter/kafka/kafka_test.go @@ -5,7 +5,7 @@ import ( "github.com/grafana/agent/internal/component/discovery" "github.com/grafana/agent/internal/static/integrations/kafka_exporter" - river "github.com/grafana/alloy/syntax" + "github.com/grafana/alloy/syntax" "github.com/stretchr/testify/require" ) @@ -23,7 +23,7 @@ func TestRiverUnmarshal(t *testing.T) { groups_filter_regex = ".*" ` var args Arguments - err := river.Unmarshal([]byte(riverConfig), &args) + err := syntax.Unmarshal([]byte(riverConfig), &args) require.NoError(t, err) expected := Arguments{ @@ -56,7 +56,7 @@ func TestUnmarshalInvalid(t *testing.T) { ` var args Arguments - err := river.Unmarshal([]byte(validRiverConfig), &args) + err := syntax.Unmarshal([]byte(validRiverConfig), &args) require.NoError(t, err) invalidRiverConfig := ` @@ -64,7 +64,7 @@ func TestUnmarshalInvalid(t *testing.T) { kafka_uris = "localhost:9092" ` var invalidArgs Arguments - err = river.Unmarshal([]byte(invalidRiverConfig), &invalidArgs) + err = syntax.Unmarshal([]byte(invalidRiverConfig), &invalidArgs) require.Error(t, err) } @@ -117,6 +117,6 @@ func TestSASLPassword(t *testing.T) { // #6044 ` var args Arguments - err := river.Unmarshal([]byte(exampleRiverConfig), &args) + err := syntax.Unmarshal([]byte(exampleRiverConfig), &args) require.NoError(t, err) } diff --git a/internal/component/prometheus/exporter/memcached/memcached_test.go b/internal/component/prometheus/exporter/memcached/memcached_test.go index f31789753f..167600a9d1 100644 --- a/internal/component/prometheus/exporter/memcached/memcached_test.go +++ b/internal/component/prometheus/exporter/memcached/memcached_test.go @@ -6,7 +6,7 @@ import ( "github.com/grafana/agent/internal/component/common/config" "github.com/grafana/agent/internal/static/integrations/memcached_exporter" - river "github.com/grafana/alloy/syntax" + "github.com/grafana/alloy/syntax" 
"github.com/stretchr/testify/assert" ) @@ -16,7 +16,7 @@ address = "localhost:99" timeout = "5s"` var args Arguments - err := river.Unmarshal([]byte(exampleRiverConfig), &args) + err := syntax.Unmarshal([]byte(exampleRiverConfig), &args) assert.NoError(t, err) expected := Arguments{ @@ -37,7 +37,7 @@ tls_config { key_file = "/path/to/key_file" }` var args Arguments - err := river.Unmarshal([]byte(exampleRiverConfig), &args) + err := syntax.Unmarshal([]byte(exampleRiverConfig), &args) assert.NoError(t, err) expected := Arguments{ @@ -58,7 +58,7 @@ tls_config { ca_pem = "ca" ca_file = "/path/to/ca_file" }` - err = river.Unmarshal([]byte(invalidRiverConfig), &args) + err = syntax.Unmarshal([]byte(invalidRiverConfig), &args) assert.Error(t, err) assert.ErrorContains(t, err, "at most one of") } @@ -73,7 +73,7 @@ func TestRiverUnmarshalDefaults(t *testing.T) { var exampleRiverConfig = `` var args Arguments - err := river.Unmarshal([]byte(exampleRiverConfig), &args) + err := syntax.Unmarshal([]byte(exampleRiverConfig), &args) assert.NoError(t, err) expected := DefaultArguments diff --git a/internal/component/prometheus/exporter/mongodb/mongodb_test.go b/internal/component/prometheus/exporter/mongodb/mongodb_test.go index c2e4ce3e3e..94f3c0152f 100644 --- a/internal/component/prometheus/exporter/mongodb/mongodb_test.go +++ b/internal/component/prometheus/exporter/mongodb/mongodb_test.go @@ -4,7 +4,7 @@ import ( "testing" "github.com/grafana/agent/internal/static/integrations/mongodb_exporter" - river "github.com/grafana/alloy/syntax" + "github.com/grafana/alloy/syntax" "github.com/stretchr/testify/require" ) @@ -17,7 +17,7 @@ func TestRiverUnmarshal(t *testing.T) { ` var args Arguments - err := river.Unmarshal([]byte(riverConfig), &args) + err := syntax.Unmarshal([]byte(riverConfig), &args) require.NoError(t, err) expected := Arguments{ @@ -38,7 +38,7 @@ func TestConvert(t *testing.T) { tls_basic_auth_config_path = "/etc/path-to-file" ` var args Arguments - err := river.Unmarshal([]byte(riverConfig), &args) + err := syntax.Unmarshal([]byte(riverConfig), &args) require.NoError(t, err) res := args.Convert() diff --git a/internal/component/prometheus/exporter/mssql/mssql_test.go b/internal/component/prometheus/exporter/mssql/mssql_test.go index c0fd7b8e25..12f3fcc7e7 100644 --- a/internal/component/prometheus/exporter/mssql/mssql_test.go +++ b/internal/component/prometheus/exporter/mssql/mssql_test.go @@ -6,7 +6,7 @@ import ( "github.com/burningalchemist/sql_exporter/config" "github.com/grafana/agent/internal/static/integrations/mssql" - river "github.com/grafana/alloy/syntax" + "github.com/grafana/alloy/syntax" "github.com/grafana/alloy/syntax/alloytypes" config_util "github.com/prometheus/common/config" "github.com/stretchr/testify/require" @@ -21,7 +21,7 @@ func TestRiverUnmarshal(t *testing.T) { timeout = "10s"` var args Arguments - err := river.Unmarshal([]byte(riverConfig), &args) + err := syntax.Unmarshal([]byte(riverConfig), &args) require.NoError(t, err) expected := Arguments{ @@ -43,7 +43,7 @@ func TestRiverUnmarshalWithInlineQueryConfig(t *testing.T) { query_config = "{ collector_name: mssql_standard, metrics: [ { metric_name: mssql_local_time_seconds, type: gauge, help: 'Local time in seconds since epoch (Unix time).', values: [ unix_time ], query: \"SELECT DATEDIFF(second, '19700101', GETUTCDATE()) AS unix_time\" } ] }"` var args Arguments - err := river.Unmarshal([]byte(riverConfig), &args) + err := syntax.Unmarshal([]byte(riverConfig), &args) require.NoError(t, err) var collectorConfig 
config.CollectorConfig err = yaml.UnmarshalStrict([]byte(args.QueryConfig.Value), &collectorConfig) @@ -72,7 +72,7 @@ func TestRiverUnmarshalWithInlineQueryConfigYaml(t *testing.T) { query_config = "collector_name: mssql_standard\nmetrics:\n- metric_name: mssql_local_time_seconds\n type: gauge\n help: 'Local time in seconds since epoch (Unix time).'\n values: [unix_time]\n query: \"SELECT DATEDIFF(second, '19700101', GETUTCDATE()) AS unix_time\""` var args Arguments - err := river.Unmarshal([]byte(riverConfig), &args) + err := syntax.Unmarshal([]byte(riverConfig), &args) require.NoError(t, err) var collectorConfig config.CollectorConfig err = yaml.UnmarshalStrict([]byte(args.QueryConfig.Value), &collectorConfig) @@ -101,7 +101,7 @@ func TestUnmarshalInvalid(t *testing.T) { ` var invalidArgs Arguments - err := river.Unmarshal([]byte(invalidRiverConfig), &invalidArgs) + err := syntax.Unmarshal([]byte(invalidRiverConfig), &invalidArgs) require.Error(t, err) require.EqualError(t, err, "timeout must be positive") } @@ -116,7 +116,7 @@ func TestUnmarshalInvalidQueryConfigYaml(t *testing.T) { ` var invalidArgs Arguments - err := river.Unmarshal([]byte(invalidRiverConfig), &invalidArgs) + err := syntax.Unmarshal([]byte(invalidRiverConfig), &invalidArgs) require.Error(t, err) require.EqualError(t, err, "invalid query_config: yaml: line 1: did not find expected ',' or ']'") } @@ -131,7 +131,7 @@ func TestUnmarshalInvalidProperty(t *testing.T) { ` var invalidArgs Arguments - err := river.Unmarshal([]byte(invalidRiverConfig), &invalidArgs) + err := syntax.Unmarshal([]byte(invalidRiverConfig), &invalidArgs) require.Error(t, err) require.EqualError(t, err, "invalid query_config: unknown fields in collector: bad_param") } diff --git a/internal/component/prometheus/exporter/mysql/mysql_test.go b/internal/component/prometheus/exporter/mysql/mysql_test.go index eb2664d215..a42c212e5b 100644 --- a/internal/component/prometheus/exporter/mysql/mysql_test.go +++ b/internal/component/prometheus/exporter/mysql/mysql_test.go @@ -4,7 +4,7 @@ import ( "testing" "github.com/grafana/agent/internal/static/integrations/mysqld_exporter" - river "github.com/grafana/alloy/syntax" + "github.com/grafana/alloy/syntax" "github.com/grafana/alloy/syntax/alloytypes" "github.com/stretchr/testify/require" ) @@ -55,7 +55,7 @@ func TestRiverConfigUnmarshal(t *testing.T) { ` var args Arguments - err := river.Unmarshal([]byte(exampleRiverConfig), &args) + err := syntax.Unmarshal([]byte(exampleRiverConfig), &args) require.NoError(t, err) require.Equal(t, "root:secret_password@tcp(localhost:3306)/mydb", string(args.DataSourceName)) @@ -126,7 +126,7 @@ func TestRiverConfigConvert(t *testing.T) { ` var args Arguments - err := river.Unmarshal([]byte(exampleRiverConfig), &args) + err := syntax.Unmarshal([]byte(exampleRiverConfig), &args) require.NoError(t, err) c := args.Convert() diff --git a/internal/component/prometheus/exporter/oracledb/oracledb_test.go b/internal/component/prometheus/exporter/oracledb/oracledb_test.go index aea1120890..6ae79e1143 100644 --- a/internal/component/prometheus/exporter/oracledb/oracledb_test.go +++ b/internal/component/prometheus/exporter/oracledb/oracledb_test.go @@ -5,7 +5,7 @@ import ( "testing" "github.com/grafana/agent/internal/static/integrations/oracledb_exporter" - river "github.com/grafana/alloy/syntax" + "github.com/grafana/alloy/syntax" "github.com/grafana/alloy/syntax/alloytypes" config_util "github.com/prometheus/common/config" "github.com/stretchr/testify/require" @@ -20,7 +20,7 @@ func 
TestRiverUnmarshal(t *testing.T) { ` var args Arguments - err := river.Unmarshal([]byte(riverConfig), &args) + err := syntax.Unmarshal([]byte(riverConfig), &args) require.NoError(t, err) expected := Arguments{ @@ -99,7 +99,7 @@ func TestConvert(t *testing.T) { connection_string = "oracle://user:password@localhost:1521/orcl.localnet" ` var args Arguments - err := river.Unmarshal([]byte(riverConfig), &args) + err := syntax.Unmarshal([]byte(riverConfig), &args) require.NoError(t, err) res := args.Convert() diff --git a/internal/component/prometheus/exporter/postgres/postgres_test.go b/internal/component/prometheus/exporter/postgres/postgres_test.go index 758d0d3d23..3306425e5d 100644 --- a/internal/component/prometheus/exporter/postgres/postgres_test.go +++ b/internal/component/prometheus/exporter/postgres/postgres_test.go @@ -4,7 +4,7 @@ import ( "testing" "github.com/grafana/agent/internal/static/integrations/postgres_exporter" - river "github.com/grafana/alloy/syntax" + "github.com/grafana/alloy/syntax" "github.com/grafana/alloy/syntax/alloytypes" rivertypes "github.com/grafana/alloy/syntax/alloytypes" config_util "github.com/prometheus/common/config" @@ -25,7 +25,7 @@ func TestRiverConfigUnmarshal(t *testing.T) { }` var args Arguments - err := river.Unmarshal([]byte(exampleRiverConfig), &args) + err := syntax.Unmarshal([]byte(exampleRiverConfig), &args) require.NoError(t, err) expected := Arguments{ @@ -57,7 +57,7 @@ func TestRiverConfigConvert(t *testing.T) { }` var args Arguments - err := river.Unmarshal([]byte(exampleRiverConfig), &args) + err := syntax.Unmarshal([]byte(exampleRiverConfig), &args) require.NoError(t, err) c := args.Convert() diff --git a/internal/component/prometheus/exporter/process/process_test.go b/internal/component/prometheus/exporter/process/process_test.go index bcb8cc9011..a7da8afc03 100644 --- a/internal/component/prometheus/exporter/process/process_test.go +++ b/internal/component/prometheus/exporter/process/process_test.go @@ -3,7 +3,7 @@ package process import ( "testing" - river "github.com/grafana/alloy/syntax" + "github.com/grafana/alloy/syntax" "github.com/ncabatoff/process-exporter/config" "github.com/stretchr/testify/require" ) @@ -22,7 +22,7 @@ func TestRiverConfigUnmarshal(t *testing.T) { ` var args Arguments - err := river.Unmarshal([]byte(exampleRiverConfig), &args) + err := syntax.Unmarshal([]byte(exampleRiverConfig), &args) require.NoError(t, err) require.False(t, args.Children) @@ -54,7 +54,7 @@ func TestRiverConfigConvert(t *testing.T) { ` var args Arguments - err := river.Unmarshal([]byte(exampleRiverConfig), &args) + err := syntax.Unmarshal([]byte(exampleRiverConfig), &args) require.NoError(t, err) require.True(t, args.Children) diff --git a/internal/component/prometheus/exporter/redis/redis_test.go b/internal/component/prometheus/exporter/redis/redis_test.go index 15986afcb9..5c4f926c1d 100644 --- a/internal/component/prometheus/exporter/redis/redis_test.go +++ b/internal/component/prometheus/exporter/redis/redis_test.go @@ -5,7 +5,7 @@ import ( "time" "github.com/grafana/agent/internal/static/integrations/redis_exporter" - river "github.com/grafana/alloy/syntax" + "github.com/grafana/alloy/syntax" "github.com/stretchr/testify/require" ) @@ -41,7 +41,7 @@ func TestRiverUnmarshal(t *testing.T) { is_cluster = true ` var args Arguments - err := river.Unmarshal([]byte(riverConfig), &args) + err := syntax.Unmarshal([]byte(riverConfig), &args) require.NoError(t, err) expected := Arguments{ @@ -88,7 +88,7 @@ func TestUnmarshalInvalid(t *testing.T) 
{ script_path = "/tmp/metrics.lua"` var args Arguments - err := river.Unmarshal([]byte(validRiverConfig), &args) + err := syntax.Unmarshal([]byte(validRiverConfig), &args) require.NoError(t, err) invalidRiverConfig := ` @@ -97,7 +97,7 @@ func TestUnmarshalInvalid(t *testing.T) { script_paths = ["/tmp/more-metrics.lua", "/tmp/even-more-metrics.lua"]` var invalidArgs Arguments - err = river.Unmarshal([]byte(invalidRiverConfig), &invalidArgs) + err = syntax.Unmarshal([]byte(invalidRiverConfig), &invalidArgs) require.Error(t, err) } diff --git a/internal/component/prometheus/exporter/snmp/snmp_test.go b/internal/component/prometheus/exporter/snmp/snmp_test.go index 6f4c6d36ae..296f2792c1 100644 --- a/internal/component/prometheus/exporter/snmp/snmp_test.go +++ b/internal/component/prometheus/exporter/snmp/snmp_test.go @@ -6,7 +6,7 @@ import ( "github.com/grafana/agent/internal/component" "github.com/grafana/agent/internal/component/discovery" - river "github.com/grafana/alloy/syntax" + "github.com/grafana/alloy/syntax" "github.com/prometheus/common/model" "github.com/prometheus/snmp_exporter/config" @@ -35,7 +35,7 @@ func TestUnmarshalRiver(t *testing.T) { } ` var args Arguments - err := river.Unmarshal([]byte(riverCfg), &args) + err := syntax.Unmarshal([]byte(riverCfg), &args) require.NoError(t, err) require.Equal(t, "modules.yml", args.ConfigFile) require.Equal(t, 2, len(args.Targets)) @@ -162,7 +162,7 @@ func TestUnmarshalRiverWithInlineConfig(t *testing.T) { } ` var args Arguments - err := river.Unmarshal([]byte(riverCfg), &args) + err := syntax.Unmarshal([]byte(riverCfg), &args) require.NoError(t, err) require.Equal(t, "", args.ConfigFile) require.Equal(t, args.ConfigStruct.Modules["if_mib"].Walk, []string{"1.3.6.1.2.1.2"}) @@ -244,7 +244,7 @@ func TestUnmarshalRiverWithInvalidInlineConfig(t *testing.T) { for _, tt := range tests { t.Run(tt.testname, func(t *testing.T) { var args Arguments - require.EqualError(t, river.Unmarshal([]byte(tt.cfg), &args), tt.expectedError) + require.EqualError(t, syntax.Unmarshal([]byte(tt.cfg), &args), tt.expectedError) }) } } diff --git a/internal/component/prometheus/exporter/snowflake/snowflake_test.go b/internal/component/prometheus/exporter/snowflake/snowflake_test.go index 98b6927d77..1a70433c56 100644 --- a/internal/component/prometheus/exporter/snowflake/snowflake_test.go +++ b/internal/component/prometheus/exporter/snowflake/snowflake_test.go @@ -4,7 +4,7 @@ import ( "testing" "github.com/grafana/agent/internal/static/integrations/snowflake_exporter" - river "github.com/grafana/alloy/syntax" + "github.com/grafana/alloy/syntax" "github.com/grafana/alloy/syntax/alloytypes" config_util "github.com/prometheus/common/config" "github.com/stretchr/testify/require" @@ -20,7 +20,7 @@ func TestRiverUnmarshal(t *testing.T) { ` var args Arguments - err := river.Unmarshal([]byte(riverConfig), &args) + err := syntax.Unmarshal([]byte(riverConfig), &args) require.NoError(t, err) expected := Arguments{ @@ -42,7 +42,7 @@ func TestConvert(t *testing.T) { warehouse = "some_warehouse" ` var args Arguments - err := river.Unmarshal([]byte(riverConfig), &args) + err := syntax.Unmarshal([]byte(riverConfig), &args) require.NoError(t, err) res := args.Convert() diff --git a/internal/component/prometheus/exporter/squid/squid_test.go b/internal/component/prometheus/exporter/squid/squid_test.go index 9d0714a3e4..522ca90bac 100644 --- a/internal/component/prometheus/exporter/squid/squid_test.go +++ b/internal/component/prometheus/exporter/squid/squid_test.go @@ -5,7 +5,7 @@ 
import ( "testing" "github.com/grafana/agent/internal/static/integrations/squid_exporter" - river "github.com/grafana/alloy/syntax" + "github.com/grafana/alloy/syntax" "github.com/grafana/alloy/syntax/alloytypes" "github.com/prometheus/common/config" "github.com/stretchr/testify/require" @@ -19,7 +19,7 @@ func TestRiverUnmarshal(t *testing.T) { ` var args Arguments - err := river.Unmarshal([]byte(riverConfig), &args) + err := syntax.Unmarshal([]byte(riverConfig), &args) require.NoError(t, err) expected := Arguments{ @@ -38,7 +38,7 @@ func TestConvert(t *testing.T) { password = "some_password" ` var args Arguments - err := river.Unmarshal([]byte(riverConfig), &args) + err := syntax.Unmarshal([]byte(riverConfig), &args) require.NoError(t, err) res := args.Convert() diff --git a/internal/component/prometheus/exporter/statsd/statsd_test.go b/internal/component/prometheus/exporter/statsd/statsd_test.go index 7721265436..9592c105b3 100644 --- a/internal/component/prometheus/exporter/statsd/statsd_test.go +++ b/internal/component/prometheus/exporter/statsd/statsd_test.go @@ -4,7 +4,7 @@ import ( "testing" "time" - river "github.com/grafana/alloy/syntax" + "github.com/grafana/alloy/syntax" "github.com/stretchr/testify/require" ) @@ -32,7 +32,7 @@ var ( func TestRiverUnmarshal(t *testing.T) { var args Arguments - err := river.Unmarshal([]byte(exampleRiverConfig), &args) + err := syntax.Unmarshal([]byte(exampleRiverConfig), &args) require.NoError(t, err) require.Equal(t, ":1010", args.ListenUDP) @@ -56,7 +56,7 @@ func TestRiverUnmarshal(t *testing.T) { func TestConvert(t *testing.T) { t.Run("with valid config", func(t *testing.T) { var args Arguments - err := river.Unmarshal([]byte(exampleRiverConfig), &args) + err := syntax.Unmarshal([]byte(exampleRiverConfig), &args) require.NoError(t, err) configStatsd, err := args.Convert() @@ -82,7 +82,7 @@ func TestConvert(t *testing.T) { t.Run("with empty config", func(t *testing.T) { var args Arguments - err := river.Unmarshal([]byte(""), &args) + err := syntax.Unmarshal([]byte(""), &args) require.NoError(t, err) configStatsd, err := args.Convert() diff --git a/internal/component/prometheus/exporter/windows/config_default_windows_test.go b/internal/component/prometheus/exporter/windows/config_default_windows_test.go index ba31ed8ea9..f6d2ea6021 100644 --- a/internal/component/prometheus/exporter/windows/config_default_windows_test.go +++ b/internal/component/prometheus/exporter/windows/config_default_windows_test.go @@ -3,13 +3,13 @@ package windows import ( "testing" - river "github.com/grafana/alloy/syntax" + "github.com/grafana/alloy/syntax" "github.com/stretchr/testify/require" ) func TestRiverUnmarshalWithDefaultConfig(t *testing.T) { var args Arguments - err := river.Unmarshal([]byte(""), &args) + err := syntax.Unmarshal([]byte(""), &args) require.NoError(t, err) var defaultArgs Arguments diff --git a/internal/component/prometheus/exporter/windows/windows_test.go b/internal/component/prometheus/exporter/windows/windows_test.go index 042cf982e3..813a3db51b 100644 --- a/internal/component/prometheus/exporter/windows/windows_test.go +++ b/internal/component/prometheus/exporter/windows/windows_test.go @@ -3,7 +3,7 @@ package windows import ( "testing" - river "github.com/grafana/alloy/syntax" + "github.com/grafana/alloy/syntax" "github.com/stretchr/testify/require" ) @@ -67,7 +67,7 @@ var ( func TestRiverUnmarshal(t *testing.T) { var args Arguments - err := river.Unmarshal([]byte(exampleRiverConfig), &args) + err := 
syntax.Unmarshal([]byte(exampleRiverConfig), &args) require.NoError(t, err) require.Equal(t, []string{"textfile", "cpu"}, args.EnabledCollectors) @@ -94,7 +94,7 @@ func TestRiverUnmarshal(t *testing.T) { func TestConvert(t *testing.T) { var args Arguments - err := river.Unmarshal([]byte(exampleRiverConfig), &args) + err := syntax.Unmarshal([]byte(exampleRiverConfig), &args) require.NoError(t, err) conf := args.Convert() diff --git a/internal/component/prometheus/operator/types_test.go b/internal/component/prometheus/operator/types_test.go index cab515b2ca..a4d5d527a7 100644 --- a/internal/component/prometheus/operator/types_test.go +++ b/internal/component/prometheus/operator/types_test.go @@ -3,7 +3,7 @@ package operator import ( "testing" - river "github.com/grafana/alloy/syntax" + "github.com/grafana/alloy/syntax" "github.com/stretchr/testify/require" ) @@ -24,6 +24,6 @@ func TestRiverUnmarshal(t *testing.T) { ` var args Arguments - err := river.Unmarshal([]byte(exampleRiverConfig), &args) + err := syntax.Unmarshal([]byte(exampleRiverConfig), &args) require.NoError(t, err) } diff --git a/internal/component/prometheus/relabel/relabel_test.go b/internal/component/prometheus/relabel/relabel_test.go index dcd1e27f52..c7d0a04442 100644 --- a/internal/component/prometheus/relabel/relabel_test.go +++ b/internal/component/prometheus/relabel/relabel_test.go @@ -14,7 +14,7 @@ import ( "github.com/grafana/agent/internal/flow/componenttest" "github.com/grafana/agent/internal/service/labelstore" "github.com/grafana/agent/internal/util" - river "github.com/grafana/alloy/syntax" + "github.com/grafana/alloy/syntax" prom "github.com/prometheus/client_golang/prometheus" "github.com/prometheus/prometheus/model/labels" "github.com/prometheus/prometheus/model/relabel" @@ -188,7 +188,7 @@ func TestRuleGetter(t *testing.T) { } forward_to = []` var args Arguments - require.NoError(t, river.Unmarshal([]byte(originalCfg), &args)) + require.NoError(t, syntax.Unmarshal([]byte(originalCfg), &args)) // Set up and start the component. 
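(TestRuleGetter continues below by constructing the controller.) Since this harness recurs in nearly every test file touched by this patch, here is a condensed sketch of its overall shape, written as if it lived alongside the relabel test's imports shown above. `componenttest.TestContext` and `WaitExports` are drawn from typical usage of the harness, not from this hunk, so treat them as assumptions:

```go
// Condensed shape of the componenttest harness used throughout these tests.
func TestExample(t *testing.T) {
	const cfg = `forward_to = []`

	var args Arguments
	require.NoError(t, syntax.Unmarshal([]byte(cfg), &args))

	tc, err := componenttest.NewControllerFromID(util.TestLogger(t), "prometheus.relabel")
	require.NoError(t, err)

	go func() {
		err := tc.Run(componenttest.TestContext(t), args)
		require.NoError(t, err)
	}()

	// Block until the component publishes its first exports, then inspect
	// them through the controller.
	require.NoError(t, tc.WaitExports(time.Second))
	exports := tc.Exports().(Exports)
	_ = exports
}
```

The same three beats repeat everywhere: unmarshal a config literal with `syntax.Unmarshal`, run the component under a test controller, then assert on what it exports or forwards.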
tc, err := componenttest.NewControllerFromID(nil, "prometheus.relabel") @@ -210,7 +210,7 @@ func TestRuleGetter(t *testing.T) { regex = "up" } forward_to = []` - require.NoError(t, river.Unmarshal([]byte(updatedCfg), &args)) + require.NoError(t, syntax.Unmarshal([]byte(updatedCfg), &args)) require.NoError(t, tc.Update(args)) exports = tc.Exports().(Exports) diff --git a/internal/component/prometheus/remotewrite/remote_write_test.go b/internal/component/prometheus/remotewrite/remote_write_test.go index 819cd0cd53..51f528d6a4 100644 --- a/internal/component/prometheus/remotewrite/remote_write_test.go +++ b/internal/component/prometheus/remotewrite/remote_write_test.go @@ -11,7 +11,7 @@ import ( "github.com/grafana/agent/internal/component/prometheus/remotewrite" "github.com/grafana/agent/internal/flow/componenttest" "github.com/grafana/agent/internal/util" - river "github.com/grafana/alloy/syntax" + "github.com/grafana/alloy/syntax" "github.com/prometheus/prometheus/model/labels" "github.com/prometheus/prometheus/prompb" "github.com/prometheus/prometheus/storage/remote" @@ -223,6 +223,6 @@ func sendMetric( func testArgsForConfig(t *testing.T, cfg string) remotewrite.Arguments { var args remotewrite.Arguments - require.NoError(t, river.Unmarshal([]byte(cfg), &args)) + require.NoError(t, syntax.Unmarshal([]byte(cfg), &args)) return args } diff --git a/internal/component/prometheus/remotewrite/types_test.go b/internal/component/prometheus/remotewrite/types_test.go index ed7f4ec751..cca99d8d26 100644 --- a/internal/component/prometheus/remotewrite/types_test.go +++ b/internal/component/prometheus/remotewrite/types_test.go @@ -5,7 +5,7 @@ import ( "testing" "time" - river "github.com/grafana/alloy/syntax" + "github.com/grafana/alloy/syntax" commonconfig "github.com/prometheus/common/config" "github.com/prometheus/common/model" "github.com/prometheus/common/sigv4" @@ -263,7 +263,7 @@ func TestRiverConfig(t *testing.T) { for _, tc := range tests { t.Run(tc.testName, func(t *testing.T) { var args Arguments - err := river.Unmarshal([]byte(tc.cfg), &args) + err := syntax.Unmarshal([]byte(tc.cfg), &args) if tc.errorMsg != "" { require.ErrorContains(t, err, tc.errorMsg) diff --git a/internal/component/prometheus/scrape/scrape_test.go b/internal/component/prometheus/scrape/scrape_test.go index fbfda5799d..630a3d7f5c 100644 --- a/internal/component/prometheus/scrape/scrape_test.go +++ b/internal/component/prometheus/scrape/scrape_test.go @@ -14,7 +14,7 @@ import ( http_service "github.com/grafana/agent/internal/service/http" "github.com/grafana/agent/internal/service/labelstore" "github.com/grafana/agent/internal/util" - river "github.com/grafana/alloy/syntax" + "github.com/grafana/alloy/syntax" "github.com/grafana/ckit/memconn" prometheus_client "github.com/prometheus/client_golang/prometheus" "github.com/prometheus/client_golang/prometheus/promhttp" @@ -47,7 +47,7 @@ func TestRiverConfig(t *testing.T) { ` var args Arguments - err := river.Unmarshal([]byte(exampleRiverConfig), &args) + err := syntax.Unmarshal([]byte(exampleRiverConfig), &args) require.NoError(t, err) } @@ -67,7 +67,7 @@ func TestBadRiverConfig(t *testing.T) { // Make sure the squashed HTTPClientConfig Validate function is being utilized correctly var args Arguments - err := river.Unmarshal([]byte(exampleRiverConfig), &args) + err := syntax.Unmarshal([]byte(exampleRiverConfig), &args) require.ErrorContains(t, err, "at most one of basic_auth, authorization, oauth2, bearer_token & bearer_token_file must be configured") } @@ -173,7 +173,7 @@ 
func TestCustomDialer(t *testing.T) { scrape_timeout = "85ms" ` var args Arguments - err := river.Unmarshal([]byte(config), &args) + err := syntax.Unmarshal([]byte(config), &args) require.NoError(t, err) opts := component.Options{ @@ -220,6 +220,6 @@ func TestValidateScrapeConfig(t *testing.T) { job_name = "local" ` var args Arguments - err := river.Unmarshal([]byte(exampleRiverConfig), &args) + err := syntax.Unmarshal([]byte(exampleRiverConfig), &args) require.ErrorContains(t, err, "scrape_timeout (20s) greater than scrape_interval (10s) for scrape config with job name \"local\"") } diff --git a/internal/component/pyroscope/ebpf/ebpf_linux_test.go b/internal/component/pyroscope/ebpf/ebpf_linux_test.go index 05c8a21fb9..71e49f53c2 100644 --- a/internal/component/pyroscope/ebpf/ebpf_linux_test.go +++ b/internal/component/pyroscope/ebpf/ebpf_linux_test.go @@ -12,7 +12,7 @@ import ( "github.com/grafana/agent/internal/component" "github.com/grafana/agent/internal/component/pyroscope" "github.com/grafana/agent/internal/util" - river "github.com/grafana/alloy/syntax" + "github.com/grafana/alloy/syntax" ebpfspy "github.com/grafana/pyroscope/ebpf" "github.com/grafana/pyroscope/ebpf/pprof" "github.com/grafana/pyroscope/ebpf/sd" @@ -151,7 +151,7 @@ func TestContextShutdown(t *testing.T) { func TestUnmarshalConfig(t *testing.T) { var arg Arguments - err := river.Unmarshal([]byte(`targets = [{"service_name" = "foo", "container_id"= "cid"}] + err := syntax.Unmarshal([]byte(`targets = [{"service_name" = "foo", "container_id"= "cid"}] forward_to = [] collect_interval = "3s" sample_rate = 239 @@ -177,7 +177,7 @@ collect_kernel_profile = false`), &arg) func TestUnmarshalBadConfig(t *testing.T) { var arg Arguments - err := river.Unmarshal([]byte(`targets = [{"service_name" = "foo", "container_id"= "cid"}] + err := syntax.Unmarshal([]byte(`targets = [{"service_name" = "foo", "container_id"= "cid"}] forward_to = [] collect_interval = 3s" sample_rate = 239 diff --git a/internal/component/pyroscope/scrape/scrape_test.go b/internal/component/pyroscope/scrape/scrape_test.go index b9daf6a4dc..1193b0b37c 100644 --- a/internal/component/pyroscope/scrape/scrape_test.go +++ b/internal/component/pyroscope/scrape/scrape_test.go @@ -15,7 +15,7 @@ import ( "github.com/grafana/agent/internal/component/pyroscope" "github.com/grafana/agent/internal/service/cluster" "github.com/grafana/agent/internal/util" - river "github.com/grafana/alloy/syntax" + "github.com/grafana/alloy/syntax" "github.com/prometheus/client_golang/prometheus" "github.com/prometheus/common/model" "github.com/stretchr/testify/require" @@ -187,12 +187,12 @@ func TestUnmarshalConfig(t *testing.T) { t.Run(name, func(t *testing.T) { arg := Arguments{} if tt.expectedErr != "" { - err := river.Unmarshal([]byte(tt.in), &arg) + err := syntax.Unmarshal([]byte(tt.in), &arg) require.Error(t, err) require.Equal(t, tt.expectedErr, err.Error()) return } - require.NoError(t, river.Unmarshal([]byte(tt.in), &arg)) + require.NoError(t, syntax.Unmarshal([]byte(tt.in), &arg)) require.Equal(t, tt.expected(), arg) }) } diff --git a/internal/component/pyroscope/write/write_test.go b/internal/component/pyroscope/write/write_test.go index 68f28a34f9..d803c3a4d7 100644 --- a/internal/component/pyroscope/write/write_test.go +++ b/internal/component/pyroscope/write/write_test.go @@ -13,7 +13,7 @@ import ( "github.com/grafana/agent/internal/component" "github.com/grafana/agent/internal/component/pyroscope" "github.com/grafana/agent/internal/util" - river 
"github.com/grafana/alloy/syntax" + "github.com/grafana/alloy/syntax" pushv1 "github.com/grafana/pyroscope/api/gen/proto/go/push/v1" "github.com/grafana/pyroscope/api/gen/proto/go/push/v1/pushv1connect" typesv1 "github.com/grafana/pyroscope/api/gen/proto/go/types/v1" @@ -210,7 +210,7 @@ func Test_Write_Update(t *testing.T) { func Test_Unmarshal_Config(t *testing.T) { var arg Arguments - river.Unmarshal([]byte(` + syntax.Unmarshal([]byte(` endpoint { url = "http://localhost:4100" remote_timeout = "10s" @@ -250,6 +250,6 @@ func TestBadRiverConfig(t *testing.T) { // Make sure the squashed HTTPClientConfig Validate function is being utilized correctly var args Arguments - err := river.Unmarshal([]byte(exampleRiverConfig), &args) + err := syntax.Unmarshal([]byte(exampleRiverConfig), &args) require.ErrorContains(t, err, "at most one of basic_auth, authorization, oauth2, bearer_token & bearer_token_file must be configured") } diff --git a/internal/component/remote/http/http_test.go b/internal/component/remote/http/http_test.go index b4ed7a5eef..ec89669436 100644 --- a/internal/component/remote/http/http_test.go +++ b/internal/component/remote/http/http_test.go @@ -14,7 +14,7 @@ import ( "github.com/grafana/agent/internal/flow/componenttest" "github.com/grafana/agent/internal/flow/logging/level" "github.com/grafana/agent/internal/util" - river "github.com/grafana/alloy/syntax" + "github.com/grafana/alloy/syntax" "github.com/grafana/alloy/syntax/alloytypes" "github.com/grafana/dskit/backoff" "github.com/stretchr/testify/require" @@ -50,7 +50,7 @@ func Test(t *testing.T) { poll_timeout = "25ms" `, srv.URL, http.MethodPut, "hello there!") var args http_component.Arguments - require.NoError(t, river.Unmarshal([]byte(cfg), &args)) + require.NoError(t, syntax.Unmarshal([]byte(cfg), &args)) go func() { err := ctrl.Run(ctx, args) @@ -126,7 +126,7 @@ func TestUnmarshalValidation(t *testing.T) { for _, tt := range tests { t.Run(tt.testname, func(t *testing.T) { var args http_component.Arguments - require.EqualError(t, river.Unmarshal([]byte(tt.cfg), &args), tt.expectedError) + require.EqualError(t, syntax.Unmarshal([]byte(tt.cfg), &args), tt.expectedError) }) } } diff --git a/internal/component/remote/kubernetes/kubernetes_test.go b/internal/component/remote/kubernetes/kubernetes_test.go index 583b480e5c..aef8410612 100644 --- a/internal/component/remote/kubernetes/kubernetes_test.go +++ b/internal/component/remote/kubernetes/kubernetes_test.go @@ -4,7 +4,7 @@ import ( "testing" "time" - river "github.com/grafana/alloy/syntax" + "github.com/grafana/alloy/syntax" "github.com/stretchr/testify/require" "gotest.tools/assert" ) @@ -17,7 +17,7 @@ func TestRiverUnmarshal(t *testing.T) { poll_timeout = "1s"` var args Arguments - err := river.Unmarshal([]byte(riverCfg), &args) + err := syntax.Unmarshal([]byte(riverCfg), &args) require.NoError(t, err) assert.Equal(t, 10*time.Minute, args.PollFrequency) diff --git a/internal/component/remote/vault/vault_test.go b/internal/component/remote/vault/vault_test.go index b1279c2ddb..bf26c4d06f 100644 --- a/internal/component/remote/vault/vault_test.go +++ b/internal/component/remote/vault/vault_test.go @@ -14,7 +14,7 @@ import ( "github.com/go-kit/log" "github.com/grafana/agent/internal/flow/componenttest" "github.com/grafana/agent/internal/util" - river "github.com/grafana/alloy/syntax" + "github.com/grafana/alloy/syntax" "github.com/grafana/alloy/syntax/alloytypes" "github.com/stretchr/testify/require" "github.com/testcontainers/testcontainers-go" @@ -47,7 +47,7 @@ func 
Test_GetSecrets(t *testing.T) { `, cli.Address(), cli.Token()) var args Arguments - require.NoError(t, river.Unmarshal([]byte(cfg), &args)) + require.NoError(t, syntax.Unmarshal([]byte(cfg), &args)) ctrl, err := componenttest.NewControllerFromID(l, "remote.vault") require.NoError(t, err) @@ -96,7 +96,7 @@ func Test_PollSecrets(t *testing.T) { `, cli.Address(), cli.Token()) var args Arguments - require.NoError(t, river.Unmarshal([]byte(cfg), &args)) + require.NoError(t, syntax.Unmarshal([]byte(cfg), &args)) ctrl, err := componenttest.NewControllerFromID(l, "remote.vault") require.NoError(t, err) diff --git a/internal/converter/internal/common/convert_appendable.go b/internal/converter/internal/common/convert_appendable.go index 2d159b599f..67b5ab81a4 100644 --- a/internal/converter/internal/common/convert_appendable.go +++ b/internal/converter/internal/common/convert_appendable.go @@ -1,7 +1,7 @@ package common import ( - river "github.com/grafana/alloy/syntax" + "github.com/grafana/alloy/syntax" "github.com/grafana/alloy/syntax/token" "github.com/grafana/alloy/syntax/token/builder" "github.com/prometheus/prometheus/storage" @@ -19,7 +19,7 @@ type ConvertAppendable struct { var _ storage.Appendable = (*ConvertAppendable)(nil) var _ builder.Tokenizer = ConvertAppendable{} -var _ river.Capsule = ConvertAppendable{} +var _ syntax.Capsule = ConvertAppendable{} func (f ConvertAppendable) RiverCapsule() {} func (f ConvertAppendable) RiverTokenize() []builder.Token { diff --git a/internal/converter/internal/common/convert_logs_receiver.go b/internal/converter/internal/common/convert_logs_receiver.go index e66c440786..b3110af4d8 100644 --- a/internal/converter/internal/common/convert_logs_receiver.go +++ b/internal/converter/internal/common/convert_logs_receiver.go @@ -2,7 +2,7 @@ package common import ( "github.com/grafana/agent/internal/component/common/loki" - river "github.com/grafana/alloy/syntax" + "github.com/grafana/alloy/syntax" "github.com/grafana/alloy/syntax/token" "github.com/grafana/alloy/syntax/token/builder" ) @@ -17,7 +17,7 @@ type ConvertLogsReceiver struct { var _ loki.LogsReceiver = (*ConvertLogsReceiver)(nil) var _ builder.Tokenizer = ConvertLogsReceiver{} -var _ river.Capsule = ConvertLogsReceiver{} +var _ syntax.Capsule = ConvertLogsReceiver{} func (f ConvertLogsReceiver) RiverCapsule() {} func (f ConvertLogsReceiver) RiverTokenize() []builder.Token { diff --git a/internal/converter/internal/common/convert_targets.go b/internal/converter/internal/common/convert_targets.go index 0819ea9404..15a7116deb 100644 --- a/internal/converter/internal/common/convert_targets.go +++ b/internal/converter/internal/common/convert_targets.go @@ -2,7 +2,7 @@ package common import ( "github.com/grafana/agent/internal/component/discovery" - river "github.com/grafana/alloy/syntax" + "github.com/grafana/alloy/syntax" "github.com/grafana/alloy/syntax/token" "github.com/grafana/alloy/syntax/token/builder" ) @@ -33,7 +33,7 @@ type ConvertTargets struct { } var _ builder.Tokenizer = ConvertTargets{} -var _ river.Capsule = ConvertTargets{} +var _ syntax.Capsule = ConvertTargets{} func (f ConvertTargets) RiverCapsule() {} func (f ConvertTargets) RiverTokenize() []builder.Token { diff --git a/internal/converter/internal/common/river_utils.go b/internal/converter/internal/common/river_utils.go index 09ae0d2d80..4ee518babc 100644 --- a/internal/converter/internal/common/river_utils.go +++ b/internal/converter/internal/common/river_utils.go @@ -5,7 +5,7 @@ import ( "fmt" "strings" - river 
"github.com/grafana/alloy/syntax" + "github.com/grafana/alloy/syntax" "github.com/grafana/alloy/syntax/alloytypes" "github.com/grafana/alloy/syntax/parser" "github.com/grafana/alloy/syntax/printer" @@ -127,13 +127,13 @@ func SanitizeIdentifierPanics(in string) string { } // DefaultValue returns the default value for a given type. If *T implements -// river.Defaulter, a value will be returned with defaults applied. If *T does -// not implement river.Defaulter, the zero value of T is returned. +// syntax.Defaulter, a value will be returned with defaults applied. If *T does +// not implement syntax.Defaulter, the zero value of T is returned. // // T must not be a pointer type. func DefaultValue[T any]() T { var val T - if defaulter, ok := any(&val).(river.Defaulter); ok { + if defaulter, ok := any(&val).(syntax.Defaulter); ok { defaulter.SetToDefault() } return val diff --git a/internal/converter/internal/common/river_utils_test.go b/internal/converter/internal/common/river_utils_test.go index 40d7694457..57ccd53fb8 100644 --- a/internal/converter/internal/common/river_utils_test.go +++ b/internal/converter/internal/common/river_utils_test.go @@ -4,7 +4,7 @@ import ( "testing" "github.com/grafana/agent/internal/converter/internal/common" - river "github.com/grafana/alloy/syntax" + "github.com/grafana/alloy/syntax" "github.com/stretchr/testify/require" ) @@ -19,7 +19,7 @@ type defaultingType struct { Number int } -var _ river.Defaulter = (*defaultingType)(nil) +var _ syntax.Defaulter = (*defaultingType)(nil) func (dt *defaultingType) SetToDefault() { dt.Number = 42 diff --git a/internal/flow/logging/options.go b/internal/flow/logging/options.go index 0607bb12c2..1cde94fffa 100644 --- a/internal/flow/logging/options.go +++ b/internal/flow/logging/options.go @@ -7,7 +7,7 @@ import ( "math" "github.com/grafana/agent/internal/component/common/loki" - river "github.com/grafana/alloy/syntax" + "github.com/grafana/alloy/syntax" ) // Options is a set of options used to construct and configure a Logger. @@ -24,9 +24,9 @@ var DefaultOptions = Options{ Format: FormatDefault, } -var _ river.Defaulter = (*Options)(nil) +var _ syntax.Defaulter = (*Options)(nil) -// SetToDefault implements river.Defaulter. +// SetToDefault implements syntax.Defaulter. 
func (o *Options) SetToDefault() { *o = DefaultOptions } diff --git a/internal/service/http/http_test.go b/internal/service/http/http_test.go index 37b2e2c5ed..b34d66f927 100644 --- a/internal/service/http/http_test.go +++ b/internal/service/http/http_test.go @@ -11,7 +11,7 @@ import ( "github.com/grafana/agent/internal/flow/componenttest" "github.com/grafana/agent/internal/service" "github.com/grafana/agent/internal/util" - river "github.com/grafana/alloy/syntax" + "github.com/grafana/alloy/syntax" "github.com/phayes/freeport" "github.com/prometheus/client_golang/prometheus" "github.com/prometheus/common/config" @@ -187,7 +187,7 @@ func newTestEnvironment(t *testing.T) (*testEnvironment, error) { func (env *testEnvironment) ApplyConfig(config string) error { var args Arguments - if err := river.Unmarshal([]byte(config), &args); err != nil { + if err := syntax.Unmarshal([]byte(config), &args); err != nil { return err } return env.svc.Update(args) diff --git a/internal/service/http/tls.go b/internal/service/http/tls.go index bf428e4e6b..c46b8f3fcd 100644 --- a/internal/service/http/tls.go +++ b/internal/service/http/tls.go @@ -8,7 +8,7 @@ import ( "os" "time" - river "github.com/grafana/alloy/syntax" + "github.com/grafana/alloy/syntax" "github.com/grafana/alloy/syntax/alloytypes" "github.com/grafana/regexp" ) @@ -53,14 +53,14 @@ type WindowsServerFilter struct { RefreshInterval time.Duration `river:"refresh_interval,attr,optional"` } -var _ river.Defaulter = (*WindowsServerFilter)(nil) +var _ syntax.Defaulter = (*WindowsServerFilter)(nil) // SetToDefault sets the default for WindowsServerFilter func (wcf *WindowsServerFilter) SetToDefault() { wcf.RefreshInterval = 5 * time.Minute } -var _ river.Validator = (*TLSArguments)(nil) +var _ syntax.Validator = (*TLSArguments)(nil) // Validate returns whether args is valid. func (args *TLSArguments) Validate() error { diff --git a/internal/service/remotecfg/remotecfg.go b/internal/service/remotecfg/remotecfg.go index e840b850b6..2d2365d912 100644 --- a/internal/service/remotecfg/remotecfg.go +++ b/internal/service/remotecfg/remotecfg.go @@ -20,7 +20,7 @@ import ( "github.com/grafana/agent/internal/featuregate" "github.com/grafana/agent/internal/flow/logging/level" "github.com/grafana/agent/internal/service" - river "github.com/grafana/alloy/syntax" + "github.com/grafana/alloy/syntax" commonconfig "github.com/prometheus/common/config" ) @@ -79,12 +79,12 @@ func GetDefaultArguments() Arguments { } } -// SetToDefault implements river.Defaulter. +// SetToDefault implements syntax.Defaulter. func (a *Arguments) SetToDefault() { *a = GetDefaultArguments() } -// Validate implements river.Validator. +// Validate implements syntax.Validator. func (a *Arguments) Validate() error { // We must explicitly Validate because HTTPClientConfig is squashed and it // won't run otherwise @@ -97,7 +97,7 @@ func (a *Arguments) Validate() error { // Hash marshals the Arguments and returns a hash representation. 
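The squash caveat called out in Validate above is easy to trip over, so here is a condensed sketch of the pattern (ClientConfig and Arguments are hypothetical stand-ins, and the sketch calls Validate directly rather than through the decoder to keep the behavior obvious):

    package main

    import (
        "errors"
        "fmt"
    )

    // ClientConfig stands in for any struct (such as HTTPClientConfig) that
    // is squashed into a parent and has its own Validate method.
    type ClientConfig struct {
        URL string `river:"url,attr,optional"`
    }

    func (c *ClientConfig) Validate() error {
        if c.URL == "" {
            return errors.New("url must be set")
        }
        return nil
    }

    type Arguments struct {
        Name   string       `river:"name,attr"`
        Client ClientConfig `river:",squash"`
    }

    // Validate runs on the struct being decoded, but, as the comment above
    // notes, not automatically on fields squashed into it, so it has to
    // forward to them by hand.
    func (a *Arguments) Validate() error {
        return a.Client.Validate()
    }

    func main() {
        args := Arguments{Name: "x"} // url left unset
        fmt.Println(args.Validate()) // url must be set
    }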
func (a *Arguments) Hash() (string, error) { - b, err := river.Marshal(a) + b, err := syntax.Marshal(a) if err != nil { return "", fmt.Errorf("failed to marshal arguments: %w", err) } diff --git a/internal/service/remotecfg/remotecfg_test.go b/internal/service/remotecfg/remotecfg_test.go index e469081f0e..e8d8f7c61f 100644 --- a/internal/service/remotecfg/remotecfg_test.go +++ b/internal/service/remotecfg/remotecfg_test.go @@ -19,7 +19,7 @@ import ( "github.com/grafana/agent/internal/flow/logging" "github.com/grafana/agent/internal/service" "github.com/grafana/agent/internal/util" - river "github.com/grafana/alloy/syntax" + "github.com/grafana/alloy/syntax" "github.com/prometheus/client_golang/prometheus" "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" @@ -135,7 +135,7 @@ func newTestEnvironment(t *testing.T) *testEnvironment { func (env *testEnvironment) ApplyConfig(config string) error { var args Arguments - if err := river.Unmarshal([]byte(config), &args); err != nil { + if err := syntax.Unmarshal([]byte(config), &args); err != nil { return err } return env.svc.Update(args) diff --git a/syntax/encoding/riverjson/riverjson_test.go b/syntax/encoding/riverjson/riverjson_test.go index e567f8a5cc..2a8f8ae3cd 100644 --- a/syntax/encoding/riverjson/riverjson_test.go +++ b/syntax/encoding/riverjson/riverjson_test.go @@ -3,7 +3,7 @@ package riverjson_test import ( "testing" - river "github.com/grafana/alloy/syntax" + "github.com/grafana/alloy/syntax" "github.com/grafana/alloy/syntax/alloytypes" "github.com/grafana/alloy/syntax/encoding/riverjson" "github.com/stretchr/testify/require" @@ -304,7 +304,7 @@ type defaultsBlock struct { Age int `river:"age,attr,optional"` } -var _ river.Defaulter = (*defaultsBlock)(nil) +var _ syntax.Defaulter = (*defaultsBlock)(nil) func (d *defaultsBlock) SetToDefault() { *d = defaultsBlock{ diff --git a/syntax/syntax_test.go b/syntax/syntax_test.go index 95276ab43e..747c2c4b6c 100644 --- a/syntax/syntax_test.go +++ b/syntax/syntax_test.go @@ -4,7 +4,7 @@ import ( "fmt" "os" - river "github.com/grafana/alloy/syntax" + "github.com/grafana/alloy/syntax" ) func ExampleUnmarshal() { @@ -48,7 +48,7 @@ func ExampleUnmarshal() { // Unmarshal the config into our Book type and print out the data. 
var b Book - if err := river.Unmarshal([]byte(input), &b); err != nil { + if err := syntax.Unmarshal([]byte(input), &b); err != nil { panic(err) } @@ -84,7 +84,7 @@ func ExampleUnmarshal_functions() { ` var d Data - if err := river.Unmarshal([]byte(input), &d); err != nil { + if err := syntax.Unmarshal([]byte(input), &d); err != nil { panic(err) } @@ -96,7 +96,7 @@ func ExampleUnmarshalValue() { input := `3 + 5` var num int - if err := river.UnmarshalValue([]byte(input), &num); err != nil { + if err := syntax.UnmarshalValue([]byte(input), &num); err != nil { panic(err) } @@ -116,7 +116,7 @@ func ExampleMarshal() { Age: 43, } - bb, err := river.Marshal(p) + bb, err := syntax.Marshal(p) if err != nil { panic(err) } @@ -138,7 +138,7 @@ func ExampleMarshalValue() { Age: 43, } - bb, err := river.MarshalValue(p) + bb, err := syntax.MarshalValue(p) if err != nil { panic(err) } From e1aab19feaf64df8099148a5a8001f5f2c7f1049 Mon Sep 17 00:00:00 2001 From: Robert Fratto Date: Mon, 25 Mar 2024 14:27:44 -0400 Subject: [PATCH 044/136] syntax: remove unused riverfmt tool --- syntax/cmd/riverfmt/main.go | 103 ------------------------------------ 1 file changed, 103 deletions(-) delete mode 100644 syntax/cmd/riverfmt/main.go diff --git a/syntax/cmd/riverfmt/main.go b/syntax/cmd/riverfmt/main.go deleted file mode 100644 index 84fc4986bc..0000000000 --- a/syntax/cmd/riverfmt/main.go +++ /dev/null @@ -1,103 +0,0 @@ -package main - -import ( - "bytes" - "errors" - "flag" - "fmt" - "io" - "os" - - "github.com/grafana/alloy/syntax/diag" - "github.com/grafana/alloy/syntax/parser" - "github.com/grafana/alloy/syntax/printer" -) - -func main() { - err := run() - - var diags diag.Diagnostics - if errors.As(err, &diags) { - for _, diag := range diags { - fmt.Fprintln(os.Stderr, diag) - } - os.Exit(1) - } else if err != nil { - fmt.Fprintf(os.Stderr, "error: %s\n", err) - os.Exit(1) - } -} - -func run() error { - var ( - write bool - ) - - fs := flag.NewFlagSet("riverfmt", flag.ExitOnError) - fs.BoolVar(&write, "w", write, "write result to (source) file instead of stdout") - - if err := fs.Parse(os.Args[1:]); err != nil { - return err - } - - args := fs.Args() - switch len(args) { - case 0: - if write { - return fmt.Errorf("cannot use -w with standard input") - } - return format("", nil, os.Stdin, write) - - case 1: - fi, err := os.Stat(args[0]) - if err != nil { - return err - } - if fi.IsDir() { - return fmt.Errorf("cannot format a directory") - } - f, err := os.Open(args[0]) - if err != nil { - return err - } - defer f.Close() - return format(args[0], fi, f, write) - - default: - return fmt.Errorf("can only format one file") - } -} - -func format(filename string, fi os.FileInfo, r io.Reader, write bool) error { - bb, err := io.ReadAll(r) - if err != nil { - return err - } - - f, err := parser.ParseFile(filename, bb) - if err != nil { - return err - } - - var buf bytes.Buffer - if err := printer.Fprint(&buf, f); err != nil { - return err - } - - // Add a newline at the end - _, _ = buf.Write([]byte{'\n'}) - - if !write { - _, err := io.Copy(os.Stdout, &buf) - return err - } - - wf, err := os.OpenFile(filename, os.O_WRONLY|os.O_TRUNC|os.O_CREATE, fi.Mode().Perm()) - if err != nil { - return err - } - defer wf.Close() - - _, err = io.Copy(wf, &buf) - return err -} From b13c9b3eede1ea877a5519015c257d92f83d3f50 Mon Sep 17 00:00:00 2001 From: Robert Fratto Date: Mon, 25 Mar 2024 14:30:56 -0400 Subject: [PATCH 045/136] syntax/encoding: rename riverjson package to alloyjson --- internal/component/component_provider.go | 8 
+++---- .../riverjson.go => alloyjson/alloyjson.go} | 14 ++++++------ .../alloyjson_test.go} | 22 +++++++++---------- .../{riverjson => alloyjson}/types.go | 2 +- 4 files changed, 23 insertions(+), 23 deletions(-) rename syntax/encoding/{riverjson/riverjson.go => alloyjson/alloyjson.go} (95%) rename syntax/encoding/{riverjson/riverjson_test.go => alloyjson/alloyjson_test.go} (94%) rename syntax/encoding/{riverjson => alloyjson}/types.go (98%) diff --git a/internal/component/component_provider.go b/internal/component/component_provider.go index a9f2d9f1d1..1f2c981447 100644 --- a/internal/component/component_provider.go +++ b/internal/component/component_provider.go @@ -7,7 +7,7 @@ import ( "strings" "time" - "github.com/grafana/alloy/syntax/encoding/riverjson" + "github.com/grafana/alloy/syntax/encoding/alloyjson" ) var ( @@ -170,15 +170,15 @@ func (info *Info) MarshalJSON() ([]byte, error) { referencedBy = []string{} } - arguments, err = riverjson.MarshalBody(info.Arguments) + arguments, err = alloyjson.MarshalBody(info.Arguments) if err != nil { return nil, err } - exports, err = riverjson.MarshalBody(info.Exports) + exports, err = alloyjson.MarshalBody(info.Exports) if err != nil { return nil, err } - debugInfo, err = riverjson.MarshalBody(info.DebugInfo) + debugInfo, err = alloyjson.MarshalBody(info.DebugInfo) if err != nil { return nil, err } diff --git a/syntax/encoding/riverjson/riverjson.go b/syntax/encoding/alloyjson/alloyjson.go similarity index 95% rename from syntax/encoding/riverjson/riverjson.go rename to syntax/encoding/alloyjson/alloyjson.go index 9afdb0aba1..8e0616f661 100644 --- a/syntax/encoding/riverjson/riverjson.go +++ b/syntax/encoding/alloyjson/alloyjson.go @@ -1,5 +1,5 @@ -// Package riverjson encodes River as JSON. -package riverjson +// Package alloyjson encodes Alloy configuration syntax as JSON. +package alloyjson import ( "encoding/json" @@ -61,7 +61,7 @@ func encodeStructAsBody(rv reflect.Value) jsonBody { case reflect.Map: if rv.Type().Key().Kind() != reflect.String { - panic("syntax/encoding/riverjson: unsupported map type; expected map[string]T, got " + rv.Type().String()) + panic("syntax/encoding/alloyjson: unsupported map type; expected map[string]T, got " + rv.Type().String()) } iter := rv.MapRange() @@ -76,7 +76,7 @@ func encodeStructAsBody(rv reflect.Value) jsonBody { } default: - panic(fmt.Sprintf("syntax/encoding/riverjson: can only encode struct or map[string]T values to bodies, got %s", rv.Kind())) + panic(fmt.Sprintf("syntax/encoding/alloyjson: can only encode struct or map[string]T values to bodies, got %s", rv.Kind())) } return body @@ -111,7 +111,7 @@ func encodeFieldAsStatements(prefix []string, field syntaxtags.Field, fieldValue // Iterate over the map and add each element as an attribute into it. 
if fieldValue.Type().Key().Kind() != reflect.String { - panic("syntax/encoding/riverjson: unsupported map type for block; expected map[string]T, got " + fieldValue.Type().String()) + panic("syntax/encoding/alloyjson: unsupported map type for block; expected map[string]T, got " + fieldValue.Type().String()) } statements := []jsonStatement{} @@ -182,7 +182,7 @@ func encodeFieldAsStatements(prefix []string, field syntaxtags.Field, fieldValue return statements default: - panic(fmt.Sprintf("syntax/encoding/riverjson: unrecognized enum kind %s", fieldValue.Kind())) + panic(fmt.Sprintf("syntax/encoding/alloyjson: unrecognized enum kind %s", fieldValue.Kind())) } } @@ -308,6 +308,6 @@ func buildJSONValue(v value.Value) jsonValue { return jsonValue{Type: "capsule", Value: v.Describe()} default: - panic(fmt.Sprintf("syntax/encoding/riverjson: unrecognized value type %q", v.Type())) + panic(fmt.Sprintf("syntax/encoding/alloyjson: unrecognized value type %q", v.Type())) } } diff --git a/syntax/encoding/riverjson/riverjson_test.go b/syntax/encoding/alloyjson/alloyjson_test.go similarity index 94% rename from syntax/encoding/riverjson/riverjson_test.go rename to syntax/encoding/alloyjson/alloyjson_test.go index 2a8f8ae3cd..3af2e02b30 100644 --- a/syntax/encoding/riverjson/riverjson_test.go +++ b/syntax/encoding/alloyjson/alloyjson_test.go @@ -1,11 +1,11 @@ -package riverjson_test +package alloyjson_test import ( "testing" "github.com/grafana/alloy/syntax" "github.com/grafana/alloy/syntax/alloytypes" - "github.com/grafana/alloy/syntax/encoding/riverjson" + "github.com/grafana/alloy/syntax/encoding/alloyjson" "github.com/stretchr/testify/require" ) @@ -107,7 +107,7 @@ func TestValues(t *testing.T) { for _, tc := range tt { t.Run(tc.name, func(t *testing.T) { - actual, err := riverjson.MarshalValue(tc.input) + actual, err := alloyjson.MarshalValue(tc.input) require.NoError(t, err) require.JSONEq(t, tc.expectJSON, string(actual)) }) @@ -194,7 +194,7 @@ func TestBlock(t *testing.T) { } ]` - actual, err := riverjson.MarshalBody(val) + actual, err := alloyjson.MarshalBody(val) require.NoError(t, err) require.JSONEq(t, expect, string(actual)) } @@ -216,7 +216,7 @@ func TestBlock_Empty_Required_Block_Slice(t *testing.T) { t.Run(tc.name, func(t *testing.T) { expect := `[]` - actual, err := riverjson.MarshalBody(tc.val) + actual, err := alloyjson.MarshalBody(tc.val) require.NoError(t, err) require.JSONEq(t, expect, string(actual)) }) @@ -240,7 +240,7 @@ type labeledBlock struct { } func TestNilBody(t *testing.T) { - actual, err := riverjson.MarshalBody(nil) + actual, err := alloyjson.MarshalBody(nil) require.NoError(t, err) require.JSONEq(t, `[]`, string(actual)) } @@ -248,7 +248,7 @@ func TestNilBody(t *testing.T) { func TestEmptyBody(t *testing.T) { type block struct{} - actual, err := riverjson.MarshalBody(block{}) + actual, err := alloyjson.MarshalBody(block{}) require.NoError(t, err) require.JSONEq(t, `[]`, string(actual)) } @@ -292,7 +292,7 @@ func TestHideDefaults(t *testing.T) { for _, tc := range tt { t.Run(tc.name, func(t *testing.T) { - actual, err := riverjson.MarshalBody(tc.val) + actual, err := alloyjson.MarshalBody(tc.val) require.NoError(t, err) require.JSONEq(t, tc.expectJSON, string(actual)) }) @@ -329,7 +329,7 @@ func TestMapBlocks(t *testing.T) { }] }]` - bb, err := riverjson.MarshalBody(val) + bb, err := alloyjson.MarshalBody(val) require.NoError(t, err) require.JSONEq(t, expect, string(bb)) } @@ -343,7 +343,7 @@ func TestRawMap(t *testing.T) { "value": { "type": "string", "value": "value" } 
}]` - bb, err := riverjson.MarshalBody(val) + bb, err := alloyjson.MarshalBody(val) require.NoError(t, err) require.JSONEq(t, expect, string(bb)) } @@ -357,7 +357,7 @@ func TestRawMap_Capsule(t *testing.T) { "value": { "type": "capsule", "value": "(secret)" } }]` - bb, err := riverjson.MarshalBody(val) + bb, err := alloyjson.MarshalBody(val) require.NoError(t, err) require.JSONEq(t, expect, string(bb)) } diff --git a/syntax/encoding/riverjson/types.go b/syntax/encoding/alloyjson/types.go similarity index 98% rename from syntax/encoding/riverjson/types.go rename to syntax/encoding/alloyjson/types.go index 3170331e46..b176df478c 100644 --- a/syntax/encoding/riverjson/types.go +++ b/syntax/encoding/alloyjson/types.go @@ -1,4 +1,4 @@ -package riverjson +package alloyjson // Various concrete types used to marshal River values. type ( From 0b467eec50936601ac90a38a01a2d9d1aef47267 Mon Sep 17 00:00:00 2001 From: Robert Fratto Date: Mon, 25 Mar 2024 14:43:41 -0400 Subject: [PATCH 046/136] ci: fix CI jobs for Alloy (#67) --- .drone/drone.yml | 4 +- .drone/pipelines/test_packages.jsonnet | 2 + .../agent_linux_packages_test.go | 50 +++++++++---------- tools/ci/docker-containers-windows | 5 +- 4 files changed, 34 insertions(+), 27 deletions(-) diff --git a/.drone/drone.yml b/.drone/drone.yml index ce1dd8a68c..3f631aca9d 100644 --- a/.drone/drone.yml +++ b/.drone/drone.yml @@ -311,7 +311,9 @@ steps: trigger: paths: - packaging/** + - internal/tools/packaging_test/** - Makefile + - tools/make/*.mk ref: - refs/heads/main type: docker @@ -399,6 +401,6 @@ kind: secret name: updater_private_key --- kind: signature -hmac: ba4497becf94a0f6f8dead2d99f8636683fbfba81eb869723c0a71ce4f7dcc09 +hmac: 1a44f5bcd0f52805d4b7d1f11433f993832627118bbae3956fe5aa49b68d5a31 ... diff --git a/.drone/pipelines/test_packages.jsonnet b/.drone/pipelines/test_packages.jsonnet index 42fca63679..7ec90a805b 100644 --- a/.drone/pipelines/test_packages.jsonnet +++ b/.drone/pipelines/test_packages.jsonnet @@ -7,7 +7,9 @@ local pipelines = import '../util/pipelines.jsonnet'; ref: ['refs/heads/main'], paths: [ 'packaging/**', + 'internal/tools/packaging_test/**', 'Makefile', + 'tools/make/*.mk', ], }, steps: [{ diff --git a/internal/tools/packaging_test/agent_linux_packages_test.go b/internal/tools/packaging_test/agent_linux_packages_test.go index 9bee704566..d9552b0a8f 100644 --- a/internal/tools/packaging_test/agent_linux_packages_test.go +++ b/internal/tools/packaging_test/agent_linux_packages_test.go @@ -27,11 +27,11 @@ func TestAlloyLinuxPackages(t *testing.T) { tt := []struct { name string - f func(*AgentEnvironment, *testing.T) + f func(*AlloyEnvironment, *testing.T) }{ - {"install package", (*AgentEnvironment).TestInstall}, - {"ensure existing config doesn't get overridden", (*AgentEnvironment).TestConfigPersistence}, - {"test data folder permissions", (*AgentEnvironment).TestDataFolderPermissions}, + {"install package", (*AlloyEnvironment).TestInstall}, + {"ensure existing config doesn't get overridden", (*AlloyEnvironment).TestConfigPersistence}, + {"test data folder permissions", (*AlloyEnvironment).TestDataFolderPermissions}, // TODO: a test to verify that the systemd service works would be nice, but not // required. 
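The test table above leans on Go method expressions: writing (*AlloyEnvironment).TestInstall converts the method into a plain function whose first argument is the receiver, which is what lets one table drive the same checks against both the RPM and DEB environments. A standalone sketch of the idiom (the Env type here is hypothetical, not the real test harness):

    package main

    import "fmt"

    // Env is a hypothetical stand-in for the packaging test environment.
    type Env struct{ format string }

    func (e *Env) Install() { fmt.Println("install via", e.format) }
    func (e *Env) Remove()  { fmt.Println("remove via", e.format) }

    func main() {
        // Method expressions: each entry is an ordinary func(*Env) with the
        // receiver hoisted into the parameter list.
        steps := []struct {
            name string
            f    func(*Env)
        }{
            {"install", (*Env).Install},
            {"remove", (*Env).Remove},
        }

        for _, format := range []string{"rpm", "deb"} {
            for _, step := range steps {
                step.f(&Env{format: format}) // run every step per format
            }
        }
    }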
@@ -43,11 +43,11 @@ func TestAlloyLinuxPackages(t *testing.T) {
 	for _, tc := range tt {
 		t.Run(tc.name+"/rpm", func(t *testing.T) {
-			env := &AgentEnvironment{RPMEnvironment(t, packageName, dockerPool)}
+			env := &AlloyEnvironment{RPMEnvironment(t, packageName, dockerPool)}
 			tc.f(env, t)
 		})
 		t.Run(tc.name+"/deb", func(t *testing.T) {
-			env := &AgentEnvironment{DEBEnvironment(t, packageName, dockerPool)}
+			env := &AlloyEnvironment{DEBEnvironment(t, packageName, dockerPool)}
 			tc.f(env, t)
 		})
 	}
@@ -61,7 +61,7 @@ func buildAlloyPackages(t *testing.T) {
 	root, err := filepath.Abs(filepath.Join(wd, "../../.."))
 	require.NoError(t, err)
 
-	cmd := exec.Command("make", fmt.Sprintf("dist-agent-packages-%s", runtime.GOARCH))
+	cmd := exec.Command("make", fmt.Sprintf("dist-alloy-packages-%s", runtime.GOARCH))
 	cmd.Env = append(
 		os.Environ(),
 		"VERSION=v0.0.0",
@@ -73,47 +73,47 @@ func buildAlloyPackages(t *testing.T) {
 	require.NoError(t, cmd.Run())
 }
 
-type AgentEnvironment struct{ Environment }
+type AlloyEnvironment struct{ Environment }
 
-func (env *AgentEnvironment) TestInstall(t *testing.T) {
+func (env *AlloyEnvironment) TestInstall(t *testing.T) {
 	res := env.Install()
 	require.Equal(t, 0, res.ExitCode, "installing failed")
 
-	res = env.ExecScript(`[ -f /usr/bin/grafana-agent ]`)
-	require.Equal(t, 0, res.ExitCode, "expected grafana-agent to be installed")
-	res = env.ExecScript(`[ -f /etc/grafana-agent.river ]`)
-	require.Equal(t, 0, res.ExitCode, "expected grafana agent configuration file to exist")
+	res = env.ExecScript(`[ -f /usr/bin/alloy ]`)
+	require.Equal(t, 0, res.ExitCode, "expected Alloy to be installed")
+	res = env.ExecScript(`[ -f /etc/alloy.river ]`)
+	require.Equal(t, 0, res.ExitCode, "expected Alloy configuration file to exist")
 
 	res = env.Uninstall()
 	require.Equal(t, 0, res.ExitCode, "uninstalling failed")
 
-	res = env.ExecScript(`[ -f /usr/bin/grafana-agent ]`)
-	require.Equal(t, 1, res.ExitCode, "expected grafana-agent to be uninstalled")
+	res = env.ExecScript(`[ -f /usr/bin/alloy ]`)
+	require.Equal(t, 1, res.ExitCode, "expected Alloy to be uninstalled")
 
 	// NOTE(rfratto): we don't check for what happens to the config file here,
 	// since the behavior is inconsistent: rpm uninstalls it, but deb doesn't.
 }
 
-func (env *AgentEnvironment) TestConfigPersistence(t *testing.T) {
-	res := env.ExecScript(`echo -n "keepalive" > /etc/grafana-agent.river`)
+func (env *AlloyEnvironment) TestConfigPersistence(t *testing.T) {
+	res := env.ExecScript(`echo -n "keepalive" > /etc/alloy.river`)
 	require.Equal(t, 0, res.ExitCode, "failed to write config file")
 
 	res = env.Install()
 	require.Equal(t, 0, res.ExitCode, "installation failed")
 
-	res = env.ExecScript(`cat /etc/grafana-agent.river`)
+	res = env.ExecScript(`cat /etc/alloy.river`)
 	require.Equal(t, "keepalive", res.Stdout, "Expected existing file to not be overridden")
 }
 
-func (env *AgentEnvironment) TestDataFolderPermissions(t *testing.T) {
-	// Installing should create /var/lib/grafana-agent, assign it to the
-	// grafana-agent user and group, and set its permissions to 0770.
+func (env *AlloyEnvironment) TestDataFolderPermissions(t *testing.T) {
+	// Installing should create /var/lib/alloy, assign it to the
+	// alloy user and group, and set its permissions to 0770.
res := env.Install() require.Equal(t, 0, res.ExitCode, "installation failed") - res = env.ExecScript(`[ -d /var/lib/grafana-agent ]`) - require.Equal(t, 0, res.ExitCode, "Expected /var/lib/grafana-agent to have been created during install") + res = env.ExecScript(`[ -d /var/lib/alloy ]`) + require.Equal(t, 0, res.ExitCode, "Expected /var/lib/alloy to have been created during install") - res = env.ExecScript(`stat -c '%a:%U:%G' /var/lib/grafana-agent`) - require.Equal(t, "770:grafana-agent:grafana-agent\n", res.Stdout, "wrong permissions for data folder") + res = env.ExecScript(`stat -c '%a:%U:%G' /var/lib/alloy`) + require.Equal(t, "770:alloy:alloy\n", res.Stdout, "wrong permissions for data folder") require.Equal(t, 0, res.ExitCode, "stat'ing data folder failed") } diff --git a/tools/ci/docker-containers-windows b/tools/ci/docker-containers-windows index f43db76536..dd55324ec9 100644 --- a/tools/ci/docker-containers-windows +++ b/tools/ci/docker-containers-windows @@ -30,7 +30,10 @@ else VERSION=$(./tools/image-tag) fi -VERSION_TAG=$VERSION-windows +# The VERSION_TAG is the version to use for the Docker tag. It is sanitized to +# force it to be a valid tag name; ./tools/image-tag can emit characters that +# are valid for semver but invalid for Docker tags, such as +. +VERSION_TAG=${VERSION//+/-}-windows # We also need to know which "branch tag" to update. Branch tags are used as a # secondary tag for Docker containers. The branch tag is "latest" when being From 4d4b5a84945c4c20b003d5b40fac0b2f16cd20f5 Mon Sep 17 00:00:00 2001 From: Robert Fratto Date: Mon, 25 Mar 2024 14:47:10 -0400 Subject: [PATCH 047/136] packaging.mk: fix invalid target dependency --- tools/make/packaging.mk | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/tools/make/packaging.mk b/tools/make/packaging.mk index dc24661f8a..ec259d4367 100644 --- a/tools/make/packaging.mk +++ b/tools/make/packaging.mk @@ -145,7 +145,7 @@ dist-alloy-packages: dist-alloy-packages-amd64 \ dist-alloy-packages-s390x .PHONY: dist-alloy-packages-amd64 -dist-alloy-packages-amd64: dist/grafana-alloy-linux-amd64 +dist-alloy-packages-amd64: dist/alloy-linux-amd64 ifeq ($(USE_CONTAINER),1) $(RERUN_IN_CONTAINER) else @@ -154,7 +154,7 @@ else endif .PHONY: dist-alloy-packages-arm64 -dist-alloy-packages-arm64: dist/grafana-alloy-linux-arm64 +dist-alloy-packages-arm64: dist/alloy-linux-arm64 ifeq ($(USE_CONTAINER),1) $(RERUN_IN_CONTAINER) else From b6a163dcda166e439725688bf597b8324c2e428d Mon Sep 17 00:00:00 2001 From: Robert Fratto Date: Mon, 25 Mar 2024 14:49:57 -0400 Subject: [PATCH 048/136] syntax: rename river tags to alloy --- syntax/encoding/alloyjson/alloyjson_test.go | 28 ++-- syntax/internal/syntaxtags/syntaxtags.go | 30 ++-- syntax/internal/syntaxtags/syntaxtags_test.go | 62 ++++---- syntax/internal/value/decode_test.go | 70 ++++---- syntax/internal/value/type_test.go | 2 +- syntax/internal/value/value_object_test.go | 46 +++--- syntax/internal/value/value_test.go | 4 +- syntax/syntax.go | 52 +++--- syntax/syntax_test.go | 22 +-- syntax/token/builder/builder_test.go | 54 +++---- syntax/token/builder/nested_defaults_test.go | 12 +- syntax/types.go | 6 +- syntax/vm/vm_benchmarks_test.go | 6 +- syntax/vm/vm_block_test.go | 150 +++++++++--------- syntax/vm/vm_errors_test.go | 8 +- syntax/vm/vm_stdlib_test.go | 6 +- syntax/vm/vm_test.go | 8 +- 17 files changed, 283 insertions(+), 283 deletions(-) diff --git a/syntax/encoding/alloyjson/alloyjson_test.go b/syntax/encoding/alloyjson/alloyjson_test.go index 3af2e02b30..2e9bf65269 
100644 --- a/syntax/encoding/alloyjson/alloyjson_test.go +++ b/syntax/encoding/alloyjson/alloyjson_test.go @@ -201,7 +201,7 @@ func TestBlock(t *testing.T) { func TestBlock_Empty_Required_Block_Slice(t *testing.T) { type wrapper struct { - Blocks []testBlock `river:"some_block,block"` + Blocks []testBlock `alloy:"some_block,block"` } tt := []struct { @@ -224,19 +224,19 @@ func TestBlock_Empty_Required_Block_Slice(t *testing.T) { } type testBlock struct { - Number int `river:"number,attr,optional"` - String string `river:"string,attr,optional"` - Boolean bool `river:"boolean,attr,optional"` - Array []any `river:"array,attr,optional"` - Object map[string]any `river:"object,attr,optional"` - - Labeled []labeledBlock `river:"labeled_block,block,optional"` - Blocks []testBlock `river:"inner_block,block,optional"` + Number int `alloy:"number,attr,optional"` + String string `alloy:"string,attr,optional"` + Boolean bool `alloy:"boolean,attr,optional"` + Array []any `alloy:"array,attr,optional"` + Object map[string]any `alloy:"object,attr,optional"` + + Labeled []labeledBlock `alloy:"labeled_block,block,optional"` + Blocks []testBlock `alloy:"inner_block,block,optional"` } type labeledBlock struct { - TestBlock testBlock `river:",squash"` - Label string `river:",label"` + TestBlock testBlock `alloy:",squash"` + Label string `alloy:",label"` } func TestNilBody(t *testing.T) { @@ -300,8 +300,8 @@ func TestHideDefaults(t *testing.T) { } type defaultsBlock struct { - Name string `river:"name,attr,optional"` - Age int `river:"age,attr,optional"` + Name string `alloy:"name,attr,optional"` + Age int `alloy:"age,attr,optional"` } var _ syntax.Defaulter = (*defaultsBlock)(nil) @@ -315,7 +315,7 @@ func (d *defaultsBlock) SetToDefault() { func TestMapBlocks(t *testing.T) { type block struct { - Value map[string]any `river:"block,block,optional"` + Value map[string]any `alloy:"block,block,optional"` } val := block{Value: map[string]any{"field": "value"}} diff --git a/syntax/internal/syntaxtags/syntaxtags.go b/syntax/internal/syntaxtags/syntaxtags.go index 3c1f8f859b..d8262bbb84 100644 --- a/syntax/internal/syntaxtags/syntaxtags.go +++ b/syntax/internal/syntaxtags/syntaxtags.go @@ -109,11 +109,11 @@ func (f Field) IsLabel() bool { return f.Flags&FlagLabel != 0 } // Get returns the list of tagged fields for some struct type ty. Get panics if // ty is not a struct type. // -// Get examines each tagged field in ty for a river key. The river key is then +// Get examines each tagged field in ty for an alloy key. The alloy key is then // parsed as containing a name for the field, followed by a required // comma-separated list of options. The name may be empty for fields which do -// not require a name. Get will ignore any field that is not tagged with a -// river key. +// not require a name. Get will ignore any field that is not tagged with an +// alloy key. // // Get will treat anonymous struct fields as if the inner fields were fields in // the outer struct. @@ -121,31 +121,31 @@ func (f Field) IsLabel() bool { return f.Flags&FlagLabel != 0 } // Examples of struct field tags and their meanings: // // // Field is used as a required block named "my_block". -// Field struct{} `river:"my_block,block"` +// Field struct{} `alloy:"my_block,block"` // // // Field is used as an optional block named "my_block". -// Field struct{} `river:"my_block,block,optional"` +// Field struct{} `alloy:"my_block,block,optional"` // // // Field is used as a required attribute named "my_attr". 
-// Field string `river:"my_attr,attr"` +// Field string `alloy:"my_attr,attr"` // // // Field is used as an optional attribute named "my_attr". -// Field string `river:"my_attr,attr,optional"` +// Field string `alloy:"my_attr,attr,optional"` // // // Field is used for storing the label of the block which the struct // // represents. -// Field string `river:",label"` +// Field string `alloy:",label"` // // // Attributes and blocks inside of Field are exposed as top-level fields. -// Field struct{} `river:",squash"` +// Field struct{} `alloy:",squash"` // -// Blocks []struct{} `river:"my_block_prefix,enum"` +// Blocks []struct{} `alloy:"my_block_prefix,enum"` // -// With the exception of the `river:",label"` and `river:",squash" tags, all +// With the exception of the `alloy:",label"` and `alloy:",squash" tags, all // tagged fields must have a unique name. // // The type of tagged fields may be any Go type, with the exception of -// `river:",label"` tags, which must be strings. +// `alloy:",label"` tags, which must be strings. func Get(ty reflect.Type) []Field { if k := ty.Kind(); k != reflect.Struct { panic(fmt.Sprintf("syntaxtags: Get requires struct kind, got %s", k)) @@ -164,13 +164,13 @@ func Get(ty reflect.Type) []Field { panic(fmt.Sprintf("syntax: anonymous fields not supported %s", printPathToField(ty, field.Index))) } - tag, tagged := field.Tag.Lookup("river") + tag, tagged := field.Tag.Lookup("alloy") if !tagged { continue } if !field.IsExported() { - panic(fmt.Sprintf("syntax: river tag found on unexported field at %s", printPathToField(ty, field.Index))) + panic(fmt.Sprintf("syntax: alloy tag found on unexported field at %s", printPathToField(ty, field.Index))) } options := strings.SplitN(tag, ",", 2) @@ -195,7 +195,7 @@ func Get(ty reflect.Type) []Field { flags, ok := parseFlags(options[1]) if !ok { - panic(fmt.Sprintf("syntax: unrecognized river tag format %q at %s", tag, printPathToField(ty, tf.Index))) + panic(fmt.Sprintf("syntax: unrecognized alloy tag format %q at %s", tag, printPathToField(ty, tf.Index))) } tf.Flags = flags diff --git a/syntax/internal/syntaxtags/syntaxtags_test.go b/syntax/internal/syntaxtags/syntaxtags_test.go index 6c9b1b4633..e2ecc8ff02 100644 --- a/syntax/internal/syntaxtags/syntaxtags_test.go +++ b/syntax/internal/syntaxtags/syntaxtags_test.go @@ -13,13 +13,13 @@ func Test_Get(t *testing.T) { type Struct struct { IgnoreMe bool - ReqAttr string `river:"req_attr,attr"` - OptAttr string `river:"opt_attr,attr,optional"` - ReqBlock struct{} `river:"req_block,block"` - OptBlock struct{} `river:"opt_block,block,optional"` - ReqEnum []struct{} `river:"req_enum,enum"` - OptEnum []struct{} `river:"opt_enum,enum,optional"` - Label string `river:",label"` + ReqAttr string `alloy:"req_attr,attr"` + OptAttr string `alloy:"opt_attr,attr,optional"` + ReqBlock struct{} `alloy:"req_block,block"` + OptBlock struct{} `alloy:"opt_block,block,optional"` + ReqEnum []struct{} `alloy:"req_enum,enum"` + OptEnum []struct{} `alloy:"opt_enum,enum,optional"` + Label string `alloy:",label"` } fs := syntaxtags.Get(reflect.TypeOf(Struct{})) @@ -39,34 +39,34 @@ func Test_Get(t *testing.T) { func TestEmbedded(t *testing.T) { type InnerStruct struct { - InnerField1 string `river:"inner_field_1,attr"` - InnerField2 string `river:"inner_field_2,attr"` + InnerField1 string `alloy:"inner_field_1,attr"` + InnerField2 string `alloy:"inner_field_2,attr"` } type Struct struct { - Field1 string `river:"parent_field_1,attr"` + Field1 string `alloy:"parent_field_1,attr"` InnerStruct - Field2 
string `river:"parent_field_2,attr"` + Field2 string `alloy:"parent_field_2,attr"` } require.PanicsWithValue(t, "syntax: anonymous fields not supported syntaxtags_test.Struct.InnerStruct", func() { syntaxtags.Get(reflect.TypeOf(Struct{})) }) } func TestSquash(t *testing.T) { type InnerStruct struct { - InnerField1 string `river:"inner_field_1,attr"` - InnerField2 string `river:"inner_field_2,attr"` + InnerField1 string `alloy:"inner_field_1,attr"` + InnerField2 string `alloy:"inner_field_2,attr"` } type Struct struct { - Field1 string `river:"parent_field_1,attr"` - Inner InnerStruct `river:",squash"` - Field2 string `river:"parent_field_2,attr"` + Field1 string `alloy:"parent_field_1,attr"` + Inner InnerStruct `alloy:",squash"` + Field2 string `alloy:"parent_field_2,attr"` } type StructWithPointer struct { - Field1 string `river:"parent_field_1,attr"` - Inner *InnerStruct `river:",squash"` - Field2 string `river:"parent_field_2,attr"` + Field1 string `alloy:"parent_field_1,attr"` + Inner *InnerStruct `alloy:",squash"` + Field2 string `alloy:"parent_field_2,attr"` } expect := []syntaxtags.Field{ @@ -101,16 +101,16 @@ func TestSquash(t *testing.T) { func TestDeepSquash(t *testing.T) { type Inner2Struct struct { - InnerField1 string `river:"inner_field_1,attr"` - InnerField2 string `river:"inner_field_2,attr"` + InnerField1 string `alloy:"inner_field_1,attr"` + InnerField2 string `alloy:"inner_field_2,attr"` } type InnerStruct struct { - Inner2Struct Inner2Struct `river:",squash"` + Inner2Struct Inner2Struct `alloy:",squash"` } type Struct struct { - Inner InnerStruct `river:",squash"` + Inner InnerStruct `alloy:",squash"` } expect := []syntaxtags.Field{ @@ -140,15 +140,15 @@ func Test_Get_Panics(t *testing.T) { t.Run("Tagged fields must be exported", func(t *testing.T) { type Struct struct { - attr string `river:"field,attr"` // nolint:unused //nolint:syntaxtags + attr string `alloy:"field,attr"` // nolint:unused //nolint:syntaxtags } - expect := `syntax: river tag found on unexported field at syntaxtags_test.Struct.attr` + expect := `syntax: alloy tag found on unexported field at syntaxtags_test.Struct.attr` expectPanic(t, expect, Struct{}) }) t.Run("Options are required", func(t *testing.T) { type Struct struct { - Attr string `river:"field"` //nolint:syntaxtags + Attr string `alloy:"field"` //nolint:syntaxtags } expect := `syntax: field syntaxtags_test.Struct.Attr tag is missing options` expectPanic(t, expect, Struct{}) @@ -156,8 +156,8 @@ func Test_Get_Panics(t *testing.T) { t.Run("Field names must be unique", func(t *testing.T) { type Struct struct { - Attr string `river:"field1,attr"` - Block string `river:"field1,block,optional"` //nolint:syntaxtags + Attr string `alloy:"field1,attr"` + Block string `alloy:"field1,block,optional"` //nolint:syntaxtags } expect := `syntax: field name field1 already used by syntaxtags_test.Struct.Attr` expectPanic(t, expect, Struct{}) @@ -165,7 +165,7 @@ func Test_Get_Panics(t *testing.T) { t.Run("Name is required for non-label field", func(t *testing.T) { type Struct struct { - Attr string `river:",attr"` //nolint:syntaxtags + Attr string `alloy:",attr"` //nolint:syntaxtags } expect := `syntaxtags: non-empty field name required at syntaxtags_test.Struct.Attr` expectPanic(t, expect, Struct{}) @@ -173,8 +173,8 @@ func Test_Get_Panics(t *testing.T) { t.Run("Only one label field may exist", func(t *testing.T) { type Struct struct { - Label1 string `river:",label"` - Label2 string `river:",label"` + Label1 string `alloy:",label"` + Label2 string 
`alloy:",label"` } expect := `syntax: label field already used by syntaxtags_test.Struct.Label2` expectPanic(t, expect, Struct{}) diff --git a/syntax/internal/value/decode_test.go b/syntax/internal/value/decode_test.go index a53b9553af..4908a68577 100644 --- a/syntax/internal/value/decode_test.go +++ b/syntax/internal/value/decode_test.go @@ -48,11 +48,11 @@ func TestDecode(t *testing.T) { // Declare some types to use for testing. Person2 is used as a struct // equivalent to Person, but with a different Go type to force casting. type Person struct { - Name string `river:"name,attr"` + Name string `alloy:"name,attr"` } type Person2 struct { - Name string `river:"name,attr"` + Name string `alloy:"name,attr"` } tt := []struct { @@ -288,9 +288,9 @@ func TestDecode_CustomTypes(t *testing.T) { } type customUnmarshaler struct { - UnmarshalCalled bool `river:"unmarshal_called,attr,optional"` - DefaultCalled bool `river:"default_called,attr,optional"` - ValidateCalled bool `river:"validate_called,attr,optional"` + UnmarshalCalled bool `alloy:"unmarshal_called,attr,optional"` + DefaultCalled bool `alloy:"default_called,attr,optional"` + ValidateCalled bool `alloy:"validate_called,attr,optional"` } func (cu *customUnmarshaler) UnmarshalRiver(f func(interface{}) error) error { @@ -359,9 +359,9 @@ func TestDecode_ErrorChain(t *testing.T) { type Target struct { Key struct { Object struct { - Field1 []int `river:"field1,attr"` - } `river:"object,attr"` - } `river:"key,attr"` + Field1 []int `alloy:"field1,attr"` + } `alloy:"object,attr"` + } `alloy:"key,attr"` } val := value.Object(map[string]value.Value{ @@ -457,14 +457,14 @@ func TestDecode_CustomConvert(t *testing.T) { func TestDecode_SquashedFields(t *testing.T) { type InnerStruct struct { - InnerField1 string `river:"inner_field_1,attr,optional"` - InnerField2 string `river:"inner_field_2,attr,optional"` + InnerField1 string `alloy:"inner_field_1,attr,optional"` + InnerField2 string `alloy:"inner_field_2,attr,optional"` } type OuterStruct struct { - OuterField1 string `river:"outer_field_1,attr,optional"` - Inner InnerStruct `river:",squash"` - OuterField2 string `river:"outer_field_2,attr,optional"` + OuterField1 string `alloy:"outer_field_1,attr,optional"` + Inner InnerStruct `alloy:",squash"` + OuterField2 string `alloy:"outer_field_2,attr,optional"` } var ( @@ -492,14 +492,14 @@ func TestDecode_SquashedFields(t *testing.T) { func TestDecode_SquashedFields_Pointer(t *testing.T) { type InnerStruct struct { - InnerField1 string `river:"inner_field_1,attr,optional"` - InnerField2 string `river:"inner_field_2,attr,optional"` + InnerField1 string `alloy:"inner_field_1,attr,optional"` + InnerField2 string `alloy:"inner_field_2,attr,optional"` } type OuterStruct struct { - OuterField1 string `river:"outer_field_1,attr,optional"` - Inner *InnerStruct `river:",squash"` - OuterField2 string `river:"outer_field_2,attr,optional"` + OuterField1 string `alloy:"outer_field_1,attr,optional"` + Inner *InnerStruct `alloy:",squash"` + OuterField2 string `alloy:"outer_field_2,attr,optional"` } var ( @@ -527,11 +527,11 @@ func TestDecode_SquashedFields_Pointer(t *testing.T) { func TestDecode_Slice(t *testing.T) { type Block struct { - Attr int `river:"attr,attr"` + Attr int `alloy:"attr,attr"` } type Struct struct { - Blocks []Block `river:"block.a,block,optional"` + Blocks []Block `alloy:"block.a,block,optional"` } var ( @@ -563,19 +563,19 @@ func TestDecode_Slice(t *testing.T) { func TestDecode_SquashedSlice(t *testing.T) { type Block struct { - Attr int 
`river:"attr,attr"` + Attr int `alloy:"attr,attr"` } type InnerStruct struct { - BlockA Block `river:"a,block,optional"` - BlockB Block `river:"b,block,optional"` - BlockC Block `river:"c,block,optional"` + BlockA Block `alloy:"a,block,optional"` + BlockB Block `alloy:"b,block,optional"` + BlockC Block `alloy:"c,block,optional"` } type OuterStruct struct { - OuterField1 string `river:"outer_field_1,attr,optional"` - Inner []InnerStruct `river:"block,enum"` - OuterField2 string `river:"outer_field_2,attr,optional"` + OuterField1 string `alloy:"outer_field_1,attr,optional"` + Inner []InnerStruct `alloy:"block,enum"` + OuterField2 string `alloy:"outer_field_2,attr,optional"` } var ( @@ -611,19 +611,19 @@ func TestDecode_SquashedSlice(t *testing.T) { func TestDecode_SquashedSlice_Pointer(t *testing.T) { type Block struct { - Attr int `river:"attr,attr"` + Attr int `alloy:"attr,attr"` } type InnerStruct struct { - BlockA *Block `river:"a,block,optional"` - BlockB *Block `river:"b,block,optional"` - BlockC *Block `river:"c,block,optional"` + BlockA *Block `alloy:"a,block,optional"` + BlockB *Block `alloy:"b,block,optional"` + BlockC *Block `alloy:"c,block,optional"` } type OuterStruct struct { - OuterField1 string `river:"outer_field_1,attr,optional"` - Inner []InnerStruct `river:"block,enum"` - OuterField2 string `river:"outer_field_2,attr,optional"` + OuterField1 string `alloy:"outer_field_1,attr,optional"` + Inner []InnerStruct `alloy:"block,enum"` + OuterField2 string `alloy:"outer_field_2,attr,optional"` } var ( @@ -703,7 +703,7 @@ func TestDecode_KnownTypes_Any(t *testing.T) { }, { input: struct { - Name string `river:"name,attr"` + Name string `alloy:"name,attr"` }{Name: "John"}, expect: map[string]any{"name": "John"}, diff --git a/syntax/internal/value/type_test.go b/syntax/internal/value/type_test.go index 7a595d1a51..066089ed8c 100644 --- a/syntax/internal/value/type_test.go +++ b/syntax/internal/value/type_test.go @@ -43,7 +43,7 @@ var typeTests = []struct { // A slice of labeled blocks should be an object. {[]struct { - Label string `river:",label"` + Label string `alloy:",label"` }{}, value.TypeObject}, {map[string]interface{}{}, value.TypeObject}, diff --git a/syntax/internal/value/value_object_test.go b/syntax/internal/value/value_object_test.go index 1cb90eb872..dabb9ec3ca 100644 --- a/syntax/internal/value/value_object_test.go +++ b/syntax/internal/value/value_object_test.go @@ -11,23 +11,23 @@ import ( // represented correctly. 
func TestBlockRepresentation(t *testing.T) { type UnlabledBlock struct { - Value int `river:"value,attr"` + Value int `alloy:"value,attr"` } type LabeledBlock struct { - Value int `river:"value,attr"` - Label string `river:",label"` + Value int `alloy:"value,attr"` + Label string `alloy:",label"` } type OuterBlock struct { - Attr1 string `river:"attr_1,attr"` - Attr2 string `river:"attr_2,attr"` + Attr1 string `alloy:"attr_1,attr"` + Attr2 string `alloy:"attr_2,attr"` - UnlabledBlock1 UnlabledBlock `river:"unlabeled.a,block"` - UnlabledBlock2 UnlabledBlock `river:"unlabeled.b,block"` - UnlabledBlock3 UnlabledBlock `river:"other_unlabeled,block"` + UnlabledBlock1 UnlabledBlock `alloy:"unlabeled.a,block"` + UnlabledBlock2 UnlabledBlock `alloy:"unlabeled.b,block"` + UnlabledBlock3 UnlabledBlock `alloy:"other_unlabeled,block"` - LabeledBlock1 LabeledBlock `river:"labeled.a,block"` - LabeledBlock2 LabeledBlock `river:"labeled.b,block"` - LabeledBlock3 LabeledBlock `river:"other_labeled,block"` + LabeledBlock1 LabeledBlock `alloy:"labeled.a,block"` + LabeledBlock2 LabeledBlock `alloy:"labeled.b,block"` + LabeledBlock3 LabeledBlock `alloy:"other_labeled,block"` } val := OuterBlock{ @@ -101,14 +101,14 @@ func TestBlockRepresentation(t *testing.T) { // blocks are represented correctly. func TestSquashedBlockRepresentation(t *testing.T) { type InnerStruct struct { - InnerField1 string `river:"inner_field_1,attr,optional"` - InnerField2 string `river:"inner_field_2,attr,optional"` + InnerField1 string `alloy:"inner_field_1,attr,optional"` + InnerField2 string `alloy:"inner_field_2,attr,optional"` } type OuterStruct struct { - OuterField1 string `river:"outer_field_1,attr,optional"` - Inner InnerStruct `river:",squash"` - OuterField2 string `river:"outer_field_2,attr,optional"` + OuterField1 string `alloy:"outer_field_1,attr,optional"` + Inner InnerStruct `alloy:",squash"` + OuterField2 string `alloy:"outer_field_2,attr,optional"` } val := OuterStruct{ @@ -139,18 +139,18 @@ func TestSquashedBlockRepresentation(t *testing.T) { func TestSliceOfBlocks(t *testing.T) { type UnlabledBlock struct { - Value int `river:"value,attr"` + Value int `alloy:"value,attr"` } type LabeledBlock struct { - Value int `river:"value,attr"` - Label string `river:",label"` + Value int `alloy:"value,attr"` + Label string `alloy:",label"` } type OuterBlock struct { - Attr1 string `river:"attr_1,attr"` - Attr2 string `river:"attr_2,attr"` + Attr1 string `alloy:"attr_1,attr"` + Attr2 string `alloy:"attr_2,attr"` - Unlabeled []UnlabledBlock `river:"unlabeled,block"` - Labeled []LabeledBlock `river:"labeled,block"` + Unlabeled []UnlabledBlock `alloy:"unlabeled,block"` + Labeled []LabeledBlock `alloy:"labeled,block"` } val := OuterBlock{ diff --git a/syntax/internal/value/value_test.go b/syntax/internal/value/value_test.go index ef75a8403f..fbebcabdd7 100644 --- a/syntax/internal/value/value_test.go +++ b/syntax/internal/value/value_test.go @@ -13,7 +13,7 @@ import ( // throughout values with a key lookup. 
func TestEncodeKeyLookup(t *testing.T) { type Body struct { - Data pointerMarshaler `river:"data,attr"` + Data pointerMarshaler `alloy:"data,attr"` } tt := []struct { @@ -230,7 +230,7 @@ func TestValue_Call(t *testing.T) { func TestValue_Interface_In_Array(t *testing.T) { type Container struct { - Field io.Closer `river:"field,attr"` + Field io.Closer `alloy:"field,attr"` } val := value.Encode(Container{Field: io.NopCloser(nil)}) diff --git a/syntax/syntax.go b/syntax/syntax.go index 44a256603e..8e32975cba 100644 --- a/syntax/syntax.go +++ b/syntax/syntax.go @@ -18,16 +18,16 @@ import ( ) // Marshal returns the pretty-printed encoding of v as a River configuration -// file. v must be a Go struct with river struct tags which determine the +// file. v must be a Go struct with alloy struct tags which determine the // structure of the resulting file. // // Marshal traverses the value v recursively, encoding each struct field as a -// River block or River attribute, based on the flags provided to the river +// River block or River attribute, based on the flags provided to the alloy // struct tag. // // When a struct field represents a River block, Marshal creates a new block // and recursively encodes the value as the body of the block. The name of the -// created block is taken from the name specified by the river struct tag. +// created block is taken from the name specified by the alloy struct tag. // // Struct fields which represent River blocks must be either a Go struct or a // slice of Go structs. When the field is a Go struct, its value is encoded as @@ -40,7 +40,7 @@ import ( // the string type. When specified, there must not be more than one struct // field which represents a block label. // -// The river tag specifies a name, possibly followed by a comma-separated list +// The alloy tag specifies a name, possibly followed by a comma-separated list // of options. The name must be empty if the provided options do not support a // name being defined. The following provides examples for all supported struct // field tags with their meanings: @@ -48,62 +48,62 @@ import ( // // Field appears as a block named "example". It will always appear in the // // resulting encoding. When decoding, "example" is treated as a required // // block and must be present in the source text. -// Field struct{...} `river:"example,block"` +// Field struct{...} `alloy:"example,block"` // // // Field appears as a set of blocks named "example." It will appear in the // // resulting encoding if there is at least one element in the slice. When // // decoding, "example" is treated as a required block and at least one // // "example" block must be present in the source text. -// Field []struct{...} `river:"example,block"` +// Field []struct{...} `alloy:"example,block"` // // // Field appears as block named "example." It will always appear in the // // resulting encoding. When decoding, "example" is treated as an optional // // block and can be omitted from the source text. -// Field struct{...} `river:"example,block,optional"` +// Field struct{...} `alloy:"example,block,optional"` // // // Field appears as a set of blocks named "example." It will appear in the // // resulting encoding if there is at least one element in the slice. When // // decoding, "example" is treated as an optional block and can be omitted // // from the source text. -// Field []struct{...} `river:"example,block,optional"` +// Field []struct{...} `alloy:"example,block,optional"` // // // Field appears as an attribute named "example." 
It will always appear in // // the resulting encoding. When decoding, "example" is treated as a // // required attribute and must be present in the source text. -// Field bool `river:"example,attr"` +// Field bool `alloy:"example,attr"` // // // Field appears as an attribute named "example." If the field's value is // // the Go zero value, "example" is omitted from the resulting encoding. // // When decoding, "example" is treated as an optional attribute and can be // // omitted from the source text. -// Field bool `river:"example,attr,optional"` +// Field bool `alloy:"example,attr,optional"` // // // The value of Field appears as the block label for the struct being // // converted into a block. When decoding, a block label must be provided. -// Field string `river:",label"` +// Field string `alloy:",label"` // // // The inner attributes and blocks of Field are exposed as top-level // // attributes and blocks of the outer struct. -// Field struct{...} `river:",squash"` +// Field struct{...} `alloy:",squash"` // // // Field appears as a set of blocks starting with "example.". Only the // // first set element in the struct will be encoded. Each field in struct // // must be a block. The name of the block is prepended to the enum name. // // When decoding, enum blocks are treated as optional blocks and can be // // omitted from the source text. -// Field []struct{...} `river:"example,enum"` +// Field []struct{...} `alloy:"example,enum"` // -// // Field is equivalent to `river:"example,enum"`. -// Field []struct{...} `river:"example,enum,optional"` +// // Field is equivalent to `alloy:"example,enum"`. +// Field []struct{...} `alloy:"example,enum,optional"` // -// If a river tag specifies a required or optional block, the name is permitted +// If an alloy tag specifies a required or optional block, the name is permitted // to contain period `.` characters. // -// Marshal will panic if it encounters a struct with invalid river tags. +// Marshal will panic if it encounters a struct with invalid alloy tags. // // When a struct field represents a River attribute, Marshal encodes the struct // value as a River value. The attribute name will be taken from the name -// specified by the river struct tag. See MarshalValue for the rules used to +// specified by the alloy struct tag. See MarshalValue for the rules used to // convert a Go value into a River value. func Marshal(v interface{}) ([]byte, error) { var buf bytes.Buffer @@ -152,13 +152,13 @@ func Marshal(v interface{}) ([]byte, error) { // // Field appears as an object field named "my_name". It will always // // appear in the resulting encoding. When decoding, "my_name" is treated // // as a required attribute and must be present in the source text. -// Field bool `river:"my_name,attr"` +// Field bool `alloy:"my_name,attr"` // // // Field appears as an object field named "my_name". If the field's value // // is the Go zero value, "example" is omitted from the resulting encoding. // // When decoding, "my_name" is treated as an optional attribute and can be // // omitted from the source text. -// Field bool `river:"my_name,attr,optional"` +// Field bool `alloy:"my_name,attr,optional"` func MarshalValue(v interface{}) ([]byte, error) { var buf bytes.Buffer if err := NewEncoder(&buf).EncodeValue(v); err != nil { @@ -219,10 +219,10 @@ func (enc *Encoder) EncodeValue(v interface{}) error { // unmarshaling into a map. 
 //
 // To unmarshal a River body into a struct, Unmarshal matches incoming
-// attributes and blocks to the river struct tags specified by v. Incoming
-// attribute and blocks which do not match to a river struct tag cause a
+// attributes and blocks to the alloy struct tags specified by v. Incoming
+// attributes and blocks which do not match to an alloy struct tag cause a
 // decoding error. Additionally, any attribute or block marked as required by
-// the river struct tag that are not present in the source text will generate a
+// the alloy struct tag that are not present in the source text will generate a
 // decoding error.
 //
 // To unmarshal a list of River blocks into a slice, Unmarshal resets the slice
@@ -254,9 +254,9 @@ func Unmarshal(in []byte, v interface{}) error {
 // ConvertibleIntoCapsule.
 //
 // To unmarshal a River object into a struct, UnmarshalValue matches incoming
-// object fields to the river struct tags specified by v. Incoming object
-// fields which do not match to a river struct tag cause a decoding error.
-// Additionally, any object field marked as required by the river struct
+// object fields to the alloy struct tags specified by v. Incoming object
+// fields which do not match to an alloy struct tag cause a decoding error.
+// Additionally, any object field marked as required by the alloy struct
 // tag that are not present in the source text will generate a decoding error.
 //
 // To unmarshal River into an interface value, Unmarshal stores one of the
diff --git a/syntax/syntax_test.go b/syntax/syntax_test.go
index 747c2c4b6c..28daa923d8 100644
--- a/syntax/syntax_test.go
+++ b/syntax/syntax_test.go
@@ -12,23 +12,23 @@ func ExampleUnmarshal() {
 	// book.
 	type Character struct {
 		// Name of the character. The name is decoded from the block label.
-		Name string `river:",label"`
+		Name string `alloy:",label"`
 		// Age of the character. The age is a required attribute within the block,
 		// and must be set in the config.
-		Age int `river:"age,attr"`
+		Age int `alloy:"age,attr"`
 		// Location the character lives in. The location is an optional attribute
 		// within the block. Optional attributes do not have to be set.
-		Location string `river:"location,attr,optional"`
+		Location string `alloy:"location,attr,optional"`
 	}
 
 	// Book is our overall type that we decode the overall River file into.
 	type Book struct {
 		// Title of the book (required attribute).
-		Title string `river:"title,attr"`
+		Title string `alloy:"title,attr"`
 		// List of characters. Each character is a labeled block. The optional tag
 		// means that it is valid to not provide a character block. Decoding into a
 		// slice permits there to be multiple specified character blocks.
-		Characters []*Character `river:"character,block,optional"`
+		Characters []*Character `alloy:"character,block,optional"`
 	}
 
 	// Create our book with two characters.
@@ -76,7 +76,7 @@ func ExampleUnmarshal_functions() { _ = os.Setenv("EXAMPLE", "Jane Doe") type Data struct { - String string `river:"string,attr"` + String string `alloy:"string,attr"` } input := ` @@ -106,9 +106,9 @@ func ExampleUnmarshalValue() { func ExampleMarshal() { type Person struct { - Name string `river:"name,attr"` - Age int `river:"age,attr"` - Location string `river:"location,attr,optional"` + Name string `alloy:"name,attr"` + Age int `alloy:"age,attr"` + Location string `alloy:"location,attr,optional"` } p := Person{ @@ -129,8 +129,8 @@ func ExampleMarshal() { func ExampleMarshalValue() { type Person struct { - Name string `river:"name,attr"` - Age int `river:"age,attr"` + Name string `alloy:"name,attr"` + Age int `alloy:"age,attr"` } p := Person{ diff --git a/syntax/token/builder/builder_test.go b/syntax/token/builder/builder_test.go index 9869ad6dea..6bb5361af9 100644 --- a/syntax/token/builder/builder_test.go +++ b/syntax/token/builder/builder_test.go @@ -103,8 +103,8 @@ func TestBuilder_GoEncode_SortMapKeys(t *testing.T) { f := builder.NewFile() type Ordered struct { - SomeKey string `river:"some_key,attr"` - OtherKey string `river:"other_key,attr"` + SomeKey string `alloy:"some_key,attr"` + OtherKey string `alloy:"other_key,attr"` } // Maps are unordered because you can't iterate over their keys in a @@ -135,14 +135,14 @@ func TestBuilder_GoEncode_SortMapKeys(t *testing.T) { func TestBuilder_AppendFrom(t *testing.T) { type InnerBlock struct { - Number int `river:"number,attr"` + Number int `alloy:"number,attr"` } type Structure struct { - Field string `river:"field,attr"` + Field string `alloy:"field,attr"` - Block InnerBlock `river:"block,block"` - OtherBlocks []InnerBlock `river:"other_block,block"` + Block InnerBlock `alloy:"block,block"` + OtherBlocks []InnerBlock `alloy:"other_block,block"` } f := builder.NewFile() @@ -177,19 +177,19 @@ func TestBuilder_AppendFrom(t *testing.T) { func TestBuilder_AppendFrom_EnumSlice(t *testing.T) { type InnerBlock struct { - Number int `river:"number,attr"` + Number int `alloy:"number,attr"` } type EnumBlock struct { - BlockA InnerBlock `river:"a,block,optional"` - BlockB InnerBlock `river:"b,block,optional"` - BlockC InnerBlock `river:"c,block,optional"` + BlockA InnerBlock `alloy:"a,block,optional"` + BlockB InnerBlock `alloy:"b,block,optional"` + BlockC InnerBlock `alloy:"c,block,optional"` } type Structure struct { - Field string `river:"field,attr"` + Field string `alloy:"field,attr"` - OtherBlocks []EnumBlock `river:"block,enum"` + OtherBlocks []EnumBlock `alloy:"block,enum"` } f := builder.NewFile() @@ -223,19 +223,19 @@ func TestBuilder_AppendFrom_EnumSlice(t *testing.T) { func TestBuilder_AppendFrom_EnumSlice_Pointer(t *testing.T) { type InnerBlock struct { - Number int `river:"number,attr"` + Number int `alloy:"number,attr"` } type EnumBlock struct { - BlockA *InnerBlock `river:"a,block,optional"` - BlockB *InnerBlock `river:"b,block,optional"` - BlockC *InnerBlock `river:"c,block,optional"` + BlockA *InnerBlock `alloy:"a,block,optional"` + BlockB *InnerBlock `alloy:"b,block,optional"` + BlockC *InnerBlock `alloy:"c,block,optional"` } type Structure struct { - Field string `river:"field,attr"` + Field string `alloy:"field,attr"` - OtherBlocks []EnumBlock `river:"block,enum"` + OtherBlocks []EnumBlock `alloy:"block,enum"` } f := builder.NewFile() @@ -269,10 +269,10 @@ func TestBuilder_AppendFrom_EnumSlice_Pointer(t *testing.T) { func TestBuilder_SkipOptional(t *testing.T) { type Structure struct { - OptFieldA string 
`river:"opt_field_a,attr,optional"` - OptFieldB string `river:"opt_field_b,attr,optional"` - ReqFieldA string `river:"req_field_a,attr"` - ReqFieldB string `river:"req_field_b,attr"` + OptFieldA string `alloy:"opt_field_a,attr,optional"` + OptFieldB string `alloy:"opt_field_b,attr,optional"` + ReqFieldA string `alloy:"req_field_a,attr"` + ReqFieldB string `alloy:"req_field_b,attr"` } f := builder.NewFile() @@ -346,14 +346,14 @@ func TestBuilder_GoEncode_Tokenizer(t *testing.T) { func TestBuilder_ValueOverrideHook(t *testing.T) { type InnerBlock struct { - AnotherField string `river:"another_field,attr"` + AnotherField string `alloy:"another_field,attr"` } type Structure struct { - Field string `river:"field,attr"` + Field string `alloy:"field,attr"` - Block InnerBlock `river:"block,block"` - OtherBlocks []InnerBlock `river:"other_block,block"` + Block InnerBlock `alloy:"block,block"` + OtherBlocks []InnerBlock `alloy:"other_block,block"` } f := builder.NewFile() @@ -391,7 +391,7 @@ func TestBuilder_ValueOverrideHook(t *testing.T) { func TestBuilder_MapBlocks(t *testing.T) { type block struct { - Value map[string]any `river:"block,block,optional"` + Value map[string]any `alloy:"block,block,optional"` } f := builder.NewFile() diff --git a/syntax/token/builder/nested_defaults_test.go b/syntax/token/builder/nested_defaults_test.go index bd6048a188..90eec5e4d4 100644 --- a/syntax/token/builder/nested_defaults_test.go +++ b/syntax/token/builder/nested_defaults_test.go @@ -165,7 +165,7 @@ func TestPtrPropagatingDefaultWithNil(t *testing.T) { // StructPropagatingDefault has the outer defaults matching the inner block's defaults. The inner block is a struct. type StructPropagatingDefault struct { - Inner AttrWithDefault `river:"inner,block,optional"` + Inner AttrWithDefault `alloy:"inner,block,optional"` } func (o *StructPropagatingDefault) SetToDefault() { @@ -176,7 +176,7 @@ func (o *StructPropagatingDefault) SetToDefault() { // PtrPropagatingDefault has the outer defaults matching the inner block's defaults. The inner block is a pointer. type PtrPropagatingDefault struct { - Inner *AttrWithDefault `river:"inner,block,optional"` + Inner *AttrWithDefault `alloy:"inner,block,optional"` } func (o *PtrPropagatingDefault) SetToDefault() { @@ -187,7 +187,7 @@ func (o *PtrPropagatingDefault) SetToDefault() { // MismatchingDefault has the outer defaults NOT matching the inner block's defaults. The inner block is a pointer. type MismatchingDefault struct { - Inner *AttrWithDefault `river:"inner,block,optional"` + Inner *AttrWithDefault `alloy:"inner,block,optional"` } func (o *MismatchingDefault) SetToDefault() { @@ -198,7 +198,7 @@ func (o *MismatchingDefault) SetToDefault() { // ZeroDefault has the outer defaults setting to zero values. The inner block is a pointer. type ZeroDefault struct { - Inner *AttrWithDefault `river:"inner,block,optional"` + Inner *AttrWithDefault `alloy:"inner,block,optional"` } func (o *ZeroDefault) SetToDefault() { @@ -207,12 +207,12 @@ func (o *ZeroDefault) SetToDefault() { // NoDefaultDefined has no defaults defined. The inner block is a pointer. type NoDefaultDefined struct { - Inner *AttrWithDefault `river:"inner,block,optional"` + Inner *AttrWithDefault `alloy:"inner,block,optional"` } // AttrWithDefault has a default value of a non-zero number. 
type AttrWithDefault struct { - Number int `river:"number,attr,optional"` + Number int `alloy:"number,attr,optional"` } func (i *AttrWithDefault) SetToDefault() { diff --git a/syntax/types.go b/syntax/types.go index 1005b56da8..585c71b273 100644 --- a/syntax/types.go +++ b/syntax/types.go @@ -24,7 +24,7 @@ type Unmarshaler interface { // UnmarshalRiver is invoked when decoding a River value into a Go value. f // should be called with a pointer to a value to decode into. UnmarshalRiver // will not be called on types which are squashed into the parent struct - // using `river:",squash"`. + // using `alloy:",squash"`. UnmarshalRiver(f func(v interface{}) error) error } @@ -33,7 +33,7 @@ type Unmarshaler interface { type Defaulter interface { // SetToDefault is called when evaluating a block or body to set the value // to its defaults. SetToDefault will not be called on types which are - // squashed into the parent struct using `river:",squash"`. + // squashed into the parent struct using `alloy:",squash"`. SetToDefault() } @@ -42,7 +42,7 @@ type Defaulter interface { type Validator interface { // Validate is called when evaluating a block or body to enforce the // value is valid. Validate will not be called on types which are - // squashed into the parent struct using `river:",squash"`. + // squashed into the parent struct using `alloy:",squash"`. Validate() error } diff --git a/syntax/vm/vm_benchmarks_test.go b/syntax/vm/vm_benchmarks_test.go index 24938b6206..0d1e37335d 100644 --- a/syntax/vm/vm_benchmarks_test.go +++ b/syntax/vm/vm_benchmarks_test.go @@ -56,9 +56,9 @@ func BenchmarkExprs(b *testing.B) { age = 42, }`, expect: struct { - Name string `river:"name,attr"` - Age int `river:"age,attr"` - Country string `river:"country,attr,optional"` + Name string `alloy:"name,attr"` + Age int `alloy:"age,attr"` + Country string `alloy:"country,attr,optional"` }{ Name: "John Doe", Age: 42, diff --git a/syntax/vm/vm_block_test.go b/syntax/vm/vm_block_test.go index 7b4cd34a68..fd39e437b7 100644 --- a/syntax/vm/vm_block_test.go +++ b/syntax/vm/vm_block_test.go @@ -16,14 +16,14 @@ import ( func TestVM_File(t *testing.T) { type block struct { - String string `river:"string,attr"` - Number int `river:"number,attr,optional"` + String string `alloy:"string,attr"` + Number int `alloy:"number,attr,optional"` } type file struct { - SettingA int `river:"setting_a,attr"` - SettingB int `river:"setting_b,attr,optional"` + SettingA int `alloy:"setting_a,attr"` + SettingB int `alloy:"setting_b,attr,optional"` - Block block `river:"some_block,block,optional"` + Block block `alloy:"some_block,block,optional"` } input := ` @@ -54,8 +54,8 @@ func TestVM_File(t *testing.T) { func TestVM_Block_Attributes(t *testing.T) { t.Run("Decodes attributes", func(t *testing.T) { type block struct { - Number int `river:"number,attr"` - String string `river:"string,attr"` + Number int `alloy:"number,attr"` + String string `alloy:"string,attr"` } input := `some_block { @@ -72,7 +72,7 @@ func TestVM_Block_Attributes(t *testing.T) { t.Run("Fails if attribute used as block", func(t *testing.T) { type block struct { - Number int `river:"number,attr"` + Number int `alloy:"number,attr"` } input := `some_block { @@ -86,8 +86,8 @@ func TestVM_Block_Attributes(t *testing.T) { t.Run("Fails if required attributes are not present", func(t *testing.T) { type block struct { - Number int `river:"number,attr"` - String string `river:"string,attr"` + Number int `alloy:"number,attr"` + String string `alloy:"string,attr"` } input := `some_block { @@ 
-101,8 +101,8 @@ func TestVM_Block_Attributes(t *testing.T) { t.Run("Succeeds if optional attributes are not present", func(t *testing.T) { type block struct { - Number int `river:"number,attr"` - String string `river:"string,attr,optional"` + Number int `alloy:"number,attr"` + String string `alloy:"string,attr,optional"` } input := `some_block { @@ -118,7 +118,7 @@ func TestVM_Block_Attributes(t *testing.T) { t.Run("Fails if attribute is not defined in struct", func(t *testing.T) { type block struct { - Number int `river:"number,attr"` + Number int `alloy:"number,attr"` } input := `some_block { @@ -133,7 +133,7 @@ func TestVM_Block_Attributes(t *testing.T) { t.Run("Tests decoding into an interface", func(t *testing.T) { type block struct { - Anything interface{} `river:"anything,attr"` + Anything interface{} `alloy:"anything,attr"` } tests := []struct { @@ -169,10 +169,10 @@ func TestVM_Block_Attributes(t *testing.T) { t.Run("Supports arbitrarily nested struct pointer fields", func(t *testing.T) { type block struct { - NumberA int `river:"number_a,attr"` - NumberB *int `river:"number_b,attr"` - NumberC **int `river:"number_c,attr"` - NumberD ***int `river:"number_d,attr"` + NumberA int `alloy:"number_a,attr"` + NumberB *int `alloy:"number_b,attr"` + NumberC **int `alloy:"number_c,attr"` + NumberD ***int `alloy:"number_d,attr"` } input := `some_block { @@ -193,14 +193,14 @@ func TestVM_Block_Attributes(t *testing.T) { t.Run("Supports squashed attributes", func(t *testing.T) { type InnerStruct struct { - InnerField1 string `river:"inner_field_1,attr,optional"` - InnerField2 string `river:"inner_field_2,attr,optional"` + InnerField1 string `alloy:"inner_field_1,attr,optional"` + InnerField2 string `alloy:"inner_field_2,attr,optional"` } type OuterStruct struct { - OuterField1 string `river:"outer_field_1,attr,optional"` - Inner InnerStruct `river:",squash"` - OuterField2 string `river:"outer_field_2,attr,optional"` + OuterField1 string `alloy:"outer_field_1,attr,optional"` + Inner InnerStruct `alloy:",squash"` + OuterField2 string `alloy:"outer_field_2,attr,optional"` } var ( @@ -229,14 +229,14 @@ func TestVM_Block_Attributes(t *testing.T) { t.Run("Supports squashed attributes in pointers", func(t *testing.T) { type InnerStruct struct { - InnerField1 string `river:"inner_field_1,attr,optional"` - InnerField2 string `river:"inner_field_2,attr,optional"` + InnerField1 string `alloy:"inner_field_1,attr,optional"` + InnerField2 string `alloy:"inner_field_2,attr,optional"` } type OuterStruct struct { - OuterField1 string `river:"outer_field_1,attr,optional"` - Inner *InnerStruct `river:",squash"` - OuterField2 string `river:"outer_field_2,attr,optional"` + OuterField1 string `alloy:"outer_field_1,attr,optional"` + Inner *InnerStruct `alloy:",squash"` + OuterField2 string `alloy:"outer_field_2,attr,optional"` } var ( @@ -266,13 +266,13 @@ func TestVM_Block_Attributes(t *testing.T) { func TestVM_Block_Children_Blocks(t *testing.T) { type childBlock struct { - Attr bool `river:"attr,attr"` + Attr bool `alloy:"attr,attr"` } t.Run("Decodes children blocks", func(t *testing.T) { type block struct { - Value int `river:"value,attr"` - Child childBlock `river:"child.block,block"` + Value int `alloy:"value,attr"` + Child childBlock `alloy:"child.block,block"` } input := `some_block { @@ -290,8 +290,8 @@ func TestVM_Block_Children_Blocks(t *testing.T) { t.Run("Decodes multiple instances of children blocks", func(t *testing.T) { type block struct { - Value int `river:"value,attr"` - Children []childBlock 
`river:"child.block,block"` + Value int `alloy:"value,attr"` + Children []childBlock `alloy:"child.block,block"` } input := `some_block { @@ -314,8 +314,8 @@ func TestVM_Block_Children_Blocks(t *testing.T) { t.Run("Decodes multiple instances of children blocks into an array", func(t *testing.T) { type block struct { - Value int `river:"value,attr"` - Children [3]childBlock `river:"child.block,block"` + Value int `alloy:"value,attr"` + Children [3]childBlock `alloy:"child.block,block"` } input := `some_block { @@ -337,7 +337,7 @@ func TestVM_Block_Children_Blocks(t *testing.T) { t.Run("Fails if block used as an attribute", func(t *testing.T) { type block struct { - Child childBlock `river:"child,block"` + Child childBlock `alloy:"child,block"` } input := `some_block { @@ -351,8 +351,8 @@ func TestVM_Block_Children_Blocks(t *testing.T) { t.Run("Fails if required children blocks are not present", func(t *testing.T) { type block struct { - Value int `river:"value,attr"` - Child childBlock `river:"child.block,block"` + Value int `alloy:"value,attr"` + Child childBlock `alloy:"child.block,block"` } input := `some_block { @@ -366,8 +366,8 @@ func TestVM_Block_Children_Blocks(t *testing.T) { t.Run("Succeeds if optional children blocks are not present", func(t *testing.T) { type block struct { - Value int `river:"value,attr"` - Child childBlock `river:"child.block,block,optional"` + Value int `alloy:"value,attr"` + Child childBlock `alloy:"child.block,block,optional"` } input := `some_block { @@ -382,7 +382,7 @@ func TestVM_Block_Children_Blocks(t *testing.T) { t.Run("Fails if child block is not defined in struct", func(t *testing.T) { type block struct { - Value int `river:"value,attr"` + Value int `alloy:"value,attr"` } input := `some_block { @@ -398,10 +398,10 @@ func TestVM_Block_Children_Blocks(t *testing.T) { t.Run("Supports arbitrarily nested struct pointer fields", func(t *testing.T) { type block struct { - BlockA childBlock `river:"block_a,block"` - BlockB *childBlock `river:"block_b,block"` - BlockC **childBlock `river:"block_c,block"` - BlockD ***childBlock `river:"block_d,block"` + BlockA childBlock `alloy:"block_a,block"` + BlockB *childBlock `alloy:"block_b,block"` + BlockC **childBlock `alloy:"block_c,block"` + BlockD ***childBlock `alloy:"block_d,block"` } input := `some_block { @@ -422,14 +422,14 @@ func TestVM_Block_Children_Blocks(t *testing.T) { t.Run("Supports squashed blocks", func(t *testing.T) { type InnerStruct struct { - Inner1 childBlock `river:"inner_block_1,block"` - Inner2 childBlock `river:"inner_block_2,block"` + Inner1 childBlock `alloy:"inner_block_1,block"` + Inner2 childBlock `alloy:"inner_block_2,block"` } type OuterStruct struct { - Outer1 childBlock `river:"outer_block_1,block"` - Inner InnerStruct `river:",squash"` - Outer2 childBlock `river:"outer_block_2,block"` + Outer1 childBlock `alloy:"outer_block_1,block"` + Inner InnerStruct `alloy:",squash"` + Outer2 childBlock `alloy:"outer_block_2,block"` } var ( @@ -458,14 +458,14 @@ func TestVM_Block_Children_Blocks(t *testing.T) { t.Run("Supports squashed blocks in pointers", func(t *testing.T) { type InnerStruct struct { - Inner1 *childBlock `river:"inner_block_1,block"` - Inner2 *childBlock `river:"inner_block_2,block"` + Inner1 *childBlock `alloy:"inner_block_1,block"` + Inner2 *childBlock `alloy:"inner_block_2,block"` } type OuterStruct struct { - Outer1 childBlock `river:"outer_block_1,block"` - Inner *InnerStruct `river:",squash"` - Outer2 childBlock `river:"outer_block_2,block"` + Outer1 childBlock 
`alloy:"outer_block_1,block"` + Inner *InnerStruct `alloy:",squash"` + Outer2 childBlock `alloy:"outer_block_2,block"` } var ( @@ -497,20 +497,20 @@ func TestVM_Block_Children_Blocks(t *testing.T) { func TestVM_Block_Enum_Block(t *testing.T) { type childBlock struct { - Attr int `river:"attr,attr"` + Attr int `alloy:"attr,attr"` } type enumBlock struct { - BlockA *childBlock `river:"a,block,optional"` - BlockB *childBlock `river:"b,block,optional"` - BlockC *childBlock `river:"c,block,optional"` - BlockD *childBlock `river:"d,block,optional"` + BlockA *childBlock `alloy:"a,block,optional"` + BlockB *childBlock `alloy:"b,block,optional"` + BlockC *childBlock `alloy:"c,block,optional"` + BlockD *childBlock `alloy:"d,block,optional"` } t.Run("Decodes enum blocks", func(t *testing.T) { type block struct { - Value int `river:"value,attr"` - Blocks []*enumBlock `river:"child,enum,optional"` + Value int `alloy:"value,attr"` + Blocks []*enumBlock `alloy:"child,enum,optional"` } input := `some_block { @@ -534,8 +534,8 @@ func TestVM_Block_Enum_Block(t *testing.T) { t.Run("Decodes multiple enum blocks", func(t *testing.T) { type block struct { - Value int `river:"value,attr"` - Blocks []*enumBlock `river:"child,enum,optional"` + Value int `alloy:"value,attr"` + Blocks []*enumBlock `alloy:"child,enum,optional"` } input := `some_block { @@ -563,8 +563,8 @@ func TestVM_Block_Enum_Block(t *testing.T) { t.Run("Decodes multiple enum blocks with repeating blocks", func(t *testing.T) { type block struct { - Value int `river:"value,attr"` - Blocks []*enumBlock `river:"child,enum,optional"` + Value int `alloy:"value,attr"` + Blocks []*enumBlock `alloy:"child,enum,optional"` } input := `some_block { @@ -596,7 +596,7 @@ func TestVM_Block_Enum_Block(t *testing.T) { func TestVM_Block_Label(t *testing.T) { t.Run("Decodes label into string field", func(t *testing.T) { type block struct { - Label string `river:",label"` + Label string `alloy:",label"` } input := `some_block "label_value_1" {}` @@ -619,7 +619,7 @@ func TestVM_Block_Label(t *testing.T) { t.Run("Block must have label if struct accepts label", func(t *testing.T) { type block struct { - Label string `river:",label"` + Label string `alloy:",label"` } input := `some_block {}` @@ -631,7 +631,7 @@ func TestVM_Block_Label(t *testing.T) { t.Run("Block must have non-empty label if struct accepts label", func(t *testing.T) { type block struct { - Label string `river:",label"` + Label string `alloy:",label"` } input := `some_block "" {}` @@ -644,8 +644,8 @@ func TestVM_Block_Label(t *testing.T) { func TestVM_Block_Unmarshaler(t *testing.T) { type OuterBlock struct { - FieldA string `river:"field_a,attr"` - Settings Setting `river:"some.settings,block"` + FieldA string `alloy:"field_a,attr"` + Settings Setting `alloy:"some.settings,block"` } input := ` @@ -670,7 +670,7 @@ func TestVM_Block_Unmarshaler(t *testing.T) { func TestVM_Block_UnmarshalToMap(t *testing.T) { type OuterBlock struct { - Settings map[string]interface{} `river:"some.settings,block"` + Settings map[string]interface{} `alloy:"some.settings,block"` } tt := []struct { @@ -739,7 +739,7 @@ func TestVM_Block_UnmarshalToMap(t *testing.T) { func TestVM_Block_UnmarshalToAny(t *testing.T) { type OuterBlock struct { - Settings any `river:"some.settings,block"` + Settings any `alloy:"some.settings,block"` } input := ` @@ -765,8 +765,8 @@ func TestVM_Block_UnmarshalToAny(t *testing.T) { } type Setting struct { - FieldA string `river:"field_a,attr"` - FieldB string `river:"field_b,attr"` + FieldA string 
`alloy:"field_a,attr"` + FieldB string `alloy:"field_b,attr"` UnmarshalCalled bool DefaultCalled bool diff --git a/syntax/vm/vm_errors_test.go b/syntax/vm/vm_errors_test.go index bec22de001..219d6d47c7 100644 --- a/syntax/vm/vm_errors_test.go +++ b/syntax/vm/vm_errors_test.go @@ -12,9 +12,9 @@ func TestVM_ExprErrors(t *testing.T) { type Target struct { Key struct { Object struct { - Field1 []int `river:"field1,attr"` - } `river:"object,attr"` - } `river:"key,attr"` + Field1 []int `alloy:"field1,attr"` + } `alloy:"object,attr"` + } `alloy:"key,attr"` } tt := []struct { @@ -61,7 +61,7 @@ func TestVM_ExprErrors(t *testing.T) { name: "complex expr", input: `key = [0, 1, 2]`, into: &struct { - Key string `river:"key,attr"` + Key string `alloy:"key,attr"` }{}, expect: `test:1:7: [0, 1, 2] should be string, got array`, }, diff --git a/syntax/vm/vm_stdlib_test.go b/syntax/vm/vm_stdlib_test.go index 591a7bdd27..0c91fb88a5 100644 --- a/syntax/vm/vm_stdlib_test.go +++ b/syntax/vm/vm_stdlib_test.go @@ -188,11 +188,11 @@ func BenchmarkConcat(b *testing.B) { // If the code path is fully optimized, there will be no intermediate // translations to interface{}. type Person struct { - Name string `river:"name,attr"` - Attrs map[string]string `river:"attrs,attr"` + Name string `alloy:"name,attr"` + Attrs map[string]string `alloy:"attrs,attr"` } type Body struct { - Values []Person `river:"values,attr"` + Values []Person `alloy:"values,attr"` } in := `values = concat(values_ref)` diff --git a/syntax/vm/vm_test.go b/syntax/vm/vm_test.go index f073b94d4b..877ac879b3 100644 --- a/syntax/vm/vm_test.go +++ b/syntax/vm/vm_test.go @@ -110,9 +110,9 @@ func TestVM_Evaluate(t *testing.T) { age = 42, }`, expect: struct { - Name string `river:"name,attr"` - Age int `river:"age,attr"` - Country string `river:"country,attr,optional"` + Name string `alloy:"name,attr"` + Age int `alloy:"age,attr"` + Country string `alloy:"country,attr,optional"` }{ Name: "John Doe", Age: 42, @@ -207,7 +207,7 @@ func TestVM_Evaluate_IdentifierExpr(t *testing.T) { func TestVM_Evaluate_AccessExpr(t *testing.T) { t.Run("Lookup optional field", func(t *testing.T) { type Person struct { - Name string `river:"name,attr,optional"` + Name string `alloy:"name,attr,optional"` } scope := &vm.Scope{ From 49b34c5d498a1aec456133bdf0098af9ebcb9275 Mon Sep 17 00:00:00 2001 From: Robert Fratto Date: Mon, 25 Mar 2024 14:52:18 -0400 Subject: [PATCH 049/136] agentlint: lint for alloy tags --- .../internal/syntaxtags/syntaxtags.go | 30 +++++++++---------- 1 file changed, 15 insertions(+), 15 deletions(-) diff --git a/internal/cmd/agentlint/internal/syntaxtags/syntaxtags.go b/internal/cmd/agentlint/internal/syntaxtags/syntaxtags.go index 1673861c6d..5d91ccbdad 100644 --- a/internal/cmd/agentlint/internal/syntaxtags/syntaxtags.go +++ b/internal/cmd/agentlint/internal/syntaxtags/syntaxtags.go @@ -20,17 +20,17 @@ var Analyzer = &analysis.Analyzer{ var noLintRegex = regexp.MustCompile(`//\s*nolint:(\S+)`) var ( - syntaxTagRegex = regexp.MustCompile(`river:"([^"]*)"`) + syntaxTagRegex = regexp.MustCompile(`alloy:"([^"]*)"`) jsonTagRegex = regexp.MustCompile(`json:"([^"]*)"`) yamlTagRegex = regexp.MustCompile(`yaml:"([^"]*)"`) ) -// Rules for river tag linting: +// Rules for alloy tag linting: // -// - No river tags on anonymous fields. -// - No river tags on unexported fields. -// - No empty tags (river:""). -// - Tags must have options (river:"NAME,OPTIONS"). +// - No alloy tags on anonymous fields. +// - No alloy tags on unexported fields. 
-// - No empty tags (river:"").
-// - Tags must have options (river:"NAME,OPTIONS").
+// - No alloy tags on anonymous fields.
+// - No alloy tags on unexported fields.
+// - No empty tags (alloy:"").
+// - Tags must have options (alloy:"NAME,OPTIONS").
 // - Options must be one of the following:
 //   - attr
 //   - attr,optional
@@ -79,7 +79,7 @@ func run(p *analysis.Pass) (interface{}, error) {
 			matches := syntaxTagRegex.FindAllStringSubmatch(s.Tag(i), -1)
 			if len(matches) == 0 && hasSyntaxTags {
-				// If this struct has River tags, but this field only has json/yaml
+				// If this struct has alloy tags, but this field only has json/yaml
 				// tags, emit an error.
 				jsonMatches := jsonTagRegex.FindAllStringSubmatch(s.Tag(i), -1)
 				yamlMatches := yamlTagRegex.FindAllStringSubmatch(s.Tag(i), -1)
@@ -88,7 +88,7 @@ func run(p *analysis.Pass) (interface{}, error) {
 					p.Report(analysis.Diagnostic{
 						Pos:      field.Pos(),
 						Category: "syntaxtags",
-						Message:  "field has yaml or json tags, but no river tags",
+						Message:  "field has yaml or json tags, but no alloy tags",
 					})
 				}
 
@@ -99,7 +99,7 @@ func run(p *analysis.Pass) (interface{}, error) {
 				p.Report(analysis.Diagnostic{
 					Pos:      field.Pos(),
 					Category: "syntaxtags",
-					Message:  "field should not have more than one river tag",
+					Message:  "field should not have more than one alloy tag",
 				})
 			}
 
@@ -108,22 +108,22 @@ func run(p *analysis.Pass) (interface{}, error) {
 				p.Report(analysis.Diagnostic{
 					Pos:      field.Pos(),
 					Category: "syntaxtags",
-					Message:  "river tags may not be given to anonymous fields",
+					Message:  "alloy tags may not be given to anonymous fields",
 				})
 			}
 			if !field.Exported() {
 				p.Report(analysis.Diagnostic{
 					Pos:      field.Pos(),
 					Category: "syntaxtags",
-					Message:  "river tags may only be given to exported fields",
+					Message:  "alloy tags may only be given to exported fields",
 				})
 			}
 			if len(nodeField.Names) > 1 {
-				// Report "a, b, c int `river:"name,attr"`" as invalid usage.
+				// Report "a, b, c int `alloy:"name,attr"`" as invalid usage.
p.Report(analysis.Diagnostic{ Pos: field.Pos(), Category: "syntaxtags", - Message: "river tags should not be inserted on field names separated by commas", + Message: "alloy tags should not be inserted on field names separated by commas", }) } @@ -218,13 +218,13 @@ type structInfo struct { func lintSyntaxTag(ty *types.Var, tag string) (diagnostics []string) { if tag == "" { - diagnostics = append(diagnostics, "river tag should not be empty") + diagnostics = append(diagnostics, "alloy tag should not be empty") return } parts := strings.SplitN(tag, ",", 2) if len(parts) != 2 { - diagnostics = append(diagnostics, "river tag is missing options") + diagnostics = append(diagnostics, "alloy tag is missing options") return } From ae38d8d71ea822889feefeae43139d954e554ce0 Mon Sep 17 00:00:00 2001 From: Robert Fratto Date: Mon, 25 Mar 2024 14:53:19 -0400 Subject: [PATCH 050/136] all: use alloy struct tags --- internal/component/common/config/selectors.go | 10 +- internal/component/common/config/types.go | 74 ++++----- .../component/common/kubernetes/kubernetes.go | 6 +- internal/component/common/kubernetes/rules.go | 10 +- internal/component/common/net/config.go | 36 ++--- internal/component/common/net/config_test.go | 2 +- internal/component/common/relabel/relabel.go | 14 +- internal/component/component_health.go | 6 +- internal/component/discovery/aws/ec2.go | 24 +-- internal/component/discovery/aws/lightsail.go | 18 +-- internal/component/discovery/azure/azure.go | 32 ++-- internal/component/discovery/consul/consul.go | 30 ++-- .../discovery/consulagent/consulagent.go | 22 +-- .../discovery/digitalocean/digitalocean.go | 14 +- internal/component/discovery/discovery.go | 2 +- internal/component/discovery/dns/dns.go | 8 +- internal/component/discovery/docker/docker.go | 16 +- .../discovery/dockerswarm/dockerswarm.go | 16 +- internal/component/discovery/eureka/eureka.go | 6 +- internal/component/discovery/file/file.go | 4 +- internal/component/discovery/gce/gce.go | 12 +- .../component/discovery/hetzner/hetzner.go | 8 +- internal/component/discovery/http/http.go | 6 +- internal/component/discovery/ionos/ionos.go | 8 +- .../component/discovery/kubelet/kubelet.go | 8 +- .../discovery/kubernetes/kubernetes.go | 26 +-- internal/component/discovery/kuma/kuma.go | 8 +- internal/component/discovery/linode/linode.go | 8 +- .../component/discovery/marathon/marathon.go | 10 +- internal/component/discovery/nerve/nerve.go | 6 +- internal/component/discovery/nomad/nomad.go | 14 +- .../discovery/openstack/openstack.go | 36 ++--- .../component/discovery/ovhcloud/ovhcloud.go | 12 +- internal/component/discovery/process/args.go | 18 +-- .../component/discovery/puppetdb/puppetdb.go | 12 +- .../component/discovery/relabel/relabel.go | 8 +- .../component/discovery/scaleway/scaleway.go | 32 ++-- .../discovery/serverset/serverset.go | 6 +- internal/component/discovery/triton/triton.go | 18 +-- internal/component/discovery/uyuni/uyuni.go | 20 +-- internal/component/faro/receiver/arguments.go | 42 ++--- internal/component/local/file/file.go | 10 +- internal/component/local/file_match/file.go | 4 +- internal/component/loki/echo/echo.go | 2 +- .../component/loki/process/metric/counters.go | 18 +-- .../component/loki/process/metric/gauges.go | 14 +- .../loki/process/metric/histograms.go | 14 +- internal/component/loki/process/process.go | 6 +- .../component/loki/process/process_test.go | 8 +- .../component/loki/process/stages/drop.go | 14 +- .../loki/process/stages/eventlogmessage.go | 6 +- .../loki/process/stages/extensions.go 
| 6 +- .../component/loki/process/stages/geoip.go | 8 +- .../component/loki/process/stages/json.go | 6 +- .../loki/process/stages/label_drop.go | 2 +- .../loki/process/stages/label_keep.go | 2 +- .../component/loki/process/stages/labels.go | 2 +- .../component/loki/process/stages/limit.go | 10 +- .../component/loki/process/stages/logfmt.go | 4 +- .../component/loki/process/stages/match.go | 10 +- .../component/loki/process/stages/metric.go | 8 +- .../loki/process/stages/multiline.go | 6 +- .../component/loki/process/stages/output.go | 2 +- .../component/loki/process/stages/pack.go | 4 +- .../component/loki/process/stages/pipeline.go | 50 +++--- .../loki/process/stages/pipeline_test.go | 2 +- .../component/loki/process/stages/regex.go | 4 +- .../component/loki/process/stages/replace.go | 6 +- .../component/loki/process/stages/sampling.go | 4 +- .../loki/process/stages/static_labels.go | 2 +- .../component/loki/process/stages/template.go | 4 +- .../component/loki/process/stages/tenant.go | 6 +- .../loki/process/stages/timestamp.go | 10 +- internal/component/loki/relabel/relabel.go | 10 +- .../component/loki/relabel/relabel_test.go | 6 +- .../component/loki/rules/kubernetes/debug.go | 18 +-- .../component/loki/rules/kubernetes/types.go | 18 +-- internal/component/loki/source/api/api.go | 10 +- .../loki/source/aws_firehose/component.go | 10 +- .../azure_event_hubs/azure_event_hubs.go | 26 +-- .../loki/source/cloudflare/cloudflare.go | 20 +-- .../component/loki/source/docker/docker.go | 26 +-- internal/component/loki/source/file/file.go | 32 ++-- .../component/loki/source/gcplog/gcplog.go | 10 +- .../loki/source/gcplog/gcptypes/gcptypes.go | 20 +-- internal/component/loki/source/gelf/gelf.go | 8 +- .../component/loki/source/heroku/heroku.go | 14 +- .../component/loki/source/journal/types.go | 14 +- internal/component/loki/source/kafka/kafka.go | 44 +++--- .../loki/source/kubernetes/kubernetes.go | 18 +-- .../kubernetes_events/event_controller.go | 4 +- .../kubernetes_events/kubernetes_events.go | 12 +- .../component/loki/source/podlogs/podlogs.go | 14 +- .../loki/source/podlogs/reconciler.go | 24 +-- .../component/loki/source/syslog/syslog.go | 16 +- .../component/loki/source/syslog/types.go | 18 +-- .../loki/source/windowsevent/arguments.go | 22 +-- internal/component/loki/write/types.go | 30 ++-- internal/component/loki/write/write.go | 20 +-- .../component/mimir/rules/kubernetes/debug.go | 18 +-- .../component/mimir/rules/kubernetes/types.go | 20 +-- internal/component/otelcol/auth/auth.go | 2 +- .../component/otelcol/auth/basic/basic.go | 4 +- .../component/otelcol/auth/bearer/bearer.go | 4 +- .../component/otelcol/auth/headers/headers.go | 10 +- .../component/otelcol/auth/oauth2/oauth2.go | 18 +-- .../component/otelcol/auth/sigv4/sigv4.go | 12 +- .../component/otelcol/config_attraction.go | 14 +- .../component/otelcol/config_debug_metrics.go | 2 +- internal/component/otelcol/config_filter.go | 44 +++--- internal/component/otelcol/config_grpc.go | 64 ++++---- internal/component/otelcol/config_http.go | 42 ++--- internal/component/otelcol/config_k8s.go | 4 +- internal/component/otelcol/config_queue.go | 6 +- internal/component/otelcol/config_retry.go | 12 +- internal/component/otelcol/config_scrape.go | 6 +- internal/component/otelcol/config_tls.go | 30 ++-- .../otelcol/connector/host_info/host_info.go | 6 +- .../connector/servicegraph/servicegraph.go | 18 +-- .../otelcol/connector/spanlogs/spanlogs.go | 28 ++-- .../connector/spanmetrics/spanmetrics.go | 18 +-- 
.../otelcol/connector/spanmetrics/types.go | 18 +-- internal/component/otelcol/consumer.go | 8 +- .../exporter/loadbalancing/loadbalancing.go | 58 +++---- .../otelcol/exporter/logging/logging.go | 8 +- .../component/otelcol/exporter/loki/loki.go | 2 +- .../component/otelcol/exporter/otlp/otlp.go | 10 +- .../otelcol/exporter/otlphttp/otlphttp.go | 14 +- .../otelcol/exporter/prometheus/prometheus.go | 14 +- .../jaeger_remote_sampling.go | 14 +- .../processor/attributes/attributes.go | 6 +- .../otelcol/processor/batch/batch.go | 12 +- .../otelcol/processor/discovery/discovery.go | 8 +- .../otelcol/processor/filter/filter.go | 10 +- .../otelcol/processor/filter/types.go | 10 +- .../processor/k8sattributes/k8sattributes.go | 14 +- .../otelcol/processor/k8sattributes/types.go | 40 ++--- .../processor/memorylimiter/memorylimiter.go | 12 +- .../probabilistic_sampler.go | 12 +- .../internal/aws/ec2/config.go | 22 +-- .../internal/aws/ecs/config.go | 30 ++-- .../internal/aws/eks/config.go | 6 +- .../internal/aws/elasticbeanstalk/config.go | 12 +- .../internal/aws/lambda/config.go | 20 +-- .../internal/azure/aks/config.go | 6 +- .../internal/azure/config.go | 22 +-- .../internal/consul/config.go | 20 +-- .../internal/docker/config.go | 6 +- .../resourcedetection/internal/gcp/config.go | 36 ++--- .../internal/heroku/config.go | 18 +-- .../internal/k8snode/config.go | 10 +- .../internal/openshift/config.go | 16 +- .../resource_attribute_config.go | 2 +- .../internal/system/config.go | 26 +-- .../resourcedetection/resourcedetection.go | 40 ++--- .../component/otelcol/processor/span/span.go | 22 +-- .../processor/tail_sampling/tail_sampling.go | 10 +- .../otelcol/processor/tail_sampling/types.go | 96 ++++++------ .../otelcol/processor/transform/transform.go | 14 +- .../otelcol/receiver/jaeger/jaeger.go | 32 ++-- .../component/otelcol/receiver/kafka/kafka.go | 90 +++++------ .../component/otelcol/receiver/loki/loki.go | 4 +- .../otelcol/receiver/opencensus/opencensus.go | 8 +- .../component/otelcol/receiver/otlp/otlp.go | 16 +- .../otelcol/receiver/prometheus/prometheus.go | 4 +- .../otelcol/receiver/vcenter/vcenter.go | 116 +++++++------- .../otelcol/receiver/zipkin/zipkin.go | 8 +- .../prometheus/exporter/apache/apache.go | 6 +- .../prometheus/exporter/azure/azure.go | 28 ++-- .../prometheus/exporter/blackbox/blackbox.go | 16 +- .../prometheus/exporter/cadvisor/cadvisor.go | 36 ++--- .../prometheus/exporter/cloudwatch/config.go | 64 ++++---- .../prometheus/exporter/consul/consul.go | 26 +-- .../prometheus/exporter/dnsmasq/dnsmasq.go | 6 +- .../exporter/elasticsearch/elasticsearch.go | 36 ++--- .../component/prometheus/exporter/exporter.go | 2 +- .../component/prometheus/exporter/gcp/gcp.go | 16 +- .../prometheus/exporter/github/github.go | 12 +- .../prometheus/exporter/kafka/kafka.go | 44 +++--- .../exporter/memcached/memcached.go | 6 +- .../prometheus/exporter/mongodb/mongodb.go | 8 +- .../prometheus/exporter/mssql/mssql.go | 10 +- .../prometheus/exporter/mysql/mysql.go | 56 +++---- .../prometheus/exporter/oracledb/oracledb.go | 8 +- .../prometheus/exporter/postgres/postgres.go | 16 +- .../prometheus/exporter/process/process.go | 20 +-- .../prometheus/exporter/redis/redis.go | 64 ++++---- .../prometheus/exporter/snmp/snmp.go | 28 ++-- .../exporter/snowflake/snowflake.go | 10 +- .../prometheus/exporter/squid/squid.go | 6 +- .../prometheus/exporter/statsd/config.go | 40 ++--- .../prometheus/exporter/unix/config.go | 148 +++++++++--------- .../prometheus/exporter/windows/config.go | 98 ++++++------ 
.../component/prometheus/operator/types.go | 32 ++-- .../prometheus/receive_http/receive_http.go | 4 +- .../component/prometheus/relabel/relabel.go | 10 +- .../component/prometheus/remotewrite/types.go | 78 ++++----- .../component/prometheus/scrape/scrape.go | 60 +++---- internal/component/pyroscope/ebpf/args.go | 26 +-- .../component/pyroscope/ebpf/ebpf_linux.go | 4 +- internal/component/pyroscope/java/args.go | 18 +-- internal/component/pyroscope/scrape/scrape.go | 68 ++++---- internal/component/pyroscope/write/write.go | 22 +-- internal/component/remote/http/http.go | 18 +-- .../component/remote/kubernetes/kubernetes.go | 12 +- internal/component/remote/s3/types.go | 24 +-- internal/component/remote/vault/auth.go | 76 ++++----- internal/component/remote/vault/refresher.go | 10 +- internal/component/remote/vault/vault.go | 26 +-- internal/flow/componenttest/testfailmodule.go | 4 +- internal/flow/flow_services_test.go | 4 +- .../flow/internal/controller/loader_test.go | 2 +- .../controller/node_config_argument.go | 6 +- .../internal/controller/node_config_export.go | 2 +- .../internal/controller/value_cache_test.go | 6 +- .../flow/internal/importsource/import_file.go | 6 +- .../flow/internal/importsource/import_git.go | 10 +- .../flow/internal/importsource/import_http.go | 14 +- .../internal/importsource/import_string.go | 2 +- .../flow/internal/testcomponents/count.go | 6 +- .../testcomponents/module/file/file.go | 4 +- .../internal/testcomponents/module/git/git.go | 16 +- .../testcomponents/module/http/http.go | 4 +- .../internal/testcomponents/module/module.go | 2 +- .../testcomponents/module/string/string.go | 4 +- .../internal/testcomponents/passthrough.go | 8 +- .../flow/internal/testcomponents/sumation.go | 6 +- internal/flow/internal/testcomponents/tick.go | 4 +- internal/flow/logging/options.go | 6 +- internal/flow/module_test.go | 4 +- internal/flow/tracing/tracing.go | 14 +- internal/service/cluster/cluster.go | 2 +- internal/service/http/http.go | 2 +- internal/service/http/tls.go | 44 +++--- internal/service/remotecfg/remotecfg.go | 10 +- internal/vcs/auth.go | 16 +- 236 files changed, 2128 insertions(+), 2128 deletions(-) diff --git a/internal/component/common/config/selectors.go b/internal/component/common/config/selectors.go index e4ae0e7dd9..61489604b8 100644 --- a/internal/component/common/config/selectors.go +++ b/internal/component/common/config/selectors.go @@ -8,8 +8,8 @@ import ( // LabelSelector defines a selector to check to see if a set of Kubernetes // labels matches a selector. type LabelSelector struct { - MatchLabels map[string]string `river:"match_labels,attr,optional"` - MatchExpressions []MatchExpression `river:"match_expression,block,optional"` + MatchLabels map[string]string `alloy:"match_labels,attr,optional"` + MatchExpressions []MatchExpression `alloy:"match_expression,block,optional"` } // BuildSelector builds a [labels.Selector] from a Flow LabelSelector. @@ -32,9 +32,9 @@ func (ls *LabelSelector) BuildSelector() (labels.Selector, error) { // MatchExpression defines an expression matcher to check to see if some key // from a Kubernetes resource matches a selector. 
type MatchExpression struct { - Key string `river:"key,attr"` - Operator string `river:"operator,attr"` - Values []string `river:"values,attr,optional"` + Key string `alloy:"key,attr"` + Operator string `alloy:"operator,attr"` + Values []string `alloy:"values,attr,optional"` } func (me *MatchExpression) buildExpression() metav1.LabelSelectorRequirement { diff --git a/internal/component/common/config/types.go b/internal/component/common/config/types.go index 561b9e7aa0..85ebf2637a 100644 --- a/internal/component/common/config/types.go +++ b/internal/component/common/config/types.go @@ -15,15 +15,15 @@ const bearerAuth string = "Bearer" // HTTPClientConfig mirrors config.HTTPClientConfig type HTTPClientConfig struct { - BasicAuth *BasicAuth `river:"basic_auth,block,optional"` - Authorization *Authorization `river:"authorization,block,optional"` - OAuth2 *OAuth2Config `river:"oauth2,block,optional"` - BearerToken alloytypes.Secret `river:"bearer_token,attr,optional"` - BearerTokenFile string `river:"bearer_token_file,attr,optional"` - ProxyConfig *ProxyConfig `river:",squash"` - TLSConfig TLSConfig `river:"tls_config,block,optional"` - FollowRedirects bool `river:"follow_redirects,attr,optional"` - EnableHTTP2 bool `river:"enable_http2,attr,optional"` + BasicAuth *BasicAuth `alloy:"basic_auth,block,optional"` + Authorization *Authorization `alloy:"authorization,block,optional"` + OAuth2 *OAuth2Config `alloy:"oauth2,block,optional"` + BearerToken alloytypes.Secret `alloy:"bearer_token,attr,optional"` + BearerTokenFile string `alloy:"bearer_token_file,attr,optional"` + ProxyConfig *ProxyConfig `alloy:",squash"` + TLSConfig TLSConfig `alloy:"tls_config,block,optional"` + FollowRedirects bool `alloy:"follow_redirects,attr,optional"` + EnableHTTP2 bool `alloy:"enable_http2,attr,optional"` } // SetToDefault implements the river.Defaulter @@ -109,9 +109,9 @@ var DefaultHTTPClientConfig = HTTPClientConfig{ // BasicAuth configures Basic HTTP authentication credentials. type BasicAuth struct { - Username string `river:"username,attr,optional"` - Password alloytypes.Secret `river:"password,attr,optional"` - PasswordFile string `river:"password_file,attr,optional"` + Username string `alloy:"username,attr,optional"` + Password alloytypes.Secret `alloy:"password,attr,optional"` + PasswordFile string `alloy:"password_file,attr,optional"` } // Convert converts our type to the native prometheus type @@ -139,10 +139,10 @@ func (b *BasicAuth) Validate() error { } type ProxyConfig struct { - ProxyURL URL `river:"proxy_url,attr,optional"` - NoProxy string `river:"no_proxy,attr,optional"` - ProxyFromEnvironment bool `river:"proxy_from_environment,attr,optional"` - ProxyConnectHeader Header `river:",squash"` + ProxyURL URL `alloy:"proxy_url,attr,optional"` + NoProxy string `alloy:"no_proxy,attr,optional"` + ProxyFromEnvironment bool `alloy:"proxy_from_environment,attr,optional"` + ProxyConnectHeader Header `alloy:",squash"` } func (p *ProxyConfig) Convert() config.ProxyConfig { @@ -215,7 +215,7 @@ func (u *URL) Convert() config.URL { } type Header struct { - Header map[string][]alloytypes.Secret `river:"proxy_connect_header,attr,optional"` + Header map[string][]alloytypes.Secret `alloy:"proxy_connect_header,attr,optional"` } func (h *Header) Convert() config.Header { @@ -238,9 +238,9 @@ func (h *Header) Convert() config.Header { // Authorization sets up HTTP authorization credentials. 
type Authorization struct { - Type string `river:"type,attr,optional"` - Credentials alloytypes.Secret `river:"credentials,attr,optional"` - CredentialsFile string `river:"credentials_file,attr,optional"` + Type string `alloy:"type,attr,optional"` + Credentials alloytypes.Secret `alloy:"credentials,attr,optional"` + CredentialsFile string `alloy:"credentials_file,attr,optional"` } // Convert converts our type to the native prometheus type @@ -301,15 +301,15 @@ func (tv *TLSVersion) UnmarshalText(text []byte) error { // TLSConfig sets up options for TLS connections. type TLSConfig struct { - CA string `river:"ca_pem,attr,optional"` - CAFile string `river:"ca_file,attr,optional"` - Cert string `river:"cert_pem,attr,optional"` - CertFile string `river:"cert_file,attr,optional"` - Key alloytypes.Secret `river:"key_pem,attr,optional"` - KeyFile string `river:"key_file,attr,optional"` - ServerName string `river:"server_name,attr,optional"` - InsecureSkipVerify bool `river:"insecure_skip_verify,attr,optional"` - MinVersion TLSVersion `river:"min_version,attr,optional"` + CA string `alloy:"ca_pem,attr,optional"` + CAFile string `alloy:"ca_file,attr,optional"` + Cert string `alloy:"cert_pem,attr,optional"` + CertFile string `alloy:"cert_file,attr,optional"` + Key alloytypes.Secret `alloy:"key_pem,attr,optional"` + KeyFile string `alloy:"key_file,attr,optional"` + ServerName string `alloy:"server_name,attr,optional"` + InsecureSkipVerify bool `alloy:"insecure_skip_verify,attr,optional"` + MinVersion TLSVersion `alloy:"min_version,attr,optional"` } // Convert converts our type to the native prometheus type @@ -358,14 +358,14 @@ func (t *TLSConfig) Validate() error { // OAuth2Config sets up the OAuth2 client. type OAuth2Config struct { - ClientID string `river:"client_id,attr,optional"` - ClientSecret alloytypes.Secret `river:"client_secret,attr,optional"` - ClientSecretFile string `river:"client_secret_file,attr,optional"` - Scopes []string `river:"scopes,attr,optional"` - TokenURL string `river:"token_url,attr,optional"` - EndpointParams map[string]string `river:"endpoint_params,attr,optional"` - ProxyConfig *ProxyConfig `river:",squash"` - TLSConfig *TLSConfig `river:"tls_config,block,optional"` + ClientID string `alloy:"client_id,attr,optional"` + ClientSecret alloytypes.Secret `alloy:"client_secret,attr,optional"` + ClientSecretFile string `alloy:"client_secret_file,attr,optional"` + Scopes []string `alloy:"scopes,attr,optional"` + TokenURL string `alloy:"token_url,attr,optional"` + EndpointParams map[string]string `alloy:"endpoint_params,attr,optional"` + ProxyConfig *ProxyConfig `alloy:",squash"` + TLSConfig *TLSConfig `alloy:"tls_config,block,optional"` } // Convert converts our type to the native prometheus type diff --git a/internal/component/common/kubernetes/kubernetes.go b/internal/component/common/kubernetes/kubernetes.go index df09a0f224..01d8c009d5 100644 --- a/internal/component/common/kubernetes/kubernetes.go +++ b/internal/component/common/kubernetes/kubernetes.go @@ -15,9 +15,9 @@ import ( // ClientArguments controls how to connect to a Kubernetes cluster. 
 type ClientArguments struct {
-	APIServer        commoncfg.URL              `river:"api_server,attr,optional"`
-	KubeConfig       string                     `river:"kubeconfig_file,attr,optional"`
-	HTTPClientConfig commoncfg.HTTPClientConfig `river:",squash"`
+	APIServer        commoncfg.URL              `alloy:"api_server,attr,optional"`
+	KubeConfig       string                     `alloy:"kubeconfig_file,attr,optional"`
+	HTTPClientConfig commoncfg.HTTPClientConfig `alloy:",squash"`
 }
 
 // DefaultClientArguments holds default values for Arguments.
diff --git a/internal/component/common/kubernetes/rules.go b/internal/component/common/kubernetes/rules.go
index c89d9742af..c4bcc7d198 100644
--- a/internal/component/common/kubernetes/rules.go
+++ b/internal/component/common/kubernetes/rules.go
@@ -6,14 +6,14 @@ import (
 )
 
 type LabelSelector struct {
-	MatchLabels      map[string]string `river:"match_labels,attr,optional"`
-	MatchExpressions []MatchExpression `river:"match_expression,block,optional"`
+	MatchLabels      map[string]string `alloy:"match_labels,attr,optional"`
+	MatchExpressions []MatchExpression `alloy:"match_expression,block,optional"`
 }
 
 type MatchExpression struct {
-	Key      string   `river:"key,attr"`
-	Operator string   `river:"operator,attr"`
-	Values   []string `river:"values,attr,optional"`
+	Key      string   `alloy:"key,attr"`
+	Operator string   `alloy:"operator,attr"`
+	Values   []string `alloy:"values,attr,optional"`
 }
 
 func ConvertSelectorToListOptions(selector LabelSelector) (labels.Selector, error) {
diff --git a/internal/component/common/net/config.go b/internal/component/common/net/config.go
index 444627bf50..6f7c40479e 100644
--- a/internal/component/common/net/config.go
+++ b/internal/component/common/net/config.go
@@ -26,24 +26,24 @@ const (
 type ServerConfig struct {
 	// HTTP configures the HTTP server. Note that despite the block being present or not,
 	// the server is always started.
-	HTTP *HTTPConfig `river:"http,block,optional"`
+	HTTP *HTTPConfig `alloy:"http,block,optional"`
 
 	// GRPC configures the gRPC server. Note that despite the block being present or not,
 	// the server is always started.
-	GRPC *GRPCConfig `river:"grpc,block,optional"`
+	GRPC *GRPCConfig `alloy:"grpc,block,optional"`
 
 	// GracefulShutdownTimeout configures a timeout to gracefully shut down the server.
-	GracefulShutdownTimeout time.Duration `river:"graceful_shutdown_timeout,attr,optional"`
+	GracefulShutdownTimeout time.Duration `alloy:"graceful_shutdown_timeout,attr,optional"`
 }
 
 // HTTPConfig configures the HTTP server started by dskit.Server.
 type HTTPConfig struct {
-	ListenAddress      string        `river:"listen_address,attr,optional"`
-	ListenPort         int           `river:"listen_port,attr,optional"`
-	ConnLimit          int           `river:"conn_limit,attr,optional"`
-	ServerReadTimeout  time.Duration `river:"server_read_timeout,attr,optional"`
-	ServerWriteTimeout time.Duration `river:"server_write_timeout,attr,optional"`
-	ServerIdleTimeout  time.Duration `river:"server_idle_timeout,attr,optional"`
+	ListenAddress      string        `alloy:"listen_address,attr,optional"`
+	ListenPort         int           `alloy:"listen_port,attr,optional"`
+	ConnLimit          int           `alloy:"conn_limit,attr,optional"`
+	ServerReadTimeout  time.Duration `alloy:"server_read_timeout,attr,optional"`
+	ServerWriteTimeout time.Duration `alloy:"server_write_timeout,attr,optional"`
+	ServerIdleTimeout  time.Duration `alloy:"server_idle_timeout,attr,optional"`
 }
 
 // Into applies the configs from HTTPConfig into a dskit.Config.
@@ -58,15 +58,15 @@ func (h *HTTPConfig) Into(c *dskit.Config) {
 
 // GRPCConfig configures the gRPC server started by dskit.Server.
 type GRPCConfig struct {
-	ListenAddress              string        `river:"listen_address,attr,optional"`
-	ListenPort                 int           `river:"listen_port,attr,optional"`
-	ConnLimit                  int           `river:"conn_limit,attr,optional"`
-	MaxConnectionAge           time.Duration `river:"max_connection_age,attr,optional"`
-	MaxConnectionAgeGrace      time.Duration `river:"max_connection_age_grace,attr,optional"`
-	MaxConnectionIdle          time.Duration `river:"max_connection_idle,attr,optional"`
-	ServerMaxRecvMsg           int           `river:"server_max_recv_msg_size,attr,optional"`
-	ServerMaxSendMsg           int           `river:"server_max_send_msg_size,attr,optional"`
-	ServerMaxConcurrentStreams uint          `river:"server_max_concurrent_streams,attr,optional"`
+	ListenAddress              string        `alloy:"listen_address,attr,optional"`
+	ListenPort                 int           `alloy:"listen_port,attr,optional"`
+	ConnLimit                  int           `alloy:"conn_limit,attr,optional"`
+	MaxConnectionAge           time.Duration `alloy:"max_connection_age,attr,optional"`
+	MaxConnectionAgeGrace      time.Duration `alloy:"max_connection_age_grace,attr,optional"`
+	MaxConnectionIdle          time.Duration `alloy:"max_connection_idle,attr,optional"`
+	ServerMaxRecvMsg           int           `alloy:"server_max_recv_msg_size,attr,optional"`
+	ServerMaxSendMsg           int           `alloy:"server_max_send_msg_size,attr,optional"`
+	ServerMaxConcurrentStreams uint          `alloy:"server_max_concurrent_streams,attr,optional"`
 }
 
 // Into applies the configs from GRPCConfig into a dskit.Config.
diff --git a/internal/component/common/net/config_test.go b/internal/component/common/net/config_test.go
index 56a04b43ad..33ee7216ea 100644
--- a/internal/component/common/net/config_test.go
+++ b/internal/component/common/net/config_test.go
@@ -13,7 +13,7 @@ import (
 // testArguments mimics an arguments type used by a component, applying the defaults to ServerConfig
 // from its UnmarshalRiver implementation, since the block is squashed.
 type testArguments struct {
-	Server *ServerConfig `river:",squash"`
+	Server *ServerConfig `alloy:",squash"`
 }
 
 func (t *testArguments) UnmarshalRiver(f func(v interface{}) error) error {
diff --git a/internal/component/common/relabel/relabel.go b/internal/component/common/relabel/relabel.go
index a1acaf3b83..a0b3f8a9cf 100644
--- a/internal/component/common/relabel/relabel.go
+++ b/internal/component/common/relabel/relabel.go
@@ -109,13 +109,13 @@ func (re Regexp) String() string {
 
 // Config describes a relabelling step to be applied on a target.
 type Config struct {
-	SourceLabels []string `river:"source_labels,attr,optional"`
-	Separator    string   `river:"separator,attr,optional"`
-	Regex        Regexp   `river:"regex,attr,optional"`
-	Modulus      uint64   `river:"modulus,attr,optional"`
-	TargetLabel  string   `river:"target_label,attr,optional"`
-	Replacement  string   `river:"replacement,attr,optional"`
-	Action       Action   `river:"action,attr,optional"`
+	SourceLabels []string `alloy:"source_labels,attr,optional"`
+	Separator    string   `alloy:"separator,attr,optional"`
+	Regex        Regexp   `alloy:"regex,attr,optional"`
+	Modulus      uint64   `alloy:"modulus,attr,optional"`
+	TargetLabel  string   `alloy:"target_label,attr,optional"`
+	Replacement  string   `alloy:"replacement,attr,optional"`
+	Action       Action   `alloy:"action,attr,optional"`
 }
 
 // DefaultRelabelConfig sets the default values of fields when decoding a RelabelConfig block.
diff --git a/internal/component/component_health.go b/internal/component/component_health.go
index 66a3647886..bfb13ae94d 100644
--- a/internal/component/component_health.go
+++ b/internal/component/component_health.go
@@ -26,15 +26,15 @@ type HealthComponent interface {
 // River.
 type Health struct {
 	// The specific health value.
-	Health HealthType `river:"state,attr"`
+	Health HealthType `alloy:"state,attr"`
 
 	// An optional message to describe the health; useful to say why a component
 	// is unhealthy.
-	Message string `river:"message,attr,optional"`
+	Message string `alloy:"message,attr,optional"`
 
 	// An optional time to indicate when the component last modified something
 	// which updated its health.
-	UpdateTime time.Time `river:"update_time,attr,optional"`
+	UpdateTime time.Time `alloy:"update_time,attr,optional"`
 }
 
 // HealthType holds the health value for a component.
diff --git a/internal/component/discovery/aws/ec2.go b/internal/component/discovery/aws/ec2.go
index 72ce4464ab..971dd25863 100644
--- a/internal/component/discovery/aws/ec2.go
+++ b/internal/component/discovery/aws/ec2.go
@@ -31,23 +31,23 @@ func init() {
 
 // EC2Filter is the configuration for filtering EC2 instances.
 type EC2Filter struct {
-	Name   string   `river:"name,attr"`
-	Values []string `river:"values,attr"`
+	Name   string   `alloy:"name,attr"`
+	Values []string `alloy:"values,attr"`
 }
 
 // EC2Arguments is the configuration for EC2 based service discovery.
 type EC2Arguments struct {
-	Endpoint        string            `river:"endpoint,attr,optional"`
-	Region          string            `river:"region,attr,optional"`
-	AccessKey       string            `river:"access_key,attr,optional"`
-	SecretKey       alloytypes.Secret `river:"secret_key,attr,optional"`
-	Profile         string            `river:"profile,attr,optional"`
-	RoleARN         string            `river:"role_arn,attr,optional"`
-	RefreshInterval time.Duration     `river:"refresh_interval,attr,optional"`
-	Port            int               `river:"port,attr,optional"`
-	Filters         []*EC2Filter      `river:"filter,block,optional"`
+	Endpoint        string            `alloy:"endpoint,attr,optional"`
+	Region          string            `alloy:"region,attr,optional"`
+	AccessKey       string            `alloy:"access_key,attr,optional"`
+	SecretKey       alloytypes.Secret `alloy:"secret_key,attr,optional"`
+	Profile         string            `alloy:"profile,attr,optional"`
+	RoleARN         string            `alloy:"role_arn,attr,optional"`
+	RefreshInterval time.Duration     `alloy:"refresh_interval,attr,optional"`
+	Port            int               `alloy:"port,attr,optional"`
+	Filters         []*EC2Filter      `alloy:"filter,block,optional"`
 
-	HTTPClientConfig config.HTTPClientConfig `river:",squash"`
+	HTTPClientConfig config.HTTPClientConfig `alloy:",squash"`
 }
 
 func (args EC2Arguments) Convert() *promaws.EC2SDConfig {
diff --git a/internal/component/discovery/aws/lightsail.go b/internal/component/discovery/aws/lightsail.go
index ca35979fba..b64f5f9ba4 100644
--- a/internal/component/discovery/aws/lightsail.go
+++ b/internal/component/discovery/aws/lightsail.go
@@ -31,15 +31,15 @@ func init() {
 
 // LightsailArguments is the configuration for AWS Lightsail based service discovery.
 type LightsailArguments struct {
-	Endpoint         string                  `river:"endpoint,attr,optional"`
-	Region           string                  `river:"region,attr,optional"`
-	AccessKey        string                  `river:"access_key,attr,optional"`
-	SecretKey        alloytypes.Secret       `river:"secret_key,attr,optional"`
-	Profile          string                  `river:"profile,attr,optional"`
-	RoleARN          string                  `river:"role_arn,attr,optional"`
-	RefreshInterval  time.Duration           `river:"refresh_interval,attr,optional"`
-	Port             int                     `river:"port,attr,optional"`
-	HTTPClientConfig config.HTTPClientConfig `river:",squash"`
+	Endpoint         string                  `alloy:"endpoint,attr,optional"`
+	Region           string                  `alloy:"region,attr,optional"`
+	AccessKey        string                  `alloy:"access_key,attr,optional"`
+	SecretKey        alloytypes.Secret       `alloy:"secret_key,attr,optional"`
+	Profile          string                  `alloy:"profile,attr,optional"`
+	RoleARN          string                  `alloy:"role_arn,attr,optional"`
+	RefreshInterval  time.Duration           `alloy:"refresh_interval,attr,optional"`
+	Port             int                     `alloy:"port,attr,optional"`
+	HTTPClientConfig config.HTTPClientConfig `alloy:",squash"`
 }
 
 func (args LightsailArguments) Convert() *promaws.LightsailSDConfig {
diff --git a/internal/component/discovery/azure/azure.go b/internal/component/discovery/azure/azure.go
index 61b4857737..beb7fb64e8 100644
--- a/internal/component/discovery/azure/azure.go
+++ b/internal/component/discovery/azure/azure.go
@@ -29,28 +29,28 @@ func init() {
 }
 
 type Arguments struct {
-	Environment     string           `river:"environment,attr,optional"`
-	Port            int              `river:"port,attr,optional"`
-	SubscriptionID  string           `river:"subscription_id,attr,optional"`
-	OAuth           *OAuth           `river:"oauth,block,optional"`
-	ManagedIdentity *ManagedIdentity `river:"managed_identity,block,optional"`
-	RefreshInterval time.Duration    `river:"refresh_interval,attr,optional"`
-	ResourceGroup   string           `river:"resource_group,attr,optional"`
-
-	ProxyConfig     *config.ProxyConfig `river:",squash"`
-	FollowRedirects bool                `river:"follow_redirects,attr,optional"`
-	EnableHTTP2     bool                `river:"enable_http2,attr,optional"`
-	TLSConfig       config.TLSConfig    `river:"tls_config,block,optional"`
+	Environment     string           `alloy:"environment,attr,optional"`
+	Port            int              `alloy:"port,attr,optional"`
+	SubscriptionID  string           `alloy:"subscription_id,attr,optional"`
+	OAuth           *OAuth           `alloy:"oauth,block,optional"`
+	ManagedIdentity *ManagedIdentity `alloy:"managed_identity,block,optional"`
+	RefreshInterval time.Duration    `alloy:"refresh_interval,attr,optional"`
+	ResourceGroup   string           `alloy:"resource_group,attr,optional"`
+
+	ProxyConfig     *config.ProxyConfig `alloy:",squash"`
+	FollowRedirects bool                `alloy:"follow_redirects,attr,optional"`
+	EnableHTTP2     bool                `alloy:"enable_http2,attr,optional"`
+	TLSConfig       config.TLSConfig    `alloy:"tls_config,block,optional"`
 }
 
 type OAuth struct {
-	ClientID     string            `river:"client_id,attr"`
-	TenantID     string            `river:"tenant_id,attr"`
-	ClientSecret alloytypes.Secret `river:"client_secret,attr"`
+	ClientID     string            `alloy:"client_id,attr"`
+	TenantID     string            `alloy:"tenant_id,attr"`
+	ClientSecret alloytypes.Secret `alloy:"client_secret,attr"`
 }
 
 type ManagedIdentity struct {
-	ClientID string `river:"client_id,attr"`
+	ClientID string `alloy:"client_id,attr"`
 }
 
 var DefaultArguments = Arguments{
diff --git a/internal/component/discovery/consul/consul.go b/internal/component/discovery/consul/consul.go
index 529b44af9e..f68eb614ab 100644
--- a/internal/component/discovery/consul/consul.go
+++ b/internal/component/discovery/consul/consul.go
@@ -28,22 +28,22 @@ func init() {
 }
 
 type Arguments struct {
-	Server       string            `river:"server,attr,optional"`
-	Token        alloytypes.Secret `river:"token,attr,optional"`
-	Datacenter   string            `river:"datacenter,attr,optional"`
-	Namespace    string            `river:"namespace,attr,optional"`
-	Partition    string            `river:"partition,attr,optional"`
-	TagSeparator string            `river:"tag_separator,attr,optional"`
-	Scheme       string            `river:"scheme,attr,optional"`
-	Username     string            `river:"username,attr,optional"`
-	Password     alloytypes.Secret `river:"password,attr,optional"`
-	AllowStale   bool              `river:"allow_stale,attr,optional"`
-	Services     []string          `river:"services,attr,optional"`
-	ServiceTags  []string          `river:"tags,attr,optional"`
-	NodeMeta     map[string]string `river:"node_meta,attr,optional"`
+	Server       string            `alloy:"server,attr,optional"`
+	Token        alloytypes.Secret `alloy:"token,attr,optional"`
+	Datacenter   string            `alloy:"datacenter,attr,optional"`
+	Namespace    string            `alloy:"namespace,attr,optional"`
+	Partition    string            `alloy:"partition,attr,optional"`
+	TagSeparator string            `alloy:"tag_separator,attr,optional"`
+	Scheme       string            `alloy:"scheme,attr,optional"`
+	Username     string            `alloy:"username,attr,optional"`
+	Password     alloytypes.Secret `alloy:"password,attr,optional"`
+	AllowStale   bool              `alloy:"allow_stale,attr,optional"`
+	Services     []string          `alloy:"services,attr,optional"`
+	ServiceTags  []string          `alloy:"tags,attr,optional"`
+	NodeMeta     map[string]string `alloy:"node_meta,attr,optional"`
 
-	RefreshInterval  time.Duration           `river:"refresh_interval,attr,optional"`
-	HTTPClientConfig config.HTTPClientConfig `river:",squash"`
+	RefreshInterval  time.Duration           `alloy:"refresh_interval,attr,optional"`
+	HTTPClientConfig config.HTTPClientConfig `alloy:",squash"`
 }
 
 var DefaultArguments = Arguments{
diff --git a/internal/component/discovery/consulagent/consulagent.go b/internal/component/discovery/consulagent/consulagent.go
index 76f96c7370..d897072c9e 100644
--- a/internal/component/discovery/consulagent/consulagent.go
+++ b/internal/component/discovery/consulagent/consulagent.go
@@ -27,17 +27,17 @@ func init() {
 }
 
 type Arguments struct {
-	Server          string            `river:"server,attr,optional"`
-	Token           alloytypes.Secret `river:"token,attr,optional"`
-	Datacenter      string            `river:"datacenter,attr,optional"`
-	TagSeparator    string            `river:"tag_separator,attr,optional"`
-	Scheme          string            `river:"scheme,attr,optional"`
-	Username        string            `river:"username,attr,optional"`
-	Password        alloytypes.Secret `river:"password,attr,optional"`
-	RefreshInterval time.Duration     `river:"refresh_interval,attr,optional"`
-	Services        []string          `river:"services,attr,optional"`
-	ServiceTags     []string          `river:"tags,attr,optional"`
-	TLSConfig       config.TLSConfig  `river:"tls_config,block,optional"`
+	Server          string            `alloy:"server,attr,optional"`
+	Token           alloytypes.Secret `alloy:"token,attr,optional"`
+	Datacenter      string            `alloy:"datacenter,attr,optional"`
+	TagSeparator    string            `alloy:"tag_separator,attr,optional"`
+	Scheme          string            `alloy:"scheme,attr,optional"`
+	Username        string            `alloy:"username,attr,optional"`
+	Password        alloytypes.Secret `alloy:"password,attr,optional"`
+	RefreshInterval time.Duration     `alloy:"refresh_interval,attr,optional"`
+	Services        []string          `alloy:"services,attr,optional"`
+	ServiceTags     []string          `alloy:"tags,attr,optional"`
+	TLSConfig       config.TLSConfig  `alloy:"tls_config,block,optional"`
 }
 
 var DefaultArguments = Arguments{
diff --git a/internal/component/discovery/digitalocean/digitalocean.go b/internal/component/discovery/digitalocean/digitalocean.go
index 7fe661fcf3..bb7315eb15 100644
--- a/internal/component/discovery/digitalocean/digitalocean.go
+++ b/internal/component/discovery/digitalocean/digitalocean.go
@@ -27,15 +27,15 @@ func init() {
 }
 
 type Arguments struct {
-	RefreshInterval time.Duration `river:"refresh_interval,attr,optional"`
-	Port            int           `river:"port,attr,optional"`
+	RefreshInterval time.Duration `alloy:"refresh_interval,attr,optional"`
+	Port            int           `alloy:"port,attr,optional"`
 
-	BearerToken     alloytypes.Secret `river:"bearer_token,attr,optional"`
-	BearerTokenFile string            `river:"bearer_token_file,attr,optional"`
+	BearerToken     alloytypes.Secret `alloy:"bearer_token,attr,optional"`
+	BearerTokenFile string            `alloy:"bearer_token_file,attr,optional"`
 
-	ProxyConfig     *config.ProxyConfig `river:",squash"`
-	FollowRedirects bool                `river:"follow_redirects,attr,optional"`
-	EnableHTTP2     bool                `river:"enable_http2,attr,optional"`
+	ProxyConfig     *config.ProxyConfig `alloy:",squash"`
+	FollowRedirects bool                `alloy:"follow_redirects,attr,optional"`
+	EnableHTTP2     bool                `alloy:"enable_http2,attr,optional"`
 }
 
 var DefaultArguments = Arguments{
diff --git a/internal/component/discovery/discovery.go b/internal/component/discovery/discovery.go
index 2d4014cbb7..be260650d3 100644
--- a/internal/component/discovery/discovery.go
+++ b/internal/component/discovery/discovery.go
@@ -90,7 +90,7 @@ func (t Target) NonMetaLabels() labels.Labels {
 
 // Exports holds values which are exported by all discovery components.
 type Exports struct {
-	Targets []Target `river:"targets,attr"`
+	Targets []Target `alloy:"targets,attr"`
 }
 
 // Discoverer is an alias for Prometheus' Discoverer interface, so users of this package don't need
diff --git a/internal/component/discovery/dns/dns.go b/internal/component/discovery/dns/dns.go
index 3a774ae291..4f1309f37c 100644
--- a/internal/component/discovery/dns/dns.go
+++ b/internal/component/discovery/dns/dns.go
@@ -28,10 +28,10 @@ func init() {
 
 // Arguments configures the discovery.dns component.
 type Arguments struct {
-	Names           []string      `river:"names,attr"`
-	RefreshInterval time.Duration `river:"refresh_interval,attr,optional"`
-	Type            string        `river:"type,attr,optional"`
-	Port            int           `river:"port,attr,optional"`
+	Names           []string      `alloy:"names,attr"`
+	RefreshInterval time.Duration `alloy:"refresh_interval,attr,optional"`
+	Type            string        `alloy:"type,attr,optional"`
+	Port            int           `alloy:"port,attr,optional"`
 }
 
 var DefaultArguments = Arguments{
diff --git a/internal/component/discovery/docker/docker.go b/internal/component/discovery/docker/docker.go
index 517311174b..0712c20e63 100644
--- a/internal/component/discovery/docker/docker.go
+++ b/internal/component/discovery/docker/docker.go
@@ -29,19 +29,19 @@ func init() {
 
 // Arguments configures the discovery.docker component.
 type Arguments struct {
-	Host               string                  `river:"host,attr"`
-	Port               int                     `river:"port,attr,optional"`
-	HostNetworkingHost string                  `river:"host_networking_host,attr,optional"`
-	RefreshInterval    time.Duration           `river:"refresh_interval,attr,optional"`
-	Filters            []Filter                `river:"filter,block,optional"`
-	HTTPClientConfig   config.HTTPClientConfig `river:",squash"`
+	Host               string                  `alloy:"host,attr"`
+	Port               int                     `alloy:"port,attr,optional"`
+	HostNetworkingHost string                  `alloy:"host_networking_host,attr,optional"`
+	RefreshInterval    time.Duration           `alloy:"refresh_interval,attr,optional"`
+	Filters            []Filter                `alloy:"filter,block,optional"`
+	HTTPClientConfig   config.HTTPClientConfig `alloy:",squash"`
 }
 
 // Filter is used to limit the discovery process to a subset of available
 // resources.
 type Filter struct {
-	Name   string   `river:"name,attr"`
-	Values []string `river:"values,attr"`
+	Name   string   `alloy:"name,attr"`
+	Values []string `alloy:"values,attr"`
 }
 
 // Convert converts a Filter to the upstream Prometheus SD type.
diff --git a/internal/component/discovery/dockerswarm/dockerswarm.go b/internal/component/discovery/dockerswarm/dockerswarm.go
index 030d8cad99..f7caafc665 100644
--- a/internal/component/discovery/dockerswarm/dockerswarm.go
+++ b/internal/component/discovery/dockerswarm/dockerswarm.go
@@ -27,17 +27,17 @@ func init() {
 }
 
 type Arguments struct {
-	Host             string                  `river:"host,attr"`
-	Role             string                  `river:"role,attr"`
-	Port             int                     `river:"port,attr,optional"`
-	Filters          []Filter                `river:"filter,block,optional"`
-	RefreshInterval  time.Duration           `river:"refresh_interval,attr,optional"`
-	HTTPClientConfig config.HTTPClientConfig `river:",squash"`
+	Host             string                  `alloy:"host,attr"`
+	Role             string                  `alloy:"role,attr"`
+	Port             int                     `alloy:"port,attr,optional"`
+	Filters          []Filter                `alloy:"filter,block,optional"`
+	RefreshInterval  time.Duration           `alloy:"refresh_interval,attr,optional"`
+	HTTPClientConfig config.HTTPClientConfig `alloy:",squash"`
 }
 
 type Filter struct {
-	Name   string   `river:"name,attr"`
-	Values []string `river:"values,attr"`
+	Name   string   `alloy:"name,attr"`
+	Values []string `alloy:"values,attr"`
 }
 
 var DefaultArguments = Arguments{
diff --git a/internal/component/discovery/eureka/eureka.go b/internal/component/discovery/eureka/eureka.go
index 1ca6bee686..3221a681a9 100644
--- a/internal/component/discovery/eureka/eureka.go
+++ b/internal/component/discovery/eureka/eureka.go
@@ -27,10 +27,10 @@ func init() {
 }
 
 type Arguments struct {
-	Server          string        `river:"server,attr"`
-	RefreshInterval time.Duration `river:"refresh_interval,attr,optional"`
+	Server          string        `alloy:"server,attr"`
+	RefreshInterval time.Duration `alloy:"refresh_interval,attr,optional"`
 
-	HTTPClientConfig config.HTTPClientConfig `river:",squash"`
+	HTTPClientConfig config.HTTPClientConfig `alloy:",squash"`
 }
 
 var DefaultArguments = Arguments{
diff --git a/internal/component/discovery/file/file.go b/internal/component/discovery/file/file.go
index 5d075cbfb8..4611df0e46 100644
--- a/internal/component/discovery/file/file.go
+++ b/internal/component/discovery/file/file.go
@@ -24,8 +24,8 @@ func init() {
 }
 
 type Arguments struct {
-	Files           []string      `river:"files,attr"`
-	RefreshInterval time.Duration `river:"refresh_interval,attr,optional"`
+	Files           []string      `alloy:"files,attr"`
+	RefreshInterval time.Duration `alloy:"refresh_interval,attr,optional"`
 }
 
 var DefaultArguments = Arguments{
diff --git a/internal/component/discovery/gce/gce.go b/internal/component/discovery/gce/gce.go
index ca5d3a5401..5288f9b73d 100644
--- a/internal/component/discovery/gce/gce.go
+++ b/internal/component/discovery/gce/gce.go
@@ -26,12 +26,12 @@ func init() {
 
 // Arguments configures the discovery.gce component.
 type Arguments struct {
-	Project         string        `river:"project,attr"`
-	Zone            string        `river:"zone,attr"`
-	Filter          string        `river:"filter,attr,optional"`
-	RefreshInterval time.Duration `river:"refresh_interval,attr,optional"`
-	Port            int           `river:"port,attr,optional"`
-	TagSeparator    string        `river:"tag_separator,attr,optional"`
+	Project         string        `alloy:"project,attr"`
+	Zone            string        `alloy:"zone,attr"`
+	Filter          string        `alloy:"filter,attr,optional"`
+	RefreshInterval time.Duration `alloy:"refresh_interval,attr,optional"`
+	Port            int           `alloy:"port,attr,optional"`
+	TagSeparator    string        `alloy:"tag_separator,attr,optional"`
 }
 
 // DefaultArguments holds default values for Arguments.
diff --git a/internal/component/discovery/hetzner/hetzner.go b/internal/component/discovery/hetzner/hetzner.go
index be10a499d5..974260e3da 100644
--- a/internal/component/discovery/hetzner/hetzner.go
+++ b/internal/component/discovery/hetzner/hetzner.go
@@ -26,10 +26,10 @@ func init() {
 }
 
 type Arguments struct {
-	Role             string                  `river:"role,attr"`
-	RefreshInterval  time.Duration           `river:"refresh_interval,attr,optional"`
-	Port             int                     `river:"port,attr,optional"`
-	HTTPClientConfig config.HTTPClientConfig `river:",squash"`
+	Role             string                  `alloy:"role,attr"`
+	RefreshInterval  time.Duration           `alloy:"refresh_interval,attr,optional"`
+	Port             int                     `alloy:"port,attr,optional"`
+	HTTPClientConfig config.HTTPClientConfig `alloy:",squash"`
 }
 
 var DefaultArguments = Arguments{
diff --git a/internal/component/discovery/http/http.go b/internal/component/discovery/http/http.go
index fe74b8fd66..098d3eed12 100644
--- a/internal/component/discovery/http/http.go
+++ b/internal/component/discovery/http/http.go
@@ -25,9 +25,9 @@ func init() {
 }
 
 type Arguments struct {
-	HTTPClientConfig config.HTTPClientConfig `river:",squash"`
-	RefreshInterval  time.Duration           `river:"refresh_interval,attr,optional"`
-	URL              config.URL              `river:"url,attr"`
+	HTTPClientConfig config.HTTPClientConfig `alloy:",squash"`
+	RefreshInterval  time.Duration           `alloy:"refresh_interval,attr,optional"`
+	URL              config.URL              `alloy:"url,attr"`
 }
 
 var DefaultArguments = Arguments{
diff --git a/internal/component/discovery/ionos/ionos.go b/internal/component/discovery/ionos/ionos.go
index d1ae37cf35..117ae38e87 100644
--- a/internal/component/discovery/ionos/ionos.go
+++ b/internal/component/discovery/ionos/ionos.go
@@ -26,10 +26,10 @@ func init() {
 }
 
 type Arguments struct {
-	DatacenterID     string                  `river:"datacenter_id,attr"`
-	HTTPClientConfig config.HTTPClientConfig `river:",squash"`
-	RefreshInterval  time.Duration           `river:"refresh_interval,attr,optional"`
-	Port             int                     `river:"port,attr,optional"`
+	DatacenterID     string                  `alloy:"datacenter_id,attr"`
+	HTTPClientConfig config.HTTPClientConfig `alloy:",squash"`
+	RefreshInterval  time.Duration           `alloy:"refresh_interval,attr,optional"`
+	Port             int                     `alloy:"port,attr,optional"`
 }
 
 var DefaultArguments = Arguments{
diff --git a/internal/component/discovery/kubelet/kubelet.go b/internal/component/discovery/kubelet/kubelet.go
index 8f06aa104b..835c0d24d3 100644
--- a/internal/component/discovery/kubelet/kubelet.go
+++ b/internal/component/discovery/kubelet/kubelet.go
@@ -72,10 +72,10 @@ func init() {
 
 // Arguments configures the discovery.kubelet component.
 type Arguments struct {
-	URL              config.URL              `river:"url,attr,optional"`
-	Interval         time.Duration           `river:"refresh_interval,attr,optional"`
-	HTTPClientConfig config.HTTPClientConfig `river:",squash"`
-	Namespaces       []string                `river:"namespaces,attr,optional"`
+	URL              config.URL              `alloy:"url,attr,optional"`
+	Interval         time.Duration           `alloy:"refresh_interval,attr,optional"`
+	HTTPClientConfig config.HTTPClientConfig `alloy:",squash"`
+	Namespaces       []string                `alloy:"namespaces,attr,optional"`
 }
 
 // SetToDefault implements river.Defaulter.
diff --git a/internal/component/discovery/kubernetes/kubernetes.go b/internal/component/discovery/kubernetes/kubernetes.go
index 7f379bac53..1e622aa72a 100644
--- a/internal/component/discovery/kubernetes/kubernetes.go
+++ b/internal/component/discovery/kubernetes/kubernetes.go
@@ -24,13 +24,13 @@ func init() {
 
 // Arguments configures the discovery.kubernetes component.
 type Arguments struct {
-	APIServer          config.URL              `river:"api_server,attr,optional"`
-	Role               string                  `river:"role,attr"`
-	KubeConfig         string                  `river:"kubeconfig_file,attr,optional"`
-	HTTPClientConfig   config.HTTPClientConfig `river:",squash"`
-	NamespaceDiscovery NamespaceDiscovery      `river:"namespaces,block,optional"`
-	Selectors          []SelectorConfig        `river:"selectors,block,optional"`
-	AttachMetadata     AttachMetadataConfig    `river:"attach_metadata,block,optional"`
+	APIServer          config.URL              `alloy:"api_server,attr,optional"`
+	Role               string                  `alloy:"role,attr"`
+	KubeConfig         string                  `alloy:"kubeconfig_file,attr,optional"`
+	HTTPClientConfig   config.HTTPClientConfig `alloy:",squash"`
+	NamespaceDiscovery NamespaceDiscovery      `alloy:"namespaces,block,optional"`
+	Selectors          []SelectorConfig        `alloy:"selectors,block,optional"`
+	AttachMetadata     AttachMetadataConfig    `alloy:"attach_metadata,block,optional"`
 }
 
 // DefaultConfig holds defaults for SDConfig.
@@ -68,8 +68,8 @@ func (args *Arguments) Convert() *promk8s.SDConfig {
 
 // NamespaceDiscovery configures filtering rules for which namespaces to discover.
 type NamespaceDiscovery struct {
-	IncludeOwnNamespace bool     `river:"own_namespace,attr,optional"`
-	Names               []string `river:"names,attr,optional"`
+	IncludeOwnNamespace bool     `alloy:"own_namespace,attr,optional"`
+	Names               []string `alloy:"names,attr,optional"`
 }
 
 func (nd *NamespaceDiscovery) convert() *promk8s.NamespaceDiscovery {
@@ -81,9 +81,9 @@ func (nd *NamespaceDiscovery) convert() *promk8s.NamespaceDiscovery {
 
 // SelectorConfig configures selectors to filter resources to discover.
 type SelectorConfig struct {
-	Role  string `river:"role,attr"`
-	Label string `river:"label,attr,optional"`
-	Field string `river:"field,attr,optional"`
+	Role  string `alloy:"role,attr"`
+	Label string `alloy:"label,attr,optional"`
+	Field string `alloy:"field,attr,optional"`
 }
 
 func (sc *SelectorConfig) convert() *promk8s.SelectorConfig {
@@ -95,7 +95,7 @@ func (sc *SelectorConfig) convert() *promk8s.SelectorConfig {
 }
 
 type AttachMetadataConfig struct {
-	Node bool `river:"node,attr,optional"`
+	Node bool `alloy:"node,attr,optional"`
 }
 
 func (am *AttachMetadataConfig) convert() *promk8s.AttachMetadataConfig {
diff --git a/internal/component/discovery/kuma/kuma.go b/internal/component/discovery/kuma/kuma.go
index f4adf85de0..c91aa3b5f4 100644
--- a/internal/component/discovery/kuma/kuma.go
+++ b/internal/component/discovery/kuma/kuma.go
@@ -27,11 +27,11 @@ func init() {
 
 // Arguments configure the discovery.kuma component.
 type Arguments struct {
-	Server          string        `river:"server,attr"`
-	RefreshInterval time.Duration `river:"refresh_interval,attr,optional"`
-	FetchTimeout    time.Duration `river:"fetch_timeout,attr,optional"`
+	Server          string        `alloy:"server,attr"`
+	RefreshInterval time.Duration `alloy:"refresh_interval,attr,optional"`
+	FetchTimeout    time.Duration `alloy:"fetch_timeout,attr,optional"`
 
-	HTTPClientConfig config.HTTPClientConfig `river:",squash"`
+	HTTPClientConfig config.HTTPClientConfig `alloy:",squash"`
 }
 
 // DefaultArguments is used to initialize default values for Arguments.
diff --git a/internal/component/discovery/linode/linode.go b/internal/component/discovery/linode/linode.go
index 152167c010..8031bde15b 100644
--- a/internal/component/discovery/linode/linode.go
+++ b/internal/component/discovery/linode/linode.go
@@ -27,10 +27,10 @@ func init() {
 
 // Arguments configure the discovery.linode component.
 type Arguments struct {
-	RefreshInterval  time.Duration           `river:"refresh_interval,attr,optional"`
-	Port             int                     `river:"port,attr,optional"`
-	TagSeparator     string                  `river:"tag_separator,attr,optional"`
-	HTTPClientConfig config.HTTPClientConfig `river:",squash"`
+	RefreshInterval  time.Duration           `alloy:"refresh_interval,attr,optional"`
+	Port             int                     `alloy:"port,attr,optional"`
+	TagSeparator     string                  `alloy:"tag_separator,attr,optional"`
+	HTTPClientConfig config.HTTPClientConfig `alloy:",squash"`
 }
 
 // DefaultArguments is used to initialize default values for Arguments.
diff --git a/internal/component/discovery/marathon/marathon.go b/internal/component/discovery/marathon/marathon.go
index 1c75473aaf..e7630cf834 100644
--- a/internal/component/discovery/marathon/marathon.go
+++ b/internal/component/discovery/marathon/marathon.go
@@ -28,11 +28,11 @@ func init() {
 }
 
 type Arguments struct {
-	Servers          []string                `river:"servers,attr"`
-	RefreshInterval  time.Duration           `river:"refresh_interval,attr,optional"`
-	AuthToken        alloytypes.Secret       `river:"auth_token,attr,optional"`
-	AuthTokenFile    string                  `river:"auth_token_file,attr,optional"`
-	HTTPClientConfig config.HTTPClientConfig `river:",squash"`
+	Servers          []string                `alloy:"servers,attr"`
+	RefreshInterval  time.Duration           `alloy:"refresh_interval,attr,optional"`
+	AuthToken        alloytypes.Secret       `alloy:"auth_token,attr,optional"`
+	AuthTokenFile    string                  `alloy:"auth_token_file,attr,optional"`
+	HTTPClientConfig config.HTTPClientConfig `alloy:",squash"`
 }
 
 var DefaultArguments = Arguments{
diff --git a/internal/component/discovery/nerve/nerve.go b/internal/component/discovery/nerve/nerve.go
index d7a71f28ac..d8521a80c9 100644
--- a/internal/component/discovery/nerve/nerve.go
+++ b/internal/component/discovery/nerve/nerve.go
@@ -26,9 +26,9 @@ func init() {
 
 // Arguments configure the discovery.nerve component.
 type Arguments struct {
-	Servers []string      `river:"servers,attr"`
-	Paths   []string      `river:"paths,attr"`
-	Timeout time.Duration `river:"timeout,attr,optional"`
+	Servers []string      `alloy:"servers,attr"`
+	Paths   []string      `alloy:"paths,attr"`
+	Timeout time.Duration `alloy:"timeout,attr,optional"`
 }
 
 // DefaultArguments is used to initialize default values for Arguments.
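
The hunks above and below are mechanical: every `river:"..."` struct tag becomes `alloy:"..."` with the key, the kind (`attr` or `block`), and the `optional` flag left untouched. As a reader's aid, the following minimal sketch (not part of the patch) shows how such tags are consumed when a component's configuration block is decoded. It mirrors the discovery.nerve Arguments struct above and uses the `syntax.Unmarshal` helper that appears in the process_test.go hunks further down; the import path for the syntax package is an assumption.

package main

import (
	"fmt"
	"time"

	"github.com/grafana/alloy/syntax" // assumed import path for the syntax package
)

// Arguments mirrors the discovery.nerve Arguments struct from the hunk above.
type Arguments struct {
	Servers []string      `alloy:"servers,attr"`
	Paths   []string      `alloy:"paths,attr"`
	Timeout time.Duration `alloy:"timeout,attr,optional"`
}

func main() {
	// Attributes are matched by the tag key, not by the Go field name.
	src := `
		servers = ["zk1:2181", "zk2:2181"]
		paths   = ["/nerve/services"]
		timeout = "10s"
	`
	var args Arguments
	if err := syntax.Unmarshal([]byte(src), &args); err != nil {
		panic(err)
	}
	fmt.Printf("%+v\n", args)
}

Because decoding is driven entirely by the tag key, renaming the tag name itself (river to alloy) changes no user-facing configuration syntax.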
diff --git a/internal/component/discovery/nomad/nomad.go b/internal/component/discovery/nomad/nomad.go
index f2a86a4773..b324add03e 100644
--- a/internal/component/discovery/nomad/nomad.go
+++ b/internal/component/discovery/nomad/nomad.go
@@ -27,13 +27,13 @@ func init() {
 }
 
 type Arguments struct {
-	AllowStale       bool                    `river:"allow_stale,attr,optional"`
-	HTTPClientConfig config.HTTPClientConfig `river:",squash"`
-	Namespace        string                  `river:"namespace,attr,optional"`
-	RefreshInterval  time.Duration           `river:"refresh_interval,attr,optional"`
-	Region           string                  `river:"region,attr,optional"`
-	Server           string                  `river:"server,attr,optional"`
-	TagSeparator     string                  `river:"tag_separator,attr,optional"`
+	AllowStale       bool                    `alloy:"allow_stale,attr,optional"`
+	HTTPClientConfig config.HTTPClientConfig `alloy:",squash"`
+	Namespace        string                  `alloy:"namespace,attr,optional"`
+	RefreshInterval  time.Duration           `alloy:"refresh_interval,attr,optional"`
+	Region           string                  `alloy:"region,attr,optional"`
+	Server           string                  `alloy:"server,attr,optional"`
+	TagSeparator     string                  `alloy:"tag_separator,attr,optional"`
 }
 
 var DefaultArguments = Arguments{
diff --git a/internal/component/discovery/openstack/openstack.go b/internal/component/discovery/openstack/openstack.go
index 1ab76dceb0..892126ce8e 100644
--- a/internal/component/discovery/openstack/openstack.go
+++ b/internal/component/discovery/openstack/openstack.go
@@ -28,24 +28,24 @@ func init() {
 }
 
 type Arguments struct {
-	IdentityEndpoint            string            `river:"identity_endpoint,attr,optional"`
-	Username                    string            `river:"username,attr,optional"`
-	UserID                      string            `river:"userid,attr,optional"`
-	Password                    alloytypes.Secret `river:"password,attr,optional"`
-	ProjectName                 string            `river:"project_name,attr,optional"`
-	ProjectID                   string            `river:"project_id,attr,optional"`
-	DomainName                  string            `river:"domain_name,attr,optional"`
-	DomainID                    string            `river:"domain_id,attr,optional"`
-	ApplicationCredentialName   string            `river:"application_credential_name,attr,optional"`
-	ApplicationCredentialID     string            `river:"application_credential_id,attr,optional"`
-	ApplicationCredentialSecret alloytypes.Secret `river:"application_credential_secret,attr,optional"`
-	Role                        string            `river:"role,attr"`
-	Region                      string            `river:"region,attr"`
-	RefreshInterval             time.Duration     `river:"refresh_interval,attr,optional"`
-	Port                        int               `river:"port,attr,optional"`
-	AllTenants                  bool              `river:"all_tenants,attr,optional"`
-	TLSConfig                   config.TLSConfig  `river:"tls_config,block,optional"`
-	Availability                string            `river:"availability,attr,optional"`
+	IdentityEndpoint            string            `alloy:"identity_endpoint,attr,optional"`
+	Username                    string            `alloy:"username,attr,optional"`
+	UserID                      string            `alloy:"userid,attr,optional"`
+	Password                    alloytypes.Secret `alloy:"password,attr,optional"`
+	ProjectName                 string            `alloy:"project_name,attr,optional"`
+	ProjectID                   string            `alloy:"project_id,attr,optional"`
+	DomainName                  string            `alloy:"domain_name,attr,optional"`
+	DomainID                    string            `alloy:"domain_id,attr,optional"`
+	ApplicationCredentialName   string            `alloy:"application_credential_name,attr,optional"`
+	ApplicationCredentialID     string            `alloy:"application_credential_id,attr,optional"`
+	ApplicationCredentialSecret alloytypes.Secret `alloy:"application_credential_secret,attr,optional"`
+	Role                        string            `alloy:"role,attr"`
+	Region                      string            `alloy:"region,attr"`
+	RefreshInterval             time.Duration     `alloy:"refresh_interval,attr,optional"`
+	Port                        int               `alloy:"port,attr,optional"`
+	AllTenants                  bool              `alloy:"all_tenants,attr,optional"`
+	TLSConfig                   config.TLSConfig  `alloy:"tls_config,block,optional"`
+	Availability                string            `alloy:"availability,attr,optional"`
 }
 
 var DefaultArguments = Arguments{
diff --git a/internal/component/discovery/ovhcloud/ovhcloud.go b/internal/component/discovery/ovhcloud/ovhcloud.go
index 9fc31defe8..1117b35381 100644
--- a/internal/component/discovery/ovhcloud/ovhcloud.go
+++ b/internal/component/discovery/ovhcloud/ovhcloud.go
@@ -28,12 +28,12 @@ func init() {
 
 // Arguments configure the discovery.ovhcloud component.
 type Arguments struct {
-	Endpoint          string            `river:"endpoint,attr,optional"`
-	ApplicationKey    string            `river:"application_key,attr"`
-	ApplicationSecret alloytypes.Secret `river:"application_secret,attr"`
-	ConsumerKey       alloytypes.Secret `river:"consumer_key,attr"`
-	RefreshInterval   time.Duration     `river:"refresh_interval,attr,optional"`
-	Service           string            `river:"service,attr"`
+	Endpoint          string            `alloy:"endpoint,attr,optional"`
+	ApplicationKey    string            `alloy:"application_key,attr"`
+	ApplicationSecret alloytypes.Secret `alloy:"application_secret,attr"`
+	ConsumerKey       alloytypes.Secret `alloy:"consumer_key,attr"`
+	RefreshInterval   time.Duration     `alloy:"refresh_interval,attr,optional"`
+	Service           string            `alloy:"service,attr"`
 }
 
 // DefaultArguments is used to initialize default values for Arguments.
diff --git a/internal/component/discovery/process/args.go b/internal/component/discovery/process/args.go
index 7323b7858c..45ed6649bb 100644
--- a/internal/component/discovery/process/args.go
+++ b/internal/component/discovery/process/args.go
@@ -7,18 +7,18 @@ import (
 )
 
 type Arguments struct {
-	Join            []discovery.Target `river:"join,attr,optional"`
-	RefreshInterval time.Duration      `river:"refresh_interval,attr,optional"`
-	DiscoverConfig  DiscoverConfig     `river:"discover_config,block,optional"`
+	Join            []discovery.Target `alloy:"join,attr,optional"`
+	RefreshInterval time.Duration      `alloy:"refresh_interval,attr,optional"`
+	DiscoverConfig  DiscoverConfig     `alloy:"discover_config,block,optional"`
 }
 
 type DiscoverConfig struct {
-	Cwd         bool `river:"cwd,attr,optional"`
-	Exe         bool `river:"exe,attr,optional"`
-	Commandline bool `river:"commandline,attr,optional"`
-	Username    bool `river:"username,attr,optional"`
-	UID         bool `river:"uid,attr,optional"`
-	ContainerID bool `river:"container_id,attr,optional"`
+	Cwd         bool `alloy:"cwd,attr,optional"`
+	Exe         bool `alloy:"exe,attr,optional"`
+	Commandline bool `alloy:"commandline,attr,optional"`
+	Username    bool `alloy:"username,attr,optional"`
+	UID         bool `alloy:"uid,attr,optional"`
+	ContainerID bool `alloy:"container_id,attr,optional"`
 }
 
 var DefaultConfig = Arguments{
diff --git a/internal/component/discovery/puppetdb/puppetdb.go b/internal/component/discovery/puppetdb/puppetdb.go
index 7299051ea7..1b1b29ea87 100644
--- a/internal/component/discovery/puppetdb/puppetdb.go
+++ b/internal/component/discovery/puppetdb/puppetdb.go
@@ -27,12 +27,12 @@ func init() {
 }
 
 type Arguments struct {
-	HTTPClientConfig  config.HTTPClientConfig `river:",squash"`
-	RefreshInterval   time.Duration           `river:"refresh_interval,attr,optional"`
-	URL               string                  `river:"url,attr"`
-	Query             string                  `river:"query,attr"`
-	IncludeParameters bool                    `river:"include_parameters,attr,optional"`
-	Port              int                     `river:"port,attr,optional"`
+	HTTPClientConfig  config.HTTPClientConfig `alloy:",squash"`
+	RefreshInterval   time.Duration           `alloy:"refresh_interval,attr,optional"`
+	URL               string                  `alloy:"url,attr"`
+	Query             string                  `alloy:"query,attr"`
+	IncludeParameters bool                    `alloy:"include_parameters,attr,optional"`
+	Port              int                     `alloy:"port,attr,optional"`
 }
 
 var DefaultArguments = Arguments{
diff --git a/internal/component/discovery/relabel/relabel.go b/internal/component/discovery/relabel/relabel.go
index 77519abcf8..dc75ba4418 100644
--- a/internal/component/discovery/relabel/relabel.go
+++ b/internal/component/discovery/relabel/relabel.go
@@ -28,16 +28,16 @@ func init() {
 // Arguments holds values which are used to configure the discovery.relabel component.
 type Arguments struct {
 	// Targets contains the input 'targets' passed by a service discovery component.
-	Targets []discovery.Target `river:"targets,attr"`
+	Targets []discovery.Target `alloy:"targets,attr"`
 
 	// The relabelling rules to apply to each target's label set.
-	RelabelConfigs []*flow_relabel.Config `river:"rule,block,optional"`
+	RelabelConfigs []*flow_relabel.Config `alloy:"rule,block,optional"`
 }
 
 // Exports holds values which are exported by the discovery.relabel component.
 type Exports struct {
-	Output []discovery.Target `river:"output,attr"`
-	Rules  flow_relabel.Rules `river:"rules,attr"`
+	Output []discovery.Target `alloy:"output,attr"`
+	Rules  flow_relabel.Rules `alloy:"rules,attr"`
 }
 
 // Component implements the discovery.relabel component.
diff --git a/internal/component/discovery/scaleway/scaleway.go b/internal/component/discovery/scaleway/scaleway.go
index 7a50f4073e..2b2a30ce7a 100644
--- a/internal/component/discovery/scaleway/scaleway.go
+++ b/internal/component/discovery/scaleway/scaleway.go
@@ -32,22 +32,22 @@ func init() {
 }
 
 type Arguments struct {
-	Project         string            `river:"project_id,attr"`
-	Role            Role              `river:"role,attr"`
-	APIURL          string            `river:"api_url,attr,optional"`
-	Zone            string            `river:"zone,attr,optional"`
-	AccessKey       string            `river:"access_key,attr"`
-	SecretKey       alloytypes.Secret `river:"secret_key,attr,optional"`
-	SecretKeyFile   string            `river:"secret_key_file,attr,optional"`
-	NameFilter      string            `river:"name_filter,attr,optional"`
-	TagsFilter      []string          `river:"tags_filter,attr,optional"`
-	RefreshInterval time.Duration     `river:"refresh_interval,attr,optional"`
-	Port            int               `river:"port,attr,optional"`
-
-	ProxyConfig     *config.ProxyConfig `river:",squash"`
-	TLSConfig       config.TLSConfig    `river:"tls_config,block,optional"`
-	FollowRedirects bool                `river:"follow_redirects,attr,optional"`
-	EnableHTTP2     bool                `river:"enable_http2,attr,optional"`
+	Project         string            `alloy:"project_id,attr"`
+	Role            Role              `alloy:"role,attr"`
+	APIURL          string            `alloy:"api_url,attr,optional"`
+	Zone            string            `alloy:"zone,attr,optional"`
+	AccessKey       string            `alloy:"access_key,attr"`
+	SecretKey       alloytypes.Secret `alloy:"secret_key,attr,optional"`
+	SecretKeyFile   string            `alloy:"secret_key_file,attr,optional"`
+	NameFilter      string            `alloy:"name_filter,attr,optional"`
+	TagsFilter      []string          `alloy:"tags_filter,attr,optional"`
+	RefreshInterval time.Duration     `alloy:"refresh_interval,attr,optional"`
+	Port            int               `alloy:"port,attr,optional"`
+
+	ProxyConfig     *config.ProxyConfig `alloy:",squash"`
+	TLSConfig       config.TLSConfig    `alloy:"tls_config,block,optional"`
+	FollowRedirects bool                `alloy:"follow_redirects,attr,optional"`
+	EnableHTTP2     bool                `alloy:"enable_http2,attr,optional"`
 }
 
 var DefaultArguments = Arguments{
diff --git a/internal/component/discovery/serverset/serverset.go b/internal/component/discovery/serverset/serverset.go
index c63855822f..e706e3a8e7 100644
--- a/internal/component/discovery/serverset/serverset.go
+++ b/internal/component/discovery/serverset/serverset.go
@@ -27,9 +27,9 @@ func init() {
 }
 
 type Arguments struct {
-	Servers []string      `river:"servers,attr"`
-	Paths   []string      `river:"paths,attr"`
-	Timeout time.Duration `river:"timeout,attr,optional"`
+	Servers []string      `alloy:"servers,attr"`
+	Paths   []string      `alloy:"paths,attr"`
+	Timeout time.Duration `alloy:"timeout,attr,optional"`
 }
 
 var DefaultArguments = Arguments{
diff --git a/internal/component/discovery/triton/triton.go b/internal/component/discovery/triton/triton.go
index 0aa1fea3f9..c74e7c5283 100644
--- a/internal/component/discovery/triton/triton.go
+++ b/internal/component/discovery/triton/triton.go
@@ -26,15 +26,15 @@ func init() {
 }
 
 type Arguments struct {
-	Account         string           `river:"account,attr"`
-	Role            string           `river:"role,attr,optional"`
-	DNSSuffix       string           `river:"dns_suffix,attr"`
-	Endpoint        string           `river:"endpoint,attr"`
-	Groups          []string         `river:"groups,attr,optional"`
-	Port            int              `river:"port,attr,optional"`
-	RefreshInterval time.Duration    `river:"refresh_interval,attr,optional"`
-	Version         int              `river:"version,attr,optional"`
-	TLSConfig       config.TLSConfig `river:"tls_config,block,optional"`
+	Account         string           `alloy:"account,attr"`
+	Role            string           `alloy:"role,attr,optional"`
+	DNSSuffix       string           `alloy:"dns_suffix,attr"`
+	Endpoint        string           `alloy:"endpoint,attr"`
+	Groups          []string         `alloy:"groups,attr,optional"`
+	Port            int              `alloy:"port,attr,optional"`
+	RefreshInterval time.Duration    `alloy:"refresh_interval,attr,optional"`
+	Version         int              `alloy:"version,attr,optional"`
+	TLSConfig       config.TLSConfig `alloy:"tls_config,block,optional"`
 }
 
 var DefaultArguments = Arguments{
diff --git a/internal/component/discovery/uyuni/uyuni.go b/internal/component/discovery/uyuni/uyuni.go
index 84d155100f..f9408d0a22 100644
--- a/internal/component/discovery/uyuni/uyuni.go
+++ b/internal/component/discovery/uyuni/uyuni.go
@@ -29,16 +29,16 @@ func init() {
 }
 
 type Arguments struct {
-	Server          string              `river:"server,attr"`
-	Username        string              `river:"username,attr"`
-	Password        alloytypes.Secret   `river:"password,attr"`
-	Entitlement     string              `river:"entitlement,attr,optional"`
-	Separator       string              `river:"separator,attr,optional"`
-	RefreshInterval time.Duration       `river:"refresh_interval,attr,optional"`
-	ProxyConfig     *config.ProxyConfig `river:",squash"`
-	TLSConfig       config.TLSConfig    `river:"tls_config,block,optional"`
-	FollowRedirects bool                `river:"follow_redirects,attr,optional"`
-	EnableHTTP2     bool                `river:"enable_http2,attr,optional"`
+	Server          string              `alloy:"server,attr"`
+	Username        string              `alloy:"username,attr"`
+	Password        alloytypes.Secret   `alloy:"password,attr"`
+	Entitlement     string              `alloy:"entitlement,attr,optional"`
+	Separator       string              `alloy:"separator,attr,optional"`
+	RefreshInterval time.Duration       `alloy:"refresh_interval,attr,optional"`
+	ProxyConfig     *config.ProxyConfig `alloy:",squash"`
+	TLSConfig       config.TLSConfig    `alloy:"tls_config,block,optional"`
+	FollowRedirects bool                `alloy:"follow_redirects,attr,optional"`
+	EnableHTTP2     bool                `alloy:"enable_http2,attr,optional"`
 }
 
 var DefaultArguments = Arguments{
diff --git a/internal/component/faro/receiver/arguments.go b/internal/component/faro/receiver/arguments.go
index d9b006d26f..3effb1f964 100644
--- a/internal/component/faro/receiver/arguments.go
+++ b/internal/component/faro/receiver/arguments.go
@@ -12,11 +12,11 @@ import (
 
 // Arguments configures the app_agent_receiver component.
 type Arguments struct {
-	LogLabels map[string]string `river:"extra_log_labels,attr,optional"`
+	LogLabels map[string]string `alloy:"extra_log_labels,attr,optional"`
 
-	Server     ServerArguments     `river:"server,block,optional"`
-	SourceMaps SourceMapsArguments `river:"sourcemaps,block,optional"`
-	Output     OutputArguments     `river:"output,block"`
+	Server     ServerArguments     `alloy:"server,block,optional"`
+	SourceMaps SourceMapsArguments `alloy:"sourcemaps,block,optional"`
+	Output     OutputArguments     `alloy:"output,block"`
 }
 
 var _ syntax.Defaulter = (*Arguments)(nil)
@@ -30,13 +30,13 @@ func (args *Arguments) SetToDefault() {
 // ServerArguments configures the HTTP server where telemetry information will
 // be sent from Faro clients.
 type ServerArguments struct {
-	Host                  string            `river:"listen_address,attr,optional"`
-	Port                  int               `river:"listen_port,attr,optional"`
-	CORSAllowedOrigins    []string          `river:"cors_allowed_origins,attr,optional"`
-	APIKey                alloytypes.Secret `river:"api_key,attr,optional"`
-	MaxAllowedPayloadSize units.Base2Bytes  `river:"max_allowed_payload_size,attr,optional"`
+	Host                  string            `alloy:"listen_address,attr,optional"`
+	Port                  int               `alloy:"listen_port,attr,optional"`
+	CORSAllowedOrigins    []string          `alloy:"cors_allowed_origins,attr,optional"`
+	APIKey                alloytypes.Secret `alloy:"api_key,attr,optional"`
+	MaxAllowedPayloadSize units.Base2Bytes  `alloy:"max_allowed_payload_size,attr,optional"`
 
-	RateLimiting RateLimitingArguments `river:"rate_limiting,block,optional"`
+	RateLimiting RateLimitingArguments `alloy:"rate_limiting,block,optional"`
 }
 
 func (s *ServerArguments) SetToDefault() {
@@ -50,9 +50,9 @@ func (s *ServerArguments) SetToDefault() {
 
 // RateLimitingArguments configures rate limiting for the HTTP server.
 type RateLimitingArguments struct {
-	Enabled   bool    `river:"enabled,attr,optional"`
-	Rate      float64 `river:"rate,attr,optional"`
-	BurstSize float64 `river:"burst_size,attr,optional"`
+	Enabled   bool    `alloy:"enabled,attr,optional"`
+	Rate      float64 `alloy:"rate,attr,optional"`
+	BurstSize float64 `alloy:"burst_size,attr,optional"`
 }
 
 func (r *RateLimitingArguments) SetToDefault() {
@@ -66,10 +66,10 @@ func (r *RateLimitingArguments) SetToDefault() {
 // SourceMapsArguments configures how app_agent_receiver will retrieve source
 // maps for transforming stack traces.
 type SourceMapsArguments struct {
-	Download            bool                `river:"download,attr,optional"`
-	DownloadFromOrigins []string            `river:"download_from_origins,attr,optional"`
-	DownloadTimeout     time.Duration       `river:"download_timeout,attr,optional"`
-	Locations           []LocationArguments `river:"location,block,optional"`
+	Download            bool                `alloy:"download,attr,optional"`
+	DownloadFromOrigins []string            `alloy:"download_from_origins,attr,optional"`
+	DownloadTimeout     time.Duration       `alloy:"download_timeout,attr,optional"`
+	Locations           []LocationArguments `alloy:"location,block,optional"`
 }
 
 func (s *SourceMapsArguments) SetToDefault() {
@@ -82,13 +82,13 @@ func (s *SourceMapsArguments) SetToDefault() {
 
 // LocationArguments specifies an individual location where source maps will be loaded.
 type LocationArguments struct {
-	Path               string `river:"path,attr"`
-	MinifiedPathPrefix string `river:"minified_path_prefix,attr"`
+	Path               string `alloy:"path,attr"`
+	MinifiedPathPrefix string `alloy:"minified_path_prefix,attr"`
 }
 
 // OutputArguments configures where to send emitted logs and traces. Metrics
 // emitted by app_agent_receiver are exported as targets to be scraped.
 type OutputArguments struct {
-	Logs   []loki.LogsReceiver `river:"logs,attr,optional"`
-	Traces []otelcol.Consumer  `river:"traces,attr,optional"`
+	Logs   []loki.LogsReceiver `alloy:"logs,attr,optional"`
+	Traces []otelcol.Consumer  `alloy:"traces,attr,optional"`
 }
diff --git a/internal/component/local/file/file.go b/internal/component/local/file/file.go
index 194f340474..eb7899a8d8 100644
--- a/internal/component/local/file/file.go
+++ b/internal/component/local/file/file.go
@@ -40,15 +40,15 @@ func init() {
 // Arguments holds values which are used to configure the local.file component.
 type Arguments struct {
 	// Filename indicates the file to watch.
-	Filename string `river:"filename,attr"`
+	Filename string `alloy:"filename,attr"`
 	// Type indicates how to detect changes to the file.
-	Type filedetector.Detector `river:"detector,attr,optional"`
+	Type filedetector.Detector `alloy:"detector,attr,optional"`
 	// PollFrequency determines the frequency to check for changes when Type is
 	// Poll.
-	PollFrequency time.Duration `river:"poll_frequency,attr,optional"`
+	PollFrequency time.Duration `alloy:"poll_frequency,attr,optional"`
 	// IsSecret marks the file as holding a secret value which should not be
 	// displayed to the user.
-	IsSecret bool `river:"is_secret,attr,optional"`
+	IsSecret bool `alloy:"is_secret,attr,optional"`
 }
 
 // DefaultArguments provides the default arguments for the local.file
@@ -66,7 +66,7 @@ func (a *Arguments) SetToDefault() {
 // Exports holds values which are exported by the local.file component.
 type Exports struct {
 	// Content of the file.
-	Content alloytypes.OptionalSecret `river:"content,attr"`
+	Content alloytypes.OptionalSecret `alloy:"content,attr"`
 }
 
 // Component implements the local.file component.
diff --git a/internal/component/local/file_match/file.go b/internal/component/local/file_match/file.go
index b3cba10824..00f3ffad7d 100644
--- a/internal/component/local/file_match/file.go
+++ b/internal/component/local/file_match/file.go
@@ -26,8 +26,8 @@ func init() {
 // Arguments holds values which are used to configure the local.file_match
 // component.
 type Arguments struct {
-	PathTargets []discovery.Target `river:"path_targets,attr"`
-	SyncPeriod  time.Duration      `river:"sync_period,attr,optional"`
+	PathTargets []discovery.Target `alloy:"path_targets,attr"`
+	SyncPeriod  time.Duration      `alloy:"sync_period,attr,optional"`
 }
 
 var _ component.Component = (*Component)(nil)
diff --git a/internal/component/loki/echo/echo.go b/internal/component/loki/echo/echo.go
index 63e2fc373a..9f0ca8d6a1 100644
--- a/internal/component/loki/echo/echo.go
+++ b/internal/component/loki/echo/echo.go
@@ -29,7 +29,7 @@ type Arguments struct{}
 
 // Exports holds the values exported by the loki.echo component.
 type Exports struct {
-	Receiver loki.LogsReceiver `river:"receiver,attr"`
+	Receiver loki.LogsReceiver `alloy:"receiver,attr"`
 }
 
 // DefaultArguments defines the default settings for log scraping.
diff --git a/internal/component/loki/process/metric/counters.go b/internal/component/loki/process/metric/counters.go
index a289553510..c5b6e15d21 100644
--- a/internal/component/loki/process/metric/counters.go
+++ b/internal/component/loki/process/metric/counters.go
@@ -18,17 +18,17 @@ const (
 // CounterConfig defines a counter metric whose value only goes up.
 type CounterConfig struct {
 	// Shared fields
-	Name        string        `river:"name,attr"`
-	Description string        `river:"description,attr,optional"`
-	Source      string        `river:"source,attr,optional"`
-	Prefix      string        `river:"prefix,attr,optional"`
-	MaxIdle     time.Duration `river:"max_idle_duration,attr,optional"`
-	Value       string        `river:"value,attr,optional"`
+	Name        string        `alloy:"name,attr"`
+	Description string        `alloy:"description,attr,optional"`
+	Source      string        `alloy:"source,attr,optional"`
+	Prefix      string        `alloy:"prefix,attr,optional"`
+	MaxIdle     time.Duration `alloy:"max_idle_duration,attr,optional"`
+	Value       string        `alloy:"value,attr,optional"`
 
 	// Counter-specific fields
-	Action          string `river:"action,attr"`
-	MatchAll        bool   `river:"match_all,attr,optional"`
-	CountEntryBytes bool   `river:"count_entry_bytes,attr,optional"`
+	Action          string `alloy:"action,attr"`
+	MatchAll        bool   `alloy:"match_all,attr,optional"`
+	CountEntryBytes bool   `alloy:"count_entry_bytes,attr,optional"`
 }
 
 // DefaultCounterConfig sets the default for a Counter.
diff --git a/internal/component/loki/process/metric/gauges.go b/internal/component/loki/process/metric/gauges.go
index f3222af161..62d3c258df 100644
--- a/internal/component/loki/process/metric/gauges.go
+++ b/internal/component/loki/process/metric/gauges.go
@@ -29,15 +29,15 @@ var DefaultGaugeConfig = GaugeConfig{
 // GaugeConfig defines a gauge metric whose value can go up or down.
 type GaugeConfig struct {
 	// Shared fields
-	Name        string        `river:"name,attr"`
-	Description string        `river:"description,attr,optional"`
-	Source      string        `river:"source,attr,optional"`
-	Prefix      string        `river:"prefix,attr,optional"`
-	MaxIdle     time.Duration `river:"max_idle_duration,attr,optional"`
-	Value       string        `river:"value,attr,optional"`
+	Name        string        `alloy:"name,attr"`
+	Description string        `alloy:"description,attr,optional"`
+	Source      string        `alloy:"source,attr,optional"`
+	Prefix      string        `alloy:"prefix,attr,optional"`
+	MaxIdle     time.Duration `alloy:"max_idle_duration,attr,optional"`
+	Value       string        `alloy:"value,attr,optional"`
 
 	// Gauge-specific fields
-	Action string `river:"action,attr"`
+	Action string `alloy:"action,attr"`
 }
 
 // SetToDefault implements river.Defaulter.
diff --git a/internal/component/loki/process/metric/histograms.go b/internal/component/loki/process/metric/histograms.go
index 6ce2b2b5f3..447ad62ae6 100644
--- a/internal/component/loki/process/metric/histograms.go
+++ b/internal/component/loki/process/metric/histograms.go
@@ -18,15 +18,15 @@ var DefaultHistogramConfig = HistogramConfig{
 // HistogramConfig defines a histogram metric whose values are bucketed.
 type HistogramConfig struct {
 	// Shared fields
-	Name        string        `river:"name,attr"`
-	Description string        `river:"description,attr,optional"`
-	Source      string        `river:"source,attr,optional"`
-	Prefix      string        `river:"prefix,attr,optional"`
-	MaxIdle     time.Duration `river:"max_idle_duration,attr,optional"`
-	Value       string        `river:"value,attr,optional"`
+	Name        string        `alloy:"name,attr"`
+	Description string        `alloy:"description,attr,optional"`
+	Source      string        `alloy:"source,attr,optional"`
+	Prefix      string        `alloy:"prefix,attr,optional"`
+	MaxIdle     time.Duration `alloy:"max_idle_duration,attr,optional"`
+	Value       string        `alloy:"value,attr,optional"`
 
 	// Histogram-specific fields
-	Buckets []float64 `river:"buckets,attr"`
+	Buckets []float64 `alloy:"buckets,attr"`
 }
 
 // SetToDefault implements river.Defaulter.
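
The tag grammar exercised by these hunks has four shapes, and all of them survive the rename unchanged; only the tag name moves from river to alloy. A hypothetical struct (not from the repository) annotated with each shape:

package component

import "time"

// TLSConfig and HTTPClientConfig are stand-ins for the real config types
// under internal/component/common/config, used here only so the example
// compiles on its own.
type TLSConfig struct{}
type HTTPClientConfig struct{}

type ExampleArguments struct {
	// "attr": a required key = value attribute inside the component block.
	Server string `alloy:"server,attr"`

	// "attr,optional": an attribute the user may omit; defaults apply.
	Timeout time.Duration `alloy:"timeout,attr,optional"`

	// "block,optional": a nested block, e.g. tls_config { ... }.
	TLS TLSConfig `alloy:"tls_config,block,optional"`

	// ",squash": inline the tagged fields of another struct into this block,
	// the pattern used for HTTPClientConfig throughout the discovery hunks.
	HTTPClientConfig HTTPClientConfig `alloy:",squash"`
}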
diff --git a/internal/component/loki/process/process.go b/internal/component/loki/process/process.go
index b084582a95..4183c4b275 100644
--- a/internal/component/loki/process/process.go
+++ b/internal/component/loki/process/process.go
@@ -34,14 +34,14 @@ func init() {
 // Arguments holds values which are used to configure the loki.process
 // component.
 type Arguments struct {
-	ForwardTo []loki.LogsReceiver  `river:"forward_to,attr"`
-	Stages    []stages.StageConfig `river:"stage,enum,optional"`
+	ForwardTo []loki.LogsReceiver  `alloy:"forward_to,attr"`
+	Stages    []stages.StageConfig `alloy:"stage,enum,optional"`
 }
 
 // Exports exposes the receiver that can be used to send log entries to
 // loki.process.
 type Exports struct {
-	Receiver loki.LogsReceiver `river:"receiver,attr"`
+	Receiver loki.LogsReceiver `alloy:"receiver,attr"`
 }
 
 var (
diff --git a/internal/component/loki/process/process_test.go b/internal/component/loki/process/process_test.go
index 5178cc92c2..cb3e4b925f 100644
--- a/internal/component/loki/process/process_test.go
+++ b/internal/component/loki/process/process_test.go
@@ -64,7 +64,7 @@ func TestJSONLabelsStage(t *testing.T) {
 	// an easy way to refer to a loki.LogsReceiver value for the forward_to
 	// argument.
 	type cfg struct {
-		Stages []stages.StageConfig `river:"stage,enum"`
+		Stages []stages.StageConfig `alloy:"stage,enum"`
 	}
 	var stagesCfg cfg
 	err := syntax.Unmarshal([]byte(stg), &stagesCfg)
@@ -151,7 +151,7 @@ stage.label_keep {
 	// an easy way to refer to a loki.LogsReceiver value for the forward_to
 	// argument.
 	type cfg struct {
-		Stages []stages.StageConfig `river:"stage,enum"`
+		Stages []stages.StageConfig `alloy:"stage,enum"`
 	}
 	var stagesCfg cfg
 	err := syntax.Unmarshal([]byte(stg), &stagesCfg)
@@ -246,7 +246,7 @@ stage.labels {
 	// an easy way to refer to a loki.LogsReceiver value for the forward_to
 	// argument.
 	type cfg struct {
-		Stages []stages.StageConfig `river:"stage,enum"`
+		Stages []stages.StageConfig `alloy:"stage,enum"`
 	}
 	var stagesCfg cfg
 	err := syntax.Unmarshal([]byte(stg), &stagesCfg)
@@ -412,7 +412,7 @@ func TestDeadlockWithFrequentUpdates(t *testing.T) {
 	// an easy way to refer to a loki.LogsReceiver value for the forward_to
 	// argument.
 	type cfg struct {
-		Stages []stages.StageConfig `river:"stage,enum"`
+		Stages []stages.StageConfig `alloy:"stage,enum"`
 	}
 	var stagesCfg cfg
 	err := syntax.Unmarshal([]byte(stg), &stagesCfg)
diff --git a/internal/component/loki/process/stages/drop.go b/internal/component/loki/process/stages/drop.go
index 96c212ba3c..e1c3b44421 100644
--- a/internal/component/loki/process/stages/drop.go
+++ b/internal/component/loki/process/stages/drop.go
@@ -30,13 +30,13 @@ var (
 
 // DropConfig contains the configuration for a dropStage
 type DropConfig struct {
-	DropReason string           `river:"drop_counter_reason,attr,optional"`
-	Source     string           `river:"source,attr,optional"`
-	Value      string           `river:"value,attr,optional"`
-	Separator  string           `river:"separator,attr,optional"`
-	Expression string           `river:"expression,attr,optional"`
-	OlderThan  time.Duration    `river:"older_than,attr,optional"`
-	LongerThan units.Base2Bytes `river:"longer_than,attr,optional"`
+	DropReason string           `alloy:"drop_counter_reason,attr,optional"`
+	Source     string           `alloy:"source,attr,optional"`
+	Value      string           `alloy:"value,attr,optional"`
+	Separator  string           `alloy:"separator,attr,optional"`
+	Expression string           `alloy:"expression,attr,optional"`
+	OlderThan  time.Duration    `alloy:"older_than,attr,optional"`
+	LongerThan units.Base2Bytes `alloy:"longer_than,attr,optional"`
 	regex      *regexp.Regexp
 }
 
diff --git a/internal/component/loki/process/stages/eventlogmessage.go b/internal/component/loki/process/stages/eventlogmessage.go
index 247b801639..58cab36a5d 100644
--- a/internal/component/loki/process/stages/eventlogmessage.go
+++ b/internal/component/loki/process/stages/eventlogmessage.go
@@ -14,9 +14,9 @@ const (
 )
 
 type EventLogMessageConfig struct {
-	Source            string `river:"source,attr,optional"`
-	DropInvalidLabels bool   `river:"drop_invalid_labels,attr,optional"`
-	OverwriteExisting bool   `river:"overwrite_existing,attr,optional"`
+	Source            string `alloy:"source,attr,optional"`
+	DropInvalidLabels bool   `alloy:"drop_invalid_labels,attr,optional"`
+	OverwriteExisting bool   `alloy:"overwrite_existing,attr,optional"`
 }
 
 func (e *EventLogMessageConfig) Validate() error {
diff --git a/internal/component/loki/process/stages/extensions.go b/internal/component/loki/process/stages/extensions.go
index 61071b8825..7b0cccd110 100644
--- a/internal/component/loki/process/stages/extensions.go
+++ b/internal/component/loki/process/stages/extensions.go
@@ -22,9 +22,9 @@ type DockerConfig struct{}
 // CRIConfig is an empty struct that is used to enable a pre-defined pipeline
 // for decoding entries that are using the CRI logging format.
 type CRIConfig struct {
-	MaxPartialLines            int    `river:"max_partial_lines,attr,optional"`
-	MaxPartialLineSize         uint64 `river:"max_partial_line_size,attr,optional"`
-	MaxPartialLineSizeTruncate bool   `river:"max_partial_line_size_truncate,attr,optional"`
+	MaxPartialLines            int    `alloy:"max_partial_lines,attr,optional"`
+	MaxPartialLineSize         uint64 `alloy:"max_partial_line_size,attr,optional"`
+	MaxPartialLineSizeTruncate bool   `alloy:"max_partial_line_size_truncate,attr,optional"`
 }
 
 var (
diff --git a/internal/component/loki/process/stages/geoip.go b/internal/component/loki/process/stages/geoip.go
index 505077ecfe..8822b9f20f 100644
--- a/internal/component/loki/process/stages/geoip.go
+++ b/internal/component/loki/process/stages/geoip.go
@@ -56,10 +56,10 @@ var fields = map[GeoIPFields]string{
 
 // GeoIPConfig represents GeoIP stage config
 type GeoIPConfig struct {
-	DB            string            `river:"db,attr"`
-	Source        *string           `river:"source,attr"`
-	DBType        string            `river:"db_type,attr,optional"`
-	CustomLookups map[string]string `river:"custom_lookups,attr,optional"`
+	DB            string            `alloy:"db,attr"`
+	Source        *string           `alloy:"source,attr"`
+	DBType        string            `alloy:"db_type,attr,optional"`
+	CustomLookups map[string]string `alloy:"custom_lookups,attr,optional"`
 }
 
 func validateGeoIPConfig(c GeoIPConfig) (map[string]*jmespath.JMESPath, error) {
diff --git a/internal/component/loki/process/stages/json.go b/internal/component/loki/process/stages/json.go
index 5f98209b46..4ee263cbc4 100644
--- a/internal/component/loki/process/stages/json.go
+++ b/internal/component/loki/process/stages/json.go
@@ -22,9 +22,9 @@ const (
 
 // JSONConfig represents a JSON Stage configuration
 type JSONConfig struct {
-	Expressions   map[string]string `river:"expressions,attr"`
-	Source        *string           `river:"source,attr,optional"`
-	DropMalformed bool              `river:"drop_malformed,attr,optional"`
+	Expressions   map[string]string `alloy:"expressions,attr"`
+	Source        *string           `alloy:"source,attr,optional"`
+	DropMalformed bool              `alloy:"drop_malformed,attr,optional"`
 }
 
 // validateJSONConfig validates a json config and returns a map of necessary jmespath expressions.
diff --git a/internal/component/loki/process/stages/label_drop.go b/internal/component/loki/process/stages/label_drop.go
index ae117220ce..143bd16d4f 100644
--- a/internal/component/loki/process/stages/label_drop.go
+++ b/internal/component/loki/process/stages/label_drop.go
@@ -12,7 +12,7 @@ var ErrEmptyLabelDropStageConfig = errors.New("labeldrop stage config cannot be
 
 // LabelDropConfig contains the slice of labels to be dropped.
 type LabelDropConfig struct {
-	Values []string `river:"values,attr"`
+	Values []string `alloy:"values,attr"`
 }
 
 func newLabelDropStage(config LabelDropConfig) (Stage, error) {
diff --git a/internal/component/loki/process/stages/label_keep.go b/internal/component/loki/process/stages/label_keep.go
index c06baa31f0..1887706319 100644
--- a/internal/component/loki/process/stages/label_keep.go
+++ b/internal/component/loki/process/stages/label_keep.go
@@ -12,7 +12,7 @@ var ErrEmptyLabelAllowStageConfig = errors.New("labelallow stage config cannot b
 
 // LabelAllowConfig contains the slice of labels to allow through.
 type LabelAllowConfig struct {
-	Values []string `river:"values,attr"`
+	Values []string `alloy:"values,attr"`
 }
 
 func newLabelAllowStage(config LabelAllowConfig) (Stage, error) {
diff --git a/internal/component/loki/process/stages/labels.go b/internal/component/loki/process/stages/labels.go
index 5faac02cc0..5bec9583ff 100644
--- a/internal/component/loki/process/stages/labels.go
+++ b/internal/component/loki/process/stages/labels.go
@@ -18,7 +18,7 @@ const (
 
 // LabelsConfig is a set of labels to be extracted
 type LabelsConfig struct {
-	Values map[string]*string `river:"values,attr"`
+	Values map[string]*string `alloy:"values,attr"`
 }
 
 // validateLabelsConfig validates the Label stage configuration
diff --git a/internal/component/loki/process/stages/limit.go b/internal/component/loki/process/stages/limit.go
index aee76ee519..51b0c3e825 100644
--- a/internal/component/loki/process/stages/limit.go
+++ b/internal/component/loki/process/stages/limit.go
@@ -24,11 +24,11 @@ const MinReasonableMaxDistinctLabels = 10000 // 80bytes per rate.Limiter ~ 1MiB
 
 // LimitConfig sets up a Limit stage.
 type LimitConfig struct {
-	Rate              float64 `river:"rate,attr"`
-	Burst             int     `river:"burst,attr"`
-	Drop              bool    `river:"drop,attr,optional"`
-	ByLabelName       string  `river:"by_label_name,attr,optional"`
-	MaxDistinctLabels int     `river:"max_distinct_labels,attr,optional"`
+	Rate              float64 `alloy:"rate,attr"`
+	Burst             int     `alloy:"burst,attr"`
+	Drop              bool    `alloy:"drop,attr,optional"`
+	ByLabelName       string  `alloy:"by_label_name,attr,optional"`
+	MaxDistinctLabels int     `alloy:"max_distinct_labels,attr,optional"`
 }
 
 func newLimitStage(logger log.Logger, cfg LimitConfig, registerer prometheus.Registerer) (Stage, error) {
diff --git a/internal/component/loki/process/stages/logfmt.go b/internal/component/loki/process/stages/logfmt.go
index a70c84f527..0cf006b265 100644
--- a/internal/component/loki/process/stages/logfmt.go
+++ b/internal/component/loki/process/stages/logfmt.go
@@ -21,8 +21,8 @@ var (
 
 // LogfmtConfig represents a logfmt Stage configuration
 type LogfmtConfig struct {
-	Mapping map[string]string `river:"mapping,attr"`
-	Source  string            `river:"source,attr,optional"`
+	Mapping map[string]string `alloy:"mapping,attr"`
+	Source  string            `alloy:"source,attr,optional"`
 }
 
 // validateLogfmtConfig validates a logfmt stage config and returns an inverse mapping of configured mapping.
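
Because the change is purely textual, a rename of this shape can be produced and verified mechanically. Below is a minimal sketch of one way to do it, matching only struct tags that open with a backtick followed by river:; there is no claim that this patch was actually generated this way.

package main

import (
	"fmt"
	"regexp"
)

// tagRe matches the opening of a struct tag such as `river:"name,attr"`.
// Anchoring on the backtick avoids touching the word "river" when it
// appears in comments or identifiers.
var tagRe = regexp.MustCompile("`river:")

func main() {
	line := "\tName string `river:\"name,attr\"`"
	fmt.Println(tagRe.ReplaceAllString(line, "`alloy:"))
	// Prints: 	Name string `alloy:"name,attr"`
}

Running such a rewrite over the tree, followed by gofmt and the test suite, would catch any tag the regular expression missed or mangled.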
diff --git a/internal/component/loki/process/stages/match.go b/internal/component/loki/process/stages/match.go
index 4b84bb99a9..3a06634670 100644
--- a/internal/component/loki/process/stages/match.go
+++ b/internal/component/loki/process/stages/match.go
@@ -27,11 +27,11 @@ var (
 
 // MatchConfig contains the configuration for a matcherStage
 type MatchConfig struct {
-	Selector     string        `river:"selector,attr"`
-	Stages       []StageConfig `river:"stage,enum,optional"`
-	Action       string        `river:"action,attr,optional"`
-	PipelineName string        `river:"pipeline_name,attr,optional"`
-	DropReason   string        `river:"drop_counter_reason,attr,optional"`
+	Selector     string        `alloy:"selector,attr"`
+	Stages       []StageConfig `alloy:"stage,enum,optional"`
+	Action       string        `alloy:"action,attr,optional"`
+	PipelineName string        `alloy:"pipeline_name,attr,optional"`
+	DropReason   string        `alloy:"drop_counter_reason,attr,optional"`
 }
 
 // validateMatcherConfig validates the MatcherConfig for the matcherStage
diff --git a/internal/component/loki/process/stages/metric.go b/internal/component/loki/process/stages/metric.go
index 983354abd6..8d0ca48785 100644
--- a/internal/component/loki/process/stages/metric.go
+++ b/internal/component/loki/process/stages/metric.go
@@ -35,14 +35,14 @@ var (
 // MetricConfig is a single metrics configuration.
 // TODO(@tpaschalis) Rework once River squashing is implemented.
 type MetricConfig struct {
-	Counter   *metric.CounterConfig   `river:"counter,block,optional"`
-	Gauge     *metric.GaugeConfig     `river:"gauge,block,optional"`
-	Histogram *metric.HistogramConfig `river:"histogram,block,optional"`
+	Counter   *metric.CounterConfig   `alloy:"counter,block,optional"`
+	Gauge     *metric.GaugeConfig     `alloy:"gauge,block,optional"`
+	Histogram *metric.HistogramConfig `alloy:"histogram,block,optional"`
 }
 
 // MetricsConfig is a set of configured metrics.
 type MetricsConfig struct {
-	Metrics []MetricConfig `river:"metric,enum,optional"`
+	Metrics []MetricConfig `alloy:"metric,enum,optional"`
 }
 
 type cfgCollector struct {
diff --git a/internal/component/loki/process/stages/multiline.go b/internal/component/loki/process/stages/multiline.go
index 677fe5c09b..0e4a7d234b 100644
--- a/internal/component/loki/process/stages/multiline.go
+++ b/internal/component/loki/process/stages/multiline.go
@@ -24,9 +24,9 @@ var (
 
 // MultilineConfig contains the configuration for a Multiline stage.
 type MultilineConfig struct {
-	Expression  string        `river:"firstline,attr"`
-	MaxLines    uint64        `river:"max_lines,attr,optional"`
-	MaxWaitTime time.Duration `river:"max_wait_time,attr,optional"`
+	Expression  string        `alloy:"firstline,attr"`
+	MaxLines    uint64        `alloy:"max_lines,attr,optional"`
+	MaxWaitTime time.Duration `alloy:"max_wait_time,attr,optional"`
 	regex       *regexp.Regexp
 }
 
diff --git a/internal/component/loki/process/stages/output.go b/internal/component/loki/process/stages/output.go
index ae68a59384..b82d3cd3fb 100644
--- a/internal/component/loki/process/stages/output.go
+++ b/internal/component/loki/process/stages/output.go
@@ -19,7 +19,7 @@ var (
 // OutputConfig initializes a configuration stage which sets the log line to a
 // value from the extracted map.
 type OutputConfig struct {
-	Source string `river:"source,attr"`
+	Source string `alloy:"source,attr"`
 }
 
 // newOutputStage creates a new outputStage
diff --git a/internal/component/loki/process/stages/pack.go b/internal/component/loki/process/stages/pack.go
index a5d1633b40..2aece44982 100644
--- a/internal/component/loki/process/stages/pack.go
+++ b/internal/component/loki/process/stages/pack.go
@@ -101,8 +101,8 @@ func (w Packed) MarshalJSON() ([]byte, error) {
 
 // PackConfig contains the configuration for a packStage
 type PackConfig struct {
-	Labels          []string `river:"labels,attr"`
-	IngestTimestamp bool     `river:"ingest_timestamp,attr,optional"`
+	Labels          []string `alloy:"labels,attr"`
+	IngestTimestamp bool     `alloy:"ingest_timestamp,attr,optional"`
 }
 
 // DefaultPackConfig sets the defaults.
diff --git a/internal/component/loki/process/stages/pipeline.go b/internal/component/loki/process/stages/pipeline.go
index fb1be291e5..65885c9140 100644
--- a/internal/component/loki/process/stages/pipeline.go
+++ b/internal/component/loki/process/stages/pipeline.go
@@ -16,31 +16,31 @@ import (
 // exactly one is set.
 type StageConfig struct {
 	//TODO(thampiotr): sync these with new stages
-	CRIConfig             *CRIConfig             `river:"cri,block,optional"`
-	DecolorizeConfig      *DecolorizeConfig      `river:"decolorize,block,optional"`
-	DockerConfig          *DockerConfig          `river:"docker,block,optional"`
-	DropConfig            *DropConfig            `river:"drop,block,optional"`
-	EventLogMessageConfig *EventLogMessageConfig `river:"eventlogmessage,block,optional"`
-	GeoIPConfig           *GeoIPConfig           `river:"geoip,block,optional"`
-	JSONConfig            *JSONConfig            `river:"json,block,optional"`
-	LabelAllowConfig      *LabelAllowConfig      `river:"label_keep,block,optional"`
-	LabelDropConfig       *LabelDropConfig       `river:"label_drop,block,optional"`
-	LabelsConfig          *LabelsConfig          `river:"labels,block,optional"`
-	LimitConfig           *LimitConfig           `river:"limit,block,optional"`
-	LogfmtConfig          *LogfmtConfig          `river:"logfmt,block,optional"`
-	MatchConfig           *MatchConfig           `river:"match,block,optional"`
-	MetricsConfig         *MetricsConfig         `river:"metrics,block,optional"`
-	MultilineConfig       *MultilineConfig       `river:"multiline,block,optional"`
-	OutputConfig          *OutputConfig          `river:"output,block,optional"`
-	PackConfig            *PackConfig            `river:"pack,block,optional"`
-	RegexConfig           *RegexConfig           `river:"regex,block,optional"`
-	ReplaceConfig         *ReplaceConfig         `river:"replace,block,optional"`
-	StaticLabelsConfig    *StaticLabelsConfig    `river:"static_labels,block,optional"`
-	StructuredMetadata    *LabelsConfig          `river:"structured_metadata,block,optional"`
-	SamplingConfig        *SamplingConfig        `river:"sampling,block,optional"`
-	TemplateConfig        *TemplateConfig        `river:"template,block,optional"`
-	TenantConfig          *TenantConfig          `river:"tenant,block,optional"`
-	TimestampConfig       *TimestampConfig       `river:"timestamp,block,optional"`
+	CRIConfig             *CRIConfig             `alloy:"cri,block,optional"`
+	DecolorizeConfig      *DecolorizeConfig      `alloy:"decolorize,block,optional"`
+	DockerConfig          *DockerConfig          `alloy:"docker,block,optional"`
+	DropConfig            *DropConfig            `alloy:"drop,block,optional"`
+	EventLogMessageConfig *EventLogMessageConfig `alloy:"eventlogmessage,block,optional"`
+	GeoIPConfig           *GeoIPConfig           `alloy:"geoip,block,optional"`
+	JSONConfig            *JSONConfig            `alloy:"json,block,optional"`
+	LabelAllowConfig      *LabelAllowConfig      `alloy:"label_keep,block,optional"`
+	LabelDropConfig       *LabelDropConfig       `alloy:"label_drop,block,optional"`
+	LabelsConfig          *LabelsConfig          `alloy:"labels,block,optional"`
+	LimitConfig           *LimitConfig           `alloy:"limit,block,optional"`
+	LogfmtConfig          *LogfmtConfig          `alloy:"logfmt,block,optional"`
+	MatchConfig           *MatchConfig           `alloy:"match,block,optional"`
+	MetricsConfig         *MetricsConfig         `alloy:"metrics,block,optional"`
+	MultilineConfig       *MultilineConfig       `alloy:"multiline,block,optional"`
+	OutputConfig          *OutputConfig          `alloy:"output,block,optional"`
+	PackConfig            *PackConfig            `alloy:"pack,block,optional"`
+	RegexConfig           *RegexConfig           `alloy:"regex,block,optional"`
+	ReplaceConfig         *ReplaceConfig         `alloy:"replace,block,optional"`
+	StaticLabelsConfig    *StaticLabelsConfig    `alloy:"static_labels,block,optional"`
+	StructuredMetadata    *LabelsConfig          `alloy:"structured_metadata,block,optional"`
+	SamplingConfig        *SamplingConfig        `alloy:"sampling,block,optional"`
+	TemplateConfig        *TemplateConfig        `alloy:"template,block,optional"`
+	TenantConfig          *TenantConfig          `alloy:"tenant,block,optional"`
+	TimestampConfig       *TimestampConfig       `alloy:"timestamp,block,optional"`
 }
 
 var rateLimiter *rate.Limiter
diff --git a/internal/component/loki/process/stages/pipeline_test.go b/internal/component/loki/process/stages/pipeline_test.go
index d422cf2cc3..0fcf8917ff 100644
--- a/internal/component/loki/process/stages/pipeline_test.go
+++ b/internal/component/loki/process/stages/pipeline_test.go
@@ -22,7 +22,7 @@ import (
 
 // Configs defines multiple StageConfigs as consequent blocks.
 type Configs struct {
-	Stages []StageConfig `river:"stage,enum,optional"`
+	Stages []StageConfig `alloy:"stage,enum,optional"`
 }
 
 func withInboundEntries(entries ...Entry) chan Entry {
diff --git a/internal/component/loki/process/stages/regex.go b/internal/component/loki/process/stages/regex.go
index bc4a3e0e57..ef3f28abf6 100644
--- a/internal/component/loki/process/stages/regex.go
+++ b/internal/component/loki/process/stages/regex.go
@@ -23,8 +23,8 @@ var (
 
 // RegexConfig configures a processing stage uses regular expressions to
 // extract values from log lines into the shared values map.
type RegexConfig struct { - Expression string `river:"expression,attr"` - Source *string `river:"source,attr,optional"` + Expression string `alloy:"expression,attr"` + Source *string `alloy:"source,attr,optional"` } // validateRegexConfig validates the config and return a regex diff --git a/internal/component/loki/process/stages/replace.go b/internal/component/loki/process/stages/replace.go index 8cbb6ce75e..266b5a5d9e 100644 --- a/internal/component/loki/process/stages/replace.go +++ b/internal/component/loki/process/stages/replace.go @@ -28,9 +28,9 @@ func init() { // ReplaceConfig contains a regexStage configuration type ReplaceConfig struct { - Expression string `river:"expression,attr"` - Source string `river:"source,attr,optional"` - Replace string `river:"replace,attr,optional"` + Expression string `alloy:"expression,attr"` + Source string `alloy:"source,attr,optional"` + Replace string `alloy:"replace,attr,optional"` } func getExpressionRegex(c ReplaceConfig) (*regexp.Regexp, error) { diff --git a/internal/component/loki/process/stages/sampling.go b/internal/component/loki/process/stages/sampling.go index 53bb6ad2e5..d2ef316535 100644 --- a/internal/component/loki/process/stages/sampling.go +++ b/internal/component/loki/process/stages/sampling.go @@ -22,8 +22,8 @@ var ( // SamplingConfig contains the configuration for a samplingStage type SamplingConfig struct { - DropReason *string `river:"drop_counter_reason,attr,optional"` - SamplingRate float64 `river:"rate,attr"` + DropReason *string `alloy:"drop_counter_reason,attr,optional"` + SamplingRate float64 `alloy:"rate,attr"` } func (s *SamplingConfig) SetToDefault() { diff --git a/internal/component/loki/process/stages/static_labels.go b/internal/component/loki/process/stages/static_labels.go index 8d1d2873f7..df8c5d244d 100644 --- a/internal/component/loki/process/stages/static_labels.go +++ b/internal/component/loki/process/stages/static_labels.go @@ -16,7 +16,7 @@ var ErrEmptyStaticLabelStageConfig = errors.New("static_labels stage config cann // StaticLabelsConfig contains a map of static labels to be set. type StaticLabelsConfig struct { - Values map[string]*string `river:"values,attr"` + Values map[string]*string `alloy:"values,attr"` } func newStaticLabelsStage(logger log.Logger, config StaticLabelsConfig) (Stage, error) { diff --git a/internal/component/loki/process/stages/template.go b/internal/component/loki/process/stages/template.go index 3da3a782e9..cf8c7ab50d 100644 --- a/internal/component/loki/process/stages/template.go +++ b/internal/component/loki/process/stages/template.go @@ -63,8 +63,8 @@ func init() { // TemplateConfig configures template value extraction. type TemplateConfig struct { - Source string `river:"source,attr"` - Template string `river:"template,attr"` + Source string `alloy:"source,attr"` + Template string `alloy:"template,attr"` } // validateTemplateConfig validates the templateStage config. diff --git a/internal/component/loki/process/stages/tenant.go b/internal/component/loki/process/stages/tenant.go index fab08ec479..a95cd2f1bc 100644 --- a/internal/component/loki/process/stages/tenant.go +++ b/internal/component/loki/process/stages/tenant.go @@ -26,9 +26,9 @@ type tenantStage struct { // TenantConfig configures a tenant stage. 
type TenantConfig struct { - Label string `river:"label,attr,optional"` - Source string `river:"source,attr,optional"` - Value string `river:"value,attr,optional"` + Label string `alloy:"label,attr,optional"` + Source string `alloy:"source,attr,optional"` + Value string `alloy:"value,attr,optional"` } // validateTenantConfig validates the tenant stage configuration diff --git a/internal/component/loki/process/stages/timestamp.go b/internal/component/loki/process/stages/timestamp.go index 7bb19c50e6..e7f8d12e98 100644 --- a/internal/component/loki/process/stages/timestamp.go +++ b/internal/component/loki/process/stages/timestamp.go @@ -44,11 +44,11 @@ var TimestampActionOnFailureOptions = []string{TimestampActionOnFailureSkip, Tim // TimestampConfig configures a processing stage for timestamp extraction. type TimestampConfig struct { - Source string `river:"source,attr"` - Format string `river:"format,attr"` - FallbackFormats []string `river:"fallback_formats,attr,optional"` - Location *string `river:"location,attr,optional"` - ActionOnFailure string `river:"action_on_failure,attr,optional"` + Source string `alloy:"source,attr"` + Format string `alloy:"format,attr"` + FallbackFormats []string `alloy:"fallback_formats,attr,optional"` + Location *string `alloy:"location,attr,optional"` + ActionOnFailure string `alloy:"action_on_failure,attr,optional"` } type parser func(string) (time.Time, error) diff --git a/internal/component/loki/relabel/relabel.go b/internal/component/loki/relabel/relabel.go index 1d79993dea..1da52e9d77 100644 --- a/internal/component/loki/relabel/relabel.go +++ b/internal/component/loki/relabel/relabel.go @@ -32,13 +32,13 @@ func init() { // component. type Arguments struct { // Where the relabeled metrics should be forwarded to. - ForwardTo []loki.LogsReceiver `river:"forward_to,attr"` + ForwardTo []loki.LogsReceiver `alloy:"forward_to,attr"` // The relabelling rules to apply to each log entry before it's forwarded. - RelabelConfigs []*flow_relabel.Config `river:"rule,block,optional"` + RelabelConfigs []*flow_relabel.Config `alloy:"rule,block,optional"` // The maximum number of items to hold in the component's LRU cache. - MaxCacheSize int `river:"max_cache_size,attr,optional"` + MaxCacheSize int `alloy:"max_cache_size,attr,optional"` } // DefaultArguments provides the default arguments for the loki.relabel @@ -54,8 +54,8 @@ func (a *Arguments) SetToDefault() { // Exports holds values which are exported by the loki.relabel component. type Exports struct { - Receiver loki.LogsReceiver `river:"receiver,attr"` - Rules flow_relabel.Rules `river:"rules,attr"` + Receiver loki.LogsReceiver `alloy:"receiver,attr"` + Rules flow_relabel.Rules `alloy:"rules,attr"` } // Component implements the loki.relabel component. diff --git a/internal/component/loki/relabel/relabel_test.go b/internal/component/loki/relabel/relabel_test.go index 54a3c2f079..bb123fc2a6 100644 --- a/internal/component/loki/relabel/relabel_test.go +++ b/internal/component/loki/relabel/relabel_test.go @@ -45,7 +45,7 @@ func TestRelabeling(t *testing.T) { // an easy way to refer to a loki.LogsReceiver value for the forward_to // argument. 
type cfg struct { - Rcs []*flow_relabel.Config `river:"rule,block,optional"` + Rcs []*flow_relabel.Config `alloy:"rule,block,optional"` } var relabelConfigs cfg err := syntax.Unmarshal([]byte(rc), &relabelConfigs) @@ -108,7 +108,7 @@ func TestRelabeling(t *testing.T) { func BenchmarkRelabelComponent(b *testing.B) { type cfg struct { - Rcs []*flow_relabel.Config `river:"rule,block,optional"` + Rcs []*flow_relabel.Config `alloy:"rule,block,optional"` } var relabelConfigs cfg _ = syntax.Unmarshal([]byte(rc), &relabelConfigs) @@ -154,7 +154,7 @@ func BenchmarkRelabelComponent(b *testing.B) { func TestCache(t *testing.T) { type cfg struct { - Rcs []*flow_relabel.Config `river:"rule,block,optional"` + Rcs []*flow_relabel.Config `alloy:"rule,block,optional"` } var relabelConfigs cfg err := syntax.Unmarshal([]byte(rc), &relabelConfigs) diff --git a/internal/component/loki/rules/kubernetes/debug.go b/internal/component/loki/rules/kubernetes/debug.go index eb4f587ecd..ced336833f 100644 --- a/internal/component/loki/rules/kubernetes/debug.go +++ b/internal/component/loki/rules/kubernetes/debug.go @@ -3,21 +3,21 @@ package rules import "fmt" type DebugInfo struct { - Error string `river:"error,attr,optional"` - PrometheusRules []DebugK8sPrometheusRule `river:"prometheus_rule,block,optional"` - LokiRuleNamespaces []DebugLokiNamespace `river:"loki_rule_namespace,block,optional"` + Error string `alloy:"error,attr,optional"` + PrometheusRules []DebugK8sPrometheusRule `alloy:"prometheus_rule,block,optional"` + LokiRuleNamespaces []DebugLokiNamespace `alloy:"loki_rule_namespace,block,optional"` } type DebugK8sPrometheusRule struct { - Namespace string `river:"namespace,attr"` - Name string `river:"name,attr"` - UID string `river:"uid,attr"` - NumRuleGroups int `river:"num_rule_groups,attr"` + Namespace string `alloy:"namespace,attr"` + Name string `alloy:"name,attr"` + UID string `alloy:"uid,attr"` + NumRuleGroups int `alloy:"num_rule_groups,attr"` } type DebugLokiNamespace struct { - Name string `river:"name,attr"` - NumRuleGroups int `river:"num_rule_groups,attr"` + Name string `alloy:"name,attr"` + NumRuleGroups int `alloy:"num_rule_groups,attr"` } func (c *Component) DebugInfo() interface{} { diff --git a/internal/component/loki/rules/kubernetes/types.go b/internal/component/loki/rules/kubernetes/types.go index b98db47196..1cb84f96ea 100644 --- a/internal/component/loki/rules/kubernetes/types.go +++ b/internal/component/loki/rules/kubernetes/types.go @@ -9,15 +9,15 @@ import ( ) type Arguments struct { - Address string `river:"address,attr"` - TenantID string `river:"tenant_id,attr,optional"` - UseLegacyRoutes bool `river:"use_legacy_routes,attr,optional"` - HTTPClientConfig config.HTTPClientConfig `river:",squash"` - SyncInterval time.Duration `river:"sync_interval,attr,optional"` - LokiNameSpacePrefix string `river:"loki_namespace_prefix,attr,optional"` - - RuleSelector kubernetes.LabelSelector `river:"rule_selector,block,optional"` - RuleNamespaceSelector kubernetes.LabelSelector `river:"rule_namespace_selector,block,optional"` + Address string `alloy:"address,attr"` + TenantID string `alloy:"tenant_id,attr,optional"` + UseLegacyRoutes bool `alloy:"use_legacy_routes,attr,optional"` + HTTPClientConfig config.HTTPClientConfig `alloy:",squash"` + SyncInterval time.Duration `alloy:"sync_interval,attr,optional"` + LokiNameSpacePrefix string `alloy:"loki_namespace_prefix,attr,optional"` + + RuleSelector kubernetes.LabelSelector `alloy:"rule_selector,block,optional"` + RuleNamespaceSelector 
kubernetes.LabelSelector `alloy:"rule_namespace_selector,block,optional"` } var DefaultArguments = Arguments{ diff --git a/internal/component/loki/source/api/api.go b/internal/component/loki/source/api/api.go index 33159ab84b..cf3002ab65 100644 --- a/internal/component/loki/source/api/api.go +++ b/internal/component/loki/source/api/api.go @@ -29,11 +29,11 @@ func init() { } type Arguments struct { - Server *fnet.ServerConfig `river:",squash"` - ForwardTo []loki.LogsReceiver `river:"forward_to,attr"` - Labels map[string]string `river:"labels,attr,optional"` - RelabelRules relabel.Rules `river:"relabel_rules,attr,optional"` - UseIncomingTimestamp bool `river:"use_incoming_timestamp,attr,optional"` + Server *fnet.ServerConfig `alloy:",squash"` + ForwardTo []loki.LogsReceiver `alloy:"forward_to,attr"` + Labels map[string]string `alloy:"labels,attr,optional"` + RelabelRules relabel.Rules `alloy:"relabel_rules,attr,optional"` + UseIncomingTimestamp bool `alloy:"use_incoming_timestamp,attr,optional"` } // SetToDefault implements river.Defaulter. diff --git a/internal/component/loki/source/aws_firehose/component.go b/internal/component/loki/source/aws_firehose/component.go index cd23ceb6a8..7088056e2e 100644 --- a/internal/component/loki/source/aws_firehose/component.go +++ b/internal/component/loki/source/aws_firehose/component.go @@ -34,11 +34,11 @@ func init() { } type Arguments struct { - Server *fnet.ServerConfig `river:",squash"` - AccessKey alloytypes.Secret `river:"access_key,attr,optional"` - UseIncomingTimestamp bool `river:"use_incoming_timestamp,attr,optional"` - ForwardTo []loki.LogsReceiver `river:"forward_to,attr"` - RelabelRules flow_relabel.Rules `river:"relabel_rules,attr,optional"` + Server *fnet.ServerConfig `alloy:",squash"` + AccessKey alloytypes.Secret `alloy:"access_key,attr,optional"` + UseIncomingTimestamp bool `alloy:"use_incoming_timestamp,attr,optional"` + ForwardTo []loki.LogsReceiver `alloy:"forward_to,attr"` + RelabelRules flow_relabel.Rules `alloy:"relabel_rules,attr,optional"` } // SetToDefault implements river.Defaulter. diff --git a/internal/component/loki/source/azure_event_hubs/azure_event_hubs.go b/internal/component/loki/source/azure_event_hubs/azure_event_hubs.go index 1a35824c14..fe1c9b451e 100644 --- a/internal/component/loki/source/azure_event_hubs/azure_event_hubs.go +++ b/internal/component/loki/source/azure_event_hubs/azure_event_hubs.go @@ -33,26 +33,26 @@ func init() { // Arguments holds values which are used to configure the loki.source.azure_event_hubs component. 
type Arguments struct { - FullyQualifiedNamespace string `river:"fully_qualified_namespace,attr"` - EventHubs []string `river:"event_hubs,attr"` + FullyQualifiedNamespace string `alloy:"fully_qualified_namespace,attr"` + EventHubs []string `alloy:"event_hubs,attr"` - Authentication AzureEventHubsAuthentication `river:"authentication,block"` + Authentication AzureEventHubsAuthentication `alloy:"authentication,block"` - GroupID string `river:"group_id,attr,optional"` - UseIncomingTimestamp bool `river:"use_incoming_timestamp,attr,optional"` - DisallowCustomMessages bool `river:"disallow_custom_messages,attr,optional"` - RelabelRules flow_relabel.Rules `river:"relabel_rules,attr,optional"` - Labels map[string]string `river:"labels,attr,optional"` - Assignor string `river:"assignor,attr,optional"` + GroupID string `alloy:"group_id,attr,optional"` + UseIncomingTimestamp bool `alloy:"use_incoming_timestamp,attr,optional"` + DisallowCustomMessages bool `alloy:"disallow_custom_messages,attr,optional"` + RelabelRules flow_relabel.Rules `alloy:"relabel_rules,attr,optional"` + Labels map[string]string `alloy:"labels,attr,optional"` + Assignor string `alloy:"assignor,attr,optional"` - ForwardTo []loki.LogsReceiver `river:"forward_to,attr"` + ForwardTo []loki.LogsReceiver `alloy:"forward_to,attr"` } // AzureEventHubsAuthentication describe the configuration for authentication with Azure Event Hub type AzureEventHubsAuthentication struct { - Mechanism string `river:"mechanism,attr"` - Scopes []string `river:"scopes,attr,optional"` - ConnectionString string `river:"connection_string,attr,optional"` + Mechanism string `alloy:"mechanism,attr"` + Scopes []string `alloy:"scopes,attr,optional"` + ConnectionString string `alloy:"connection_string,attr,optional"` } func getDefault() Arguments { diff --git a/internal/component/loki/source/cloudflare/cloudflare.go b/internal/component/loki/source/cloudflare/cloudflare.go index 718ef89e07..2fcdc62ff0 100644 --- a/internal/component/loki/source/cloudflare/cloudflare.go +++ b/internal/component/loki/source/cloudflare/cloudflare.go @@ -38,14 +38,14 @@ func init() { // Arguments holds values which are used to configure the // loki.source.cloudflare component. type Arguments struct { - APIToken alloytypes.Secret `river:"api_token,attr"` - ZoneID string `river:"zone_id,attr"` - Labels map[string]string `river:"labels,attr,optional"` - Workers int `river:"workers,attr,optional"` - PullRange time.Duration `river:"pull_range,attr,optional"` - FieldsType string `river:"fields_type,attr,optional"` - AdditionalFields []string `river:"additional_fields,attr,optional"` - ForwardTo []loki.LogsReceiver `river:"forward_to,attr"` + APIToken alloytypes.Secret `alloy:"api_token,attr"` + ZoneID string `alloy:"zone_id,attr"` + Labels map[string]string `alloy:"labels,attr,optional"` + Workers int `alloy:"workers,attr,optional"` + PullRange time.Duration `alloy:"pull_range,attr,optional"` + FieldsType string `alloy:"fields_type,attr,optional"` + AdditionalFields []string `alloy:"additional_fields,attr,optional"` + ForwardTo []loki.LogsReceiver `alloy:"forward_to,attr"` } // Convert returns a cloudflaretarget Config struct from the Arguments. 
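Several Arguments structs in this patch (loki.source.api, aws_firehose, and later heroku and the gcplog push target) carry the anonymous-name form `alloy:",squash"`. Squashing hoists the nested struct's attributes and blocks into the enclosing block, so the shared server settings appear at the component's top level rather than inside a dedicated sub-block. A hedged sketch with hypothetical, simplified field types (the real components use `*fnet.ServerConfig` and secret types):

// Hypothetical sketch of `alloy:",squash"`: ServerConfig's attributes are
// promoted into the outer block, so one flat body fills both structs.
package main

import (
	"fmt"

	"github.com/grafana/alloy/syntax" // assumed import path, as above
)

type ServerConfig struct {
	ListenAddress string `alloy:"listen_address,attr,optional"`
}

type Arguments struct {
	Server   ServerConfig `alloy:",squash"`
	APIToken string       `alloy:"api_token,attr"` // string for brevity; real code uses a secret type
}

func main() {
	// No server {} block: listen_address sits beside api_token because
	// the Server field is squashed into the parent block.
	src := `
		listen_address = "0.0.0.0:8080"
		api_token      = "secret"
	`
	var args Arguments
	if err := syntax.Unmarshal([]byte(src), &args); err != nil {
		panic(err)
	}
	fmt.Printf("%+v\n", args)
}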
@@ -196,6 +196,6 @@ func (c *Component) DebugInfo() interface{} { } type targetDebugInfo struct { - Ready bool `river:"ready,attr"` - Details map[string]string `river:"target_info,attr"` + Ready bool `alloy:"ready,attr"` + Details map[string]string `alloy:"target_info,attr"` } diff --git a/internal/component/loki/source/docker/docker.go b/internal/component/loki/source/docker/docker.go index d1b1430db7..809f8e1219 100644 --- a/internal/component/loki/source/docker/docker.go +++ b/internal/component/loki/source/docker/docker.go @@ -53,13 +53,13 @@ const ( // Arguments holds values which are used to configure the loki.source.docker // component. type Arguments struct { - Host string `river:"host,attr"` - Targets []discovery.Target `river:"targets,attr"` - ForwardTo []loki.LogsReceiver `river:"forward_to,attr"` - Labels map[string]string `river:"labels,attr,optional"` - RelabelRules flow_relabel.Rules `river:"relabel_rules,attr,optional"` - HTTPClientConfig *types.HTTPClientConfig `river:"http_client_config,block,optional"` - RefreshInterval time.Duration `river:"refresh_interval,attr,optional"` + Host string `alloy:"host,attr"` + Targets []discovery.Target `alloy:"targets,attr"` + ForwardTo []loki.LogsReceiver `alloy:"forward_to,attr"` + Labels map[string]string `alloy:"labels,attr,optional"` + RelabelRules flow_relabel.Rules `alloy:"relabel_rules,attr,optional"` + HTTPClientConfig *types.HTTPClientConfig `alloy:"http_client_config,block,optional"` + RefreshInterval time.Duration `alloy:"refresh_interval,attr,optional"` } // GetDefaultArguments return an instance of Arguments with the optional fields @@ -328,13 +328,13 @@ func (c *Component) DebugInfo() interface{} { } type readerDebugInfo struct { - TargetsInfo []targetInfo `river:"targets_info,block"` + TargetsInfo []targetInfo `alloy:"targets_info,block"` } type targetInfo struct { - ID string `river:"id,attr"` - LastError string `river:"last_error,attr"` - Labels string `river:"labels,attr"` - IsRunning string `river:"is_running,attr"` - ReadOffset string `river:"read_offset,attr"` + ID string `alloy:"id,attr"` + LastError string `alloy:"last_error,attr"` + Labels string `alloy:"labels,attr"` + IsRunning string `alloy:"is_running,attr"` + ReadOffset string `alloy:"read_offset,attr"` } diff --git a/internal/component/loki/source/file/file.go b/internal/component/loki/source/file/file.go index 63598afe86..3c418d9981 100644 --- a/internal/component/loki/source/file/file.go +++ b/internal/component/loki/source/file/file.go @@ -39,17 +39,17 @@ const ( // Arguments holds values which are used to configure the loki.source.file // component. 
type Arguments struct { - Targets []discovery.Target `river:"targets,attr"` - ForwardTo []loki.LogsReceiver `river:"forward_to,attr"` - Encoding string `river:"encoding,attr,optional"` - DecompressionConfig DecompressionConfig `river:"decompression,block,optional"` - FileWatch FileWatch `river:"file_watch,block,optional"` - TailFromEnd bool `river:"tail_from_end,attr,optional"` + Targets []discovery.Target `alloy:"targets,attr"` + ForwardTo []loki.LogsReceiver `alloy:"forward_to,attr"` + Encoding string `alloy:"encoding,attr,optional"` + DecompressionConfig DecompressionConfig `alloy:"decompression,block,optional"` + FileWatch FileWatch `alloy:"file_watch,block,optional"` + TailFromEnd bool `alloy:"tail_from_end,attr,optional"` } type FileWatch struct { - MinPollFrequency time.Duration `river:"min_poll_frequency,attr,optional"` - MaxPollFrequency time.Duration `river:"max_poll_frequency,attr,optional"` + MinPollFrequency time.Duration `alloy:"min_poll_frequency,attr,optional"` + MaxPollFrequency time.Duration `alloy:"max_poll_frequency,attr,optional"` } var DefaultArguments = Arguments{ @@ -65,9 +65,9 @@ func (a *Arguments) SetToDefault() { } type DecompressionConfig struct { - Enabled bool `river:"enabled,attr"` - InitialDelay time.Duration `river:"initial_delay,attr,optional"` - Format CompressionFormat `river:"format,attr"` + Enabled bool `alloy:"enabled,attr"` + InitialDelay time.Duration `alloy:"initial_delay,attr,optional"` + Format CompressionFormat `alloy:"format,attr"` } var ( @@ -280,14 +280,14 @@ func (c *Component) DebugInfo() interface{} { } type readerDebugInfo struct { - TargetsInfo []targetInfo `river:"targets_info,block"` + TargetsInfo []targetInfo `alloy:"targets_info,block"` } type targetInfo struct { - Path string `river:"path,attr"` - Labels string `river:"labels,attr"` - IsRunning bool `river:"is_running,attr"` - ReadOffset int64 `river:"read_offset,attr"` + Path string `alloy:"path,attr"` + Labels string `alloy:"labels,attr"` + IsRunning bool `alloy:"is_running,attr"` + ReadOffset int64 `alloy:"read_offset,attr"` } // Returns the elements from set b which are missing from set a diff --git a/internal/component/loki/source/gcplog/gcplog.go b/internal/component/loki/source/gcplog/gcplog.go index 1d8c5aaa6f..f7ebd670ad 100644 --- a/internal/component/loki/source/gcplog/gcplog.go +++ b/internal/component/loki/source/gcplog/gcplog.go @@ -34,10 +34,10 @@ func init() { // Arguments holds values which are used to configure the loki.source.gcplog // component. type Arguments struct { - PullTarget *gcptypes.PullConfig `river:"pull,block,optional"` - PushTarget *gcptypes.PushConfig `river:"push,block,optional"` - ForwardTo []loki.LogsReceiver `river:"forward_to,attr"` - RelabelRules flow_relabel.Rules `river:"relabel_rules,attr,optional"` + PullTarget *gcptypes.PullConfig `alloy:"pull,block,optional"` + PushTarget *gcptypes.PushConfig `alloy:"push,block,optional"` + ForwardTo []loki.LogsReceiver `alloy:"forward_to,attr"` + RelabelRules flow_relabel.Rules `alloy:"relabel_rules,attr,optional"` } // SetToDefault implements river.Defaulter. 
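The `// SetToDefault implements river.Defaulter.` comments left untouched by this patch point at the other half of the decoding contract: before unmarshaling, the decoder calls SetToDefault, so `optional` attributes start from a component's declared defaults rather than Go zero values. A sketch of the pattern, reusing two field names from loki.source.file above; the default values here are illustrative only, not the component's real defaults:

// Sketch of the Defaulter pattern used throughout these components.
package sketch

type Arguments struct {
	Encoding    string `alloy:"encoding,attr,optional"`
	TailFromEnd bool   `alloy:"tail_from_end,attr,optional"`
}

var DefaultArguments = Arguments{
	Encoding: "utf-8", // illustrative value, not the real default
}

// SetToDefault is called by the decoder before unmarshaling, so omitted
// optional attributes keep the values set here.
func (a *Arguments) SetToDefault() {
	*a = DefaultArguments
}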
@@ -171,5 +171,5 @@ func (c *Component) DebugInfo() interface{} { } type targetDebugInfo struct { - Details map[string]string `river:"target_info,attr"` + Details map[string]string `alloy:"target_info,attr"` } diff --git a/internal/component/loki/source/gcplog/gcptypes/gcptypes.go b/internal/component/loki/source/gcplog/gcptypes/gcptypes.go index 3008c7b0d9..dad7d8e0a9 100644 --- a/internal/component/loki/source/gcplog/gcptypes/gcptypes.go +++ b/internal/component/loki/source/gcplog/gcptypes/gcptypes.go @@ -9,20 +9,20 @@ import ( // PullConfig configures a GCPLog target with the 'pull' strategy. type PullConfig struct { - ProjectID string `river:"project_id,attr"` - Subscription string `river:"subscription,attr"` - Labels map[string]string `river:"labels,attr,optional"` - UseIncomingTimestamp bool `river:"use_incoming_timestamp,attr,optional"` - UseFullLine bool `river:"use_full_line,attr,optional"` + ProjectID string `alloy:"project_id,attr"` + Subscription string `alloy:"subscription,attr"` + Labels map[string]string `alloy:"labels,attr,optional"` + UseIncomingTimestamp bool `alloy:"use_incoming_timestamp,attr,optional"` + UseFullLine bool `alloy:"use_full_line,attr,optional"` } // PushConfig configures a GCPLog target with the 'push' strategy. type PushConfig struct { - Server *fnet.ServerConfig `river:",squash"` - PushTimeout time.Duration `river:"push_timeout,attr,optional"` - Labels map[string]string `river:"labels,attr,optional"` - UseIncomingTimestamp bool `river:"use_incoming_timestamp,attr,optional"` - UseFullLine bool `river:"use_full_line,attr,optional"` + Server *fnet.ServerConfig `alloy:",squash"` + PushTimeout time.Duration `alloy:"push_timeout,attr,optional"` + Labels map[string]string `alloy:"labels,attr,optional"` + UseIncomingTimestamp bool `alloy:"use_incoming_timestamp,attr,optional"` + UseFullLine bool `alloy:"use_full_line,attr,optional"` } // SetToDefault implements river.Defaulter. diff --git a/internal/component/loki/source/gelf/gelf.go b/internal/component/loki/source/gelf/gelf.go index c1a1815779..ea889322d3 100644 --- a/internal/component/loki/source/gelf/gelf.go +++ b/internal/component/loki/source/gelf/gelf.go @@ -92,10 +92,10 @@ func (c *Component) Update(args component.Arguments) error { // Arguments are the arguments for the component. type Arguments struct { // ListenAddress only supports UDP. - ListenAddress string `river:"listen_address,attr,optional"` - UseIncomingTimestamp bool `river:"use_incoming_timestamp,attr,optional"` - RelabelRules flow_relabel.Rules `river:"relabel_rules,attr,optional"` - Receivers []loki.LogsReceiver `river:"forward_to,attr"` + ListenAddress string `alloy:"listen_address,attr,optional"` + UseIncomingTimestamp bool `alloy:"use_incoming_timestamp,attr,optional"` + RelabelRules flow_relabel.Rules `alloy:"relabel_rules,attr,optional"` + Receivers []loki.LogsReceiver `alloy:"forward_to,attr"` } func defaultArgs() Arguments { diff --git a/internal/component/loki/source/heroku/heroku.go b/internal/component/loki/source/heroku/heroku.go index 9acf470109..d551b1cf80 100644 --- a/internal/component/loki/source/heroku/heroku.go +++ b/internal/component/loki/source/heroku/heroku.go @@ -33,11 +33,11 @@ func init() { // Arguments holds values which are used to configure the loki.source.heroku // component. 
type Arguments struct { - Server *fnet.ServerConfig `river:",squash"` - Labels map[string]string `river:"labels,attr,optional"` - UseIncomingTimestamp bool `river:"use_incoming_timestamp,attr,optional"` - ForwardTo []loki.LogsReceiver `river:"forward_to,attr"` - RelabelRules flow_relabel.Rules `river:"relabel_rules,attr,optional"` + Server *fnet.ServerConfig `alloy:",squash"` + Labels map[string]string `alloy:"labels,attr,optional"` + UseIncomingTimestamp bool `alloy:"use_incoming_timestamp,attr,optional"` + ForwardTo []loki.LogsReceiver `alloy:"forward_to,attr"` + RelabelRules flow_relabel.Rules `alloy:"relabel_rules,attr,optional"` } // SetToDefault implements river.Defaulter. @@ -187,8 +187,8 @@ func (c *Component) DebugInfo() interface{} { } type readerDebugInfo struct { - Ready bool `river:"ready,attr"` - Address string `river:"address,attr"` + Ready bool `alloy:"ready,attr"` + Address string `alloy:"address,attr"` } func changed(prev, next any) bool { diff --git a/internal/component/loki/source/journal/types.go b/internal/component/loki/source/journal/types.go index 7ba03adab0..648f7e7cfd 100644 --- a/internal/component/loki/source/journal/types.go +++ b/internal/component/loki/source/journal/types.go @@ -9,13 +9,13 @@ import ( // Arguments are the arguments for the component. type Arguments struct { - FormatAsJson bool `river:"format_as_json,attr,optional"` - MaxAge time.Duration `river:"max_age,attr,optional"` - Path string `river:"path,attr,optional"` - RelabelRules flow_relabel.Rules `river:"relabel_rules,attr,optional"` - Matches string `river:"matches,attr,optional"` - Receivers []loki.LogsReceiver `river:"forward_to,attr"` - Labels map[string]string `river:"labels,attr,optional"` + FormatAsJson bool `alloy:"format_as_json,attr,optional"` + MaxAge time.Duration `alloy:"max_age,attr,optional"` + Path string `alloy:"path,attr,optional"` + RelabelRules flow_relabel.Rules `alloy:"relabel_rules,attr,optional"` + Matches string `alloy:"matches,attr,optional"` + Receivers []loki.LogsReceiver `alloy:"forward_to,attr"` + Labels map[string]string `alloy:"labels,attr,optional"` } func defaultArgs() Arguments { diff --git a/internal/component/loki/source/kafka/kafka.go b/internal/component/loki/source/kafka/kafka.go index 5ac0240545..a22c3141a5 100644 --- a/internal/component/loki/source/kafka/kafka.go +++ b/internal/component/loki/source/kafka/kafka.go @@ -32,39 +32,39 @@ func init() { // Arguments holds values which are used to configure the loki.source.kafka // component. 
type Arguments struct { - Brokers []string `river:"brokers,attr"` - Topics []string `river:"topics,attr"` - GroupID string `river:"group_id,attr,optional"` - Assignor string `river:"assignor,attr,optional"` - Version string `river:"version,attr,optional"` - Authentication KafkaAuthentication `river:"authentication,block,optional"` - UseIncomingTimestamp bool `river:"use_incoming_timestamp,attr,optional"` - Labels map[string]string `river:"labels,attr,optional"` - - ForwardTo []loki.LogsReceiver `river:"forward_to,attr"` - RelabelRules flow_relabel.Rules `river:"relabel_rules,attr,optional"` + Brokers []string `alloy:"brokers,attr"` + Topics []string `alloy:"topics,attr"` + GroupID string `alloy:"group_id,attr,optional"` + Assignor string `alloy:"assignor,attr,optional"` + Version string `alloy:"version,attr,optional"` + Authentication KafkaAuthentication `alloy:"authentication,block,optional"` + UseIncomingTimestamp bool `alloy:"use_incoming_timestamp,attr,optional"` + Labels map[string]string `alloy:"labels,attr,optional"` + + ForwardTo []loki.LogsReceiver `alloy:"forward_to,attr"` + RelabelRules flow_relabel.Rules `alloy:"relabel_rules,attr,optional"` } // KafkaAuthentication describe the configuration for authentication with Kafka brokers type KafkaAuthentication struct { - Type string `river:"type,attr,optional"` - TLSConfig config.TLSConfig `river:"tls_config,block,optional"` - SASLConfig KafkaSASLConfig `river:"sasl_config,block,optional"` + Type string `alloy:"type,attr,optional"` + TLSConfig config.TLSConfig `alloy:"tls_config,block,optional"` + SASLConfig KafkaSASLConfig `alloy:"sasl_config,block,optional"` } // KafkaSASLConfig describe the SASL configuration for authentication with Kafka brokers type KafkaSASLConfig struct { - Mechanism string `river:"mechanism,attr,optional"` - User string `river:"user,attr,optional"` - Password alloytypes.Secret `river:"password,attr,optional"` - UseTLS bool `river:"use_tls,attr,optional"` - TLSConfig config.TLSConfig `river:"tls_config,block,optional"` - OAuthConfig OAuthConfigConfig `river:"oauth_config,block,optional"` + Mechanism string `alloy:"mechanism,attr,optional"` + User string `alloy:"user,attr,optional"` + Password alloytypes.Secret `alloy:"password,attr,optional"` + UseTLS bool `alloy:"use_tls,attr,optional"` + TLSConfig config.TLSConfig `alloy:"tls_config,block,optional"` + OAuthConfig OAuthConfigConfig `alloy:"oauth_config,block,optional"` } type OAuthConfigConfig struct { - TokenProvider string `river:"token_provider,attr"` - Scopes []string `river:"scopes,attr"` + TokenProvider string `alloy:"token_provider,attr"` + Scopes []string `alloy:"scopes,attr"` } // DefaultArguments provides the default arguments for a kafka component. diff --git a/internal/component/loki/source/kubernetes/kubernetes.go b/internal/component/loki/source/kubernetes/kubernetes.go index 577f362cb5..012e9082a8 100644 --- a/internal/component/loki/source/kubernetes/kubernetes.go +++ b/internal/component/loki/source/kubernetes/kubernetes.go @@ -38,13 +38,13 @@ func init() { // Arguments holds values which are used to configure the loki.source.kubernetes // component. type Arguments struct { - Targets []discovery.Target `river:"targets,attr"` - ForwardTo []loki.LogsReceiver `river:"forward_to,attr"` + Targets []discovery.Target `alloy:"targets,attr"` + ForwardTo []loki.LogsReceiver `alloy:"forward_to,attr"` // Client settings to connect to Kubernetes. 
- Client commonk8s.ClientArguments `river:"client,block,optional"` + Client commonk8s.ClientArguments `alloy:"client,block,optional"` - Clustering cluster.ComponentBlock `river:"clustering,block,optional"` + Clustering cluster.ComponentBlock `alloy:"clustering,block,optional"` } // DefaultArguments holds default settings for loki.source.kubernetes. @@ -269,14 +269,14 @@ func (c *Component) DebugInfo() interface{} { // DebugInfo represents debug information for loki.source.kubernetes. type DebugInfo struct { - Targets []DebugInfoTarget `river:"target,block,optional"` + Targets []DebugInfoTarget `alloy:"target,block,optional"` } // DebugInfoTarget is debug information for an individual target being tailed // for logs. type DebugInfoTarget struct { - Labels map[string]string `river:"labels,attr,optional"` - DiscoveryLabels map[string]string `river:"discovery_labels,attr,optional"` - LastError string `river:"last_error,attr,optional"` - UpdateTime time.Time `river:"update_time,attr,optional"` + Labels map[string]string `alloy:"labels,attr,optional"` + DiscoveryLabels map[string]string `alloy:"discovery_labels,attr,optional"` + LastError string `alloy:"last_error,attr,optional"` + UpdateTime time.Time `alloy:"update_time,attr,optional"` } diff --git a/internal/component/loki/source/kubernetes_events/event_controller.go b/internal/component/loki/source/kubernetes_events/event_controller.go index d3416178a7..1a1bd4fdf4 100644 --- a/internal/component/loki/source/kubernetes_events/event_controller.go +++ b/internal/component/loki/source/kubernetes_events/event_controller.go @@ -344,6 +344,6 @@ func (ctrl *eventController) DebugInfo() controllerInfo { } type controllerInfo struct { - Namespace string `river:"namespace,attr"` - LastTimestamp time.Time `river:"last_event_timestamp,attr"` + Namespace string `alloy:"namespace,attr"` + LastTimestamp time.Time `alloy:"last_event_timestamp,attr"` } diff --git a/internal/component/loki/source/kubernetes_events/kubernetes_events.go b/internal/component/loki/source/kubernetes_events/kubernetes_events.go index 8c52be0507..0f860d789b 100644 --- a/internal/component/loki/source/kubernetes_events/kubernetes_events.go +++ b/internal/component/loki/source/kubernetes_events/kubernetes_events.go @@ -41,14 +41,14 @@ func init() { // Arguments holds values which are used to configure the // loki.source.kubernetes_events component. type Arguments struct { - ForwardTo []loki.LogsReceiver `river:"forward_to,attr"` + ForwardTo []loki.LogsReceiver `alloy:"forward_to,attr"` - JobName string `river:"job_name,attr,optional"` - Namespaces []string `river:"namespaces,attr,optional"` - LogFormat string `river:"log_format,attr,optional"` + JobName string `alloy:"job_name,attr,optional"` + Namespaces []string `alloy:"namespaces,attr,optional"` + LogFormat string `alloy:"log_format,attr,optional"` // Client settings to connect to Kubernetes. - Client kubernetes.ClientArguments `river:"client,block,optional"` + Client kubernetes.ClientArguments `alloy:"client,block,optional"` } // DefaultArguments holds default settings for loki.source.kubernetes_events. @@ -248,7 +248,7 @@ func getNamespaces(args Arguments) []string { // DebugInfo implements [component.DebugComponent]. 
func (c *Component) DebugInfo() interface{} { type Info struct { - Controllers []controllerInfo `river:"event_controller,block,optional"` + Controllers []controllerInfo `alloy:"event_controller,block,optional"` } var info Info diff --git a/internal/component/loki/source/podlogs/podlogs.go b/internal/component/loki/source/podlogs/podlogs.go index 2577f8c18a..94323cc16a 100644 --- a/internal/component/loki/source/podlogs/podlogs.go +++ b/internal/component/loki/source/podlogs/podlogs.go @@ -40,15 +40,15 @@ func init() { // Arguments holds values which are used to configure the loki.source.podlogs // component. type Arguments struct { - ForwardTo []loki.LogsReceiver `river:"forward_to,attr"` + ForwardTo []loki.LogsReceiver `alloy:"forward_to,attr"` // Client settings to connect to Kubernetes. - Client commonk8s.ClientArguments `river:"client,block,optional"` + Client commonk8s.ClientArguments `alloy:"client,block,optional"` - Selector config.LabelSelector `river:"selector,block,optional"` - NamespaceSelector config.LabelSelector `river:"namespace_selector,block,optional"` + Selector config.LabelSelector `alloy:"selector,block,optional"` + NamespaceSelector config.LabelSelector `alloy:"namespace_selector,block,optional"` - Clustering cluster.ComponentBlock `river:"clustering,block,optional"` + Clustering cluster.ComponentBlock `alloy:"clustering,block,optional"` } // DefaultArguments holds default settings for loki.source.kubernetes. @@ -326,6 +326,6 @@ func (c *Component) DebugInfo() interface{} { // DebugInfo stores debug information for loki.source.podlogs. type DebugInfo struct { - DiscoveredPodLogs []DiscoveredPodLogs `river:"pod_logs,block"` - Targets []kubernetes.DebugInfoTarget `river:"target,block,optional"` + DiscoveredPodLogs []DiscoveredPodLogs `alloy:"pod_logs,block"` + Targets []kubernetes.DebugInfoTarget `alloy:"target,block,optional"` } diff --git a/internal/component/loki/source/podlogs/reconciler.go b/internal/component/loki/source/podlogs/reconciler.go index 0ece93eec1..7f79807b6f 100644 --- a/internal/component/loki/source/podlogs/reconciler.go +++ b/internal/component/loki/source/podlogs/reconciler.go @@ -361,24 +361,24 @@ func podReady(pod *corev1.Pod) model.LabelValue { } type DiscoveredPodLogs struct { - Namespace string `river:"namespace,attr"` - Name string `river:"name,attr"` - LastReconcile time.Time `river:"last_reconcile,attr,optional"` - ReconcileError string `river:"reconcile_error,attr,optional"` + Namespace string `alloy:"namespace,attr"` + Name string `alloy:"name,attr"` + LastReconcile time.Time `alloy:"last_reconcile,attr,optional"` + ReconcileError string `alloy:"reconcile_error,attr,optional"` - Pods []DiscoveredPod `river:"pod,block"` + Pods []DiscoveredPod `alloy:"pod,block"` } type DiscoveredPod struct { - Namespace string `river:"namespace,attr"` - Name string `river:"name,attr"` - ReconcileError string `river:"reconcile_error,attr,optional"` + Namespace string `alloy:"namespace,attr"` + Name string `alloy:"name,attr"` + ReconcileError string `alloy:"reconcile_error,attr,optional"` - Containers []DiscoveredContainer `river:"container,block"` + Containers []DiscoveredContainer `alloy:"container,block"` } type DiscoveredContainer struct { - DiscoveredLabels map[string]string `river:"discovered_labels,attr"` - Labels map[string]string `river:"labels,attr"` - ReconcileError string `river:"reconcile_error,attr,optional"` + DiscoveredLabels map[string]string `alloy:"discovered_labels,attr"` + Labels map[string]string `alloy:"labels,attr"` + ReconcileError 
string `alloy:"reconcile_error,attr,optional"` } diff --git a/internal/component/loki/source/syslog/syslog.go b/internal/component/loki/source/syslog/syslog.go index f207c68dde..296a75453c 100644 --- a/internal/component/loki/source/syslog/syslog.go +++ b/internal/component/loki/source/syslog/syslog.go @@ -29,9 +29,9 @@ func init() { // Arguments holds values which are used to configure the loki.source.syslog // component. type Arguments struct { - SyslogListeners []ListenerConfig `river:"listener,block"` - ForwardTo []loki.LogsReceiver `river:"forward_to,attr"` - RelabelRules flow_relabel.Rules `river:"relabel_rules,attr,optional"` + SyslogListeners []ListenerConfig `alloy:"listener,block"` + ForwardTo []loki.LogsReceiver `alloy:"forward_to,attr"` + RelabelRules flow_relabel.Rules `alloy:"relabel_rules,attr,optional"` } // Component implements the loki.source.syslog component. @@ -146,14 +146,14 @@ func (c *Component) DebugInfo() interface{} { } type readerDebugInfo struct { - ListenersInfo []listenerInfo `river:"listeners_info,attr"` + ListenersInfo []listenerInfo `alloy:"listeners_info,attr"` } type listenerInfo struct { - Type string `river:"type,attr"` - Ready bool `river:"ready,attr"` - ListenAddress string `river:"listen_address,attr"` - Labels string `river:"labels,attr"` + Type string `alloy:"type,attr"` + Ready bool `alloy:"ready,attr"` + ListenAddress string `alloy:"listen_address,attr"` + Labels string `alloy:"labels,attr"` } func listenersChanged(prev, next []ListenerConfig) bool { diff --git a/internal/component/loki/source/syslog/types.go b/internal/component/loki/source/syslog/types.go index 461404f241..afc617ac32 100644 --- a/internal/component/loki/source/syslog/types.go +++ b/internal/component/loki/source/syslog/types.go @@ -12,15 +12,15 @@ import ( // ListenerConfig defines a syslog listener. type ListenerConfig struct { - ListenAddress string `river:"address,attr"` - ListenProtocol string `river:"protocol,attr,optional"` - IdleTimeout time.Duration `river:"idle_timeout,attr,optional"` - LabelStructuredData bool `river:"label_structured_data,attr,optional"` - Labels map[string]string `river:"labels,attr,optional"` - UseIncomingTimestamp bool `river:"use_incoming_timestamp,attr,optional"` - UseRFC5424Message bool `river:"use_rfc5424_message,attr,optional"` - MaxMessageLength int `river:"max_message_length,attr,optional"` - TLSConfig config.TLSConfig `river:"tls_config,block,optional"` + ListenAddress string `alloy:"address,attr"` + ListenProtocol string `alloy:"protocol,attr,optional"` + IdleTimeout time.Duration `alloy:"idle_timeout,attr,optional"` + LabelStructuredData bool `alloy:"label_structured_data,attr,optional"` + Labels map[string]string `alloy:"labels,attr,optional"` + UseIncomingTimestamp bool `alloy:"use_incoming_timestamp,attr,optional"` + UseRFC5424Message bool `alloy:"use_rfc5424_message,attr,optional"` + MaxMessageLength int `alloy:"max_message_length,attr,optional"` + TLSConfig config.TLSConfig `alloy:"tls_config,block,optional"` } // DefaultListenerConfig provides the default arguments for a syslog listener. diff --git a/internal/component/loki/source/windowsevent/arguments.go b/internal/component/loki/source/windowsevent/arguments.go index 8c26ed1e1e..eff4af2889 100644 --- a/internal/component/loki/source/windowsevent/arguments.go +++ b/internal/component/loki/source/windowsevent/arguments.go @@ -13,17 +13,17 @@ import ( // Arguments holds values which are used to configure the loki.source.windowsevent // component. 
type Arguments struct { - Locale int `river:"locale,attr,optional"` - EventLogName string `river:"eventlog_name,attr,optional"` - XPathQuery string `river:"xpath_query,attr,optional"` - BookmarkPath string `river:"bookmark_path,attr,optional"` - PollInterval time.Duration `river:"poll_interval,attr,optional"` - ExcludeEventData bool `river:"exclude_event_data,attr,optional"` - ExcludeUserdata bool `river:"exclude_user_data,attr,optional"` - ExcludeEventMessage bool `river:"exclude_event_message,attr,optional"` - UseIncomingTimestamp bool `river:"use_incoming_timestamp,attr,optional"` - ForwardTo []loki.LogsReceiver `river:"forward_to,attr"` - Labels map[string]string `river:"labels,attr,optional"` + Locale int `alloy:"locale,attr,optional"` + EventLogName string `alloy:"eventlog_name,attr,optional"` + XPathQuery string `alloy:"xpath_query,attr,optional"` + BookmarkPath string `alloy:"bookmark_path,attr,optional"` + PollInterval time.Duration `alloy:"poll_interval,attr,optional"` + ExcludeEventData bool `alloy:"exclude_event_data,attr,optional"` + ExcludeUserdata bool `alloy:"exclude_user_data,attr,optional"` + ExcludeEventMessage bool `alloy:"exclude_event_message,attr,optional"` + UseIncomingTimestamp bool `alloy:"use_incoming_timestamp,attr,optional"` + ForwardTo []loki.LogsReceiver `alloy:"forward_to,attr"` + Labels map[string]string `alloy:"labels,attr,optional"` } func defaultArgs() Arguments { diff --git a/internal/component/loki/write/types.go b/internal/component/loki/write/types.go index 766f521d6b..9c73c88fde 100644 --- a/internal/component/loki/write/types.go +++ b/internal/component/loki/write/types.go @@ -17,19 +17,19 @@ import ( // EndpointOptions describes an individual location to send logs to. type EndpointOptions struct { - Name string `river:"name,attr,optional"` - URL string `river:"url,attr"` - BatchWait time.Duration `river:"batch_wait,attr,optional"` - BatchSize units.Base2Bytes `river:"batch_size,attr,optional"` - RemoteTimeout time.Duration `river:"remote_timeout,attr,optional"` - Headers map[string]string `river:"headers,attr,optional"` - MinBackoff time.Duration `river:"min_backoff_period,attr,optional"` // start backoff at this level - MaxBackoff time.Duration `river:"max_backoff_period,attr,optional"` // increase exponentially to this level - MaxBackoffRetries int `river:"max_backoff_retries,attr,optional"` // give up after this many; zero means infinite retries - TenantID string `river:"tenant_id,attr,optional"` - RetryOnHTTP429 bool `river:"retry_on_http_429,attr,optional"` - HTTPClientConfig *types.HTTPClientConfig `river:",squash"` - QueueConfig QueueConfig `river:"queue_config,block,optional"` + Name string `alloy:"name,attr,optional"` + URL string `alloy:"url,attr"` + BatchWait time.Duration `alloy:"batch_wait,attr,optional"` + BatchSize units.Base2Bytes `alloy:"batch_size,attr,optional"` + RemoteTimeout time.Duration `alloy:"remote_timeout,attr,optional"` + Headers map[string]string `alloy:"headers,attr,optional"` + MinBackoff time.Duration `alloy:"min_backoff_period,attr,optional"` // start backoff at this level + MaxBackoff time.Duration `alloy:"max_backoff_period,attr,optional"` // increase exponentially to this level + MaxBackoffRetries int `alloy:"max_backoff_retries,attr,optional"` // give up after this many; zero means infinite retries + TenantID string `alloy:"tenant_id,attr,optional"` + RetryOnHTTP429 bool `alloy:"retry_on_http_429,attr,optional"` + HTTPClientConfig *types.HTTPClientConfig `alloy:",squash"` + QueueConfig QueueConfig 
`alloy:"queue_config,block,optional"` } // GetDefaultEndpointOptions defines the default settings for sending logs to a @@ -74,8 +74,8 @@ func (r *EndpointOptions) Validate() error { // QueueConfig controls how the queue logs remote write client is configured. Note that this client is only used when the // loki.write component has WAL support enabled. type QueueConfig struct { - Capacity units.Base2Bytes `river:"capacity,attr,optional"` - DrainTimeout time.Duration `river:"drain_timeout,attr,optional"` + Capacity units.Base2Bytes `alloy:"capacity,attr,optional"` + DrainTimeout time.Duration `alloy:"drain_timeout,attr,optional"` } // SetToDefault implements river.Defaulter. diff --git a/internal/component/loki/write/write.go b/internal/component/loki/write/write.go index 1d057bafc8..59b1d8560f 100644 --- a/internal/component/loki/write/write.go +++ b/internal/component/loki/write/write.go @@ -31,20 +31,20 @@ func init() { // Arguments holds values which are used to configure the loki.write component. type Arguments struct { - Endpoints []EndpointOptions `river:"endpoint,block,optional"` - ExternalLabels map[string]string `river:"external_labels,attr,optional"` - MaxStreams int `river:"max_streams,attr,optional"` - WAL WalArguments `river:"wal,block,optional"` + Endpoints []EndpointOptions `alloy:"endpoint,block,optional"` + ExternalLabels map[string]string `alloy:"external_labels,attr,optional"` + MaxStreams int `alloy:"max_streams,attr,optional"` + WAL WalArguments `alloy:"wal,block,optional"` } // WalArguments holds the settings for configuring the Write-Ahead Log (WAL) used // by the underlying remote write client. type WalArguments struct { - Enabled bool `river:"enabled,attr,optional"` - MaxSegmentAge time.Duration `river:"max_segment_age,attr,optional"` - MinReadFrequency time.Duration `river:"min_read_frequency,attr,optional"` - MaxReadFrequency time.Duration `river:"max_read_frequency,attr,optional"` - DrainTimeout time.Duration `river:"drain_timeout,attr,optional"` + Enabled bool `alloy:"enabled,attr,optional"` + MaxSegmentAge time.Duration `alloy:"max_segment_age,attr,optional"` + MinReadFrequency time.Duration `alloy:"min_read_frequency,attr,optional"` + MaxReadFrequency time.Duration `alloy:"max_read_frequency,attr,optional"` + DrainTimeout time.Duration `alloy:"drain_timeout,attr,optional"` } func (wa *WalArguments) Validate() error { @@ -69,7 +69,7 @@ func (wa *WalArguments) SetToDefault() { // Exports holds the receiver that is used to send log entries to the // loki.write component. 
type Exports struct { - Receiver loki.LogsReceiver `river:"receiver,attr"` + Receiver loki.LogsReceiver `alloy:"receiver,attr"` } var ( diff --git a/internal/component/mimir/rules/kubernetes/debug.go b/internal/component/mimir/rules/kubernetes/debug.go index 59b5103858..00828739c7 100644 --- a/internal/component/mimir/rules/kubernetes/debug.go +++ b/internal/component/mimir/rules/kubernetes/debug.go @@ -3,21 +3,21 @@ package rules import "fmt" type DebugInfo struct { - Error string `river:"error,attr,optional"` - PrometheusRules []DebugK8sPrometheusRule `river:"prometheus_rule,block,optional"` - MimirRuleNamespaces []DebugMimirNamespace `river:"mimir_rule_namespace,block,optional"` + Error string `alloy:"error,attr,optional"` + PrometheusRules []DebugK8sPrometheusRule `alloy:"prometheus_rule,block,optional"` + MimirRuleNamespaces []DebugMimirNamespace `alloy:"mimir_rule_namespace,block,optional"` } type DebugK8sPrometheusRule struct { - Namespace string `river:"namespace,attr"` - Name string `river:"name,attr"` - UID string `river:"uid,attr"` - NumRuleGroups int `river:"num_rule_groups,attr"` + Namespace string `alloy:"namespace,attr"` + Name string `alloy:"name,attr"` + UID string `alloy:"uid,attr"` + NumRuleGroups int `alloy:"num_rule_groups,attr"` } type DebugMimirNamespace struct { - Name string `river:"name,attr"` - NumRuleGroups int `river:"num_rule_groups,attr"` + Name string `alloy:"name,attr"` + NumRuleGroups int `alloy:"num_rule_groups,attr"` } func (c *Component) DebugInfo() interface{} { diff --git a/internal/component/mimir/rules/kubernetes/types.go b/internal/component/mimir/rules/kubernetes/types.go index 564d6b4f0e..701751a1db 100644 --- a/internal/component/mimir/rules/kubernetes/types.go +++ b/internal/component/mimir/rules/kubernetes/types.go @@ -9,16 +9,16 @@ import ( ) type Arguments struct { - Address string `river:"address,attr"` - TenantID string `river:"tenant_id,attr,optional"` - UseLegacyRoutes bool `river:"use_legacy_routes,attr,optional"` - PrometheusHTTPPrefix string `river:"prometheus_http_prefix,attr,optional"` - HTTPClientConfig config.HTTPClientConfig `river:",squash"` - SyncInterval time.Duration `river:"sync_interval,attr,optional"` - MimirNameSpacePrefix string `river:"mimir_namespace_prefix,attr,optional"` - - RuleSelector kubernetes.LabelSelector `river:"rule_selector,block,optional"` - RuleNamespaceSelector kubernetes.LabelSelector `river:"rule_namespace_selector,block,optional"` + Address string `alloy:"address,attr"` + TenantID string `alloy:"tenant_id,attr,optional"` + UseLegacyRoutes bool `alloy:"use_legacy_routes,attr,optional"` + PrometheusHTTPPrefix string `alloy:"prometheus_http_prefix,attr,optional"` + HTTPClientConfig config.HTTPClientConfig `alloy:",squash"` + SyncInterval time.Duration `alloy:"sync_interval,attr,optional"` + MimirNameSpacePrefix string `alloy:"mimir_namespace_prefix,attr,optional"` + + RuleSelector kubernetes.LabelSelector `alloy:"rule_selector,block,optional"` + RuleNamespaceSelector kubernetes.LabelSelector `alloy:"rule_namespace_selector,block,optional"` } var DefaultArguments = Arguments{ diff --git a/internal/component/otelcol/auth/auth.go b/internal/component/otelcol/auth/auth.go index e608546d11..f1211c2e6f 100644 --- a/internal/component/otelcol/auth/auth.go +++ b/internal/component/otelcol/auth/auth.go @@ -45,7 +45,7 @@ type Arguments interface { type Exports struct { // Handler is the managed component. Handler is updated any time the // extension is updated. 
- Handler Handler `river:"handler,attr"` + Handler Handler `alloy:"handler,attr"` } // Handler combines an extension with its ID. diff --git a/internal/component/otelcol/auth/basic/basic.go b/internal/component/otelcol/auth/basic/basic.go index 6ccec98a5d..8432724832 100644 --- a/internal/component/otelcol/auth/basic/basic.go +++ b/internal/component/otelcol/auth/basic/basic.go @@ -30,8 +30,8 @@ func init() { type Arguments struct { // TODO(rfratto): should we support htpasswd? - Username string `river:"username,attr"` - Password alloytypes.Secret `river:"password,attr"` + Username string `alloy:"username,attr"` + Password alloytypes.Secret `alloy:"password,attr"` } var _ auth.Arguments = Arguments{} diff --git a/internal/component/otelcol/auth/bearer/bearer.go b/internal/component/otelcol/auth/bearer/bearer.go index 7e31dc05ed..43813c48b5 100644 --- a/internal/component/otelcol/auth/bearer/bearer.go +++ b/internal/component/otelcol/auth/bearer/bearer.go @@ -29,8 +29,8 @@ func init() { // Arguments configures the otelcol.auth.bearer component. type Arguments struct { // Do not include the "filename" attribute - users should use local.file instead. - Scheme string `river:"scheme,attr,optional"` - Token alloytypes.Secret `river:"token,attr"` + Scheme string `alloy:"scheme,attr,optional"` + Token alloytypes.Secret `alloy:"token,attr"` } var _ auth.Arguments = Arguments{} diff --git a/internal/component/otelcol/auth/headers/headers.go b/internal/component/otelcol/auth/headers/headers.go index 4eb48e5aa1..f79f8506ca 100644 --- a/internal/component/otelcol/auth/headers/headers.go +++ b/internal/component/otelcol/auth/headers/headers.go @@ -32,7 +32,7 @@ func init() { // Arguments configures the otelcol.auth.headers component. type Arguments struct { - Headers []Header `river:"header,block,optional"` + Headers []Header `alloy:"header,block,optional"` } var _ auth.Arguments = Arguments{} @@ -132,10 +132,10 @@ func (a *Action) UnmarshalText(text []byte) error { // Header is an individual Header to send along with requests. type Header struct { - Key string `river:"key,attr"` - Value *alloytypes.OptionalSecret `river:"value,attr,optional"` - FromContext *string `river:"from_context,attr,optional"` - Action Action `river:"action,attr,optional"` + Key string `alloy:"key,attr"` + Value *alloytypes.OptionalSecret `alloy:"value,attr,optional"` + FromContext *string `alloy:"from_context,attr,optional"` + Action Action `alloy:"action,attr,optional"` } var _ syntax.Defaulter = &Header{} diff --git a/internal/component/otelcol/auth/oauth2/oauth2.go b/internal/component/otelcol/auth/oauth2/oauth2.go index b67deaed89..438096c4a4 100644 --- a/internal/component/otelcol/auth/oauth2/oauth2.go +++ b/internal/component/otelcol/auth/oauth2/oauth2.go @@ -31,15 +31,15 @@ func init() { // Arguments configures the otelcol.auth.oauth2 component. 
type Arguments struct { - ClientID string `river:"client_id,attr,optional"` - ClientIDFile string `river:"client_id_file,attr,optional"` - ClientSecret alloytypes.Secret `river:"client_secret,attr,optional"` - ClientSecretFile string `river:"client_secret_file,attr,optional"` - TokenURL string `river:"token_url,attr"` - EndpointParams url.Values `river:"endpoint_params,attr,optional"` - Scopes []string `river:"scopes,attr,optional"` - TLSSetting otelcol.TLSClientArguments `river:"tls,block,optional"` - Timeout time.Duration `river:"timeout,attr,optional"` + ClientID string `alloy:"client_id,attr,optional"` + ClientIDFile string `alloy:"client_id_file,attr,optional"` + ClientSecret alloytypes.Secret `alloy:"client_secret,attr,optional"` + ClientSecretFile string `alloy:"client_secret_file,attr,optional"` + TokenURL string `alloy:"token_url,attr"` + EndpointParams url.Values `alloy:"endpoint_params,attr,optional"` + Scopes []string `alloy:"scopes,attr,optional"` + TLSSetting otelcol.TLSClientArguments `alloy:"tls,block,optional"` + Timeout time.Duration `alloy:"timeout,attr,optional"` } var _ auth.Arguments = Arguments{} diff --git a/internal/component/otelcol/auth/sigv4/sigv4.go b/internal/component/otelcol/auth/sigv4/sigv4.go index 431b059aa6..e40f08352a 100644 --- a/internal/component/otelcol/auth/sigv4/sigv4.go +++ b/internal/component/otelcol/auth/sigv4/sigv4.go @@ -25,9 +25,9 @@ func init() { // Arguments configures the otelcol.auth.sigv4 component. type Arguments struct { - Region string `river:"region,attr,optional"` - Service string `river:"service,attr,optional"` - AssumeRole AssumeRole `river:"assume_role,block,optional"` + Region string `alloy:"region,attr,optional"` + Service string `alloy:"service,attr,optional"` + AssumeRole AssumeRole `alloy:"assume_role,block,optional"` } var ( @@ -67,9 +67,9 @@ func (args Arguments) Exporters() map[otelcomponent.DataType]map[otelcomponent.I // AssumeRole replicates sigv4authextension.Config.AssumeRole type AssumeRole struct { - ARN string `river:"arn,attr,optional"` - SessionName string `river:"session_name,attr,optional"` - STSRegion string `river:"sts_region,attr,optional"` + ARN string `alloy:"arn,attr,optional"` + SessionName string `alloy:"session_name,attr,optional"` + STSRegion string `alloy:"sts_region,attr,optional"` } // Convert converts args into the upstream type. diff --git a/internal/component/otelcol/config_attraction.go b/internal/component/otelcol/config_attraction.go index 908acb523c..b009397112 100644 --- a/internal/component/otelcol/config_attraction.go +++ b/internal/component/otelcol/config_attraction.go @@ -18,11 +18,11 @@ func (actions AttrActionKeyValueSlice) Convert() []interface{} { type AttrActionKeyValue struct { // Key specifies the attribute to act upon. // This is a required field. - Key string `river:"key,attr"` + Key string `alloy:"key,attr"` // Value specifies the value to populate for the key. // The type of the value is inferred from the configuration. - Value interface{} `river:"value,attr,optional"` + Value interface{} `alloy:"value,attr,optional"` // A regex pattern must be specified for the action EXTRACT. // It uses the attribute specified by `key' to extract values from @@ -32,22 +32,22 @@ type AttrActionKeyValue struct { // Note: All subexpressions must have a name. // Note: The value type of the source key must be a string. If it isn't, // no extraction will occur. 
- RegexPattern string `river:"pattern,attr,optional"` + RegexPattern string `alloy:"pattern,attr,optional"` // FromAttribute specifies the attribute to use to populate // the value. If the attribute doesn't exist, no action is performed. - FromAttribute string `river:"from_attribute,attr,optional"` + FromAttribute string `alloy:"from_attribute,attr,optional"` // FromContext specifies the context value to use to populate // the value. The values would be searched in client.Info.Metadata. // If the key doesn't exist, no action is performed. // If the key has multiple values the values will be joined with `;` separator. - FromContext string `river:"from_context,attr,optional"` + FromContext string `alloy:"from_context,attr,optional"` // ConvertedType specifies the target type of an attribute to be converted // If the key doesn't exist, no action is performed. // If the value cannot be converted, the original value will be left as-is - ConvertedType string `river:"converted_type,attr,optional"` + ConvertedType string `alloy:"converted_type,attr,optional"` // Action specifies the type of action to perform. // The set of values are {INSERT, UPDATE, UPSERT, DELETE, HASH}. @@ -72,7 +72,7 @@ type AttrActionKeyValue struct { // already exists, it will be overridden. // CONVERT - converts the type of an existing attribute, if convertable // This is a required field. - Action string `river:"action,attr"` + Action string `alloy:"action,attr"` } // Convert converts args into the upstream type. diff --git a/internal/component/otelcol/config_debug_metrics.go b/internal/component/otelcol/config_debug_metrics.go index c0a47a9d08..977dc6177e 100644 --- a/internal/component/otelcol/config_debug_metrics.go +++ b/internal/component/otelcol/config_debug_metrics.go @@ -2,7 +2,7 @@ package otelcol // DebugMetricsArguments configures internal metrics of the components type DebugMetricsArguments struct { - DisableHighCardinalityMetrics bool `river:"disable_high_cardinality_metrics,attr,optional"` + DisableHighCardinalityMetrics bool `alloy:"disable_high_cardinality_metrics,attr,optional"` } // SetToDefault implements river.Defaulter. diff --git a/internal/component/otelcol/config_filter.go b/internal/component/otelcol/config_filter.go index 826f876971..ae2b7fa2e9 100644 --- a/internal/component/otelcol/config_filter.go +++ b/internal/component/otelcol/config_filter.go @@ -15,15 +15,15 @@ import ( // If both 'include' and 'exclude' are specified, the 'include' properties are checked // before the 'exclude' properties. type MatchConfig struct { - Include *MatchProperties `river:"include,block,optional"` - Exclude *MatchProperties `river:"exclude,block,optional"` + Include *MatchProperties `alloy:"include,block,optional"` + Exclude *MatchProperties `alloy:"exclude,block,optional"` } // MatchProperties specifies the set of properties in a spans/log/metric to match // against and if the input data should be included or excluded from the processor. type MatchProperties struct { - MatchType string `river:"match_type,attr"` - RegexpConfig *RegexpConfig `river:"regexp,block,optional"` + MatchType string `alloy:"match_type,attr"` + RegexpConfig *RegexpConfig `alloy:"regexp,block,optional"` // Note: For spans, one of Services, SpanNames, Attributes, Resources, or Libraries must be specified with a // non-empty value for a valid configuration. @@ -35,41 +35,41 @@ type MatchProperties struct { // Services specify the list of items to match service name against. 
// A match occurs if the span's service name matches at least one item in this list. - Services []string `river:"services,attr,optional"` + Services []string `alloy:"services,attr,optional"` // SpanNames specify the list of items to match span name against. // A match occurs if the span name matches at least one item in this list. - SpanNames []string `river:"span_names,attr,optional"` + SpanNames []string `alloy:"span_names,attr,optional"` // LogBodies is a list of strings that the LogRecord's body field must match against. - LogBodies []string `river:"log_bodies,attr,optional"` + LogBodies []string `alloy:"log_bodies,attr,optional"` // LogSeverityTexts is a list of strings that the LogRecord's severity text field must match against. - LogSeverityTexts []string `river:"log_severity_texts,attr,optional"` + LogSeverityTexts []string `alloy:"log_severity_texts,attr,optional"` // LogSeverity defines how to match against a log record's SeverityNumber, if defined. - LogSeverity *LogSeverityNumberMatchProperties `river:"log_severity,block,optional"` + LogSeverity *LogSeverityNumberMatchProperties `alloy:"log_severity,block,optional"` // MetricNames is a list of strings to match metric name against. // A match occurs if metric name matches at least one item in the list. - MetricNames []string `river:"metric_names,attr,optional"` + MetricNames []string `alloy:"metric_names,attr,optional"` // Attributes specifies the list of attributes to match against. // All of these attributes must match exactly for a match to occur. // Only match_type=strict is allowed if "attributes" are specified. - Attributes []Attribute `river:"attribute,block,optional"` + Attributes []Attribute `alloy:"attribute,block,optional"` // Resources specify the list of items to match the resources against. // A match occurs if the data's resources match at least one item in this list. - Resources []Attribute `river:"resource,block,optional"` + Resources []Attribute `alloy:"resource,block,optional"` // Libraries specify the list of items to match the implementation library against. // A match occurs if the span's implementation library matches at least one item in this list. - Libraries []InstrumentationLibrary `river:"library,block,optional"` + Libraries []InstrumentationLibrary `alloy:"library,block,optional"` // SpanKinds specify the list of items to match the span kind against. // A match occurs if the span's span kind matches at least one item in this list. - SpanKinds []string `river:"span_kinds,attr,optional"` + SpanKinds []string `alloy:"span_kinds,attr,optional"` } // Convert converts args into the upstream type. @@ -157,10 +157,10 @@ func convertInstrumentationLibrariesSlice(libs []InstrumentationLibrary) []inter type RegexpConfig struct { // CacheEnabled determines whether match results are LRU cached to make subsequent matches faster. // Cache size is unlimited unless CacheMaxNumEntries is also specified. - CacheEnabled bool `river:"cache_enabled,attr,optional"` + CacheEnabled bool `alloy:"cache_enabled,attr,optional"` // CacheMaxNumEntries is the max number of entries of the LRU cache that stores match results. // CacheMaxNumEntries is ignored if CacheEnabled is false. 
- CacheMaxNumEntries int `river:"cache_max_num_entries,attr,optional"` + CacheMaxNumEntries int `alloy:"cache_max_num_entries,attr,optional"` } func (args RegexpConfig) convert() map[string]interface{} { @@ -173,11 +173,11 @@ func (args RegexpConfig) convert() map[string]interface{} { // Attribute specifies the attribute key and optional value to match against. type Attribute struct { // Key specifies the attribute key. - Key string `river:"key,attr"` + Key string `alloy:"key,attr"` // Values specifies the value to match against. // If it is not set, any value will match. - Value interface{} `river:"value,attr,optional"` + Value interface{} `alloy:"value,attr,optional"` } func (args Attribute) convert() map[string]interface{} { @@ -189,7 +189,7 @@ func (args Attribute) convert() map[string]interface{} { // InstrumentationLibrary specifies the instrumentation library and optional version to match against. type InstrumentationLibrary struct { - Name string `river:"name,attr"` + Name string `alloy:"name,attr"` // version match // expected actual match // nil yes @@ -198,7 +198,7 @@ type InstrumentationLibrary struct { // 1 no // 1 no // 1 1 yes - Version *string `river:"version,attr,optional"` + Version *string `alloy:"version,attr,optional"` } func (args InstrumentationLibrary) convert() map[string]interface{} { @@ -216,11 +216,11 @@ func (args InstrumentationLibrary) convert() map[string]interface{} { type LogSeverityNumberMatchProperties struct { // Min is the lowest severity that may be matched. // e.g. if this is plog.SeverityNumberInfo, INFO, WARN, ERROR, and FATAL logs will match. - Min SeverityLevel `river:"min,attr"` + Min SeverityLevel `alloy:"min,attr"` // MatchUndefined controls whether logs with "undefined" severity matches. // If this is true, entries with undefined severity will match. - MatchUndefined bool `river:"match_undefined,attr"` + MatchUndefined bool `alloy:"match_undefined,attr"` } func (args LogSeverityNumberMatchProperties) convert() (map[string]interface{}, error) { diff --git a/internal/component/otelcol/config_grpc.go b/internal/component/otelcol/config_grpc.go index da6cca47ed..8d45332554 100644 --- a/internal/component/otelcol/config_grpc.go +++ b/internal/component/otelcol/config_grpc.go @@ -18,17 +18,17 @@ const DefaultBalancerName = "pick_first" // GRPCServerArguments holds shared gRPC settings for components which launch // gRPC servers. 
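// As a hedged sketch, a component embedding these settings would typically
// expose them as a nested block; the block name and values below are
// invented for illustration:
//
//   grpc {
//     endpoint  = "127.0.0.1:4317"
//     transport = "tcp"
//   }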
type GRPCServerArguments struct { - Endpoint string `river:"endpoint,attr,optional"` - Transport string `river:"transport,attr,optional"` + Endpoint string `alloy:"endpoint,attr,optional"` + Transport string `alloy:"transport,attr,optional"` - TLS *TLSServerArguments `river:"tls,block,optional"` + TLS *TLSServerArguments `alloy:"tls,block,optional"` - MaxRecvMsgSize units.Base2Bytes `river:"max_recv_msg_size,attr,optional"` - MaxConcurrentStreams uint32 `river:"max_concurrent_streams,attr,optional"` - ReadBufferSize units.Base2Bytes `river:"read_buffer_size,attr,optional"` - WriteBufferSize units.Base2Bytes `river:"write_buffer_size,attr,optional"` + MaxRecvMsgSize units.Base2Bytes `alloy:"max_recv_msg_size,attr,optional"` + MaxConcurrentStreams uint32 `alloy:"max_concurrent_streams,attr,optional"` + ReadBufferSize units.Base2Bytes `alloy:"read_buffer_size,attr,optional"` + WriteBufferSize units.Base2Bytes `alloy:"write_buffer_size,attr,optional"` - Keepalive *KeepaliveServerArguments `river:"keepalive,block,optional"` + Keepalive *KeepaliveServerArguments `alloy:"keepalive,block,optional"` // TODO(rfratto): auth // @@ -38,7 +38,7 @@ type GRPCServerArguments struct { // We will need to generally figure out how we want to provide common // authentication extensions to all of our components. - IncludeMetadata bool `river:"include_metadata,attr,optional"` + IncludeMetadata bool `alloy:"include_metadata,attr,optional"` } // Convert converts args into the upstream type. @@ -69,8 +69,8 @@ func (args *GRPCServerArguments) Convert() *otelconfiggrpc.GRPCServerSettings { // KeepaliveServerArguments holds shared keepalive settings for components // which launch servers. type KeepaliveServerArguments struct { - ServerParameters *KeepaliveServerParamaters `river:"server_parameters,block,optional"` - EnforcementPolicy *KeepaliveEnforcementPolicy `river:"enforcement_policy,block,optional"` + ServerParameters *KeepaliveServerParamaters `alloy:"server_parameters,block,optional"` + EnforcementPolicy *KeepaliveEnforcementPolicy `alloy:"enforcement_policy,block,optional"` } // Convert converts args into the upstream type. @@ -88,11 +88,11 @@ func (args *KeepaliveServerArguments) Convert() *otelconfiggrpc.KeepaliveServerC // KeepaliveServerParamaters holds shared keepalive settings for components // which launch servers. type KeepaliveServerParamaters struct { - MaxConnectionIdle time.Duration `river:"max_connection_idle,attr,optional"` - MaxConnectionAge time.Duration `river:"max_connection_age,attr,optional"` - MaxConnectionAgeGrace time.Duration `river:"max_connection_age_grace,attr,optional"` - Time time.Duration `river:"time,attr,optional"` - Timeout time.Duration `river:"timeout,attr,optional"` + MaxConnectionIdle time.Duration `alloy:"max_connection_idle,attr,optional"` + MaxConnectionAge time.Duration `alloy:"max_connection_age,attr,optional"` + MaxConnectionAgeGrace time.Duration `alloy:"max_connection_age_grace,attr,optional"` + Time time.Duration `alloy:"time,attr,optional"` + Timeout time.Duration `alloy:"timeout,attr,optional"` } // Convert converts args into the upstream type. @@ -113,8 +113,8 @@ func (args *KeepaliveServerParamaters) Convert() *otelconfiggrpc.KeepaliveServer // KeepaliveEnforcementPolicy holds shared keepalive settings for components // which launch servers. 
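// For example (values invented), this appears as a block nested under the
// keepalive block defined above:
//
//   keepalive {
//     enforcement_policy {
//       min_time              = "10s"
//       permit_without_stream = true
//     }
//   }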
type KeepaliveEnforcementPolicy struct { - MinTime time.Duration `river:"min_time,attr,optional"` - PermitWithoutStream bool `river:"permit_without_stream,attr,optional"` + MinTime time.Duration `alloy:"min_time,attr,optional"` + PermitWithoutStream bool `alloy:"permit_without_stream,attr,optional"` } // Convert converts args into the upstream type. @@ -134,23 +134,23 @@ func (args *KeepaliveEnforcementPolicy) Convert() *otelconfiggrpc.KeepaliveEnfor // NOTE: When changing this structure, note that similar structures such as // loadbalancing.GRPCClientArguments may also need to be changed. type GRPCClientArguments struct { - Endpoint string `river:"endpoint,attr"` + Endpoint string `alloy:"endpoint,attr"` - Compression CompressionType `river:"compression,attr,optional"` + Compression CompressionType `alloy:"compression,attr,optional"` - TLS TLSClientArguments `river:"tls,block,optional"` - Keepalive *KeepaliveClientArguments `river:"keepalive,block,optional"` + TLS TLSClientArguments `alloy:"tls,block,optional"` + Keepalive *KeepaliveClientArguments `alloy:"keepalive,block,optional"` - ReadBufferSize units.Base2Bytes `river:"read_buffer_size,attr,optional"` - WriteBufferSize units.Base2Bytes `river:"write_buffer_size,attr,optional"` - WaitForReady bool `river:"wait_for_ready,attr,optional"` - Headers map[string]string `river:"headers,attr,optional"` - BalancerName string `river:"balancer_name,attr,optional"` - Authority string `river:"authority,attr,optional"` + ReadBufferSize units.Base2Bytes `alloy:"read_buffer_size,attr,optional"` + WriteBufferSize units.Base2Bytes `alloy:"write_buffer_size,attr,optional"` + WaitForReady bool `alloy:"wait_for_ready,attr,optional"` + Headers map[string]string `alloy:"headers,attr,optional"` + BalancerName string `alloy:"balancer_name,attr,optional"` + Authority string `alloy:"authority,attr,optional"` // Auth is a binding to an otelcol.auth.* component extension which handles // authentication. - Auth *auth.Handler `river:"auth,attr,optional"` + Auth *auth.Handler `alloy:"auth,attr,optional"` } // Convert converts args into the upstream type. @@ -207,9 +207,9 @@ func (args *GRPCClientArguments) Extensions() map[otelcomponent.ID]otelextension // KeepaliveClientArguments holds shared keepalive settings for components // which launch clients. type KeepaliveClientArguments struct { - PingWait time.Duration `river:"ping_wait,attr,optional"` - PingResponseTimeout time.Duration `river:"ping_response_timeout,attr,optional"` - PingWithoutStream bool `river:"ping_without_stream,attr,optional"` + PingWait time.Duration `alloy:"ping_wait,attr,optional"` + PingResponseTimeout time.Duration `alloy:"ping_response_timeout,attr,optional"` + PingWithoutStream bool `alloy:"ping_without_stream,attr,optional"` } // Convert converts args into the upstream type. diff --git a/internal/component/otelcol/config_http.go b/internal/component/otelcol/config_http.go index f94da5cd84..934a2a488c 100644 --- a/internal/component/otelcol/config_http.go +++ b/internal/component/otelcol/config_http.go @@ -15,11 +15,11 @@ import ( // HTTPServerArguments holds shared settings for components which launch HTTP // servers. 
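// A hypothetical embedding, with invented values, might look like:
//
//   http {
//     endpoint         = "127.0.0.1:4318"
//     include_metadata = true
//   }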
type HTTPServerArguments struct { - Endpoint string `river:"endpoint,attr,optional"` + Endpoint string `alloy:"endpoint,attr,optional"` - TLS *TLSServerArguments `river:"tls,block,optional"` + TLS *TLSServerArguments `alloy:"tls,block,optional"` - CORS *CORSArguments `river:"cors,block,optional"` + CORS *CORSArguments `alloy:"cors,block,optional"` // TODO(rfratto): auth // @@ -29,8 +29,8 @@ type HTTPServerArguments struct { // We will need to generally figure out how we want to provide common // authentication extensions to all of our components. - MaxRequestBodySize units.Base2Bytes `river:"max_request_body_size,attr,optional"` - IncludeMetadata bool `river:"include_metadata,attr,optional"` + MaxRequestBodySize units.Base2Bytes `alloy:"max_request_body_size,attr,optional"` + IncludeMetadata bool `alloy:"include_metadata,attr,optional"` } // Convert converts args into the upstream type. @@ -51,10 +51,10 @@ func (args *HTTPServerArguments) Convert() *otelconfighttp.HTTPServerSettings { // CORSArguments holds shared CORS settings for components which launch HTTP // servers. type CORSArguments struct { - AllowedOrigins []string `river:"allowed_origins,attr,optional"` - AllowedHeaders []string `river:"allowed_headers,attr,optional"` + AllowedOrigins []string `alloy:"allowed_origins,attr,optional"` + AllowedHeaders []string `alloy:"allowed_headers,attr,optional"` - MaxAge int `river:"max_age,attr,optional"` + MaxAge int `alloy:"max_age,attr,optional"` } // Convert converts args into the upstream type. @@ -74,26 +74,26 @@ func (args *CORSArguments) Convert() *otelconfighttp.CORSSettings { // HTTPClientArguments holds shared HTTP settings for components which launch // HTTP clients. type HTTPClientArguments struct { - Endpoint string `river:"endpoint,attr"` + Endpoint string `alloy:"endpoint,attr"` - Compression CompressionType `river:"compression,attr,optional"` + Compression CompressionType `alloy:"compression,attr,optional"` - TLS TLSClientArguments `river:"tls,block,optional"` + TLS TLSClientArguments `alloy:"tls,block,optional"` - ReadBufferSize units.Base2Bytes `river:"read_buffer_size,attr,optional"` - WriteBufferSize units.Base2Bytes `river:"write_buffer_size,attr,optional"` - Timeout time.Duration `river:"timeout,attr,optional"` - Headers map[string]string `river:"headers,attr,optional"` + ReadBufferSize units.Base2Bytes `alloy:"read_buffer_size,attr,optional"` + WriteBufferSize units.Base2Bytes `alloy:"write_buffer_size,attr,optional"` + Timeout time.Duration `alloy:"timeout,attr,optional"` + Headers map[string]string `alloy:"headers,attr,optional"` // CustomRoundTripper func(next http.RoundTripper) (http.RoundTripper, error) TODO (@tpaschalis) - MaxIdleConns *int `river:"max_idle_conns,attr,optional"` - MaxIdleConnsPerHost *int `river:"max_idle_conns_per_host,attr,optional"` - MaxConnsPerHost *int `river:"max_conns_per_host,attr,optional"` - IdleConnTimeout *time.Duration `river:"idle_conn_timeout,attr,optional"` - DisableKeepAlives bool `river:"disable_keep_alives,attr,optional"` + MaxIdleConns *int `alloy:"max_idle_conns,attr,optional"` + MaxIdleConnsPerHost *int `alloy:"max_idle_conns_per_host,attr,optional"` + MaxConnsPerHost *int `alloy:"max_conns_per_host,attr,optional"` + IdleConnTimeout *time.Duration `alloy:"idle_conn_timeout,attr,optional"` + DisableKeepAlives bool `alloy:"disable_keep_alives,attr,optional"` // Auth is a binding to an otelcol.auth.* component extension which handles // authentication. 
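// For example (component label invented), the binding is made by
// referencing another auth component's exported handler:
//
//   auth = otelcol.auth.basic.creds.handler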
- Auth *auth.Handler `river:"auth,attr,optional"` + Auth *auth.Handler `alloy:"auth,attr,optional"` } // Convert converts args into the upstream type. diff --git a/internal/component/otelcol/config_k8s.go b/internal/component/otelcol/config_k8s.go index b20407fd41..546d851ef1 100644 --- a/internal/component/otelcol/config_k8s.go +++ b/internal/component/otelcol/config_k8s.go @@ -15,10 +15,10 @@ type KubernetesAPIConfig struct { // (for no auth), `serviceAccount` (to use the standard service account // token provided to the agent pod), or `kubeConfig` to use credentials // from `~/.kube/config`. - AuthType string `river:"auth_type,attr,optional"` + AuthType string `alloy:"auth_type,attr,optional"` // When using auth_type `kubeConfig`, override the current context. - Context string `river:"context,attr,optional"` + Context string `alloy:"context,attr,optional"` } // Validate returns an error if the config is invalid. diff --git a/internal/component/otelcol/config_queue.go b/internal/component/otelcol/config_queue.go index a15a7983b8..e3ed5e35ea 100644 --- a/internal/component/otelcol/config_queue.go +++ b/internal/component/otelcol/config_queue.go @@ -9,9 +9,9 @@ import ( // QueueArguments holds shared settings for components which can queue // requests. type QueueArguments struct { - Enabled bool `river:"enabled,attr,optional"` - NumConsumers int `river:"num_consumers,attr,optional"` - QueueSize int `river:"queue_size,attr,optional"` + Enabled bool `alloy:"enabled,attr,optional"` + NumConsumers int `alloy:"num_consumers,attr,optional"` + QueueSize int `alloy:"queue_size,attr,optional"` // TODO(rfratto): queues can send to persistent storage through an extension. } diff --git a/internal/component/otelcol/config_retry.go b/internal/component/otelcol/config_retry.go index df2fb07720..284ddd58a8 100644 --- a/internal/component/otelcol/config_retry.go +++ b/internal/component/otelcol/config_retry.go @@ -11,12 +11,12 @@ import ( // RetryArguments holds shared settings for components which can retry // requests. type RetryArguments struct { - Enabled bool `river:"enabled,attr,optional"` - InitialInterval time.Duration `river:"initial_interval,attr,optional"` - RandomizationFactor float64 `river:"randomization_factor,attr,optional"` - Multiplier float64 `river:"multiplier,attr,optional"` - MaxInterval time.Duration `river:"max_interval,attr,optional"` - MaxElapsedTime time.Duration `river:"max_elapsed_time,attr,optional"` + Enabled bool `alloy:"enabled,attr,optional"` + InitialInterval time.Duration `alloy:"initial_interval,attr,optional"` + RandomizationFactor float64 `alloy:"randomization_factor,attr,optional"` + Multiplier float64 `alloy:"multiplier,attr,optional"` + MaxInterval time.Duration `alloy:"max_interval,attr,optional"` + MaxElapsedTime time.Duration `alloy:"max_elapsed_time,attr,optional"` } var ( diff --git a/internal/component/otelcol/config_scrape.go b/internal/component/otelcol/config_scrape.go index 60f30ae946..24b18d2347 100644 --- a/internal/component/otelcol/config_scrape.go +++ b/internal/component/otelcol/config_scrape.go @@ -16,9 +16,9 @@ var ( // ScraperControllerArguments defines common settings for a scraper controller // configuration. 
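// Durations are written as strings in the configuration; for example
// (values invented):
//
//   collection_interval = "1m"
//   initial_delay       = "10s"
//   timeout             = "30s"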
type ScraperControllerArguments struct { - CollectionInterval time.Duration `river:"collection_interval,attr,optional"` - InitialDelay time.Duration `river:"initial_delay,attr,optional"` - Timeout time.Duration `river:"timeout,attr,optional"` + CollectionInterval time.Duration `alloy:"collection_interval,attr,optional"` + InitialDelay time.Duration `alloy:"initial_delay,attr,optional"` + Timeout time.Duration `alloy:"timeout,attr,optional"` } // DefaultScraperControllerArguments holds default settings for ScraperControllerArguments. diff --git a/internal/component/otelcol/config_tls.go b/internal/component/otelcol/config_tls.go index 04a3dbd653..b3f6091cc8 100644 --- a/internal/component/otelcol/config_tls.go +++ b/internal/component/otelcol/config_tls.go @@ -12,9 +12,9 @@ import ( // TLSServerArguments holds shared TLS settings for components which launch // servers with TLS. type TLSServerArguments struct { - TLSSetting TLSSetting `river:",squash"` + TLSSetting TLSSetting `alloy:",squash"` - ClientCAFile string `river:"client_ca_file,attr,optional"` + ClientCAFile string `alloy:"client_ca_file,attr,optional"` } // Convert converts args into the upstream type. @@ -32,11 +32,11 @@ func (args *TLSServerArguments) Convert() *otelconfigtls.TLSServerSetting { // TLSClientArguments holds shared TLS settings for components which launch // TLS clients. type TLSClientArguments struct { - TLSSetting TLSSetting `river:",squash"` + TLSSetting TLSSetting `alloy:",squash"` - Insecure bool `river:"insecure,attr,optional"` - InsecureSkipVerify bool `river:"insecure_skip_verify,attr,optional"` - ServerName string `river:"server_name,attr,optional"` + Insecure bool `alloy:"insecure,attr,optional"` + InsecureSkipVerify bool `alloy:"insecure_skip_verify,attr,optional"` + ServerName string `alloy:"server_name,attr,optional"` } // Convert converts args into the upstream type. @@ -54,15 +54,15 @@ func (args *TLSClientArguments) Convert() *otelconfigtls.TLSClientSetting { } type TLSSetting struct { - CA string `river:"ca_pem,attr,optional"` - CAFile string `river:"ca_file,attr,optional"` - Cert string `river:"cert_pem,attr,optional"` - CertFile string `river:"cert_file,attr,optional"` - Key alloytypes.Secret `river:"key_pem,attr,optional"` - KeyFile string `river:"key_file,attr,optional"` - MinVersion string `river:"min_version,attr,optional"` - MaxVersion string `river:"max_version,attr,optional"` - ReloadInterval time.Duration `river:"reload_interval,attr,optional"` + CA string `alloy:"ca_pem,attr,optional"` + CAFile string `alloy:"ca_file,attr,optional"` + Cert string `alloy:"cert_pem,attr,optional"` + CertFile string `alloy:"cert_file,attr,optional"` + Key alloytypes.Secret `alloy:"key_pem,attr,optional"` + KeyFile string `alloy:"key_file,attr,optional"` + MinVersion string `alloy:"min_version,attr,optional"` + MaxVersion string `alloy:"max_version,attr,optional"` + ReloadInterval time.Duration `alloy:"reload_interval,attr,optional"` } func (args *TLSSetting) Convert() *otelconfigtls.TLSSetting { diff --git a/internal/component/otelcol/connector/host_info/host_info.go b/internal/component/otelcol/connector/host_info/host_info.go index 5a32e297d4..c756b80bfd 100644 --- a/internal/component/otelcol/connector/host_info/host_info.go +++ b/internal/component/otelcol/connector/host_info/host_info.go @@ -30,11 +30,11 @@ func init() { // Arguments configures the otelcol.connector.host_info component. 
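// A minimal sketch (label and values invented; per the tags below, the
// output block is required):
//
//   otelcol.connector.host_info "example" {
//     host_identifiers = ["host.id"]
//     output {
//       metrics = [otelcol.exporter.prometheus.example.input]
//     }
//   }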
type Arguments struct { - HostIdentifiers []string `river:"host_identifiers,attr,optional"` - MetricsFlushInterval time.Duration `river:"metrics_flush_interval,attr,optional"` + HostIdentifiers []string `alloy:"host_identifiers,attr,optional"` + MetricsFlushInterval time.Duration `alloy:"metrics_flush_interval,attr,optional"` // Output configures where to send processed data. Required. - Output *otelcol.ConsumerArguments `river:"output,block"` + Output *otelcol.ConsumerArguments `alloy:"output,block"` } var ( diff --git a/internal/component/otelcol/connector/servicegraph/servicegraph.go b/internal/component/otelcol/connector/servicegraph/servicegraph.go index 30f43c6dd9..1d0298f45a 100644 --- a/internal/component/otelcol/connector/servicegraph/servicegraph.go +++ b/internal/component/otelcol/connector/servicegraph/servicegraph.go @@ -32,7 +32,7 @@ func init() { // Arguments configures the otelcol.connector.servicegraph component. type Arguments struct { // LatencyHistogramBuckets is the list of durations representing latency histogram buckets. - LatencyHistogramBuckets []time.Duration `river:"latency_histogram_buckets,attr,optional"` + LatencyHistogramBuckets []time.Duration `alloy:"latency_histogram_buckets,attr,optional"` // Dimensions defines the list of additional dimensions on top of the provided: // - client @@ -41,28 +41,28 @@ type Arguments struct { // - connection_type // The dimensions will be fetched from the span's attributes. Examples of some conventionally used attributes: // https://github.com/open-telemetry/opentelemetry-collector/blob/main/model/semconv/opentelemetry.go. - Dimensions []string `river:"dimensions,attr,optional"` + Dimensions []string `alloy:"dimensions,attr,optional"` // Store contains the config for the in-memory store used to find requests between services by pairing spans. - Store StoreConfig `river:"store,block,optional"` + Store StoreConfig `alloy:"store,block,optional"` // CacheLoop defines how often to clean the cache of stale series. - CacheLoop time.Duration `river:"cache_loop,attr,optional"` + CacheLoop time.Duration `alloy:"cache_loop,attr,optional"` // StoreExpirationLoop defines how often to expire old entries from the store. - StoreExpirationLoop time.Duration `river:"store_expiration_loop,attr,optional"` + StoreExpirationLoop time.Duration `alloy:"store_expiration_loop,attr,optional"` // VirtualNodePeerAttributes the list of attributes need to match, the higher the front, the higher the priority. //TODO: Add VirtualNodePeerAttributes when it's no longer controlled by // the "processor.servicegraph.virtualNode" feature gate. - // VirtualNodePeerAttributes []string `river:"virtual_node_peer_attributes,attr,optional"` + // VirtualNodePeerAttributes []string `alloy:"virtual_node_peer_attributes,attr,optional"` // Output configures where to send processed data. Required. - Output *otelcol.ConsumerArguments `river:"output,block"` + Output *otelcol.ConsumerArguments `alloy:"output,block"` } type StoreConfig struct { // MaxItems is the maximum number of items to keep in the store. - MaxItems int `river:"max_items,attr,optional"` + MaxItems int `alloy:"max_items,attr,optional"` // TTL is the time to live for items in the store. 
- TTL time.Duration `river:"ttl,attr,optional"` + TTL time.Duration `alloy:"ttl,attr,optional"` } func (sc *StoreConfig) SetToDefault() { diff --git a/internal/component/otelcol/connector/spanlogs/spanlogs.go b/internal/component/otelcol/connector/spanlogs/spanlogs.go index a6622090b9..bc749c02d8 100644 --- a/internal/component/otelcol/connector/spanlogs/spanlogs.go +++ b/internal/component/otelcol/connector/spanlogs/spanlogs.go @@ -29,25 +29,25 @@ func init() { // Arguments configures the otelcol.connector.spanlogs component. type Arguments struct { - Spans bool `river:"spans,attr,optional"` - Roots bool `river:"roots,attr,optional"` - Processes bool `river:"processes,attr,optional"` - SpanAttributes []string `river:"span_attributes,attr,optional"` - ProcessAttributes []string `river:"process_attributes,attr,optional"` - Overrides OverrideConfig `river:"overrides,block,optional"` - Labels []string `river:"labels,attr,optional"` + Spans bool `alloy:"spans,attr,optional"` + Roots bool `alloy:"roots,attr,optional"` + Processes bool `alloy:"processes,attr,optional"` + SpanAttributes []string `alloy:"span_attributes,attr,optional"` + ProcessAttributes []string `alloy:"process_attributes,attr,optional"` + Overrides OverrideConfig `alloy:"overrides,block,optional"` + Labels []string `alloy:"labels,attr,optional"` // Output configures where to send processed data. Required. - Output *otelcol.ConsumerArguments `river:"output,block"` + Output *otelcol.ConsumerArguments `alloy:"output,block"` } type OverrideConfig struct { - LogsTag string `river:"logs_instance_tag,attr,optional"` - ServiceKey string `river:"service_key,attr,optional"` - SpanNameKey string `river:"span_name_key,attr,optional"` - StatusKey string `river:"status_key,attr,optional"` - DurationKey string `river:"duration_key,attr,optional"` - TraceIDKey string `river:"trace_id_key,attr,optional"` + LogsTag string `alloy:"logs_instance_tag,attr,optional"` + ServiceKey string `alloy:"service_key,attr,optional"` + SpanNameKey string `alloy:"span_name_key,attr,optional"` + StatusKey string `alloy:"status_key,attr,optional"` + DurationKey string `alloy:"duration_key,attr,optional"` + TraceIDKey string `alloy:"trace_id_key,attr,optional"` } var ( diff --git a/internal/component/otelcol/connector/spanmetrics/spanmetrics.go b/internal/component/otelcol/connector/spanmetrics/spanmetrics.go index 1c3b8f313e..4de3a55f12 100644 --- a/internal/component/otelcol/connector/spanmetrics/spanmetrics.go +++ b/internal/component/otelcol/connector/spanmetrics/spanmetrics.go @@ -38,28 +38,28 @@ type Arguments struct { // - status.code // The dimensions will be fetched from the span's attributes. Examples of some conventionally used attributes: // https://github.com/open-telemetry/opentelemetry-collector/blob/main/model/semconv/opentelemetry.go. - Dimensions []Dimension `river:"dimension,block,optional"` - ExcludeDimensions []string `river:"exclude_dimensions,attr,optional"` + Dimensions []Dimension `alloy:"dimension,block,optional"` + ExcludeDimensions []string `alloy:"exclude_dimensions,attr,optional"` // DimensionsCacheSize defines the size of cache for storing Dimensions, which helps to avoid cache memory growing // indefinitely over the lifetime of the collector. 
- DimensionsCacheSize int `river:"dimensions_cache_size,attr,optional"` + DimensionsCacheSize int `alloy:"dimensions_cache_size,attr,optional"` - AggregationTemporality string `river:"aggregation_temporality,attr,optional"` + AggregationTemporality string `alloy:"aggregation_temporality,attr,optional"` - Histogram HistogramConfig `river:"histogram,block"` + Histogram HistogramConfig `alloy:"histogram,block"` // MetricsEmitInterval is the time period between when metrics are flushed or emitted to the downstream components. - MetricsFlushInterval time.Duration `river:"metrics_flush_interval,attr,optional"` + MetricsFlushInterval time.Duration `alloy:"metrics_flush_interval,attr,optional"` // Namespace is the namespace of the metrics emitted by the connector. - Namespace string `river:"namespace,attr,optional"` + Namespace string `alloy:"namespace,attr,optional"` // Exemplars defines the configuration for exemplars. - Exemplars ExemplarsConfig `river:"exemplars,block,optional"` + Exemplars ExemplarsConfig `alloy:"exemplars,block,optional"` // Output configures where to send processed data. Required. - Output *otelcol.ConsumerArguments `river:"output,block"` + Output *otelcol.ConsumerArguments `alloy:"output,block"` } var ( diff --git a/internal/component/otelcol/connector/spanmetrics/types.go b/internal/component/otelcol/connector/spanmetrics/types.go index a604efaab8..96e6dcc7cb 100644 --- a/internal/component/otelcol/connector/spanmetrics/types.go +++ b/internal/component/otelcol/connector/spanmetrics/types.go @@ -12,8 +12,8 @@ import ( // Dimension defines the dimension name and optional default value if the Dimension is missing from a span attribute. type Dimension struct { - Name string `river:"name,attr"` - Default *string `river:"default,attr,optional"` + Name string `alloy:"name,attr"` + Default *string `alloy:"default,attr,optional"` } func (d Dimension) Convert() spanmetricsconnector.Dimension { @@ -56,10 +56,10 @@ func ConvertMetricUnit(unit string) (map[string]interface{}, error) { } type HistogramConfig struct { - Disable bool `river:"disable,attr,optional"` - Unit string `river:"unit,attr,optional"` - Exponential *ExponentialHistogramConfig `river:"exponential,block,optional"` - Explicit *ExplicitHistogramConfig `river:"explicit,block,optional"` + Disable bool `alloy:"disable,attr,optional"` + Unit string `alloy:"unit,attr,optional"` + Exponential *ExponentialHistogramConfig `alloy:"exponential,block,optional"` + Explicit *ExplicitHistogramConfig `alloy:"explicit,block,optional"` } var ( @@ -123,7 +123,7 @@ func (hc HistogramConfig) Convert() (*spanmetricsconnector.HistogramConfig, erro } type ExemplarsConfig struct { - Enabled bool `river:"enabled,attr,optional"` + Enabled bool `alloy:"enabled,attr,optional"` } func (ec ExemplarsConfig) Convert() *spanmetricsconnector.ExemplarsConfig { @@ -133,7 +133,7 @@ func (ec ExemplarsConfig) Convert() *spanmetricsconnector.ExemplarsConfig { } type ExponentialHistogramConfig struct { - MaxSize int32 `river:"max_size,attr,optional"` + MaxSize int32 `alloy:"max_size,attr,optional"` } var ( @@ -163,7 +163,7 @@ func (ehc ExponentialHistogramConfig) Convert() *spanmetricsconnector.Exponentia type ExplicitHistogramConfig struct { // Buckets is the list of durations representing explicit histogram buckets. 
- Buckets []time.Duration `river:"buckets,attr,optional"` + Buckets []time.Duration `alloy:"buckets,attr,optional"` } var ( diff --git a/internal/component/otelcol/consumer.go b/internal/component/otelcol/consumer.go index edb0fb4fbc..c7a385ba6f 100644 --- a/internal/component/otelcol/consumer.go +++ b/internal/component/otelcol/consumer.go @@ -18,13 +18,13 @@ type Consumer interface { // It is expected to use ConsumerArguments as a block within the top-level // arguments block for a component. type ConsumerArguments struct { - Metrics []Consumer `river:"metrics,attr,optional"` - Logs []Consumer `river:"logs,attr,optional"` - Traces []Consumer `river:"traces,attr,optional"` + Metrics []Consumer `alloy:"metrics,attr,optional"` + Logs []Consumer `alloy:"logs,attr,optional"` + Traces []Consumer `alloy:"traces,attr,optional"` } // ConsumerExports is a common Exports type for Flow components which are // otelcol processors or otelcol exporters. type ConsumerExports struct { - Input Consumer `river:"input,attr"` + Input Consumer `alloy:"input,attr"` } diff --git a/internal/component/otelcol/exporter/loadbalancing/loadbalancing.go b/internal/component/otelcol/exporter/loadbalancing/loadbalancing.go index ab9536ea5b..70bad94765 100644 --- a/internal/component/otelcol/exporter/loadbalancing/loadbalancing.go +++ b/internal/component/otelcol/exporter/loadbalancing/loadbalancing.go @@ -41,12 +41,12 @@ func init() { // Arguments configures the otelcol.exporter.loadbalancing component. type Arguments struct { - Protocol Protocol `river:"protocol,block"` - Resolver ResolverSettings `river:"resolver,block"` - RoutingKey string `river:"routing_key,attr,optional"` + Protocol Protocol `alloy:"protocol,block"` + Resolver ResolverSettings `alloy:"resolver,block"` + RoutingKey string `alloy:"routing_key,attr,optional"` // DebugMetrics configures component internal metrics. Optional. - DebugMetrics otelcol.DebugMetricsArguments `river:"debug_metrics,block,optional"` + DebugMetrics otelcol.DebugMetricsArguments `alloy:"debug_metrics,block,optional"` } var ( @@ -91,7 +91,7 @@ func (args Arguments) Convert() (otelcomponent.Config, error) { // Protocol holds the individual protocol-specific settings. Only OTLP is supported at the moment. type Protocol struct { - OTLP OtlpConfig `river:"otlp,block"` + OTLP OtlpConfig `alloy:"otlp,block"` } func (protocol Protocol) Convert() loadbalancingexporter.Protocol { @@ -102,12 +102,12 @@ func (protocol Protocol) Convert() loadbalancingexporter.Protocol { // OtlpConfig defines the config for an OTLP exporter type OtlpConfig struct { - Timeout time.Duration `river:"timeout,attr,optional"` - Queue otelcol.QueueArguments `river:"queue,block,optional"` - Retry otelcol.RetryArguments `river:"retry,block,optional"` + Timeout time.Duration `alloy:"timeout,attr,optional"` + Queue otelcol.QueueArguments `alloy:"queue,block,optional"` + Retry otelcol.RetryArguments `alloy:"retry,block,optional"` // Most of the time, the user will not have to set anything in the client block. // However, the block should not be "optional" so that the defaults are populated. 
- Client GRPCClientArguments `river:"client,block"` + Client GRPCClientArguments `alloy:"client,block"` } func (oc *OtlpConfig) SetToDefault() { @@ -132,9 +132,9 @@ func (oc OtlpConfig) Convert() otlpexporter.Config { // ResolverSettings defines the configurations for the backend resolver type ResolverSettings struct { - Static *StaticResolver `river:"static,block,optional"` - DNS *DNSResolver `river:"dns,block,optional"` - Kubernetes *KubernetesResolver `river:"kubernetes,block,optional"` + Static *StaticResolver `alloy:"static,block,optional"` + DNS *DNSResolver `alloy:"dns,block,optional"` + Kubernetes *KubernetesResolver `alloy:"kubernetes,block,optional"` } func (resolverSettings ResolverSettings) Convert() loadbalancingexporter.ResolverSettings { @@ -160,7 +160,7 @@ func (resolverSettings ResolverSettings) Convert() loadbalancingexporter.Resolve // StaticResolver defines the configuration for the resolver providing a fixed list of backends type StaticResolver struct { - Hostnames []string `river:"hostnames,attr"` + Hostnames []string `alloy:"hostnames,attr"` } func (staticResolver StaticResolver) Convert() loadbalancingexporter.StaticResolver { @@ -171,10 +171,10 @@ func (staticResolver StaticResolver) Convert() loadbalancingexporter.StaticResol // DNSResolver defines the configuration for the DNS resolver type DNSResolver struct { - Hostname string `river:"hostname,attr"` - Port string `river:"port,attr,optional"` - Interval time.Duration `river:"interval,attr,optional"` - Timeout time.Duration `river:"timeout,attr,optional"` + Hostname string `alloy:"hostname,attr"` + Port string `alloy:"port,attr,optional"` + Interval time.Duration `alloy:"interval,attr,optional"` + Timeout time.Duration `alloy:"timeout,attr,optional"` } var _ syntax.Defaulter = &DNSResolver{} @@ -202,8 +202,8 @@ func (dnsResolver *DNSResolver) Convert() loadbalancingexporter.DNSResolver { // KubernetesResolver defines the configuration for the k8s resolver type KubernetesResolver struct { - Service string `river:"service,attr"` - Ports []int32 `river:"ports,attr,optional"` + Service string `alloy:"service,attr"` + Ports []int32 `alloy:"ports,attr,optional"` } var _ syntax.Defaulter = &KubernetesResolver{} @@ -240,21 +240,21 @@ func (args Arguments) DebugMetricsConfig() otelcol.DebugMetricsArguments { // GRPCClientArguments is the same as otelcol.GRPCClientArguments, but without an "endpoint" attribute type GRPCClientArguments struct { - Compression otelcol.CompressionType `river:"compression,attr,optional"` + Compression otelcol.CompressionType `alloy:"compression,attr,optional"` - TLS otelcol.TLSClientArguments `river:"tls,block,optional"` - Keepalive *otelcol.KeepaliveClientArguments `river:"keepalive,block,optional"` + TLS otelcol.TLSClientArguments `alloy:"tls,block,optional"` + Keepalive *otelcol.KeepaliveClientArguments `alloy:"keepalive,block,optional"` - ReadBufferSize units.Base2Bytes `river:"read_buffer_size,attr,optional"` - WriteBufferSize units.Base2Bytes `river:"write_buffer_size,attr,optional"` - WaitForReady bool `river:"wait_for_ready,attr,optional"` - Headers map[string]string `river:"headers,attr,optional"` - BalancerName string `river:"balancer_name,attr,optional"` - Authority string `river:"authority,attr,optional"` + ReadBufferSize units.Base2Bytes `alloy:"read_buffer_size,attr,optional"` + WriteBufferSize units.Base2Bytes `alloy:"write_buffer_size,attr,optional"` + WaitForReady bool `alloy:"wait_for_ready,attr,optional"` + Headers map[string]string `alloy:"headers,attr,optional"` + 
BalancerName string `alloy:"balancer_name,attr,optional"` + Authority string `alloy:"authority,attr,optional"` // Auth is a binding to an otelcol.auth.* component extension which handles // authentication. - Auth *auth.Handler `river:"auth,attr,optional"` + Auth *auth.Handler `alloy:"auth,attr,optional"` } var _ syntax.Defaulter = &GRPCClientArguments{} diff --git a/internal/component/otelcol/exporter/logging/logging.go b/internal/component/otelcol/exporter/logging/logging.go index 1e08fe2104..3e09afa09d 100644 --- a/internal/component/otelcol/exporter/logging/logging.go +++ b/internal/component/otelcol/exporter/logging/logging.go @@ -28,12 +28,12 @@ func init() { // Arguments configures the otelcol.exporter.logging component. type Arguments struct { - Verbosity configtelemetry.Level `river:"verbosity,attr,optional"` - SamplingInitial int `river:"sampling_initial,attr,optional"` - SamplingThereafter int `river:"sampling_thereafter,attr,optional"` + Verbosity configtelemetry.Level `alloy:"verbosity,attr,optional"` + SamplingInitial int `alloy:"sampling_initial,attr,optional"` + SamplingThereafter int `alloy:"sampling_thereafter,attr,optional"` // DebugMetrics configures component internal metrics. Optional. - DebugMetrics otelcol.DebugMetricsArguments `river:"debug_metrics,block,optional"` + DebugMetrics otelcol.DebugMetricsArguments `alloy:"debug_metrics,block,optional"` } var _ exporter.Arguments = Arguments{} diff --git a/internal/component/otelcol/exporter/loki/loki.go b/internal/component/otelcol/exporter/loki/loki.go index 058173760c..5809d7edfc 100644 --- a/internal/component/otelcol/exporter/loki/loki.go +++ b/internal/component/otelcol/exporter/loki/loki.go @@ -28,7 +28,7 @@ func init() { // Arguments configures the otelcol.exporter.loki component. type Arguments struct { - ForwardTo []loki.LogsReceiver `river:"forward_to,attr"` + ForwardTo []loki.LogsReceiver `alloy:"forward_to,attr"` } // Component is the otelcol.exporter.loki component. diff --git a/internal/component/otelcol/exporter/otlp/otlp.go b/internal/component/otelcol/exporter/otlp/otlp.go index a86aec2219..cd46c29901 100644 --- a/internal/component/otelcol/exporter/otlp/otlp.go +++ b/internal/component/otelcol/exporter/otlp/otlp.go @@ -30,15 +30,15 @@ func init() { // Arguments configures the otelcol.exporter.otlp component. type Arguments struct { - Timeout time.Duration `river:"timeout,attr,optional"` + Timeout time.Duration `alloy:"timeout,attr,optional"` - Queue otelcol.QueueArguments `river:"sending_queue,block,optional"` - Retry otelcol.RetryArguments `river:"retry_on_failure,block,optional"` + Queue otelcol.QueueArguments `alloy:"sending_queue,block,optional"` + Retry otelcol.RetryArguments `alloy:"retry_on_failure,block,optional"` // DebugMetrics configures component internal metrics. Optional. - DebugMetrics otelcol.DebugMetricsArguments `river:"debug_metrics,block,optional"` + DebugMetrics otelcol.DebugMetricsArguments `alloy:"debug_metrics,block,optional"` - Client GRPCClientArguments `river:"client,block"` + Client GRPCClientArguments `alloy:"client,block"` } var _ exporter.Arguments = Arguments{} diff --git a/internal/component/otelcol/exporter/otlphttp/otlphttp.go b/internal/component/otelcol/exporter/otlphttp/otlphttp.go index a5c4dbe1de..6457d379ca 100644 --- a/internal/component/otelcol/exporter/otlphttp/otlphttp.go +++ b/internal/component/otelcol/exporter/otlphttp/otlphttp.go @@ -30,20 +30,20 @@ func init() { // Arguments configures the otelcol.exporter.otlphttp component. 
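// A minimal sketch (label and endpoint invented); per the doc comment
// below, the signal-specific endpoints default to the client endpoint
// with "/v1/metrics", "/v1/logs", or "/v1/traces" appended:
//
//   otelcol.exporter.otlphttp "example" {
//     client {
//       endpoint = "https://otlp.example.com"
//     }
//   }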
type Arguments struct { - Client HTTPClientArguments `river:"client,block"` - Queue otelcol.QueueArguments `river:"sending_queue,block,optional"` - Retry otelcol.RetryArguments `river:"retry_on_failure,block,optional"` + Client HTTPClientArguments `alloy:"client,block"` + Queue otelcol.QueueArguments `alloy:"sending_queue,block,optional"` + Retry otelcol.RetryArguments `alloy:"retry_on_failure,block,optional"` // DebugMetrics configures component internal metrics. Optional. - DebugMetrics otelcol.DebugMetricsArguments `river:"debug_metrics,block,optional"` + DebugMetrics otelcol.DebugMetricsArguments `alloy:"debug_metrics,block,optional"` // The URLs to send metrics/logs/traces to. If omitted the exporter will // use Client.Endpoint by appending "/v1/metrics", "/v1/logs" or // "/v1/traces", respectively. If set, these settings override // Client.Endpoint for the corresponding signal. - TracesEndpoint string `river:"traces_endpoint,attr,optional"` - MetricsEndpoint string `river:"metrics_endpoint,attr,optional"` - LogsEndpoint string `river:"logs_endpoint,attr,optional"` + TracesEndpoint string `alloy:"traces_endpoint,attr,optional"` + MetricsEndpoint string `alloy:"metrics_endpoint,attr,optional"` + LogsEndpoint string `alloy:"logs_endpoint,attr,optional"` } var _ exporter.Arguments = Arguments{} diff --git a/internal/component/otelcol/exporter/prometheus/prometheus.go b/internal/component/otelcol/exporter/prometheus/prometheus.go index 630f341af6..f85f566bb0 100644 --- a/internal/component/otelcol/exporter/prometheus/prometheus.go +++ b/internal/component/otelcol/exporter/prometheus/prometheus.go @@ -33,13 +33,13 @@ func init() { // Arguments configures the otelcol.exporter.prometheus component. type Arguments struct { - IncludeTargetInfo bool `river:"include_target_info,attr,optional"` - IncludeScopeInfo bool `river:"include_scope_info,attr,optional"` - IncludeScopeLabels bool `river:"include_scope_labels,attr,optional"` - GCFrequency time.Duration `river:"gc_frequency,attr,optional"` - ForwardTo []storage.Appendable `river:"forward_to,attr"` - AddMetricSuffixes bool `river:"add_metric_suffixes,attr,optional"` - ResourceToTelemetryConversion bool `river:"resource_to_telemetry_conversion,attr,optional"` + IncludeTargetInfo bool `alloy:"include_target_info,attr,optional"` + IncludeScopeInfo bool `alloy:"include_scope_info,attr,optional"` + IncludeScopeLabels bool `alloy:"include_scope_labels,attr,optional"` + GCFrequency time.Duration `alloy:"gc_frequency,attr,optional"` + ForwardTo []storage.Appendable `alloy:"forward_to,attr"` + AddMetricSuffixes bool `alloy:"add_metric_suffixes,attr,optional"` + ResourceToTelemetryConversion bool `alloy:"resource_to_telemetry_conversion,attr,optional"` } // DefaultArguments holds defaults values. diff --git a/internal/component/otelcol/extension/jaeger_remote_sampling/jaeger_remote_sampling.go b/internal/component/otelcol/extension/jaeger_remote_sampling/jaeger_remote_sampling.go index 3b4b5a7689..815e734a82 100644 --- a/internal/component/otelcol/extension/jaeger_remote_sampling/jaeger_remote_sampling.go +++ b/internal/component/otelcol/extension/jaeger_remote_sampling/jaeger_remote_sampling.go @@ -39,17 +39,17 @@ type ( // Arguments configures the otelcol.extension.jaegerremotesampling component. 
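// A minimal sketch (block name inferred from the package path; label and
// values invented; per the tags below, the source block is required):
//
//   otelcol.extension.jaeger_remote_sampling "example" {
//     http { }
//     source {
//       file            = "/etc/strategies.json"
//       reload_interval = "30s"
//     }
//   }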
type Arguments struct { - GRPC *GRPCServerArguments `river:"grpc,block,optional"` - HTTP *HTTPServerArguments `river:"http,block,optional"` + GRPC *GRPCServerArguments `alloy:"grpc,block,optional"` + HTTP *HTTPServerArguments `alloy:"http,block,optional"` - Source ArgumentsSource `river:"source,block"` + Source ArgumentsSource `alloy:"source,block"` } type ArgumentsSource struct { - Content string `river:"content,attr,optional"` - Remote *GRPCClientArguments `river:"remote,block,optional"` - File string `river:"file,attr,optional"` - ReloadInterval time.Duration `river:"reload_interval,attr,optional"` + Content string `alloy:"content,attr,optional"` + Remote *GRPCClientArguments `alloy:"remote,block,optional"` + File string `alloy:"file,attr,optional"` + ReloadInterval time.Duration `alloy:"reload_interval,attr,optional"` } var ( diff --git a/internal/component/otelcol/processor/attributes/attributes.go b/internal/component/otelcol/processor/attributes/attributes.go index fb7139c388..0f0cef5771 100644 --- a/internal/component/otelcol/processor/attributes/attributes.go +++ b/internal/component/otelcol/processor/attributes/attributes.go @@ -31,14 +31,14 @@ func init() { // Arguments configures the otelcol.processor.attributes component. type Arguments struct { // Pre-processing filtering to include/exclude data from the processor. - Match otelcol.MatchConfig `river:",squash"` + Match otelcol.MatchConfig `alloy:",squash"` // Actions performed on the input data in the order specified in the config. // Example actions are "insert", "update", "upsert", "delete", "hash". - Actions otelcol.AttrActionKeyValueSlice `river:"action,block,optional"` + Actions otelcol.AttrActionKeyValueSlice `alloy:"action,block,optional"` // Output configures where to send processed data. Required. - Output *otelcol.ConsumerArguments `river:"output,block"` + Output *otelcol.ConsumerArguments `alloy:"output,block"` } var ( diff --git a/internal/component/otelcol/processor/batch/batch.go b/internal/component/otelcol/processor/batch/batch.go index 1d79cca1f0..3f659fb6bd 100644 --- a/internal/component/otelcol/processor/batch/batch.go +++ b/internal/component/otelcol/processor/batch/batch.go @@ -30,14 +30,14 @@ func init() { // Arguments configures the otelcol.processor.batch component. type Arguments struct { - Timeout time.Duration `river:"timeout,attr,optional"` - SendBatchSize uint32 `river:"send_batch_size,attr,optional"` - SendBatchMaxSize uint32 `river:"send_batch_max_size,attr,optional"` - MetadataKeys []string `river:"metadata_keys,attr,optional"` - MetadataCardinalityLimit uint32 `river:"metadata_cardinality_limit,attr,optional"` + Timeout time.Duration `alloy:"timeout,attr,optional"` + SendBatchSize uint32 `alloy:"send_batch_size,attr,optional"` + SendBatchMaxSize uint32 `alloy:"send_batch_max_size,attr,optional"` + MetadataKeys []string `alloy:"metadata_keys,attr,optional"` + MetadataCardinalityLimit uint32 `alloy:"metadata_cardinality_limit,attr,optional"` // Output configures where to send processed data. Required. 
- Output *otelcol.ConsumerArguments `river:"output,block"` + Output *otelcol.ConsumerArguments `alloy:"output,block"` } var ( diff --git a/internal/component/otelcol/processor/discovery/discovery.go b/internal/component/otelcol/processor/discovery/discovery.go index 2607ada090..24de52597a 100644 --- a/internal/component/otelcol/processor/discovery/discovery.go +++ b/internal/component/otelcol/processor/discovery/discovery.go @@ -32,12 +32,12 @@ func init() { // Arguments configures the otelcol.processor.discovery component. type Arguments struct { - Targets []discovery.Target `river:"targets,attr"` - OperationType string `river:"operation_type,attr,optional"` - PodAssociations []string `river:"pod_associations,attr,optional"` + Targets []discovery.Target `alloy:"targets,attr"` + OperationType string `alloy:"operation_type,attr,optional"` + PodAssociations []string `alloy:"pod_associations,attr,optional"` // Output configures where to send processed data. Required. - Output *otelcol.ConsumerArguments `river:"output,block"` + Output *otelcol.ConsumerArguments `alloy:"output,block"` } var ( diff --git a/internal/component/otelcol/processor/filter/filter.go b/internal/component/otelcol/processor/filter/filter.go index ef0915d364..64c01d15b6 100644 --- a/internal/component/otelcol/processor/filter/filter.go +++ b/internal/component/otelcol/processor/filter/filter.go @@ -28,13 +28,13 @@ func init() { type Arguments struct { // ErrorMode determines how the processor reacts to errors that occur while processing a statement. - ErrorMode ottl.ErrorMode `river:"error_mode,attr,optional"` - Traces TraceConfig `river:"traces,block,optional"` - Metrics MetricConfig `river:"metrics,block,optional"` - Logs LogConfig `river:"logs,block,optional"` + ErrorMode ottl.ErrorMode `alloy:"error_mode,attr,optional"` + Traces TraceConfig `alloy:"traces,block,optional"` + Metrics MetricConfig `alloy:"metrics,block,optional"` + Logs LogConfig `alloy:"logs,block,optional"` // Output configures where to send processed data. Required. 
- Output *otelcol.ConsumerArguments `river:"output,block"` + Output *otelcol.ConsumerArguments `alloy:"output,block"` } var ( diff --git a/internal/component/otelcol/processor/filter/types.go b/internal/component/otelcol/processor/filter/types.go index 0752930abf..559e84e296 100644 --- a/internal/component/otelcol/processor/filter/types.go +++ b/internal/component/otelcol/processor/filter/types.go @@ -1,16 +1,16 @@ package filter type TraceConfig struct { - Span []string `river:"span,attr,optional"` - SpanEvent []string `river:"spanevent,attr,optional"` + Span []string `alloy:"span,attr,optional"` + SpanEvent []string `alloy:"spanevent,attr,optional"` } type MetricConfig struct { - Metric []string `river:"metric,attr,optional"` - Datapoint []string `river:"datapoint,attr,optional"` + Metric []string `alloy:"metric,attr,optional"` + Datapoint []string `alloy:"datapoint,attr,optional"` } type LogConfig struct { - LogRecord []string `river:"log_record,attr,optional"` + LogRecord []string `alloy:"log_record,attr,optional"` } func (args *TraceConfig) convert() map[string]interface{} { diff --git a/internal/component/otelcol/processor/k8sattributes/k8sattributes.go b/internal/component/otelcol/processor/k8sattributes/k8sattributes.go index 082fc6d51a..c5805c525d 100644 --- a/internal/component/otelcol/processor/k8sattributes/k8sattributes.go +++ b/internal/component/otelcol/processor/k8sattributes/k8sattributes.go @@ -32,15 +32,15 @@ var ( // Arguments configures the otelcol.processor.k8sattributes component. type Arguments struct { - AuthType string `river:"auth_type,attr,optional"` - Passthrough bool `river:"passthrough,attr,optional"` - ExtractConfig ExtractConfig `river:"extract,block,optional"` - Filter FilterConfig `river:"filter,block,optional"` - PodAssociations PodAssociationSlice `river:"pod_association,block,optional"` - Exclude ExcludeConfig `river:"exclude,block,optional"` + AuthType string `alloy:"auth_type,attr,optional"` + Passthrough bool `alloy:"passthrough,attr,optional"` + ExtractConfig ExtractConfig `alloy:"extract,block,optional"` + Filter FilterConfig `alloy:"filter,block,optional"` + PodAssociations PodAssociationSlice `alloy:"pod_association,block,optional"` + Exclude ExcludeConfig `alloy:"exclude,block,optional"` // Output configures where to send processed data. Required. - Output *otelcol.ConsumerArguments `river:"output,block"` + Output *otelcol.ConsumerArguments `alloy:"output,block"` } // SetToDefault implements river.Defaulter. 
diff --git a/internal/component/otelcol/processor/k8sattributes/types.go b/internal/component/otelcol/processor/k8sattributes/types.go index d44d8f5828..8977caa880 100644 --- a/internal/component/otelcol/processor/k8sattributes/types.go +++ b/internal/component/otelcol/processor/k8sattributes/types.go @@ -1,11 +1,11 @@ package k8sattributes type FieldExtractConfig struct { - TagName string `river:"tag_name,attr,optional"` - Key string `river:"key,attr,optional"` - KeyRegex string `river:"key_regex,attr,optional"` - Regex string `river:"regex,attr,optional"` - From string `river:"from,attr,optional"` + TagName string `alloy:"tag_name,attr,optional"` + Key string `alloy:"key,attr,optional"` + KeyRegex string `alloy:"key_regex,attr,optional"` + Regex string `alloy:"regex,attr,optional"` + From string `alloy:"from,attr,optional"` } func (args FieldExtractConfig) convert() map[string]interface{} { @@ -19,9 +19,9 @@ func (args FieldExtractConfig) convert() map[string]interface{} { } type ExtractConfig struct { - Metadata []string `river:"metadata,attr,optional"` - Annotations []FieldExtractConfig `river:"annotation,block,optional"` - Labels []FieldExtractConfig `river:"label,block,optional"` + Metadata []string `alloy:"metadata,attr,optional"` + Annotations []FieldExtractConfig `alloy:"annotation,block,optional"` + Labels []FieldExtractConfig `alloy:"label,block,optional"` } func (args ExtractConfig) convert() map[string]interface{} { @@ -44,9 +44,9 @@ func (args ExtractConfig) convert() map[string]interface{} { } type FieldFilterConfig struct { - Key string `river:"key,attr"` - Value string `river:"value,attr"` - Op string `river:"op,attr,optional"` + Key string `alloy:"key,attr"` + Value string `alloy:"value,attr"` + Op string `alloy:"op,attr,optional"` } func (args FieldFilterConfig) convert() map[string]interface{} { @@ -58,10 +58,10 @@ func (args FieldFilterConfig) convert() map[string]interface{} { } type FilterConfig struct { - Node string `river:"node,attr,optional"` - Namespace string `river:"namespace,attr,optional"` - Fields []FieldFilterConfig `river:"field,block,optional"` - Labels []FieldFilterConfig `river:"label,block,optional"` + Node string `alloy:"node,attr,optional"` + Namespace string `alloy:"namespace,attr,optional"` + Fields []FieldFilterConfig `alloy:"field,block,optional"` + Labels []FieldFilterConfig `alloy:"label,block,optional"` } func (args FilterConfig) convert() map[string]interface{} { @@ -97,7 +97,7 @@ func (args FilterConfig) convert() map[string]interface{} { } type PodAssociation struct { - Sources []PodAssociationSource `river:"source,block"` + Sources []PodAssociationSource `alloy:"source,block"` } func (args PodAssociation) convert() []map[string]interface{} { @@ -111,8 +111,8 @@ func (args PodAssociation) convert() []map[string]interface{} { } type PodAssociationSource struct { - From string `river:"from,attr"` - Name string `river:"name,attr,optional"` + From string `alloy:"from,attr"` + Name string `alloy:"name,attr,optional"` } func (args PodAssociationSource) convert() map[string]interface{} { @@ -137,11 +137,11 @@ func (args PodAssociationSlice) convert() []map[string]interface{} { } type ExcludeConfig struct { - Pods []ExcludePodConfig `river:"pod,block,optional"` + Pods []ExcludePodConfig `alloy:"pod,block,optional"` } type ExcludePodConfig struct { - Name string `river:"name,attr"` + Name string `alloy:"name,attr"` } func (args ExcludePodConfig) convert() map[string]interface{} { diff --git 
a/internal/component/otelcol/processor/memorylimiter/memorylimiter.go b/internal/component/otelcol/processor/memorylimiter/memorylimiter.go index ee6cc47492..745409749c 100644 --- a/internal/component/otelcol/processor/memorylimiter/memorylimiter.go +++ b/internal/component/otelcol/processor/memorylimiter/memorylimiter.go @@ -31,14 +31,14 @@ func init() { // Arguments configures the otelcol.processor.memory_limiter component. type Arguments struct { - CheckInterval time.Duration `river:"check_interval,attr"` - MemoryLimit units.Base2Bytes `river:"limit,attr,optional"` - MemorySpikeLimit units.Base2Bytes `river:"spike_limit,attr,optional"` - MemoryLimitPercentage uint32 `river:"limit_percentage,attr,optional"` - MemorySpikePercentage uint32 `river:"spike_limit_percentage,attr,optional"` + CheckInterval time.Duration `alloy:"check_interval,attr"` + MemoryLimit units.Base2Bytes `alloy:"limit,attr,optional"` + MemorySpikeLimit units.Base2Bytes `alloy:"spike_limit,attr,optional"` + MemoryLimitPercentage uint32 `alloy:"limit_percentage,attr,optional"` + MemorySpikePercentage uint32 `alloy:"spike_limit_percentage,attr,optional"` // Output configures where to send processed data. Required. - Output *otelcol.ConsumerArguments `river:"output,block"` + Output *otelcol.ConsumerArguments `alloy:"output,block"` } var ( diff --git a/internal/component/otelcol/processor/probabilistic_sampler/probabilistic_sampler.go b/internal/component/otelcol/processor/probabilistic_sampler/probabilistic_sampler.go index ca33f9792f..e23d183818 100644 --- a/internal/component/otelcol/processor/probabilistic_sampler/probabilistic_sampler.go +++ b/internal/component/otelcol/processor/probabilistic_sampler/probabilistic_sampler.go @@ -28,14 +28,14 @@ func init() { // Arguments configures the otelcol.processor.probabilistic_sampler component. type Arguments struct { - SamplingPercentage float32 `river:"sampling_percentage,attr,optional"` - HashSeed uint32 `river:"hash_seed,attr,optional"` - AttributeSource string `river:"attribute_source,attr,optional"` - FromAttribute string `river:"from_attribute,attr,optional"` - SamplingPriority string `river:"sampling_priority,attr,optional"` + SamplingPercentage float32 `alloy:"sampling_percentage,attr,optional"` + HashSeed uint32 `alloy:"hash_seed,attr,optional"` + AttributeSource string `alloy:"attribute_source,attr,optional"` + FromAttribute string `alloy:"from_attribute,attr,optional"` + SamplingPriority string `alloy:"sampling_priority,attr,optional"` // Output configures where to send processed data. Required. 
- Output *otelcol.ConsumerArguments `river:"output,block"`
+ Output *otelcol.ConsumerArguments `alloy:"output,block"`
}
var (
diff --git a/internal/component/otelcol/processor/resourcedetection/internal/aws/ec2/config.go b/internal/component/otelcol/processor/resourcedetection/internal/aws/ec2/config.go
index 41ccdf00b9..ac8a772da9 100644
--- a/internal/component/otelcol/processor/resourcedetection/internal/aws/ec2/config.go
+++ b/internal/component/otelcol/processor/resourcedetection/internal/aws/ec2/config.go
@@ -11,8 +11,8 @@ const Name = "ec2"
type Config struct {
// Tags is a list of regexes to match ec2 instance tag keys that users want
// to add as resource attributes to processed data
- Tags []string `river:"tags,attr,optional"`
- ResourceAttributes ResourceAttributesConfig `river:"resource_attributes,block,optional"`
+ Tags []string `alloy:"tags,attr,optional"`
+ ResourceAttributes ResourceAttributesConfig `alloy:"resource_attributes,block,optional"`
}
// DefaultArguments holds default settings for Config.
@@ -46,15 +46,15 @@ func (args Config) Convert() map[string]interface{} {
// ResourceAttributesConfig provides config to enable and disable resource attributes.
type ResourceAttributesConfig struct {
- CloudAccountID rac.ResourceAttributeConfig `river:"cloud.account.id,block,optional"`
- CloudAvailabilityZone rac.ResourceAttributeConfig `river:"cloud.availability_zone,block,optional"`
- CloudPlatform rac.ResourceAttributeConfig `river:"cloud.platform,block,optional"`
- CloudProvider rac.ResourceAttributeConfig `river:"cloud.provider,block,optional"`
- CloudRegion rac.ResourceAttributeConfig `river:"cloud.region,block,optional"`
- HostID rac.ResourceAttributeConfig `river:"host.id,block,optional"`
- HostImageID rac.ResourceAttributeConfig `river:"host.image.id,block,optional"`
- HostName rac.ResourceAttributeConfig `river:"host.name,block,optional"`
- HostType rac.ResourceAttributeConfig `river:"host.type,block,optional"`
+ CloudAccountID rac.ResourceAttributeConfig `alloy:"cloud.account.id,block,optional"`
+ CloudAvailabilityZone rac.ResourceAttributeConfig `alloy:"cloud.availability_zone,block,optional"`
+ CloudPlatform rac.ResourceAttributeConfig `alloy:"cloud.platform,block,optional"`
+ CloudProvider rac.ResourceAttributeConfig `alloy:"cloud.provider,block,optional"`
+ CloudRegion rac.ResourceAttributeConfig `alloy:"cloud.region,block,optional"`
+ HostID rac.ResourceAttributeConfig `alloy:"host.id,block,optional"`
+ HostImageID rac.ResourceAttributeConfig `alloy:"host.image.id,block,optional"`
+ HostName rac.ResourceAttributeConfig `alloy:"host.name,block,optional"`
+ HostType rac.ResourceAttributeConfig `alloy:"host.type,block,optional"`
}
func (r ResourceAttributesConfig) Convert() map[string]interface{} {
diff --git a/internal/component/otelcol/processor/resourcedetection/internal/aws/ecs/config.go b/internal/component/otelcol/processor/resourcedetection/internal/aws/ecs/config.go
index 101941f223..e9c8a43506 100644
--- a/internal/component/otelcol/processor/resourcedetection/internal/aws/ecs/config.go
+++ b/internal/component/otelcol/processor/resourcedetection/internal/aws/ecs/config.go
@@ -8,7 +8,7 @@ import (
const Name = "ecs"
type Config struct {
- ResourceAttributes ResourceAttributesConfig `river:"resource_attributes,block,optional"`
+ ResourceAttributes ResourceAttributesConfig `alloy:"resource_attributes,block,optional"`
}
// DefaultArguments holds default settings for Config.
@@ -50,20 +50,20 @@ func (args *Config) Convert() map[string]interface{} { // ResourceAttributesConfig provides config for ecs resource attributes. type ResourceAttributesConfig struct { - AwsEcsClusterArn rac.ResourceAttributeConfig `river:"aws.ecs.cluster.arn,block,optional"` - AwsEcsLaunchtype rac.ResourceAttributeConfig `river:"aws.ecs.launchtype,block,optional"` - AwsEcsTaskArn rac.ResourceAttributeConfig `river:"aws.ecs.task.arn,block,optional"` - AwsEcsTaskFamily rac.ResourceAttributeConfig `river:"aws.ecs.task.family,block,optional"` - AwsEcsTaskRevision rac.ResourceAttributeConfig `river:"aws.ecs.task.revision,block,optional"` - AwsLogGroupArns rac.ResourceAttributeConfig `river:"aws.log.group.arns,block,optional"` - AwsLogGroupNames rac.ResourceAttributeConfig `river:"aws.log.group.names,block,optional"` - AwsLogStreamArns rac.ResourceAttributeConfig `river:"aws.log.stream.arns,block,optional"` - AwsLogStreamNames rac.ResourceAttributeConfig `river:"aws.log.stream.names,block,optional"` - CloudAccountID rac.ResourceAttributeConfig `river:"cloud.account.id,block,optional"` - CloudAvailabilityZone rac.ResourceAttributeConfig `river:"cloud.availability_zone,block,optional"` - CloudPlatform rac.ResourceAttributeConfig `river:"cloud.platform,block,optional"` - CloudProvider rac.ResourceAttributeConfig `river:"cloud.provider,block,optional"` - CloudRegion rac.ResourceAttributeConfig `river:"cloud.region,block,optional"` + AwsEcsClusterArn rac.ResourceAttributeConfig `alloy:"aws.ecs.cluster.arn,block,optional"` + AwsEcsLaunchtype rac.ResourceAttributeConfig `alloy:"aws.ecs.launchtype,block,optional"` + AwsEcsTaskArn rac.ResourceAttributeConfig `alloy:"aws.ecs.task.arn,block,optional"` + AwsEcsTaskFamily rac.ResourceAttributeConfig `alloy:"aws.ecs.task.family,block,optional"` + AwsEcsTaskRevision rac.ResourceAttributeConfig `alloy:"aws.ecs.task.revision,block,optional"` + AwsLogGroupArns rac.ResourceAttributeConfig `alloy:"aws.log.group.arns,block,optional"` + AwsLogGroupNames rac.ResourceAttributeConfig `alloy:"aws.log.group.names,block,optional"` + AwsLogStreamArns rac.ResourceAttributeConfig `alloy:"aws.log.stream.arns,block,optional"` + AwsLogStreamNames rac.ResourceAttributeConfig `alloy:"aws.log.stream.names,block,optional"` + CloudAccountID rac.ResourceAttributeConfig `alloy:"cloud.account.id,block,optional"` + CloudAvailabilityZone rac.ResourceAttributeConfig `alloy:"cloud.availability_zone,block,optional"` + CloudPlatform rac.ResourceAttributeConfig `alloy:"cloud.platform,block,optional"` + CloudProvider rac.ResourceAttributeConfig `alloy:"cloud.provider,block,optional"` + CloudRegion rac.ResourceAttributeConfig `alloy:"cloud.region,block,optional"` } func (r ResourceAttributesConfig) Convert() map[string]interface{} { diff --git a/internal/component/otelcol/processor/resourcedetection/internal/aws/eks/config.go b/internal/component/otelcol/processor/resourcedetection/internal/aws/eks/config.go index e3bc1c962d..c44b42a639 100644 --- a/internal/component/otelcol/processor/resourcedetection/internal/aws/eks/config.go +++ b/internal/component/otelcol/processor/resourcedetection/internal/aws/eks/config.go @@ -8,7 +8,7 @@ import ( const Name = "eks" type Config struct { - ResourceAttributes ResourceAttributesConfig `river:"resource_attributes,block,optional"` + ResourceAttributes ResourceAttributesConfig `alloy:"resource_attributes,block,optional"` } // DefaultArguments holds default settings for Config. 
@@ -34,8 +34,8 @@ func (args Config) Convert() map[string]interface{} { // ResourceAttributesConfig provides config for eks resource attributes. type ResourceAttributesConfig struct { - CloudPlatform rac.ResourceAttributeConfig `river:"cloud.platform,block,optional"` - CloudProvider rac.ResourceAttributeConfig `river:"cloud.provider,block,optional"` + CloudPlatform rac.ResourceAttributeConfig `alloy:"cloud.platform,block,optional"` + CloudProvider rac.ResourceAttributeConfig `alloy:"cloud.provider,block,optional"` } func (r ResourceAttributesConfig) Convert() map[string]interface{} { diff --git a/internal/component/otelcol/processor/resourcedetection/internal/aws/elasticbeanstalk/config.go b/internal/component/otelcol/processor/resourcedetection/internal/aws/elasticbeanstalk/config.go index 2593f17c6f..3500bc8b33 100644 --- a/internal/component/otelcol/processor/resourcedetection/internal/aws/elasticbeanstalk/config.go +++ b/internal/component/otelcol/processor/resourcedetection/internal/aws/elasticbeanstalk/config.go @@ -8,7 +8,7 @@ import ( const Name = "elasticbeanstalk" type Config struct { - ResourceAttributes ResourceAttributesConfig `river:"resource_attributes,block,optional"` + ResourceAttributes ResourceAttributesConfig `alloy:"resource_attributes,block,optional"` } // DefaultArguments holds default settings for Config. @@ -37,11 +37,11 @@ func (args Config) Convert() map[string]interface{} { // ResourceAttributesConfig provides config for elastic_beanstalk resource attributes. type ResourceAttributesConfig struct { - CloudPlatform rac.ResourceAttributeConfig `river:"cloud.platform,block,optional"` - CloudProvider rac.ResourceAttributeConfig `river:"cloud.provider,block,optional"` - DeploymentEnvironment rac.ResourceAttributeConfig `river:"deployment.environment,block,optional"` - ServiceInstanceID rac.ResourceAttributeConfig `river:"service.instance.id,block,optional"` - ServiceVersion rac.ResourceAttributeConfig `river:"service.version,block,optional"` + CloudPlatform rac.ResourceAttributeConfig `alloy:"cloud.platform,block,optional"` + CloudProvider rac.ResourceAttributeConfig `alloy:"cloud.provider,block,optional"` + DeploymentEnvironment rac.ResourceAttributeConfig `alloy:"deployment.environment,block,optional"` + ServiceInstanceID rac.ResourceAttributeConfig `alloy:"service.instance.id,block,optional"` + ServiceVersion rac.ResourceAttributeConfig `alloy:"service.version,block,optional"` } func (r ResourceAttributesConfig) Convert() map[string]interface{} { diff --git a/internal/component/otelcol/processor/resourcedetection/internal/aws/lambda/config.go b/internal/component/otelcol/processor/resourcedetection/internal/aws/lambda/config.go index b10990c6c3..6f185d9b2d 100644 --- a/internal/component/otelcol/processor/resourcedetection/internal/aws/lambda/config.go +++ b/internal/component/otelcol/processor/resourcedetection/internal/aws/lambda/config.go @@ -8,7 +8,7 @@ import ( const Name = "lambda" type Config struct { - ResourceAttributes ResourceAttributesConfig `river:"resource_attributes,block,optional"` + ResourceAttributes ResourceAttributesConfig `alloy:"resource_attributes,block,optional"` } // DefaultArguments holds default settings for Config. @@ -41,15 +41,15 @@ func (args Config) Convert() map[string]interface{} { // ResourceAttributesConfig provides config for lambda resource attributes. 
type ResourceAttributesConfig struct { - AwsLogGroupNames rac.ResourceAttributeConfig `river:"aws.log.group.names,block,optional"` - AwsLogStreamNames rac.ResourceAttributeConfig `river:"aws.log.stream.names,block,optional"` - CloudPlatform rac.ResourceAttributeConfig `river:"cloud.platform,block,optional"` - CloudProvider rac.ResourceAttributeConfig `river:"cloud.provider,block,optional"` - CloudRegion rac.ResourceAttributeConfig `river:"cloud.region,block,optional"` - FaasInstance rac.ResourceAttributeConfig `river:"faas.instance,block,optional"` - FaasMaxMemory rac.ResourceAttributeConfig `river:"faas.max_memory,block,optional"` - FaasName rac.ResourceAttributeConfig `river:"faas.name,block,optional"` - FaasVersion rac.ResourceAttributeConfig `river:"faas.version,block,optional"` + AwsLogGroupNames rac.ResourceAttributeConfig `alloy:"aws.log.group.names,block,optional"` + AwsLogStreamNames rac.ResourceAttributeConfig `alloy:"aws.log.stream.names,block,optional"` + CloudPlatform rac.ResourceAttributeConfig `alloy:"cloud.platform,block,optional"` + CloudProvider rac.ResourceAttributeConfig `alloy:"cloud.provider,block,optional"` + CloudRegion rac.ResourceAttributeConfig `alloy:"cloud.region,block,optional"` + FaasInstance rac.ResourceAttributeConfig `alloy:"faas.instance,block,optional"` + FaasMaxMemory rac.ResourceAttributeConfig `alloy:"faas.max_memory,block,optional"` + FaasName rac.ResourceAttributeConfig `alloy:"faas.name,block,optional"` + FaasVersion rac.ResourceAttributeConfig `alloy:"faas.version,block,optional"` } func (r ResourceAttributesConfig) Convert() map[string]interface{} { diff --git a/internal/component/otelcol/processor/resourcedetection/internal/azure/aks/config.go b/internal/component/otelcol/processor/resourcedetection/internal/azure/aks/config.go index a38838e3d0..656875bd27 100644 --- a/internal/component/otelcol/processor/resourcedetection/internal/azure/aks/config.go +++ b/internal/component/otelcol/processor/resourcedetection/internal/azure/aks/config.go @@ -8,7 +8,7 @@ import ( const Name = "aks" type Config struct { - ResourceAttributes ResourceAttributesConfig `river:"resource_attributes,block,optional"` + ResourceAttributes ResourceAttributesConfig `alloy:"resource_attributes,block,optional"` } // DefaultArguments holds default settings for Config. @@ -34,8 +34,8 @@ func (args Config) Convert() map[string]interface{} { // ResourceAttributesConfig provides config for aks resource attributes. 
type ResourceAttributesConfig struct { - CloudPlatform rac.ResourceAttributeConfig `river:"cloud.platform,block,optional"` - CloudProvider rac.ResourceAttributeConfig `river:"cloud.provider,block,optional"` + CloudPlatform rac.ResourceAttributeConfig `alloy:"cloud.platform,block,optional"` + CloudProvider rac.ResourceAttributeConfig `alloy:"cloud.provider,block,optional"` } func (r ResourceAttributesConfig) Convert() map[string]interface{} { diff --git a/internal/component/otelcol/processor/resourcedetection/internal/azure/config.go b/internal/component/otelcol/processor/resourcedetection/internal/azure/config.go index 30a40298b4..871ea577ac 100644 --- a/internal/component/otelcol/processor/resourcedetection/internal/azure/config.go +++ b/internal/component/otelcol/processor/resourcedetection/internal/azure/config.go @@ -8,7 +8,7 @@ import ( const Name = "azure" type Config struct { - ResourceAttributes ResourceAttributesConfig `river:"resource_attributes,block,optional"` + ResourceAttributes ResourceAttributesConfig `alloy:"resource_attributes,block,optional"` } // DefaultArguments holds default settings for Config. @@ -42,16 +42,16 @@ func (args Config) Convert() map[string]interface{} { // ResourceAttributesConfig provides config for azure resource attributes. type ResourceAttributesConfig struct { - AzureResourcegroupName rac.ResourceAttributeConfig `river:"azure.resourcegroup.name,block,optional"` - AzureVMName rac.ResourceAttributeConfig `river:"azure.vm.name,block,optional"` - AzureVMScalesetName rac.ResourceAttributeConfig `river:"azure.vm.scaleset.name,block,optional"` - AzureVMSize rac.ResourceAttributeConfig `river:"azure.vm.size,block,optional"` - CloudAccountID rac.ResourceAttributeConfig `river:"cloud.account.id,block,optional"` - CloudPlatform rac.ResourceAttributeConfig `river:"cloud.platform,block,optional"` - CloudProvider rac.ResourceAttributeConfig `river:"cloud.provider,block,optional"` - CloudRegion rac.ResourceAttributeConfig `river:"cloud.region,block,optional"` - HostID rac.ResourceAttributeConfig `river:"host.id,block,optional"` - HostName rac.ResourceAttributeConfig `river:"host.name,block,optional"` + AzureResourcegroupName rac.ResourceAttributeConfig `alloy:"azure.resourcegroup.name,block,optional"` + AzureVMName rac.ResourceAttributeConfig `alloy:"azure.vm.name,block,optional"` + AzureVMScalesetName rac.ResourceAttributeConfig `alloy:"azure.vm.scaleset.name,block,optional"` + AzureVMSize rac.ResourceAttributeConfig `alloy:"azure.vm.size,block,optional"` + CloudAccountID rac.ResourceAttributeConfig `alloy:"cloud.account.id,block,optional"` + CloudPlatform rac.ResourceAttributeConfig `alloy:"cloud.platform,block,optional"` + CloudProvider rac.ResourceAttributeConfig `alloy:"cloud.provider,block,optional"` + CloudRegion rac.ResourceAttributeConfig `alloy:"cloud.region,block,optional"` + HostID rac.ResourceAttributeConfig `alloy:"host.id,block,optional"` + HostName rac.ResourceAttributeConfig `alloy:"host.name,block,optional"` } func (r ResourceAttributesConfig) Convert() map[string]interface{} { diff --git a/internal/component/otelcol/processor/resourcedetection/internal/consul/config.go b/internal/component/otelcol/processor/resourcedetection/internal/consul/config.go index f35f8f8002..b9c6b20886 100644 --- a/internal/component/otelcol/processor/resourcedetection/internal/consul/config.go +++ b/internal/component/otelcol/processor/resourcedetection/internal/consul/config.go @@ -14,32 +14,32 @@ const Name = "consul" // See `consul.go#NewDetector` for more 
information. type Config struct { // Address is the address of the Consul server - Address string `river:"address,attr,optional"` + Address string `alloy:"address,attr,optional"` // Datacenter to use. If not provided, the default agent datacenter is used. - Datacenter string `river:"datacenter,attr,optional"` + Datacenter string `alloy:"datacenter,attr,optional"` // Token is used to provide a per-request ACL token which overrides the // agent's default (empty) token. Token is only required if // [Consul's ACL System](https://www.consul.io/docs/security/acl/acl-system) // is enabled. - Token alloytypes.Secret `river:"token,attr,optional"` + Token alloytypes.Secret `alloy:"token,attr,optional"` // TokenFile is not necessary in River because users can use the local.file // Flow component instead. // - // TokenFile string `river:"token_file"` + // TokenFile string `alloy:"token_file"` // Namespace is the name of the namespace to send along for the request // when no other Namespace is present in the QueryOptions - Namespace string `river:"namespace,attr,optional"` + Namespace string `alloy:"namespace,attr,optional"` // Allowlist of [Consul Metadata](https://www.consul.io/docs/agent/options#node_meta) // keys to use as resource attributes. - MetaLabels []string `river:"meta,attr,optional"` + MetaLabels []string `alloy:"meta,attr,optional"` // ResourceAttributes configuration for Consul detector - ResourceAttributes ResourceAttributesConfig `river:"resource_attributes,block,optional"` + ResourceAttributes ResourceAttributesConfig `alloy:"resource_attributes,block,optional"` } // DefaultArguments holds default settings for Config. @@ -80,9 +80,9 @@ func (args Config) Convert() map[string]interface{} { // ResourceAttributesConfig provides config for consul resource attributes. type ResourceAttributesConfig struct { - CloudRegion rac.ResourceAttributeConfig `river:"cloud.region,block,optional"` - HostID rac.ResourceAttributeConfig `river:"host.id,block,optional"` - HostName rac.ResourceAttributeConfig `river:"host.name,block,optional"` + CloudRegion rac.ResourceAttributeConfig `alloy:"cloud.region,block,optional"` + HostID rac.ResourceAttributeConfig `alloy:"host.id,block,optional"` + HostName rac.ResourceAttributeConfig `alloy:"host.name,block,optional"` } func (r *ResourceAttributesConfig) Convert() map[string]interface{} { diff --git a/internal/component/otelcol/processor/resourcedetection/internal/docker/config.go b/internal/component/otelcol/processor/resourcedetection/internal/docker/config.go index a0b2828fe0..badceabdbe 100644 --- a/internal/component/otelcol/processor/resourcedetection/internal/docker/config.go +++ b/internal/component/otelcol/processor/resourcedetection/internal/docker/config.go @@ -8,7 +8,7 @@ import ( const Name = "docker" type Config struct { - ResourceAttributes ResourceAttributesConfig `river:"resource_attributes,block,optional"` + ResourceAttributes ResourceAttributesConfig `alloy:"resource_attributes,block,optional"` } // DefaultArguments holds default settings for Config. @@ -34,8 +34,8 @@ func (args Config) Convert() map[string]interface{} { // ResourceAttributesConfig provides config for docker resource attributes. 
type ResourceAttributesConfig struct { - HostName rac.ResourceAttributeConfig `river:"host.name,block,optional"` - OsType rac.ResourceAttributeConfig `river:"os.type,block,optional"` + HostName rac.ResourceAttributeConfig `alloy:"host.name,block,optional"` + OsType rac.ResourceAttributeConfig `alloy:"os.type,block,optional"` } func (r ResourceAttributesConfig) Convert() map[string]interface{} { diff --git a/internal/component/otelcol/processor/resourcedetection/internal/gcp/config.go b/internal/component/otelcol/processor/resourcedetection/internal/gcp/config.go index 85ebffb55e..da95652600 100644 --- a/internal/component/otelcol/processor/resourcedetection/internal/gcp/config.go +++ b/internal/component/otelcol/processor/resourcedetection/internal/gcp/config.go @@ -8,7 +8,7 @@ import ( const Name = "gcp" type Config struct { - ResourceAttributes ResourceAttributesConfig `river:"resource_attributes,block,optional"` + ResourceAttributes ResourceAttributesConfig `alloy:"resource_attributes,block,optional"` } // DefaultArguments holds default settings for Config. @@ -49,23 +49,23 @@ func (args Config) Convert() map[string]interface{} { // ResourceAttributesConfig provides config for gcp resource attributes. type ResourceAttributesConfig struct { - CloudAccountID rac.ResourceAttributeConfig `river:"cloud.account.id,block,optional"` - CloudAvailabilityZone rac.ResourceAttributeConfig `river:"cloud.availability_zone,block,optional"` - CloudPlatform rac.ResourceAttributeConfig `river:"cloud.platform,block,optional"` - CloudProvider rac.ResourceAttributeConfig `river:"cloud.provider,block,optional"` - CloudRegion rac.ResourceAttributeConfig `river:"cloud.region,block,optional"` - FaasID rac.ResourceAttributeConfig `river:"faas.id,block,optional"` - FaasInstance rac.ResourceAttributeConfig `river:"faas.instance,block,optional"` - FaasName rac.ResourceAttributeConfig `river:"faas.name,block,optional"` - FaasVersion rac.ResourceAttributeConfig `river:"faas.version,block,optional"` - GcpCloudRunJobExecution rac.ResourceAttributeConfig `river:"gcp.cloud_run.job.execution,block,optional"` - GcpCloudRunJobTaskIndex rac.ResourceAttributeConfig `river:"gcp.cloud_run.job.task_index,block,optional"` - GcpGceInstanceHostname rac.ResourceAttributeConfig `river:"gcp.gce.instance.hostname,block,optional"` - GcpGceInstanceName rac.ResourceAttributeConfig `river:"gcp.gce.instance.name,block,optional"` - HostID rac.ResourceAttributeConfig `river:"host.id,block,optional"` - HostName rac.ResourceAttributeConfig `river:"host.name,block,optional"` - HostType rac.ResourceAttributeConfig `river:"host.type,block,optional"` - K8sClusterName rac.ResourceAttributeConfig `river:"k8s.cluster.name,block,optional"` + CloudAccountID rac.ResourceAttributeConfig `alloy:"cloud.account.id,block,optional"` + CloudAvailabilityZone rac.ResourceAttributeConfig `alloy:"cloud.availability_zone,block,optional"` + CloudPlatform rac.ResourceAttributeConfig `alloy:"cloud.platform,block,optional"` + CloudProvider rac.ResourceAttributeConfig `alloy:"cloud.provider,block,optional"` + CloudRegion rac.ResourceAttributeConfig `alloy:"cloud.region,block,optional"` + FaasID rac.ResourceAttributeConfig `alloy:"faas.id,block,optional"` + FaasInstance rac.ResourceAttributeConfig `alloy:"faas.instance,block,optional"` + FaasName rac.ResourceAttributeConfig `alloy:"faas.name,block,optional"` + FaasVersion rac.ResourceAttributeConfig `alloy:"faas.version,block,optional"` + GcpCloudRunJobExecution rac.ResourceAttributeConfig 
`alloy:"gcp.cloud_run.job.execution,block,optional"` + GcpCloudRunJobTaskIndex rac.ResourceAttributeConfig `alloy:"gcp.cloud_run.job.task_index,block,optional"` + GcpGceInstanceHostname rac.ResourceAttributeConfig `alloy:"gcp.gce.instance.hostname,block,optional"` + GcpGceInstanceName rac.ResourceAttributeConfig `alloy:"gcp.gce.instance.name,block,optional"` + HostID rac.ResourceAttributeConfig `alloy:"host.id,block,optional"` + HostName rac.ResourceAttributeConfig `alloy:"host.name,block,optional"` + HostType rac.ResourceAttributeConfig `alloy:"host.type,block,optional"` + K8sClusterName rac.ResourceAttributeConfig `alloy:"k8s.cluster.name,block,optional"` } func (r ResourceAttributesConfig) Convert() map[string]interface{} { diff --git a/internal/component/otelcol/processor/resourcedetection/internal/heroku/config.go b/internal/component/otelcol/processor/resourcedetection/internal/heroku/config.go index a5e36ff14b..3420a4df07 100644 --- a/internal/component/otelcol/processor/resourcedetection/internal/heroku/config.go +++ b/internal/component/otelcol/processor/resourcedetection/internal/heroku/config.go @@ -8,7 +8,7 @@ import ( const Name = "heroku" type Config struct { - ResourceAttributes ResourceAttributesConfig `river:"resource_attributes,block,optional"` + ResourceAttributes ResourceAttributesConfig `alloy:"resource_attributes,block,optional"` } // DefaultArguments holds default settings for Config. @@ -40,14 +40,14 @@ func (args Config) Convert() map[string]interface{} { // ResourceAttributesConfig provides config for heroku resource attributes. type ResourceAttributesConfig struct { - CloudProvider rac.ResourceAttributeConfig `river:"cloud.provider,block,optional"` - HerokuAppID rac.ResourceAttributeConfig `river:"heroku.app.id,block,optional"` - HerokuDynoID rac.ResourceAttributeConfig `river:"heroku.dyno.id,block,optional"` - HerokuReleaseCommit rac.ResourceAttributeConfig `river:"heroku.release.commit,block,optional"` - HerokuReleaseCreationTimestamp rac.ResourceAttributeConfig `river:"heroku.release.creation_timestamp,block,optional"` - ServiceInstanceID rac.ResourceAttributeConfig `river:"service.instance.id,block,optional"` - ServiceName rac.ResourceAttributeConfig `river:"service.name,block,optional"` - ServiceVersion rac.ResourceAttributeConfig `river:"service.version,block,optional"` + CloudProvider rac.ResourceAttributeConfig `alloy:"cloud.provider,block,optional"` + HerokuAppID rac.ResourceAttributeConfig `alloy:"heroku.app.id,block,optional"` + HerokuDynoID rac.ResourceAttributeConfig `alloy:"heroku.dyno.id,block,optional"` + HerokuReleaseCommit rac.ResourceAttributeConfig `alloy:"heroku.release.commit,block,optional"` + HerokuReleaseCreationTimestamp rac.ResourceAttributeConfig `alloy:"heroku.release.creation_timestamp,block,optional"` + ServiceInstanceID rac.ResourceAttributeConfig `alloy:"service.instance.id,block,optional"` + ServiceName rac.ResourceAttributeConfig `alloy:"service.name,block,optional"` + ServiceVersion rac.ResourceAttributeConfig `alloy:"service.version,block,optional"` } func (r ResourceAttributesConfig) Convert() map[string]interface{} { diff --git a/internal/component/otelcol/processor/resourcedetection/internal/k8snode/config.go b/internal/component/otelcol/processor/resourcedetection/internal/k8snode/config.go index c0922acdfd..7e4c365862 100644 --- a/internal/component/otelcol/processor/resourcedetection/internal/k8snode/config.go +++ b/internal/component/otelcol/processor/resourcedetection/internal/k8snode/config.go @@ -9,7 +9,7 @@ import ( 
const Name = "kubernetes_node" type Config struct { - KubernetesAPIConfig otelcol.KubernetesAPIConfig `river:",squash"` + KubernetesAPIConfig otelcol.KubernetesAPIConfig `alloy:",squash"` // NodeFromEnv can be used to extract the node name from an environment // variable. The value must be the name of the environment variable. // This is useful when the node where an Agent will run on cannot be @@ -29,8 +29,8 @@ type Config struct { // the agent is running on. // // More on downward API here: https://kubernetes.io/docs/tasks/inject-data-application/environment-variable-expose-pod-information/ - NodeFromEnvVar string `river:"node_from_env_var,attr,optional"` - ResourceAttributes ResourceAttributesConfig `river:"resource_attributes,block,optional"` + NodeFromEnvVar string `alloy:"node_from_env_var,attr,optional"` + ResourceAttributes ResourceAttributesConfig `alloy:"resource_attributes,block,optional"` } var DefaultArguments = Config{ @@ -63,8 +63,8 @@ func (args Config) Convert() map[string]interface{} { // ResourceAttributesConfig provides config for k8snode resource attributes. type ResourceAttributesConfig struct { - K8sNodeName rac.ResourceAttributeConfig `river:"k8s.node.name,block,optional"` - K8sNodeUID rac.ResourceAttributeConfig `river:"k8s.node.uid,block,optional"` + K8sNodeName rac.ResourceAttributeConfig `alloy:"k8s.node.name,block,optional"` + K8sNodeUID rac.ResourceAttributeConfig `alloy:"k8s.node.uid,block,optional"` } func (r ResourceAttributesConfig) Convert() map[string]interface{} { diff --git a/internal/component/otelcol/processor/resourcedetection/internal/openshift/config.go b/internal/component/otelcol/processor/resourcedetection/internal/openshift/config.go index 4c53dcc74e..3e6e38af5a 100644 --- a/internal/component/otelcol/processor/resourcedetection/internal/openshift/config.go +++ b/internal/component/otelcol/processor/resourcedetection/internal/openshift/config.go @@ -12,16 +12,16 @@ const Name = "openshift" // See `openshift.go#NewDetector` for more information. type Config struct { // Address is the address of the openshift api server - Address string `river:"address,attr,optional"` + Address string `alloy:"address,attr,optional"` // Token is used to identify against the openshift api server - Token string `river:"token,attr,optional"` + Token string `alloy:"token,attr,optional"` // TLSSettings contains TLS configurations that are specific to client // connection used to communicate with the Openshift API. - TLSSettings otelcol.TLSClientArguments `river:"tls,block,optional"` + TLSSettings otelcol.TLSClientArguments `alloy:"tls,block,optional"` - ResourceAttributes ResourceAttributesConfig `river:"resource_attributes,block,optional"` + ResourceAttributes ResourceAttributesConfig `alloy:"resource_attributes,block,optional"` } // DefaultArguments holds default settings for Config. @@ -52,10 +52,10 @@ func (args Config) Convert() map[string]interface{} { // ResourceAttributesConfig provides config for openshift resource attributes. 
type ResourceAttributesConfig struct { - CloudPlatform rac.ResourceAttributeConfig `river:"cloud.platform,block,optional"` - CloudProvider rac.ResourceAttributeConfig `river:"cloud.provider,block,optional"` - CloudRegion rac.ResourceAttributeConfig `river:"cloud.region,block,optional"` - K8sClusterName rac.ResourceAttributeConfig `river:"k8s.cluster.name,block,optional"` + CloudPlatform rac.ResourceAttributeConfig `alloy:"cloud.platform,block,optional"` + CloudProvider rac.ResourceAttributeConfig `alloy:"cloud.provider,block,optional"` + CloudRegion rac.ResourceAttributeConfig `alloy:"cloud.region,block,optional"` + K8sClusterName rac.ResourceAttributeConfig `alloy:"k8s.cluster.name,block,optional"` } func (r ResourceAttributesConfig) Convert() map[string]interface{} { diff --git a/internal/component/otelcol/processor/resourcedetection/internal/resource_attribute_config/resource_attribute_config.go b/internal/component/otelcol/processor/resourcedetection/internal/resource_attribute_config/resource_attribute_config.go index ff5540a2f5..2af8a4b40f 100644 --- a/internal/component/otelcol/processor/resourcedetection/internal/resource_attribute_config/resource_attribute_config.go +++ b/internal/component/otelcol/processor/resourcedetection/internal/resource_attribute_config/resource_attribute_config.go @@ -2,7 +2,7 @@ package resource_attribute_config // Configures whether a resource attribute should be enabled or not. type ResourceAttributeConfig struct { - Enabled bool `river:"enabled,attr"` + Enabled bool `alloy:"enabled,attr"` } func (r ResourceAttributeConfig) Convert() map[string]interface{} { diff --git a/internal/component/otelcol/processor/resourcedetection/internal/system/config.go b/internal/component/otelcol/processor/resourcedetection/internal/system/config.go index 4ab51788a1..8c90539a33 100644 --- a/internal/component/otelcol/processor/resourcedetection/internal/system/config.go +++ b/internal/component/otelcol/processor/resourcedetection/internal/system/config.go @@ -14,9 +14,9 @@ type Config struct { // The HostnameSources is a priority list of sources from which hostname will be fetched. // In case of the error in fetching hostname from source, // the next source from the list will be considered. - HostnameSources []string `river:"hostname_sources,attr,optional"` + HostnameSources []string `alloy:"hostname_sources,attr,optional"` - ResourceAttributes ResourceAttributesConfig `river:"resource_attributes,block,optional"` + ResourceAttributes ResourceAttributesConfig `alloy:"resource_attributes,block,optional"` } var _ syntax.Defaulter = (*Config)(nil) @@ -63,17 +63,17 @@ func (args Config) Convert() map[string]interface{} { // ResourceAttributesConfig provides config for system resource attributes. 
type ResourceAttributesConfig struct { - HostArch rac.ResourceAttributeConfig `river:"host.arch,block,optional"` - HostCPUCacheL2Size rac.ResourceAttributeConfig `river:"host.cpu.cache.l2.size,block,optional"` - HostCPUFamily rac.ResourceAttributeConfig `river:"host.cpu.family,block,optional"` - HostCPUModelID rac.ResourceAttributeConfig `river:"host.cpu.model.id,block,optional"` - HostCPUModelName rac.ResourceAttributeConfig `river:"host.cpu.model.name,block,optional"` - HostCPUStepping rac.ResourceAttributeConfig `river:"host.cpu.stepping,block,optional"` - HostCPUVendorID rac.ResourceAttributeConfig `river:"host.cpu.vendor.id,block,optional"` - HostID rac.ResourceAttributeConfig `river:"host.id,block,optional"` - HostName rac.ResourceAttributeConfig `river:"host.name,block,optional"` - OsDescription rac.ResourceAttributeConfig `river:"os.description,block,optional"` - OsType rac.ResourceAttributeConfig `river:"os.type,block,optional"` + HostArch rac.ResourceAttributeConfig `alloy:"host.arch,block,optional"` + HostCPUCacheL2Size rac.ResourceAttributeConfig `alloy:"host.cpu.cache.l2.size,block,optional"` + HostCPUFamily rac.ResourceAttributeConfig `alloy:"host.cpu.family,block,optional"` + HostCPUModelID rac.ResourceAttributeConfig `alloy:"host.cpu.model.id,block,optional"` + HostCPUModelName rac.ResourceAttributeConfig `alloy:"host.cpu.model.name,block,optional"` + HostCPUStepping rac.ResourceAttributeConfig `alloy:"host.cpu.stepping,block,optional"` + HostCPUVendorID rac.ResourceAttributeConfig `alloy:"host.cpu.vendor.id,block,optional"` + HostID rac.ResourceAttributeConfig `alloy:"host.id,block,optional"` + HostName rac.ResourceAttributeConfig `alloy:"host.name,block,optional"` + OsDescription rac.ResourceAttributeConfig `alloy:"os.description,block,optional"` + OsType rac.ResourceAttributeConfig `alloy:"os.type,block,optional"` } func (r ResourceAttributesConfig) Convert() map[string]interface{} { diff --git a/internal/component/otelcol/processor/resourcedetection/resourcedetection.go b/internal/component/otelcol/processor/resourcedetection/resourcedetection.go index 8a6a8a8f61..7e5d617163 100644 --- a/internal/component/otelcol/processor/resourcedetection/resourcedetection.go +++ b/internal/component/otelcol/processor/resourcedetection/resourcedetection.go @@ -47,19 +47,19 @@ func init() { type Arguments struct { // Detectors is an ordered list of named detectors that should be // run to attempt to detect resource information. - Detectors []string `river:"detectors,attr,optional"` + Detectors []string `alloy:"detectors,attr,optional"` // Override indicates whether any existing resource attributes // should be overridden or preserved. Defaults to true. - Override bool `river:"override,attr,optional"` + Override bool `alloy:"override,attr,optional"` // DetectorConfig is a list of settings specific to all detectors - DetectorConfig DetectorConfig `river:",squash"` + DetectorConfig DetectorConfig `alloy:",squash"` // HTTP client settings for the detector // Timeout default is 5s - Timeout time.Duration `river:"timeout,attr,optional"` - // Client otelcol.HTTPClientArguments `river:",squash"` + Timeout time.Duration `alloy:"timeout,attr,optional"` + // Client otelcol.HTTPClientArguments `alloy:",squash"` //TODO: Uncomment this later, and remove Timeout? // Can we just get away with a timeout, or do we need all the http client settings? // It seems that HTTP client settings are only used in the ec2 detection via ClientFromContext. 
@@ -68,52 +68,52 @@ type Arguments struct { // We'd have to mention that they're only for a very specific use case. // Output configures where to send processed data. Required. - Output *otelcol.ConsumerArguments `river:"output,block"` + Output *otelcol.ConsumerArguments `alloy:"output,block"` } // DetectorConfig contains user-specified configurations unique to all individual detectors type DetectorConfig struct { // EC2Config contains user-specified configurations for the EC2 detector - EC2Config ec2.Config `river:"ec2,block,optional"` + EC2Config ec2.Config `alloy:"ec2,block,optional"` // ECSConfig contains user-specified configurations for the ECS detector - ECSConfig ecs.Config `river:"ecs,block,optional"` + ECSConfig ecs.Config `alloy:"ecs,block,optional"` // EKSConfig contains user-specified configurations for the EKS detector - EKSConfig eks.Config `river:"eks,block,optional"` + EKSConfig eks.Config `alloy:"eks,block,optional"` // Elasticbeanstalk contains user-specified configurations for the elasticbeanstalk detector - ElasticbeanstalkConfig elasticbeanstalk.Config `river:"elasticbeanstalk,block,optional"` + ElasticbeanstalkConfig elasticbeanstalk.Config `alloy:"elasticbeanstalk,block,optional"` // Lambda contains user-specified configurations for the lambda detector - LambdaConfig lambda.Config `river:"lambda,block,optional"` + LambdaConfig lambda.Config `alloy:"lambda,block,optional"` // Azure contains user-specified configurations for the azure detector - AzureConfig azure.Config `river:"azure,block,optional"` + AzureConfig azure.Config `alloy:"azure,block,optional"` // Aks contains user-specified configurations for the aks detector - AksConfig aks.Config `river:"aks,block,optional"` + AksConfig aks.Config `alloy:"aks,block,optional"` // ConsulConfig contains user-specified configurations for the Consul detector - ConsulConfig consul.Config `river:"consul,block,optional"` + ConsulConfig consul.Config `alloy:"consul,block,optional"` // DockerConfig contains user-specified configurations for the docker detector - DockerConfig docker.Config `river:"docker,block,optional"` + DockerConfig docker.Config `alloy:"docker,block,optional"` // GcpConfig contains user-specified configurations for the gcp detector - GcpConfig gcp.Config `river:"gcp,block,optional"` + GcpConfig gcp.Config `alloy:"gcp,block,optional"` // HerokuConfig contains user-specified configurations for the heroku detector - HerokuConfig heroku.Config `river:"heroku,block,optional"` + HerokuConfig heroku.Config `alloy:"heroku,block,optional"` // SystemConfig contains user-specified configurations for the System detector - SystemConfig system.Config `river:"system,block,optional"` + SystemConfig system.Config `alloy:"system,block,optional"` // OpenShift contains user-specified configurations for the Openshift detector - OpenShiftConfig openshift.Config `river:"openshift,block,optional"` + OpenShiftConfig openshift.Config `alloy:"openshift,block,optional"` // KubernetesNode contains user-specified configurations for the K8SNode detector - KubernetesNodeConfig k8snode.Config `river:"kubernetes_node,block,optional"` + KubernetesNodeConfig k8snode.Config `alloy:"kubernetes_node,block,optional"` } func (dc *DetectorConfig) SetToDefault() { diff --git a/internal/component/otelcol/processor/span/span.go b/internal/component/otelcol/processor/span/span.go index 9c1fb5ef30..57e4884709 100644 --- a/internal/component/otelcol/processor/span/span.go +++ b/internal/component/otelcol/processor/span/span.go @@ -31,16 +31,16 @@ func 
init() {
// Arguments configures the otelcol.processor.span component.
type Arguments struct {
- Match otelcol.MatchConfig `river:",squash"`
+ Match otelcol.MatchConfig `alloy:",squash"`
// Name specifies the components required to re-name a span.
- Name Name `river:"name,block,optional"`
+ Name Name `alloy:"name,block,optional"`
// SetStatus specifies the status which should be set for this span.
- SetStatus *Status `river:"status,block,optional"`
+ SetStatus *Status `alloy:"status,block,optional"`
// Output configures where to send processed data. Required.
- Output *otelcol.ConsumerArguments `river:"output,block"`
+ Output *otelcol.ConsumerArguments `alloy:"output,block"`
}
var (
@@ -132,15 +132,15 @@ type Name struct {
// will occur.
// Note: The new span name is constructed in order of the `from_attributes`
// specified in the configuration. This field is required and cannot be empty.
- FromAttributes []string `river:"from_attributes,attr,optional"`
+ FromAttributes []string `alloy:"from_attributes,attr,optional"`
// Separator is the string used to separate attribute values in the new
// span name. If no value is set, no separator is used between attribute
// values. Used with FromAttributes only.
- Separator string `river:"separator,attr,optional"`
+ Separator string `alloy:"separator,attr,optional"`
// ToAttributes specifies a configuration to extract attributes from the span name.
- ToAttributes *ToAttributes `river:"to_attributes,block,optional"`
+ ToAttributes *ToAttributes `alloy:"to_attributes,block,optional"`
}
func (n *Name) Convert() *spanprocessor.Name {
@@ -167,12 +167,12 @@ type ToAttributes struct {
// already exist in the span then they will be overwritten. The process is repeated
// for all rules in the order they are specified. Each subsequent rule works on the
// span name that is the output after processing the previous rule.
- Rules []string `river:"rules,attr"`
+ Rules []string `alloy:"rules,attr"`
// BreakAfterMatch specifies if processing of rules should stop after the first
// match. If it is false, rule processing will continue to be performed over the
// modified span name.
- BreakAfterMatch bool `river:"break_after_match,attr,optional"`
+ BreakAfterMatch bool `alloy:"break_after_match,attr,optional"`
}
// DefaultArguments holds default settings for Arguments.
@@ -203,10 +203,10 @@ func (ta *ToAttributes) Convert() *spanprocessor.ToAttributes {
type Status struct {
// Code is one of three values: "Ok", "Error", or "Unset". Please check:
// https://github.com/open-telemetry/opentelemetry-specification/blob/main/specification/trace/api.md#set-status
- Code string `river:"code,attr"`
+ Code string `alloy:"code,attr"`
// Description is an optional field documenting Error statuses.
- Description string `river:"description,attr,optional"`
+ Description string `alloy:"description,attr,optional"`
}
var (
diff --git a/internal/component/otelcol/processor/tail_sampling/tail_sampling.go b/internal/component/otelcol/processor/tail_sampling/tail_sampling.go
index c06fd3a0a8..9eb6c627dc 100644
--- a/internal/component/otelcol/processor/tail_sampling/tail_sampling.go
+++ b/internal/component/otelcol/processor/tail_sampling/tail_sampling.go
@@ -30,12 +30,12 @@ func init() {
// Arguments configures the otelcol.processor.tail_sampling component.
type Arguments struct {
- PolicyCfgs []PolicyConfig `river:"policy,block"`
- DecisionWait time.Duration `river:"decision_wait,attr,optional"`
- NumTraces uint64 `river:"num_traces,attr,optional"`
- ExpectedNewTracesPerSec uint64 `river:"expected_new_traces_per_sec,attr,optional"`
+ PolicyCfgs []PolicyConfig `alloy:"policy,block"`
+ DecisionWait time.Duration `alloy:"decision_wait,attr,optional"`
+ NumTraces uint64 `alloy:"num_traces,attr,optional"`
+ ExpectedNewTracesPerSec uint64 `alloy:"expected_new_traces_per_sec,attr,optional"`
// Output configures where to send processed data. Required.
- Output *otelcol.ConsumerArguments `river:"output,block"`
+ Output *otelcol.ConsumerArguments `alloy:"output,block"`
}
var (
diff --git a/internal/component/otelcol/processor/tail_sampling/types.go b/internal/component/otelcol/processor/tail_sampling/types.go
index 36c97adca2..3c35a36189 100644
--- a/internal/component/otelcol/processor/tail_sampling/types.go
+++ b/internal/component/otelcol/processor/tail_sampling/types.go
@@ -12,13 +12,13 @@ import (
)
type PolicyConfig struct {
- SharedPolicyConfig SharedPolicyConfig `river:",squash"`
+ SharedPolicyConfig SharedPolicyConfig `alloy:",squash"`
// Configs for defining a composite policy
- CompositeConfig CompositeConfig `river:"composite,block,optional"`
+ CompositeConfig CompositeConfig `alloy:"composite,block,optional"`
// Configs for defining an "and" policy
- AndConfig AndConfig `river:"and,block,optional"`
+ AndConfig AndConfig `alloy:"and,block,optional"`
}
func (policyConfig PolicyConfig) Convert() tsp.PolicyCfg {
@@ -46,25 +46,25 @@ func (policyConfig PolicyConfig) Convert() tsp.PolicyCfg {
// This cannot currently have a Convert() because tsp.sharedPolicyCfg isn't public
type SharedPolicyConfig struct {
- Name string `river:"name,attr"`
- Type string `river:"type,attr"`
- LatencyConfig LatencyConfig `river:"latency,block,optional"`
- NumericAttributeConfig NumericAttributeConfig `river:"numeric_attribute,block,optional"`
- ProbabilisticConfig ProbabilisticConfig `river:"probabilistic,block,optional"`
- StatusCodeConfig StatusCodeConfig `river:"status_code,block,optional"`
- StringAttributeConfig StringAttributeConfig `river:"string_attribute,block,optional"`
- RateLimitingConfig RateLimitingConfig `river:"rate_limiting,block,optional"`
- SpanCountConfig SpanCountConfig `river:"span_count,block,optional"`
- BooleanAttributeConfig BooleanAttributeConfig `river:"boolean_attribute,block,optional"`
- OttlConditionConfig OttlConditionConfig `river:"ottl_condition,block,optional"`
- TraceStateConfig TraceStateConfig `river:"trace_state,block,optional"`
+ Name string `alloy:"name,attr"`
+ Type string `alloy:"type,attr"`
+ LatencyConfig LatencyConfig `alloy:"latency,block,optional"`
+ NumericAttributeConfig NumericAttributeConfig `alloy:"numeric_attribute,block,optional"`
+ ProbabilisticConfig ProbabilisticConfig `alloy:"probabilistic,block,optional"`
+ StatusCodeConfig StatusCodeConfig `alloy:"status_code,block,optional"`
+ StringAttributeConfig StringAttributeConfig `alloy:"string_attribute,block,optional"`
+ RateLimitingConfig RateLimitingConfig `alloy:"rate_limiting,block,optional"`
+ SpanCountConfig SpanCountConfig `alloy:"span_count,block,optional"`
+ BooleanAttributeConfig BooleanAttributeConfig `alloy:"boolean_attribute,block,optional"`
+ OttlConditionConfig OttlConditionConfig `alloy:"ottl_condition,block,optional"`
+ TraceStateConfig TraceStateConfig `alloy:"trace_state,block,optional"`
}
// LatencyConfig holds the configurable settings to create a latency filter sampling policy
// evaluator
type LatencyConfig struct {
// ThresholdMs in milliseconds.
- ThresholdMs int64 `river:"threshold_ms,attr"`
+ ThresholdMs int64 `alloy:"threshold_ms,attr"`
}
func (latencyConfig LatencyConfig) Convert() tsp.LatencyCfg {
@@ -77,15 +77,15 @@ func (latencyConfig LatencyConfig) Convert() tsp.LatencyCfg {
// sampling policy evaluator.
type NumericAttributeConfig struct {
// Tag that the filter is going to be matching against.
- Key string `river:"key,attr"`
+ Key string `alloy:"key,attr"`
// MinValue is the minimum value of the attribute to be considered a match.
- MinValue int64 `river:"min_value,attr"`
+ MinValue int64 `alloy:"min_value,attr"`
// MaxValue is the maximum value of the attribute to be considered a match.
- MaxValue int64 `river:"max_value,attr"`
+ MaxValue int64 `alloy:"max_value,attr"`
// InvertMatch indicates that values must not match against attribute values.
// If InvertMatch is true and Values is equal to '123', all other values will be sampled except '123'.
// Also, if the specified Key does not match any resource or span attributes, data will be sampled.
- InvertMatch bool `river:"invert_match,attr,optional"`
+ InvertMatch bool `alloy:"invert_match,attr,optional"`
}
func (numericAttributeConfig NumericAttributeConfig) Convert() tsp.NumericAttributeCfg {
@@ -103,10 +103,10 @@ type ProbabilisticConfig struct {
// HashSalt allows one to configure the hashing salts. This is important in scenarios where multiple layers of collectors
// have different sampling rates: if they use the same salt, all passing one layer may pass the other even if they have
// different sampling rates; configuring different salts avoids that.
- HashSalt string `river:"hash_salt,attr,optional"`
+ HashSalt string `alloy:"hash_salt,attr,optional"`
// SamplingPercentage is the percentage rate at which traces are going to be sampled. Defaults to zero, i.e.: no sample.
// Values greater than or equal to 100 are treated as "sample all traces".
- SamplingPercentage float64 `river:"sampling_percentage,attr"`
+ SamplingPercentage float64 `alloy:"sampling_percentage,attr"`
}
func (probabilisticConfig ProbabilisticConfig) Convert() tsp.ProbabilisticCfg {
@@ -119,7 +119,7 @@ func (probabilisticConfig ProbabilisticConfig) Convert() tsp.ProbabilisticCfg {
// StatusCodeConfig holds the configurable settings to create a status code filter sampling
// policy evaluator.
type StatusCodeConfig struct {
- StatusCodes []string `river:"status_codes,attr"`
+ StatusCodes []string `alloy:"status_codes,attr"`
}
func (statusCodeConfig StatusCodeConfig) Convert() tsp.StatusCodeCfg {
@@ -132,20 +132,20 @@ func (statusCodeConfig StatusCodeConfig) Convert() tsp.StatusCodeCfg {
// sampling policy evaluator.
type StringAttributeConfig struct {
// Tag that the filter is going to be matching against.
- Key string `river:"key,attr"`
+ Key string `alloy:"key,attr"`
// Values indicates the set of values or regular expressions to use when matching against attribute values.
// StringAttribute Policy will apply exact value match on Values unless EnabledRegexMatching is true.
- Values []string `river:"values,attr"`
+ Values []string `alloy:"values,attr"`
// EnabledRegexMatching determines whether to match attribute values by regexp string.
- EnabledRegexMatching bool `river:"enabled_regex_matching,attr,optional"` + EnabledRegexMatching bool `alloy:"enabled_regex_matching,attr,optional"` // CacheMaxSize is the maximum number of attribute entries of LRU Cache that stores the matched result // from the regular expressions defined in Values. // CacheMaxSize will not be used if EnabledRegexMatching is set to false. - CacheMaxSize int `river:"cache_max_size,attr,optional"` + CacheMaxSize int `alloy:"cache_max_size,attr,optional"` // InvertMatch indicates that values or regular expressions must not match against attribute values. // If InvertMatch is true and Values is equal to 'acme', all other values will be sampled except 'acme'. // Also, if the specified Key does not match on any resource or span attributes, data will be sampled. - InvertMatch bool `river:"invert_match,attr,optional"` + InvertMatch bool `alloy:"invert_match,attr,optional"` } func (stringAttributeConfig StringAttributeConfig) Convert() tsp.StringAttributeCfg { @@ -162,7 +162,7 @@ func (stringAttributeConfig StringAttributeConfig) Convert() tsp.StringAttribute // sampling policy evaluator. type RateLimitingConfig struct { // SpansPerSecond sets the limit on the maximum nuber of spans that can be processed each second. - SpansPerSecond int64 `river:"spans_per_second,attr"` + SpansPerSecond int64 `alloy:"spans_per_second,attr"` } func (rateLimitingConfig RateLimitingConfig) Convert() tsp.RateLimitingCfg { @@ -175,8 +175,8 @@ func (rateLimitingConfig RateLimitingConfig) Convert() tsp.RateLimitingCfg { // sampling policy evaluator type SpanCountConfig struct { // Minimum number of spans in a Trace - MinSpans int32 `river:"min_spans,attr"` - MaxSpans int32 `river:"max_spans,attr,optional"` + MinSpans int32 `alloy:"min_spans,attr"` + MaxSpans int32 `alloy:"max_spans,attr,optional"` } func (spanCountConfig SpanCountConfig) Convert() tsp.SpanCountCfg { @@ -190,10 +190,10 @@ func (spanCountConfig SpanCountConfig) Convert() tsp.SpanCountCfg { // sampling policy evaluator. type BooleanAttributeConfig struct { // Tag that the filter is going to be matching against. - Key string `river:"key,attr"` + Key string `alloy:"key,attr"` // Value indicate the bool value, either true or false to use when matching against attribute values. // BooleanAttribute Policy will apply exact value match on Value - Value bool `river:"value,attr"` + Value bool `alloy:"value,attr"` } func (booleanAttributeConfig BooleanAttributeConfig) Convert() tsp.BooleanAttributeCfg { @@ -264,9 +264,9 @@ func (e *ErrorMode) UnmarshalText(text []byte) error { // OttlConditionConfig holds the configurable setting to create a OTTL condition filter // sampling policy evaluator. type OttlConditionConfig struct { - ErrorMode ErrorMode `river:"error_mode,attr"` - SpanConditions []string `river:"span,attr,optional"` - SpanEventConditions []string `river:"spanevent,attr,optional"` + ErrorMode ErrorMode `alloy:"error_mode,attr"` + SpanConditions []string `alloy:"span,attr,optional"` + SpanEventConditions []string `alloy:"spanevent,attr,optional"` } func (ottlConditionConfig OttlConditionConfig) Convert() tsp.OTTLConditionCfg { @@ -279,9 +279,9 @@ func (ottlConditionConfig OttlConditionConfig) Convert() tsp.OTTLConditionCfg { type TraceStateConfig struct { // Tag that the filter is going to be matching against. - Key string `river:"key,attr"` + Key string `alloy:"key,attr"` // Values indicate the set of values to use when matching against trace_state values. 
- Values []string `river:"values,attr"` + Values []string `alloy:"values,attr"` } func (traceStateConfig TraceStateConfig) Convert() tsp.TraceStateCfg { @@ -294,10 +294,10 @@ func (traceStateConfig TraceStateConfig) Convert() tsp.TraceStateCfg { // CompositeConfig holds the configurable settings to create a composite // sampling policy evaluator. type CompositeConfig struct { - MaxTotalSpansPerSecond int64 `river:"max_total_spans_per_second,attr"` - PolicyOrder []string `river:"policy_order,attr"` - SubPolicyCfg []CompositeSubPolicyConfig `river:"composite_sub_policy,block,optional"` - RateAllocation []RateAllocationConfig `river:"rate_allocation,block,optional"` + MaxTotalSpansPerSecond int64 `alloy:"max_total_spans_per_second,attr"` + PolicyOrder []string `alloy:"policy_order,attr"` + SubPolicyCfg []CompositeSubPolicyConfig `alloy:"composite_sub_policy,block,optional"` + RateAllocation []RateAllocationConfig `alloy:"rate_allocation,block,optional"` } func (compositeConfig CompositeConfig) Convert() tsp.CompositeCfg { @@ -321,10 +321,10 @@ func (compositeConfig CompositeConfig) Convert() tsp.CompositeCfg { // CompositeSubPolicyConfig holds the common configuration to all policies under composite policy. type CompositeSubPolicyConfig struct { - SharedPolicyConfig SharedPolicyConfig `river:",squash"` + SharedPolicyConfig SharedPolicyConfig `alloy:",squash"` // Configs for and policy evaluator. - AndConfig AndConfig `river:"and,block,optional"` + AndConfig AndConfig `alloy:"and,block,optional"` } func (compositeSubPolicyConfig CompositeSubPolicyConfig) Convert() tsp.CompositeSubPolicyCfg { @@ -351,8 +351,8 @@ func (compositeSubPolicyConfig CompositeSubPolicyConfig) Convert() tsp.Composite // RateAllocationConfig used within composite policy type RateAllocationConfig struct { - Policy string `river:"policy,attr"` - Percent int64 `river:"percent,attr"` + Policy string `alloy:"policy,attr"` + Percent int64 `alloy:"percent,attr"` } func (rateAllocationConfig RateAllocationConfig) Convert() tsp.RateAllocationCfg { @@ -363,7 +363,7 @@ func (rateAllocationConfig RateAllocationConfig) Convert() tsp.RateAllocationCfg } type AndConfig struct { - SubPolicyConfig []AndSubPolicyConfig `river:"and_sub_policy,block"` + SubPolicyConfig []AndSubPolicyConfig `alloy:"and_sub_policy,block"` } func (andConfig AndConfig) Convert() tsp.AndCfg { @@ -379,7 +379,7 @@ func (andConfig AndConfig) Convert() tsp.AndCfg { // AndSubPolicyConfig holds the common configuration to all policies under and policy. type AndSubPolicyConfig struct { - SharedPolicyConfig SharedPolicyConfig `river:",squash"` + SharedPolicyConfig SharedPolicyConfig `alloy:",squash"` } func (andSubPolicyConfig AndSubPolicyConfig) Convert() tsp.AndSubPolicyCfg { diff --git a/internal/component/otelcol/processor/transform/transform.go b/internal/component/otelcol/processor/transform/transform.go index aabae21e4c..3dd62edb7f 100644 --- a/internal/component/otelcol/processor/transform/transform.go +++ b/internal/component/otelcol/processor/transform/transform.go @@ -56,20 +56,20 @@ func (c *ContextID) UnmarshalText(text []byte) error { type ContextStatementsSlice []ContextStatements type ContextStatements struct { - Context ContextID `river:"context,attr"` - Statements []string `river:"statements,attr"` + Context ContextID `alloy:"context,attr"` + Statements []string `alloy:"statements,attr"` } // Arguments configures the otelcol.processor.transform component. 
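
For illustration: the rename above is purely a change of the struct tag key, which Go resolves at runtime through the standard reflect package. The following minimal sketch (not part of this patch; the real decoder lives elsewhere in the repository and may behave differently) shows how a decoder could look up the new "alloy" key, with a fallback to the legacy "river" key as a hypothetical migration aid:

package main

import (
	"fmt"
	"reflect"
)

type Arguments struct {
	DecisionWait string `alloy:"decision_wait,attr,optional"`
}

// tagFor returns a field's alloy tag, falling back to the legacy river key
// so structs still mid-migration would resolve (assumed behavior, for
// illustration only).
func tagFor(f reflect.StructField) string {
	if v, ok := f.Tag.Lookup("alloy"); ok {
		return v
	}
	return f.Tag.Get("river")
}

func main() {
	t := reflect.TypeOf(Arguments{})
	for i := 0; i < t.NumField(); i++ {
		fmt.Println(tagFor(t.Field(i))) // prints: decision_wait,attr,optional
	}
}
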
diff --git a/internal/component/otelcol/processor/transform/transform.go b/internal/component/otelcol/processor/transform/transform.go
index aabae21e4c..3dd62edb7f 100644
--- a/internal/component/otelcol/processor/transform/transform.go
+++ b/internal/component/otelcol/processor/transform/transform.go
@@ -56,20 +56,20 @@ func (c *ContextID) UnmarshalText(text []byte) error {
 type ContextStatementsSlice []ContextStatements
 
 type ContextStatements struct {
-    Context ContextID `river:"context,attr"`
-    Statements []string `river:"statements,attr"`
+    Context ContextID `alloy:"context,attr"`
+    Statements []string `alloy:"statements,attr"`
 }
 
 // Arguments configures the otelcol.processor.transform component.
 type Arguments struct {
     // ErrorMode determines how the processor reacts to errors that occur while processing a statement.
-    ErrorMode ottl.ErrorMode `river:"error_mode,attr,optional"`
-    TraceStatements ContextStatementsSlice `river:"trace_statements,block,optional"`
-    MetricStatements ContextStatementsSlice `river:"metric_statements,block,optional"`
-    LogStatements ContextStatementsSlice `river:"log_statements,block,optional"`
+    ErrorMode ottl.ErrorMode `alloy:"error_mode,attr,optional"`
+    TraceStatements ContextStatementsSlice `alloy:"trace_statements,block,optional"`
+    MetricStatements ContextStatementsSlice `alloy:"metric_statements,block,optional"`
+    LogStatements ContextStatementsSlice `alloy:"log_statements,block,optional"`
 
     // Output configures where to send processed data. Required.
-    Output *otelcol.ConsumerArguments `river:"output,block"`
+    Output *otelcol.ConsumerArguments `alloy:"output,block"`
 }
 
 var (
diff --git a/internal/component/otelcol/receiver/jaeger/jaeger.go b/internal/component/otelcol/receiver/jaeger/jaeger.go
index c7817cc68a..81d11952b8 100644
--- a/internal/component/otelcol/receiver/jaeger/jaeger.go
+++ b/internal/component/otelcol/receiver/jaeger/jaeger.go
@@ -31,13 +31,13 @@ func init() {
 
 // Arguments configures the otelcol.receiver.jaeger component.
 type Arguments struct {
-    Protocols ProtocolsArguments `river:"protocols,block"`
+    Protocols ProtocolsArguments `alloy:"protocols,block"`
 
     // DebugMetrics configures component internal metrics. Optional.
-    DebugMetrics otelcol.DebugMetricsArguments `river:"debug_metrics,block,optional"`
+    DebugMetrics otelcol.DebugMetricsArguments `alloy:"debug_metrics,block,optional"`
 
     // Output configures where to send received data. Required.
-    Output *otelcol.ConsumerArguments `river:"output,block"`
+    Output *otelcol.ConsumerArguments `alloy:"output,block"`
 }
 
 var _ receiver.Arguments = Arguments{}
@@ -91,14 +91,14 @@ func (args Arguments) NextConsumers() *otelcol.ConsumerArguments {
 // ProtocolsArguments configures protocols for otelcol.receiver.jaeger to
 // listen on.
 type ProtocolsArguments struct {
-    GRPC *GRPC `river:"grpc,block,optional"`
-    ThriftHTTP *ThriftHTTP `river:"thrift_http,block,optional"`
-    ThriftBinary *ThriftBinary `river:"thrift_binary,block,optional"`
-    ThriftCompact *ThriftCompact `river:"thrift_compact,block,optional"`
+    GRPC *GRPC `alloy:"grpc,block,optional"`
+    ThriftHTTP *ThriftHTTP `alloy:"thrift_http,block,optional"`
+    ThriftBinary *ThriftBinary `alloy:"thrift_binary,block,optional"`
+    ThriftCompact *ThriftCompact `alloy:"thrift_compact,block,optional"`
 }
 
 type GRPC struct {
-    GRPCServerArguments *otelcol.GRPCServerArguments `river:",squash"`
+    GRPCServerArguments *otelcol.GRPCServerArguments `alloy:",squash"`
 }
 
 // SetToDefault implements river.Defaulter.
@@ -121,7 +121,7 @@ func (args *GRPC) Convert() *otelconfiggrpc.GRPCServerSettings {
 }
 
 type ThriftHTTP struct {
-    HTTPServerArguments *otelcol.HTTPServerArguments `river:",squash"`
+    HTTPServerArguments *otelcol.HTTPServerArguments `alloy:",squash"`
 }
 
 // SetToDefault implements river.Defaulter.
@@ -144,11 +144,11 @@ func (args *ThriftHTTP) Convert() *otelconfighttp.HTTPServerSettings {
 
 // ProtocolUDP configures a UDP server.
 type ProtocolUDP struct {
-    Endpoint string `river:"endpoint,attr,optional"`
-    QueueSize int `river:"queue_size,attr,optional"`
-    MaxPacketSize units.Base2Bytes `river:"max_packet_size,attr,optional"`
-    Workers int `river:"workers,attr,optional"`
-    SocketBufferSize units.Base2Bytes `river:"socket_buffer_size,attr,optional"`
+    Endpoint string `alloy:"endpoint,attr,optional"`
+    QueueSize int `alloy:"queue_size,attr,optional"`
+    MaxPacketSize units.Base2Bytes `alloy:"max_packet_size,attr,optional"`
+    Workers int `alloy:"workers,attr,optional"`
+    SocketBufferSize units.Base2Bytes `alloy:"socket_buffer_size,attr,optional"`
 }
 
 // Convert converts proto into the upstream type.
@@ -170,7 +170,7 @@ func (proto *ProtocolUDP) Convert() *jaegerreceiver.ProtocolUDP {
 
 // ThriftCompact wraps ProtocolUDP and provides additional behavior.
 type ThriftCompact struct {
-    ProtocolUDP *ProtocolUDP `river:",squash"`
+    ProtocolUDP *ProtocolUDP `alloy:",squash"`
 }
 
 // SetToDefault implements river.Defaulter.
@@ -196,7 +196,7 @@ func (args *ThriftCompact) Convert() *jaegerreceiver.ProtocolUDP {
 
 // ThriftCompact wraps ProtocolUDP and provides additional behavior.
 type ThriftBinary struct {
-    ProtocolUDP *ProtocolUDP `river:",squash"`
+    ProtocolUDP *ProtocolUDP `alloy:",squash"`
 }
 
 // SetToDefault implements river.Defaulter.
diff --git a/internal/component/otelcol/receiver/kafka/kafka.go b/internal/component/otelcol/receiver/kafka/kafka.go
index 109add84a1..aa1a3e4e52 100644
--- a/internal/component/otelcol/receiver/kafka/kafka.go
+++ b/internal/component/otelcol/receiver/kafka/kafka.go
@@ -31,25 +31,25 @@ func init() {
 
 // Arguments configures the otelcol.receiver.kafka component.
 type Arguments struct {
-    Brokers []string `river:"brokers,attr"`
-    ProtocolVersion string `river:"protocol_version,attr"`
-    Topic string `river:"topic,attr,optional"`
-    Encoding string `river:"encoding,attr,optional"`
-    GroupID string `river:"group_id,attr,optional"`
-    ClientID string `river:"client_id,attr,optional"`
-    InitialOffset string `river:"initial_offset,attr,optional"`
-
-    Authentication AuthenticationArguments `river:"authentication,block,optional"`
-    Metadata MetadataArguments `river:"metadata,block,optional"`
-    AutoCommit AutoCommitArguments `river:"autocommit,block,optional"`
-    MessageMarking MessageMarkingArguments `river:"message_marking,block,optional"`
-    HeaderExtraction HeaderExtraction `river:"header_extraction,block,optional"`
+    Brokers []string `alloy:"brokers,attr"`
+    ProtocolVersion string `alloy:"protocol_version,attr"`
+    Topic string `alloy:"topic,attr,optional"`
+    Encoding string `alloy:"encoding,attr,optional"`
+    GroupID string `alloy:"group_id,attr,optional"`
+    ClientID string `alloy:"client_id,attr,optional"`
+    InitialOffset string `alloy:"initial_offset,attr,optional"`
+
+    Authentication AuthenticationArguments `alloy:"authentication,block,optional"`
+    Metadata MetadataArguments `alloy:"metadata,block,optional"`
+    AutoCommit AutoCommitArguments `alloy:"autocommit,block,optional"`
+    MessageMarking MessageMarkingArguments `alloy:"message_marking,block,optional"`
+    HeaderExtraction HeaderExtraction `alloy:"header_extraction,block,optional"`
 
     // DebugMetrics configures component internal metrics. Optional.
-    DebugMetrics otelcol.DebugMetricsArguments `river:"debug_metrics,block,optional"`
+    DebugMetrics otelcol.DebugMetricsArguments `alloy:"debug_metrics,block,optional"`
 
     // Output configures where to send received data. Required.
-    Output *otelcol.ConsumerArguments `river:"output,block"`
+    Output *otelcol.ConsumerArguments `alloy:"output,block"`
 }
 
 var _ receiver.Arguments = Arguments{}
@@ -118,10 +118,10 @@ func (args Arguments) NextConsumers() *otelcol.ConsumerArguments {
 
 // AuthenticationArguments configures how to authenticate to the Kafka broker.
 type AuthenticationArguments struct {
-    Plaintext *PlaintextArguments `river:"plaintext,block,optional"`
-    SASL *SASLArguments `river:"sasl,block,optional"`
-    TLS *otelcol.TLSClientArguments `river:"tls,block,optional"`
-    Kerberos *KerberosArguments `river:"kerberos,block,optional"`
+    Plaintext *PlaintextArguments `alloy:"plaintext,block,optional"`
+    SASL *SASLArguments `alloy:"sasl,block,optional"`
+    TLS *otelcol.TLSClientArguments `alloy:"tls,block,optional"`
+    Kerberos *KerberosArguments `alloy:"kerberos,block,optional"`
 }
 
 // Convert converts args into the upstream type.
@@ -150,8 +150,8 @@ func (args AuthenticationArguments) Convert() map[string]interface{} {
 // PlaintextArguments configures plaintext authentication against the Kafka
 // broker.
 type PlaintextArguments struct {
-    Username string `river:"username,attr"`
-    Password alloytypes.Secret `river:"password,attr"`
+    Username string `alloy:"username,attr"`
+    Password alloytypes.Secret `alloy:"password,attr"`
 }
 
 // Convert converts args into the upstream type.
@@ -164,11 +164,11 @@ func (args PlaintextArguments) Convert() map[string]interface{} {
 
 // SASLArguments configures SASL authentication against the Kafka broker.
 type SASLArguments struct {
-    Username string `river:"username,attr"`
-    Password alloytypes.Secret `river:"password,attr"`
-    Mechanism string `river:"mechanism,attr"`
-    Version int `river:"version,attr,optional"`
-    AWSMSK AWSMSKArguments `river:"aws_msk,block,optional"`
+    Username string `alloy:"username,attr"`
+    Password alloytypes.Secret `alloy:"password,attr"`
+    Mechanism string `alloy:"mechanism,attr"`
+    Version int `alloy:"version,attr,optional"`
+    AWSMSK AWSMSKArguments `alloy:"aws_msk,block,optional"`
 }
 
 // Convert converts args into the upstream type.
@@ -185,8 +185,8 @@ func (args SASLArguments) Convert() map[string]interface{} {
 // AWSMSKArguments exposes additional SASL authentication measures required to
 // use the AWS_MSK_IAM mechanism.
 type AWSMSKArguments struct {
-    Region string `river:"region,attr"`
-    BrokerAddr string `river:"broker_addr,attr"`
+    Region string `alloy:"region,attr"`
+    BrokerAddr string `alloy:"broker_addr,attr"`
 }
 
 // Convert converts args into the upstream type.
@@ -200,13 +200,13 @@ func (args AWSMSKArguments) Convert() map[string]interface{} {
 // KerberosArguments configures Kerberos authentication against the Kafka
 // broker.
 type KerberosArguments struct {
-    ServiceName string `river:"service_name,attr,optional"`
-    Realm string `river:"realm,attr,optional"`
-    UseKeyTab bool `river:"use_keytab,attr,optional"`
-    Username string `river:"username,attr"`
-    Password alloytypes.Secret `river:"password,attr,optional"`
-    ConfigPath string `river:"config_file,attr,optional"`
-    KeyTabPath string `river:"keytab_file,attr,optional"`
+    ServiceName string `alloy:"service_name,attr,optional"`
+    Realm string `alloy:"realm,attr,optional"`
+    UseKeyTab bool `alloy:"use_keytab,attr,optional"`
+    Username string `alloy:"username,attr"`
+    Password alloytypes.Secret `alloy:"password,attr,optional"`
+    ConfigPath string `alloy:"config_file,attr,optional"`
+    KeyTabPath string `alloy:"keytab_file,attr,optional"`
 }
 
 // Convert converts args into the upstream type.
@@ -225,8 +225,8 @@ func (args KerberosArguments) Convert() map[string]interface{} {
 // MetadataArguments configures how the otelcol.receiver.kafka component will
 // retrieve metadata from the Kafka broker.
 type MetadataArguments struct {
-    IncludeAllTopics bool `river:"include_all_topics,attr,optional"`
-    Retry MetadataRetryArguments `river:"retry,block,optional"`
+    IncludeAllTopics bool `alloy:"include_all_topics,attr,optional"`
+    Retry MetadataRetryArguments `alloy:"retry,block,optional"`
 }
 
 func (args *MetadataArguments) SetToDefault() {
@@ -251,8 +251,8 @@ func (args MetadataArguments) Convert() kafkaexporter.Metadata {
 // Kafka broker. Retrying is useful to avoid race conditions when the Kafka
 // broker is starting at the same time as the otelcol.receiver.kafka component.
 type MetadataRetryArguments struct {
-    MaxRetries int `river:"max_retries,attr,optional"`
-    Backoff time.Duration `river:"backoff,attr,optional"`
+    MaxRetries int `alloy:"max_retries,attr,optional"`
+    Backoff time.Duration `alloy:"backoff,attr,optional"`
 }
 
 // Convert converts args into the upstream type.
@@ -266,8 +266,8 @@ func (args MetadataRetryArguments) Convert() kafkaexporter.MetadataRetry {
 // AutoCommitArguments configures how to automatically commit updated topic
 // offsets back to the Kafka broker.
 type AutoCommitArguments struct {
-    Enable bool `river:"enable,attr,optional"`
-    Interval time.Duration `river:"interval,attr,optional"`
+    Enable bool `alloy:"enable,attr,optional"`
+    Interval time.Duration `alloy:"interval,attr,optional"`
 }
 
 func (args *AutoCommitArguments) SetToDefault() {
@@ -287,8 +287,8 @@ func (args AutoCommitArguments) Convert() kafkareceiver.AutoCommit {
 
 // MessageMarkingArguments configures when Kafka messages are marked as read.
 type MessageMarkingArguments struct {
-    AfterExecution bool `river:"after_execution,attr,optional"`
-    IncludeUnsuccessful bool `river:"include_unsuccessful,attr,optional"`
+    AfterExecution bool `alloy:"after_execution,attr,optional"`
+    IncludeUnsuccessful bool `alloy:"include_unsuccessful,attr,optional"`
 }
 
 func (args *MessageMarkingArguments) SetToDefault() {
@@ -307,8 +307,8 @@ func (args MessageMarkingArguments) Convert() kafkareceiver.MessageMarking {
 }
 
 type HeaderExtraction struct {
-    ExtractHeaders bool `river:"extract_headers,attr,optional"`
-    Headers []string `river:"headers,attr,optional"`
+    ExtractHeaders bool `alloy:"extract_headers,attr,optional"`
+    Headers []string `alloy:"headers,attr,optional"`
 }
 
 func (h *HeaderExtraction) SetToDefault() {
diff --git a/internal/component/otelcol/receiver/loki/loki.go b/internal/component/otelcol/receiver/loki/loki.go
index 6904409399..7ff2e594c1 100644
--- a/internal/component/otelcol/receiver/loki/loki.go
+++ b/internal/component/otelcol/receiver/loki/loki.go
@@ -37,13 +37,13 @@ var hintAttributes = "loki.attribute.labels"
 
 // Arguments configures the otelcol.receiver.loki component.
 type Arguments struct {
     // Output configures where to send received data. Required.
-    Output *otelcol.ConsumerArguments `river:"output,block"`
+    Output *otelcol.ConsumerArguments `alloy:"output,block"`
 }
 
 // Exports holds the receiver that is used to send log entries to the
 // loki.write component.
 type Exports struct {
-    Receiver loki.LogsReceiver `river:"receiver,attr"`
+    Receiver loki.LogsReceiver `alloy:"receiver,attr"`
 }
 
 // Component is the otelcol.receiver.loki component.
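
Because a rename like this touches hundreds of fields, a mechanical audit is useful for catching stragglers. The sketch below (illustrative only, not part of this patch) walks a struct type with the standard reflect package and flags any field still carrying the legacy "river" tag key:

package main

import (
	"fmt"
	"reflect"
)

type Exports struct {
	Receiver string `river:"receiver,attr"` // deliberately left on the old key
}

// auditTags reports struct fields that still use the legacy "river" tag key,
// recursing into nested struct types.
func auditTags(t reflect.Type) {
	if t.Kind() != reflect.Struct {
		return
	}
	for i := 0; i < t.NumField(); i++ {
		f := t.Field(i)
		if _, ok := f.Tag.Lookup("river"); ok {
			fmt.Printf("%s.%s still uses the river tag\n", t.Name(), f.Name)
		}
		auditTags(f.Type)
	}
}

func main() {
	auditTags(reflect.TypeOf(Exports{})) // prints: Exports.Receiver still uses the river tag
}
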
diff --git a/internal/component/otelcol/receiver/opencensus/opencensus.go b/internal/component/otelcol/receiver/opencensus/opencensus.go
index 2de11c2578..64ed1f25ba 100644
--- a/internal/component/otelcol/receiver/opencensus/opencensus.go
+++ b/internal/component/otelcol/receiver/opencensus/opencensus.go
@@ -27,15 +27,15 @@ func init() {
 
 // Arguments configures the otelcol.receiver.opencensus component.
 type Arguments struct {
-    CorsAllowedOrigins []string `river:"cors_allowed_origins,attr,optional"`
+    CorsAllowedOrigins []string `alloy:"cors_allowed_origins,attr,optional"`
 
-    GRPC otelcol.GRPCServerArguments `river:",squash"`
+    GRPC otelcol.GRPCServerArguments `alloy:",squash"`
 
     // DebugMetrics configures component internal metrics. Optional.
-    DebugMetrics otelcol.DebugMetricsArguments `river:"debug_metrics,block,optional"`
+    DebugMetrics otelcol.DebugMetricsArguments `alloy:"debug_metrics,block,optional"`
 
     // Output configures where to send received data. Required.
-    Output *otelcol.ConsumerArguments `river:"output,block"`
+    Output *otelcol.ConsumerArguments `alloy:"output,block"`
 }
 
 var _ receiver.Arguments = Arguments{}
diff --git a/internal/component/otelcol/receiver/otlp/otlp.go b/internal/component/otelcol/receiver/otlp/otlp.go
index 9beb1f7ca3..55863fadee 100644
--- a/internal/component/otelcol/receiver/otlp/otlp.go
+++ b/internal/component/otelcol/receiver/otlp/otlp.go
@@ -30,27 +30,27 @@ func init() {
 
 // Arguments configures the otelcol.receiver.otlp component.
 type Arguments struct {
-    GRPC *GRPCServerArguments `river:"grpc,block,optional"`
-    HTTP *HTTPConfigArguments `river:"http,block,optional"`
+    GRPC *GRPCServerArguments `alloy:"grpc,block,optional"`
+    HTTP *HTTPConfigArguments `alloy:"http,block,optional"`
 
     // DebugMetrics configures component internal metrics. Optional.
-    DebugMetrics otelcol.DebugMetricsArguments `river:"debug_metrics,block,optional"`
+    DebugMetrics otelcol.DebugMetricsArguments `alloy:"debug_metrics,block,optional"`
 
     // Output configures where to send received data. Required.
-    Output *otelcol.ConsumerArguments `river:"output,block"`
+    Output *otelcol.ConsumerArguments `alloy:"output,block"`
 }
 
 type HTTPConfigArguments struct {
-    HTTPServerArguments *otelcol.HTTPServerArguments `river:",squash"`
+    HTTPServerArguments *otelcol.HTTPServerArguments `alloy:",squash"`
 
     // The URL path to receive traces on. If omitted "/v1/traces" will be used.
-    TracesURLPath string `river:"traces_url_path,attr,optional"`
+    TracesURLPath string `alloy:"traces_url_path,attr,optional"`
 
     // The URL path to receive metrics on. If omitted "/v1/metrics" will be used.
-    MetricsURLPath string `river:"metrics_url_path,attr,optional"`
+    MetricsURLPath string `alloy:"metrics_url_path,attr,optional"`
 
     // The URL path to receive logs on. If omitted "/v1/logs" will be used.
-    LogsURLPath string `river:"logs_url_path,attr,optional"`
+    LogsURLPath string `alloy:"logs_url_path,attr,optional"`
 }
 
 // Convert converts args into the upstream type.
diff --git a/internal/component/otelcol/receiver/prometheus/prometheus.go b/internal/component/otelcol/receiver/prometheus/prometheus.go
index e27488296b..c6aa4ebf59 100644
--- a/internal/component/otelcol/receiver/prometheus/prometheus.go
+++ b/internal/component/otelcol/receiver/prometheus/prometheus.go
@@ -40,13 +40,13 @@ func init() {
 
 // Arguments configures the otelcol.receiver.prometheus component.
 type Arguments struct {
     // Output configures where to send received data. Required.
-    Output *otelcol.ConsumerArguments `river:"output,block"`
+    Output *otelcol.ConsumerArguments `alloy:"output,block"`
 }
 
 // Exports are the set of fields exposed by the otelcol.receiver.prometheus
 // component.
 type Exports struct {
-    Receiver storage.Appendable `river:"receiver,attr"`
+    Receiver storage.Appendable `alloy:"receiver,attr"`
 }
 
 // Component is the otelcol.receiver.prometheus component.
diff --git a/internal/component/otelcol/receiver/vcenter/vcenter.go b/internal/component/otelcol/receiver/vcenter/vcenter.go
index b83f97f727..e1bad99588 100644
--- a/internal/component/otelcol/receiver/vcenter/vcenter.go
+++ b/internal/component/otelcol/receiver/vcenter/vcenter.go
@@ -31,7 +31,7 @@ func init() {
 }
 
 type MetricConfig struct {
-    Enabled bool `river:"enabled,attr"`
+    Enabled bool `alloy:"enabled,attr"`
 }
 
 func (r *MetricConfig) Convert() map[string]interface{} {
@@ -45,45 +45,45 @@ func (r *MetricConfig) Convert() map[string]interface{} {
 }
 
 type MetricsConfig struct {
-    VcenterClusterCPUEffective MetricConfig `river:"vcenter.cluster.cpu.effective,block,optional"`
-    VcenterClusterCPULimit MetricConfig `river:"vcenter.cluster.cpu.limit,block,optional"`
-    VcenterClusterHostCount MetricConfig `river:"vcenter.cluster.host.count,block,optional"`
-    VcenterClusterMemoryEffective MetricConfig `river:"vcenter.cluster.memory.effective,block,optional"`
-    VcenterClusterMemoryLimit MetricConfig `river:"vcenter.cluster.memory.limit,block,optional"`
-    VcenterClusterMemoryUsed MetricConfig `river:"vcenter.cluster.memory.used,block,optional"`
-    VcenterClusterVMCount MetricConfig `river:"vcenter.cluster.vm.count,block,optional"`
-    VcenterDatastoreDiskUsage MetricConfig `river:"vcenter.datastore.disk.usage,block,optional"`
-    VcenterDatastoreDiskUtilization MetricConfig `river:"vcenter.datastore.disk.utilization,block,optional"`
-    VcenterHostCPUUsage MetricConfig `river:"vcenter.host.cpu.usage,block,optional"`
-    VcenterHostCPUUtilization MetricConfig `river:"vcenter.host.cpu.utilization,block,optional"`
-    VcenterHostDiskLatencyAvg MetricConfig `river:"vcenter.host.disk.latency.avg,block,optional"`
-    VcenterHostDiskLatencyMax MetricConfig `river:"vcenter.host.disk.latency.max,block,optional"`
-    VcenterHostDiskThroughput MetricConfig `river:"vcenter.host.disk.throughput,block,optional"`
-    VcenterHostMemoryUsage MetricConfig `river:"vcenter.host.memory.usage,block,optional"`
-    VcenterHostMemoryUtilization MetricConfig `river:"vcenter.host.memory.utilization,block,optional"`
-    VcenterHostNetworkPacketCount MetricConfig `river:"vcenter.host.network.packet.count,block,optional"`
-    VcenterHostNetworkPacketErrors MetricConfig `river:"vcenter.host.network.packet.errors,block,optional"`
-    VcenterHostNetworkThroughput MetricConfig `river:"vcenter.host.network.throughput,block,optional"`
-    VcenterHostNetworkUsage MetricConfig `river:"vcenter.host.network.usage,block,optional"`
-    VcenterResourcePoolCPUShares MetricConfig `river:"vcenter.resource_pool.cpu.shares,block,optional"`
-    VcenterResourcePoolCPUUsage MetricConfig `river:"vcenter.resource_pool.cpu.usage,block,optional"`
-    VcenterResourcePoolMemoryShares MetricConfig `river:"vcenter.resource_pool.memory.shares,block,optional"`
-    VcenterResourcePoolMemoryUsage MetricConfig `river:"vcenter.resource_pool.memory.usage,block,optional"`
-    VcenterVMCPUUsage MetricConfig `river:"vcenter.vm.cpu.usage,block,optional"`
-    VcenterVMCPUUtilization MetricConfig `river:"vcenter.vm.cpu.utilization,block,optional"`
-    VcenterVMDiskLatencyAvg MetricConfig `river:"vcenter.vm.disk.latency.avg,block,optional"`
-    VcenterVMDiskLatencyMax MetricConfig `river:"vcenter.vm.disk.latency.max,block,optional"`
-    VcenterVMDiskThroughput MetricConfig `river:"vcenter.vm.disk.throughput,block,optional"`
-    VcenterVMDiskUsage MetricConfig `river:"vcenter.vm.disk.usage,block,optional"`
-    VcenterVMDiskUtilization MetricConfig `river:"vcenter.vm.disk.utilization,block,optional"`
-    VcenterVMMemoryBallooned MetricConfig `river:"vcenter.vm.memory.ballooned,block,optional"`
-    VcenterVMMemorySwapped MetricConfig `river:"vcenter.vm.memory.swapped,block,optional"`
-    VcenterVMMemorySwappedSsd MetricConfig `river:"vcenter.vm.memory.swapped_ssd,block,optional"`
-    VcenterVMMemoryUsage MetricConfig `river:"vcenter.vm.memory.usage,block,optional"`
-    VcenterVMMemoryUtilization MetricConfig `river:"vcenter.vm.memory.utilization,block,optional"`
-    VcenterVMNetworkPacketCount MetricConfig `river:"vcenter.vm.network.packet.count,block,optional"`
-    VcenterVMNetworkThroughput MetricConfig `river:"vcenter.vm.network.throughput,block,optional"`
-    VcenterVMNetworkUsage MetricConfig `river:"vcenter.vm.network.usage,block,optional"`
+    VcenterClusterCPUEffective MetricConfig `alloy:"vcenter.cluster.cpu.effective,block,optional"`
+    VcenterClusterCPULimit MetricConfig `alloy:"vcenter.cluster.cpu.limit,block,optional"`
+    VcenterClusterHostCount MetricConfig `alloy:"vcenter.cluster.host.count,block,optional"`
+    VcenterClusterMemoryEffective MetricConfig `alloy:"vcenter.cluster.memory.effective,block,optional"`
+    VcenterClusterMemoryLimit MetricConfig `alloy:"vcenter.cluster.memory.limit,block,optional"`
+    VcenterClusterMemoryUsed MetricConfig `alloy:"vcenter.cluster.memory.used,block,optional"`
+    VcenterClusterVMCount MetricConfig `alloy:"vcenter.cluster.vm.count,block,optional"`
+    VcenterDatastoreDiskUsage MetricConfig `alloy:"vcenter.datastore.disk.usage,block,optional"`
+    VcenterDatastoreDiskUtilization MetricConfig `alloy:"vcenter.datastore.disk.utilization,block,optional"`
+    VcenterHostCPUUsage MetricConfig `alloy:"vcenter.host.cpu.usage,block,optional"`
+    VcenterHostCPUUtilization MetricConfig `alloy:"vcenter.host.cpu.utilization,block,optional"`
+    VcenterHostDiskLatencyAvg MetricConfig `alloy:"vcenter.host.disk.latency.avg,block,optional"`
+    VcenterHostDiskLatencyMax MetricConfig `alloy:"vcenter.host.disk.latency.max,block,optional"`
+    VcenterHostDiskThroughput MetricConfig `alloy:"vcenter.host.disk.throughput,block,optional"`
+    VcenterHostMemoryUsage MetricConfig `alloy:"vcenter.host.memory.usage,block,optional"`
+    VcenterHostMemoryUtilization MetricConfig `alloy:"vcenter.host.memory.utilization,block,optional"`
+    VcenterHostNetworkPacketCount MetricConfig `alloy:"vcenter.host.network.packet.count,block,optional"`
+    VcenterHostNetworkPacketErrors MetricConfig `alloy:"vcenter.host.network.packet.errors,block,optional"`
+    VcenterHostNetworkThroughput MetricConfig `alloy:"vcenter.host.network.throughput,block,optional"`
+    VcenterHostNetworkUsage MetricConfig `alloy:"vcenter.host.network.usage,block,optional"`
+    VcenterResourcePoolCPUShares MetricConfig `alloy:"vcenter.resource_pool.cpu.shares,block,optional"`
+    VcenterResourcePoolCPUUsage MetricConfig `alloy:"vcenter.resource_pool.cpu.usage,block,optional"`
+    VcenterResourcePoolMemoryShares MetricConfig `alloy:"vcenter.resource_pool.memory.shares,block,optional"`
+    VcenterResourcePoolMemoryUsage MetricConfig `alloy:"vcenter.resource_pool.memory.usage,block,optional"`
+    VcenterVMCPUUsage MetricConfig `alloy:"vcenter.vm.cpu.usage,block,optional"`
+    VcenterVMCPUUtilization MetricConfig `alloy:"vcenter.vm.cpu.utilization,block,optional"`
+    VcenterVMDiskLatencyAvg MetricConfig `alloy:"vcenter.vm.disk.latency.avg,block,optional"`
+    VcenterVMDiskLatencyMax MetricConfig `alloy:"vcenter.vm.disk.latency.max,block,optional"`
+    VcenterVMDiskThroughput MetricConfig `alloy:"vcenter.vm.disk.throughput,block,optional"`
+    VcenterVMDiskUsage MetricConfig `alloy:"vcenter.vm.disk.usage,block,optional"`
+    VcenterVMDiskUtilization MetricConfig `alloy:"vcenter.vm.disk.utilization,block,optional"`
+    VcenterVMMemoryBallooned MetricConfig `alloy:"vcenter.vm.memory.ballooned,block,optional"`
+    VcenterVMMemorySwapped MetricConfig `alloy:"vcenter.vm.memory.swapped,block,optional"`
+    VcenterVMMemorySwappedSsd MetricConfig `alloy:"vcenter.vm.memory.swapped_ssd,block,optional"`
+    VcenterVMMemoryUsage MetricConfig `alloy:"vcenter.vm.memory.usage,block,optional"`
+    VcenterVMMemoryUtilization MetricConfig `alloy:"vcenter.vm.memory.utilization,block,optional"`
+    VcenterVMNetworkPacketCount MetricConfig `alloy:"vcenter.vm.network.packet.count,block,optional"`
+    VcenterVMNetworkThroughput MetricConfig `alloy:"vcenter.vm.network.throughput,block,optional"`
+    VcenterVMNetworkUsage MetricConfig `alloy:"vcenter.vm.network.usage,block,optional"`
 }
 
 func (args *MetricsConfig) SetToDefault() {
@@ -178,7 +178,7 @@ func (args *MetricsConfig) Convert() map[string]interface{} {
 }
 
 type ResourceAttributeConfig struct {
-    Enabled bool `river:"enabled,attr"`
+    Enabled bool `alloy:"enabled,attr"`
 }
 
 func (r *ResourceAttributeConfig) Convert() map[string]interface{} {
@@ -192,13 +192,13 @@ func (r *ResourceAttributeConfig) Convert() map[string]interface{} {
 }
 
 type ResourceAttributesConfig struct {
-    VcenterClusterName ResourceAttributeConfig `river:"vcenter.cluster.name,block,optional"`
-    VcenterDatastoreName ResourceAttributeConfig `river:"vcenter.datastore.name,block,optional"`
-    VcenterHostName ResourceAttributeConfig `river:"vcenter.host.name,block,optional"`
-    VcenterResourcePoolInventoryPath ResourceAttributeConfig `river:"vcenter.resource_pool.inventory_path,block,optional"`
-    VcenterResourcePoolName ResourceAttributeConfig `river:"vcenter.resource_pool.name,block,optional"`
-    VcenterVMID ResourceAttributeConfig `river:"vcenter.vm.id,block,optional"`
-    VcenterVMName ResourceAttributeConfig `river:"vcenter.vm.name,block,optional"`
+    VcenterClusterName ResourceAttributeConfig `alloy:"vcenter.cluster.name,block,optional"`
+    VcenterDatastoreName ResourceAttributeConfig `alloy:"vcenter.datastore.name,block,optional"`
+    VcenterHostName ResourceAttributeConfig `alloy:"vcenter.host.name,block,optional"`
+    VcenterResourcePoolInventoryPath ResourceAttributeConfig `alloy:"vcenter.resource_pool.inventory_path,block,optional"`
+    VcenterResourcePoolName ResourceAttributeConfig `alloy:"vcenter.resource_pool.name,block,optional"`
+    VcenterVMID ResourceAttributeConfig `alloy:"vcenter.vm.id,block,optional"`
+    VcenterVMName ResourceAttributeConfig `alloy:"vcenter.vm.name,block,optional"`
 }
 
 func (args *ResourceAttributesConfig) SetToDefault() {
@@ -232,8 +232,8 @@ func (args *ResourceAttributesConfig) Convert() map[string]interface{} {
 }
 
 type MetricsBuilderConfig struct {
-    Metrics MetricsConfig `river:"metrics,block,optional"`
-    ResourceAttributes ResourceAttributesConfig `river:"resource_attributes,block,optional"`
+    Metrics MetricsConfig `alloy:"metrics,block,optional"`
+    ResourceAttributes ResourceAttributesConfig `alloy:"resource_attributes,block,optional"`
 }
 
 func (mbc *MetricsBuilderConfig) SetToDefault() {
@@ -257,20 +257,20 @@ func (args *MetricsBuilderConfig) Convert() map[string]interface{} {
 
 // Arguments configures the otelcol.receiver.vcenter component.
 type Arguments struct {
-    Endpoint string `river:"endpoint,attr"`
-    Username string `river:"username,attr"`
-    Password alloytypes.Secret `river:"password,attr"`
+    Endpoint string `alloy:"endpoint,attr"`
+    Username string `alloy:"username,attr"`
+    Password alloytypes.Secret `alloy:"password,attr"`
 
-    MetricsBuilderConfig MetricsBuilderConfig `river:",squash"`
+    MetricsBuilderConfig MetricsBuilderConfig `alloy:",squash"`
 
-    ScraperControllerArguments otelcol.ScraperControllerArguments `river:",squash"`
-    TLS otelcol.TLSClientArguments `river:"tls,block,optional"`
+    ScraperControllerArguments otelcol.ScraperControllerArguments `alloy:",squash"`
+    TLS otelcol.TLSClientArguments `alloy:"tls,block,optional"`
 
     // DebugMetrics configures component internal metrics. Optional.
-    DebugMetrics otelcol.DebugMetricsArguments `river:"debug_metrics,block,optional"`
+    DebugMetrics otelcol.DebugMetricsArguments `alloy:"debug_metrics,block,optional"`
 
     // Output configures where to send received data. Required.
-    Output *otelcol.ConsumerArguments `river:"output,block"`
+    Output *otelcol.ConsumerArguments `alloy:"output,block"`
 }
 
 var _ receiver.Arguments = Arguments{}
diff --git a/internal/component/otelcol/receiver/zipkin/zipkin.go b/internal/component/otelcol/receiver/zipkin/zipkin.go
index 6cf15ac2a1..f63cc06183 100644
--- a/internal/component/otelcol/receiver/zipkin/zipkin.go
+++ b/internal/component/otelcol/receiver/zipkin/zipkin.go
@@ -26,15 +26,15 @@ func init() {
 
 // Arguments configures the otelcol.receiver.zipkin component.
 type Arguments struct {
-    ParseStringTags bool `river:"parse_string_tags,attr,optional"`
+    ParseStringTags bool `alloy:"parse_string_tags,attr,optional"`
 
-    HTTPServer otelcol.HTTPServerArguments `river:",squash"`
+    HTTPServer otelcol.HTTPServerArguments `alloy:",squash"`
 
     // DebugMetrics configures component internal metrics. Optional.
-    DebugMetrics otelcol.DebugMetricsArguments `river:"debug_metrics,block,optional"`
+    DebugMetrics otelcol.DebugMetricsArguments `alloy:"debug_metrics,block,optional"`
 
     // Output configures where to send received data. Required.
-    Output *otelcol.ConsumerArguments `river:"output,block"`
+    Output *otelcol.ConsumerArguments `alloy:"output,block"`
 }
 
 var _ receiver.Arguments = Arguments{}
diff --git a/internal/component/prometheus/exporter/apache/apache.go b/internal/component/prometheus/exporter/apache/apache.go
index 5d0d419e92..5cc988cf1c 100644
--- a/internal/component/prometheus/exporter/apache/apache.go
+++ b/internal/component/prometheus/exporter/apache/apache.go
@@ -33,9 +33,9 @@ var DefaultArguments = Arguments{
 
 // Arguments controls the apache exporter.
 type Arguments struct {
-    ApacheAddr string `river:"scrape_uri,attr,optional"`
-    ApacheHostOverride string `river:"host_override,attr,optional"`
-    ApacheInsecure bool `river:"insecure,attr,optional"`
+    ApacheAddr string `alloy:"scrape_uri,attr,optional"`
+    ApacheHostOverride string `alloy:"host_override,attr,optional"`
+    ApacheInsecure bool `alloy:"insecure,attr,optional"`
 }
 
 // SetToDefault implements river.Defaulter.
diff --git a/internal/component/prometheus/exporter/azure/azure.go b/internal/component/prometheus/exporter/azure/azure.go
index 3671cfa783..ed410e7029 100644
--- a/internal/component/prometheus/exporter/azure/azure.go
+++ b/internal/component/prometheus/exporter/azure/azure.go
@@ -25,20 +25,20 @@ func createExporter(opts component.Options, args component.Arguments, defaultIns
 }
 
 type Arguments struct {
-    Subscriptions []string `river:"subscriptions,attr"`
-    ResourceGraphQueryFilter string `river:"resource_graph_query_filter,attr,optional"`
-    ResourceType string `river:"resource_type,attr"`
-    Metrics []string `river:"metrics,attr"`
-    MetricAggregations []string `river:"metric_aggregations,attr,optional"`
-    Timespan string `river:"timespan,attr,optional"`
-    IncludedDimensions []string `river:"included_dimensions,attr,optional"`
-    IncludedResourceTags []string `river:"included_resource_tags,attr,optional"`
-    MetricNamespace string `river:"metric_namespace,attr,optional"`
-    MetricNameTemplate string `river:"metric_name_template,attr,optional"`
-    MetricHelpTemplate string `river:"metric_help_template,attr,optional"`
-    AzureCloudEnvironment string `river:"azure_cloud_environment,attr,optional"`
-    ValidateDimensions bool `river:"validate_dimensions,attr,optional"`
-    Regions []string `river:"regions,attr,optional"`
+    Subscriptions []string `alloy:"subscriptions,attr"`
+    ResourceGraphQueryFilter string `alloy:"resource_graph_query_filter,attr,optional"`
+    ResourceType string `alloy:"resource_type,attr"`
+    Metrics []string `alloy:"metrics,attr"`
+    MetricAggregations []string `alloy:"metric_aggregations,attr,optional"`
+    Timespan string `alloy:"timespan,attr,optional"`
+    IncludedDimensions []string `alloy:"included_dimensions,attr,optional"`
+    IncludedResourceTags []string `alloy:"included_resource_tags,attr,optional"`
+    MetricNamespace string `alloy:"metric_namespace,attr,optional"`
+    MetricNameTemplate string `alloy:"metric_name_template,attr,optional"`
+    MetricHelpTemplate string `alloy:"metric_help_template,attr,optional"`
+    AzureCloudEnvironment string `alloy:"azure_cloud_environment,attr,optional"`
+    ValidateDimensions bool `alloy:"validate_dimensions,attr,optional"`
+    Regions []string `alloy:"regions,attr,optional"`
 }
 
 // SetToDefault implements river.Defaulter.
diff --git a/internal/component/prometheus/exporter/blackbox/blackbox.go b/internal/component/prometheus/exporter/blackbox/blackbox.go
index 5e1e84c831..e32a630660 100644
--- a/internal/component/prometheus/exporter/blackbox/blackbox.go
+++ b/internal/component/prometheus/exporter/blackbox/blackbox.go
@@ -69,10 +69,10 @@ var DefaultArguments = Arguments{
 
 // BlackboxTarget defines a target to be used by the exporter.
 type BlackboxTarget struct {
-    Name string `river:"name,attr"`
-    Target string `river:"address,attr"`
-    Module string `river:"module,attr,optional"`
-    Labels map[string]string `river:"labels,attr,optional"`
+    Name string `alloy:"name,attr"`
+    Target string `alloy:"address,attr"`
+    Module string `alloy:"module,attr,optional"`
+    Labels map[string]string `alloy:"labels,attr,optional"`
 }
 
 type TargetBlock []BlackboxTarget
@@ -91,10 +91,10 @@ func (t TargetBlock) Convert() []blackbox_exporter.BlackboxTarget {
 }
 
 type Arguments struct {
-    ConfigFile string `river:"config_file,attr,optional"`
-    Config alloytypes.OptionalSecret `river:"config,attr,optional"`
-    Targets TargetBlock `river:"target,block"`
-    ProbeTimeoutOffset time.Duration `river:"probe_timeout_offset,attr,optional"`
+    ConfigFile string `alloy:"config_file,attr,optional"`
+    Config alloytypes.OptionalSecret `alloy:"config,attr,optional"`
+    Targets TargetBlock `alloy:"target,block"`
+    ProbeTimeoutOffset time.Duration `alloy:"probe_timeout_offset,attr,optional"`
 }
 
 // SetToDefault implements river.Defaulter.
diff --git a/internal/component/prometheus/exporter/cadvisor/cadvisor.go b/internal/component/prometheus/exporter/cadvisor/cadvisor.go
index e28b4e1c7d..223203dffa 100644
--- a/internal/component/prometheus/exporter/cadvisor/cadvisor.go
+++ b/internal/component/prometheus/exporter/cadvisor/cadvisor.go
@@ -28,24 +28,24 @@ func createExporter(opts component.Options, args component.Arguments, defaultIns
 
 // Arguments configures the prometheus.exporter.cadvisor component.
 type Arguments struct {
-    StoreContainerLabels bool `river:"store_container_labels,attr,optional"`
-    AllowlistedContainerLabels []string `river:"allowlisted_container_labels,attr,optional"`
-    EnvMetadataAllowlist []string `river:"env_metadata_allowlist,attr,optional"`
-    RawCgroupPrefixAllowlist []string `river:"raw_cgroup_prefix_allowlist,attr,optional"`
-    PerfEventsConfig string `river:"perf_events_config,attr,optional"`
-    ResctrlInterval time.Duration `river:"resctrl_interval,attr,optional"`
-    DisabledMetrics []string `river:"disabled_metrics,attr,optional"`
-    EnabledMetrics []string `river:"enabled_metrics,attr,optional"`
-    StorageDuration time.Duration `river:"storage_duration,attr,optional"`
-    ContainerdHost string `river:"containerd_host,attr,optional"`
-    ContainerdNamespace string `river:"containerd_namespace,attr,optional"`
-    DockerHost string `river:"docker_host,attr,optional"`
-    UseDockerTLS bool `river:"use_docker_tls,attr,optional"`
-    DockerTLSCert string `river:"docker_tls_cert,attr,optional"`
-    DockerTLSKey string `river:"docker_tls_key,attr,optional"`
-    DockerTLSCA string `river:"docker_tls_ca,attr,optional"`
-    DockerOnly bool `river:"docker_only,attr,optional"`
-    DisableRootCgroupStats bool `river:"disable_root_cgroup_stats,attr,optional"`
+    StoreContainerLabels bool `alloy:"store_container_labels,attr,optional"`
+    AllowlistedContainerLabels []string `alloy:"allowlisted_container_labels,attr,optional"`
+    EnvMetadataAllowlist []string `alloy:"env_metadata_allowlist,attr,optional"`
+    RawCgroupPrefixAllowlist []string `alloy:"raw_cgroup_prefix_allowlist,attr,optional"`
+    PerfEventsConfig string `alloy:"perf_events_config,attr,optional"`
+    ResctrlInterval time.Duration `alloy:"resctrl_interval,attr,optional"`
+    DisabledMetrics []string `alloy:"disabled_metrics,attr,optional"`
+    EnabledMetrics []string `alloy:"enabled_metrics,attr,optional"`
+    StorageDuration time.Duration `alloy:"storage_duration,attr,optional"`
+    ContainerdHost string `alloy:"containerd_host,attr,optional"`
+    ContainerdNamespace string `alloy:"containerd_namespace,attr,optional"`
+    DockerHost string `alloy:"docker_host,attr,optional"`
+    UseDockerTLS bool `alloy:"use_docker_tls,attr,optional"`
+    DockerTLSCert string `alloy:"docker_tls_cert,attr,optional"`
+    DockerTLSKey string `alloy:"docker_tls_key,attr,optional"`
+    DockerTLSCA string `alloy:"docker_tls_ca,attr,optional"`
+    DockerOnly bool `alloy:"docker_only,attr,optional"`
+    DisableRootCgroupStats bool `alloy:"disable_root_cgroup_stats,attr,optional"`
 }
 
 // SetToDefault implements river.Defaulter.
diff --git a/internal/component/prometheus/exporter/cloudwatch/config.go b/internal/component/prometheus/exporter/cloudwatch/config.go
index bd751d8ceb..c87de8b372 100644
--- a/internal/component/prometheus/exporter/cloudwatch/config.go
+++ b/internal/component/prometheus/exporter/cloudwatch/config.go
@@ -30,33 +30,33 @@ var defaults = Arguments{
 
 // Arguments are the river based options to configure the embedded CloudWatch exporter.
 type Arguments struct {
-    STSRegion string `river:"sts_region,attr"`
-    FIPSDisabled bool `river:"fips_disabled,attr,optional"`
-    Debug bool `river:"debug,attr,optional"`
-    DiscoveryExportedTags TagsPerNamespace `river:"discovery_exported_tags,attr,optional"`
-    Discovery []DiscoveryJob `river:"discovery,block,optional"`
-    Static []StaticJob `river:"static,block,optional"`
-    DecoupledScrape DecoupledScrapeConfig `river:"decoupled_scraping,block,optional"`
+    STSRegion string `alloy:"sts_region,attr"`
+    FIPSDisabled bool `alloy:"fips_disabled,attr,optional"`
+    Debug bool `alloy:"debug,attr,optional"`
+    DiscoveryExportedTags TagsPerNamespace `alloy:"discovery_exported_tags,attr,optional"`
+    Discovery []DiscoveryJob `alloy:"discovery,block,optional"`
+    Static []StaticJob `alloy:"static,block,optional"`
+    DecoupledScrape DecoupledScrapeConfig `alloy:"decoupled_scraping,block,optional"`
 }
 
 // DecoupledScrapeConfig is the configuration for decoupled scraping feature.
 type DecoupledScrapeConfig struct {
-    Enabled bool `river:"enabled,attr,optional"`
+    Enabled bool `alloy:"enabled,attr,optional"`
     // ScrapeInterval defines the decoupled scraping interval. If left empty, a default interval of 5m is used
-    ScrapeInterval time.Duration `river:"scrape_interval,attr,optional"`
+    ScrapeInterval time.Duration `alloy:"scrape_interval,attr,optional"`
 }
 
 type TagsPerNamespace = cloudwatch_exporter.TagsPerNamespace
 
 // DiscoveryJob configures a discovery job for a given service.
 type DiscoveryJob struct {
-    Auth RegionAndRoles `river:",squash"`
-    CustomTags Tags `river:"custom_tags,attr,optional"`
-    SearchTags Tags `river:"search_tags,attr,optional"`
-    Type string `river:"type,attr"`
-    DimensionNameRequirements []string `river:"dimension_name_requirements,attr,optional"`
-    Metrics []Metric `river:"metric,block"`
-    NilToZero *bool `river:"nil_to_zero,attr,optional"`
+    Auth RegionAndRoles `alloy:",squash"`
+    CustomTags Tags `alloy:"custom_tags,attr,optional"`
+    SearchTags Tags `alloy:"search_tags,attr,optional"`
+    Type string `alloy:"type,attr"`
+    DimensionNameRequirements []string `alloy:"dimension_name_requirements,attr,optional"`
+    Metrics []Metric `alloy:"metric,block"`
+    NilToZero *bool `alloy:"nil_to_zero,attr,optional"`
 }
 
 // Tags represents a series of tags configured on an AWS resource. Each tag is a
@@ -65,25 +65,25 @@ type Tags map[string]string
 
 // StaticJob will scrape metrics that match all defined dimensions.
 type StaticJob struct {
-    Name string `river:",label"`
-    Auth RegionAndRoles `river:",squash"`
-    CustomTags Tags `river:"custom_tags,attr,optional"`
-    Namespace string `river:"namespace,attr"`
-    Dimensions Dimensions `river:"dimensions,attr"`
-    Metrics []Metric `river:"metric,block"`
-    NilToZero *bool `river:"nil_to_zero,attr,optional"`
+    Name string `alloy:",label"`
+    Auth RegionAndRoles `alloy:",squash"`
+    CustomTags Tags `alloy:"custom_tags,attr,optional"`
+    Namespace string `alloy:"namespace,attr"`
+    Dimensions Dimensions `alloy:"dimensions,attr"`
+    Metrics []Metric `alloy:"metric,block"`
+    NilToZero *bool `alloy:"nil_to_zero,attr,optional"`
 }
 
 // RegionAndRoles exposes for each supported job, the AWS regions and IAM roles in which the agent should perform the
 // scrape.
 type RegionAndRoles struct {
-    Regions []string `river:"regions,attr"`
-    Roles []Role `river:"role,block,optional"`
+    Regions []string `alloy:"regions,attr"`
+    Roles []Role `alloy:"role,block,optional"`
 }
 
 type Role struct {
-    RoleArn string `river:"role_arn,attr"`
-    ExternalID string `river:"external_id,attr,optional"`
+    RoleArn string `alloy:"role_arn,attr"`
+    ExternalID string `alloy:"external_id,attr,optional"`
 }
 
 // Dimensions are the label values used to identify a unique metric stream in CloudWatch.
@@ -91,11 +91,11 @@ type Dimensions map[string]string
 
 type Metric struct {
-    Name string `river:"name,attr"`
-    Statistics []string `river:"statistics,attr"`
-    Period time.Duration `river:"period,attr"`
-    Length time.Duration `river:"length,attr,optional"`
-    NilToZero *bool `river:"nil_to_zero,attr,optional"`
+    Name string `alloy:"name,attr"`
+    Statistics []string `alloy:"statistics,attr"`
+    Period time.Duration `alloy:"period,attr"`
+    Length time.Duration `alloy:"length,attr,optional"`
+    NilToZero *bool `alloy:"nil_to_zero,attr,optional"`
 }
 
 // SetToDefault implements syntax.Defaulter.
diff --git a/internal/component/prometheus/exporter/consul/consul.go b/internal/component/prometheus/exporter/consul/consul.go
index 60ce1d6d4b..9647dc32b2 100644
--- a/internal/component/prometheus/exporter/consul/consul.go
+++ b/internal/component/prometheus/exporter/consul/consul.go
@@ -37,20 +37,20 @@ var DefaultArguments = Arguments{
 
 // Arguments controls the consul_exporter exporter.
 type Arguments struct {
-    Server string `river:"server,attr,optional"`
-    CAFile string `river:"ca_file,attr,optional"`
-    CertFile string `river:"cert_file,attr,optional"`
-    KeyFile string `river:"key_file,attr,optional"`
-    ServerName string `river:"server_name,attr,optional"`
-    Timeout time.Duration `river:"timeout,attr,optional"`
-    InsecureSkipVerify bool `river:"insecure_skip_verify,attr,optional"`
-    RequestLimit int `river:"concurrent_request_limit,attr,optional"`
-    AllowStale bool `river:"allow_stale,attr,optional"`
-    RequireConsistent bool `river:"require_consistent,attr,optional"`
+    Server string `alloy:"server,attr,optional"`
+    CAFile string `alloy:"ca_file,attr,optional"`
+    CertFile string `alloy:"cert_file,attr,optional"`
+    KeyFile string `alloy:"key_file,attr,optional"`
+    ServerName string `alloy:"server_name,attr,optional"`
+    Timeout time.Duration `alloy:"timeout,attr,optional"`
+    InsecureSkipVerify bool `alloy:"insecure_skip_verify,attr,optional"`
+    RequestLimit int `alloy:"concurrent_request_limit,attr,optional"`
+    AllowStale bool `alloy:"allow_stale,attr,optional"`
+    RequireConsistent bool `alloy:"require_consistent,attr,optional"`
 
-    KVPrefix string `river:"kv_prefix,attr,optional"`
-    KVFilter string `river:"kv_filter,attr,optional"`
-    HealthSummary bool `river:"generate_health_summary,attr,optional"`
+    KVPrefix string `alloy:"kv_prefix,attr,optional"`
+    KVFilter string `alloy:"kv_filter,attr,optional"`
+    HealthSummary bool `alloy:"generate_health_summary,attr,optional"`
 }
 
 // SetToDefault implements river.Defaulter.
diff --git a/internal/component/prometheus/exporter/dnsmasq/dnsmasq.go b/internal/component/prometheus/exporter/dnsmasq/dnsmasq.go
index dc261f5758..bd3abc2077 100644
--- a/internal/component/prometheus/exporter/dnsmasq/dnsmasq.go
+++ b/internal/component/prometheus/exporter/dnsmasq/dnsmasq.go
@@ -34,13 +34,13 @@ var DefaultArguments = Arguments{
 
 // Arguments configures the prometheus.exporter.dnsmasq component.
 type Arguments struct {
     // Address is the address of the dnsmasq server to connect to (host:port).
-    Address string `river:"address,attr,optional"`
+    Address string `alloy:"address,attr,optional"`
 
     // LeasesFile is the path to the dnsmasq leases file.
-    LeasesFile string `river:"leases_file,attr,optional"`
+    LeasesFile string `alloy:"leases_file,attr,optional"`
 
     // ExposeLeases controls whether expose dnsmasq leases as metrics (high cardinality).
-    ExposeLeases bool `river:"expose_leases,attr,optional"`
+    ExposeLeases bool `alloy:"expose_leases,attr,optional"`
 }
 
 // SetToDefault implements river.Defaulter.
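
Many hunks above carry the comment "SetToDefault implements river.Defaulter." A rough sketch of that defaulting pattern follows; the Defaulter interface shape and the default value are reconstructed from how it is used in these files, so treat both as assumptions rather than the real definitions:

package main

import "fmt"

// Defaulter is assumed here to be a single-method interface the config
// decoder calls before unmarshaling a block (assumed shape).
type Defaulter interface {
	SetToDefault()
}

type Arguments struct {
	Address string
}

// DefaultArguments mirrors the package-level defaults the exporters declare;
// the value below is illustrative only.
var DefaultArguments = Arguments{Address: "localhost:53"}

// SetToDefault resets args to the package defaults, matching the pattern
// referenced throughout the diff.
func (a *Arguments) SetToDefault() {
	*a = DefaultArguments
}

func main() {
	var args Arguments
	Defaulter(&args).SetToDefault()
	fmt.Println(args.Address) // prints: localhost:53
}
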
diff --git a/internal/component/prometheus/exporter/elasticsearch/elasticsearch.go b/internal/component/prometheus/exporter/elasticsearch/elasticsearch.go
index 06811eb68c..6cd3b98c3c 100644
--- a/internal/component/prometheus/exporter/elasticsearch/elasticsearch.go
+++ b/internal/component/prometheus/exporter/elasticsearch/elasticsearch.go
@@ -38,24 +38,24 @@ var DefaultArguments = Arguments{
 }
 
 type Arguments struct {
-    Address string `river:"address,attr,optional"`
-    Timeout time.Duration `river:"timeout,attr,optional"`
-    AllNodes bool `river:"all,attr,optional"`
-    Node string `river:"node,attr,optional"`
-    ExportIndices bool `river:"indices,attr,optional"`
-    ExportIndicesSettings bool `river:"indices_settings,attr,optional"`
-    ExportClusterSettings bool `river:"cluster_settings,attr,optional"`
-    ExportShards bool `river:"shards,attr,optional"`
-    IncludeAliases bool `river:"aliases,attr,optional"`
-    ExportSnapshots bool `river:"snapshots,attr,optional"`
-    ExportClusterInfoInterval time.Duration `river:"clusterinfo_interval,attr,optional"`
-    CA string `river:"ca,attr,optional"`
-    ClientPrivateKey string `river:"client_private_key,attr,optional"`
-    ClientCert string `river:"client_cert,attr,optional"`
-    InsecureSkipVerify bool `river:"ssl_skip_verify,attr,optional"`
-    ExportDataStreams bool `river:"data_stream,attr,optional"`
-    ExportSLM bool `river:"slm,attr,optional"`
-    BasicAuth *commonCfg.BasicAuth `river:"basic_auth,block,optional"`
+    Address string `alloy:"address,attr,optional"`
+    Timeout time.Duration `alloy:"timeout,attr,optional"`
+    AllNodes bool `alloy:"all,attr,optional"`
+    Node string `alloy:"node,attr,optional"`
+    ExportIndices bool `alloy:"indices,attr,optional"`
+    ExportIndicesSettings bool `alloy:"indices_settings,attr,optional"`
+    ExportClusterSettings bool `alloy:"cluster_settings,attr,optional"`
+    ExportShards bool `alloy:"shards,attr,optional"`
+    IncludeAliases bool `alloy:"aliases,attr,optional"`
+    ExportSnapshots bool `alloy:"snapshots,attr,optional"`
+    ExportClusterInfoInterval time.Duration `alloy:"clusterinfo_interval,attr,optional"`
+    CA string `alloy:"ca,attr,optional"`
+    ClientPrivateKey string `alloy:"client_private_key,attr,optional"`
+    ClientCert string `alloy:"client_cert,attr,optional"`
+    InsecureSkipVerify bool `alloy:"ssl_skip_verify,attr,optional"`
+    ExportDataStreams bool `alloy:"data_stream,attr,optional"`
+    ExportSLM bool `alloy:"slm,attr,optional"`
+    BasicAuth *commonCfg.BasicAuth `alloy:"basic_auth,block,optional"`
 }
 
 // SetToDefault implements river.Defaulter.
diff --git a/internal/component/prometheus/exporter/exporter.go b/internal/component/prometheus/exporter/exporter.go
index c184e1c12e..40572f21a0 100644
--- a/internal/component/prometheus/exporter/exporter.go
+++ b/internal/component/prometheus/exporter/exporter.go
@@ -22,7 +22,7 @@ type Creator func(component.Options, component.Arguments, string) (integrations.
 
 // Exports are simply a list of targets for a scraper to consume.
 type Exports struct {
-    Targets []discovery.Target `river:"targets,attr"`
+    Targets []discovery.Target `alloy:"targets,attr"`
 }
 
 type Component struct {
diff --git a/internal/component/prometheus/exporter/gcp/gcp.go b/internal/component/prometheus/exporter/gcp/gcp.go
index d9dbbf16ad..b3fb702f7a 100644
--- a/internal/component/prometheus/exporter/gcp/gcp.go
+++ b/internal/component/prometheus/exporter/gcp/gcp.go
@@ -27,14 +27,14 @@ func createExporter(opts component.Options, args component.Arguments, defaultIns
 }
 
 type Arguments struct {
-    ProjectIDs []string `river:"project_ids,attr"`
-    MetricPrefixes []string `river:"metrics_prefixes,attr"`
-    ExtraFilters []string `river:"extra_filters,attr,optional"`
-    RequestInterval time.Duration `river:"request_interval,attr,optional"`
-    RequestOffset time.Duration `river:"request_offset,attr,optional"`
-    IngestDelay bool `river:"ingest_delay,attr,optional"`
-    DropDelegatedProjects bool `river:"drop_delegated_projects,attr,optional"`
-    ClientTimeout time.Duration `river:"gcp_client_timeout,attr,optional"`
+    ProjectIDs []string `alloy:"project_ids,attr"`
+    MetricPrefixes []string `alloy:"metrics_prefixes,attr"`
+    ExtraFilters []string `alloy:"extra_filters,attr,optional"`
+    RequestInterval time.Duration `alloy:"request_interval,attr,optional"`
+    RequestOffset time.Duration `alloy:"request_offset,attr,optional"`
+    IngestDelay bool `alloy:"ingest_delay,attr,optional"`
+    DropDelegatedProjects bool `alloy:"drop_delegated_projects,attr,optional"`
+    ClientTimeout time.Duration `alloy:"gcp_client_timeout,attr,optional"`
 }
 
 var DefaultArguments = Arguments{
diff --git a/internal/component/prometheus/exporter/github/github.go b/internal/component/prometheus/exporter/github/github.go
index eca1679719..1b321f0ce5 100644
--- a/internal/component/prometheus/exporter/github/github.go
+++ b/internal/component/prometheus/exporter/github/github.go
@@ -33,12 +33,12 @@ var DefaultArguments = Arguments{
 }
 
 type Arguments struct {
-    APIURL string `river:"api_url,attr,optional"`
-    Repositories []string `river:"repositories,attr,optional"`
-    Organizations []string `river:"organizations,attr,optional"`
-    Users []string `river:"users,attr,optional"`
-    APIToken alloytypes.Secret `river:"api_token,attr,optional"`
-    APITokenFile string `river:"api_token_file,attr,optional"`
+    APIURL string `alloy:"api_url,attr,optional"`
+    Repositories []string `alloy:"repositories,attr,optional"`
+    Organizations []string `alloy:"organizations,attr,optional"`
+    Users []string `alloy:"users,attr,optional"`
+    APIToken alloytypes.Secret `alloy:"api_token,attr,optional"`
+    APITokenFile string `alloy:"api_token_file,attr,optional"`
 }
 
 // SetToDefault implements river.Defaulter.
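
Credential fields above, such as APIToken, use alloytypes.Secret rather than a plain string. One reason a dedicated secret type helps is that it can mask itself when printed; the sketch below demonstrates the idea with a stand-in type (MaskedSecret is illustrative, not the real alloytypes.Secret):

package main

import "fmt"

type MaskedSecret string

// String implements fmt.Stringer so accidental logging prints a
// placeholder instead of the credential.
func (s MaskedSecret) String() string { return "(secret)" }

type Arguments struct {
	APIToken MaskedSecret `alloy:"api_token,attr,optional"`
}

func main() {
	args := Arguments{APIToken: "ghp_example"}
	fmt.Println(args.APIToken) // prints: (secret)
}
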
diff --git a/internal/component/prometheus/exporter/kafka/kafka.go b/internal/component/prometheus/exporter/kafka/kafka.go
index 604b04e018..95738f53d8 100644
--- a/internal/component/prometheus/exporter/kafka/kafka.go
+++ b/internal/component/prometheus/exporter/kafka/kafka.go
@@ -26,28 +26,28 @@ var DefaultArguments = Arguments{
 }
 
 type Arguments struct {
-	Instance string `river:"instance,attr,optional"`
-	KafkaURIs []string `river:"kafka_uris,attr,optional"`
-	UseSASL bool `river:"use_sasl,attr,optional"`
-	UseSASLHandshake bool `river:"use_sasl_handshake,attr,optional"`
-	SASLUsername string `river:"sasl_username,attr,optional"`
-	SASLPassword alloytypes.Secret `river:"sasl_password,attr,optional"`
-	SASLMechanism string `river:"sasl_mechanism,attr,optional"`
-	UseTLS bool `river:"use_tls,attr,optional"`
-	CAFile string `river:"ca_file,attr,optional"`
-	CertFile string `river:"cert_file,attr,optional"`
-	KeyFile string `river:"key_file,attr,optional"`
-	InsecureSkipVerify bool `river:"insecure_skip_verify,attr,optional"`
-	KafkaVersion string `river:"kafka_version,attr,optional"`
-	UseZooKeeperLag bool `river:"use_zookeeper_lag,attr,optional"`
-	ZookeeperURIs []string `river:"zookeeper_uris,attr,optional"`
-	ClusterName string `river:"kafka_cluster_name,attr,optional"`
-	MetadataRefreshInterval string `river:"metadata_refresh_interval,attr,optional"`
-	AllowConcurrent bool `river:"allow_concurrency,attr,optional"`
-	MaxOffsets int `river:"max_offsets,attr,optional"`
-	PruneIntervalSeconds int `river:"prune_interval_seconds,attr,optional"`
-	TopicsFilter string `river:"topics_filter_regex,attr,optional"`
-	GroupFilter string `river:"groups_filter_regex,attr,optional"`
+	Instance string `alloy:"instance,attr,optional"`
+	KafkaURIs []string `alloy:"kafka_uris,attr,optional"`
+	UseSASL bool `alloy:"use_sasl,attr,optional"`
+	UseSASLHandshake bool `alloy:"use_sasl_handshake,attr,optional"`
+	SASLUsername string `alloy:"sasl_username,attr,optional"`
+	SASLPassword alloytypes.Secret `alloy:"sasl_password,attr,optional"`
+	SASLMechanism string `alloy:"sasl_mechanism,attr,optional"`
+	UseTLS bool `alloy:"use_tls,attr,optional"`
+	CAFile string `alloy:"ca_file,attr,optional"`
+	CertFile string `alloy:"cert_file,attr,optional"`
+	KeyFile string `alloy:"key_file,attr,optional"`
+	InsecureSkipVerify bool `alloy:"insecure_skip_verify,attr,optional"`
+	KafkaVersion string `alloy:"kafka_version,attr,optional"`
+	UseZooKeeperLag bool `alloy:"use_zookeeper_lag,attr,optional"`
+	ZookeeperURIs []string `alloy:"zookeeper_uris,attr,optional"`
+	ClusterName string `alloy:"kafka_cluster_name,attr,optional"`
+	MetadataRefreshInterval string `alloy:"metadata_refresh_interval,attr,optional"`
+	AllowConcurrent bool `alloy:"allow_concurrency,attr,optional"`
+	MaxOffsets int `alloy:"max_offsets,attr,optional"`
+	PruneIntervalSeconds int `alloy:"prune_interval_seconds,attr,optional"`
+	TopicsFilter string `alloy:"topics_filter_regex,attr,optional"`
+	GroupFilter string `alloy:"groups_filter_regex,attr,optional"`
 }
 
 func init() {
diff --git a/internal/component/prometheus/exporter/memcached/memcached.go b/internal/component/prometheus/exporter/memcached/memcached.go
index 516d05b24e..2a1a6d3588 100644
--- a/internal/component/prometheus/exporter/memcached/memcached.go
+++ b/internal/component/prometheus/exporter/memcached/memcached.go
@@ -36,14 +36,14 @@ var DefaultArguments = Arguments{
 }
 
 // Arguments configures the prometheus.exporter.memcached component.
 type Arguments struct {
 	// Address is the address of the memcached server to connect to (host:port).
-	Address string `river:"address,attr,optional"`
+	Address string `alloy:"address,attr,optional"`
 
 	// Timeout is the timeout for the memcached exporter to use when connecting to the
 	// memcached server.
-	Timeout time.Duration `river:"timeout,attr,optional"`
+	Timeout time.Duration `alloy:"timeout,attr,optional"`
 
 	// TLSConfig is used to configure TLS for connection to memcached.
-	TLSConfig *config.TLSConfig `river:"tls_config,block,optional"`
+	TLSConfig *config.TLSConfig `alloy:"tls_config,block,optional"`
 }
 
 // SetToDefault implements river.Defaulter.
diff --git a/internal/component/prometheus/exporter/mongodb/mongodb.go b/internal/component/prometheus/exporter/mongodb/mongodb.go
index cd46d1ebe1..c59c3eeaab 100644
--- a/internal/component/prometheus/exporter/mongodb/mongodb.go
+++ b/internal/component/prometheus/exporter/mongodb/mongodb.go
@@ -27,10 +27,10 @@ func createExporter(opts component.Options, args component.Arguments, defaultIns
 }
 
 type Arguments struct {
-	URI alloytypes.Secret `river:"mongodb_uri,attr"`
-	DirectConnect bool `river:"direct_connect,attr,optional"`
-	DiscoveringMode bool `river:"discovering_mode,attr,optional"`
-	TLSBasicAuthConfigPath string `river:"tls_basic_auth_config_path,attr,optional"`
+	URI alloytypes.Secret `alloy:"mongodb_uri,attr"`
+	DirectConnect bool `alloy:"direct_connect,attr,optional"`
+	DiscoveringMode bool `alloy:"discovering_mode,attr,optional"`
+	TLSBasicAuthConfigPath string `alloy:"tls_basic_auth_config_path,attr,optional"`
 }
 
 func (a *Arguments) Convert() *mongodb_exporter.Config {
diff --git a/internal/component/prometheus/exporter/mssql/mssql.go b/internal/component/prometheus/exporter/mssql/mssql.go
index dad15af1db..e50f064919 100644
--- a/internal/component/prometheus/exporter/mssql/mssql.go
+++ b/internal/component/prometheus/exporter/mssql/mssql.go
@@ -42,11 +42,11 @@ var DefaultArguments = Arguments{
 
 // Arguments controls the mssql exporter.
 type Arguments struct {
-	ConnectionString alloytypes.Secret `river:"connection_string,attr"`
-	MaxIdleConnections int `river:"max_idle_connections,attr,optional"`
-	MaxOpenConnections int `river:"max_open_connections,attr,optional"`
-	Timeout time.Duration `river:"timeout,attr,optional"`
-	QueryConfig alloytypes.OptionalSecret `river:"query_config,attr,optional"`
+	ConnectionString alloytypes.Secret `alloy:"connection_string,attr"`
+	MaxIdleConnections int `alloy:"max_idle_connections,attr,optional"`
+	MaxOpenConnections int `alloy:"max_open_connections,attr,optional"`
+	Timeout time.Duration `alloy:"timeout,attr,optional"`
+	QueryConfig alloytypes.OptionalSecret `alloy:"query_config,attr,optional"`
 }
 
 // SetToDefault implements river.Defaulter.
diff --git a/internal/component/prometheus/exporter/mysql/mysql.go b/internal/component/prometheus/exporter/mysql/mysql.go
index 6f24e5d482..3fb75f2c52 100644
--- a/internal/component/prometheus/exporter/mysql/mysql.go
+++ b/internal/component/prometheus/exporter/mysql/mysql.go
@@ -58,71 +58,71 @@ var DefaultArguments = Arguments{
 
 // Arguments controls the mysql component.
 type Arguments struct {
 	// DataSourceName to use to connect to MySQL.
-	DataSourceName alloytypes.Secret `river:"data_source_name,attr,optional"`
+	DataSourceName alloytypes.Secret `alloy:"data_source_name,attr,optional"`
 
 	// Collectors to mark as enabled in addition to the default.
-	EnableCollectors []string `river:"enable_collectors,attr,optional"`
+	EnableCollectors []string `alloy:"enable_collectors,attr,optional"`
 
 	// Collectors to explicitly mark as disabled.
-	DisableCollectors []string `river:"disable_collectors,attr,optional"`
+	DisableCollectors []string `alloy:"disable_collectors,attr,optional"`
 
 	// Overrides the default set of enabled collectors with the given list.
-	SetCollectors []string `river:"set_collectors,attr,optional"`
+	SetCollectors []string `alloy:"set_collectors,attr,optional"`
 
 	// Collector-wide options
-	LockWaitTimeout int `river:"lock_wait_timeout,attr,optional"`
-	LogSlowFilter bool `river:"log_slow_filter,attr,optional"`
+	LockWaitTimeout int `alloy:"lock_wait_timeout,attr,optional"`
+	LogSlowFilter bool `alloy:"log_slow_filter,attr,optional"`
 
 	// Collector-specific config options
-	InfoSchemaProcessList InfoSchemaProcessList `river:"info_schema.processlist,block,optional"`
-	InfoSchemaTables InfoSchemaTables `river:"info_schema.tables,block,optional"`
-	PerfSchemaEventsStatements PerfSchemaEventsStatements `river:"perf_schema.eventsstatements,block,optional"`
-	PerfSchemaFileInstances PerfSchemaFileInstances `river:"perf_schema.file_instances,block,optional"`
-	PerfSchemaMemoryEvents PerfSchemaMemoryEvents `river:"perf_schema.memory_events,block,optional"`
-
-	Heartbeat Heartbeat `river:"heartbeat,block,optional"`
-	MySQLUser MySQLUser `river:"mysql.user,block,optional"`
+	InfoSchemaProcessList InfoSchemaProcessList `alloy:"info_schema.processlist,block,optional"`
+	InfoSchemaTables InfoSchemaTables `alloy:"info_schema.tables,block,optional"`
+	PerfSchemaEventsStatements PerfSchemaEventsStatements `alloy:"perf_schema.eventsstatements,block,optional"`
+	PerfSchemaFileInstances PerfSchemaFileInstances `alloy:"perf_schema.file_instances,block,optional"`
+	PerfSchemaMemoryEvents PerfSchemaMemoryEvents `alloy:"perf_schema.memory_events,block,optional"`
+
+	Heartbeat Heartbeat `alloy:"heartbeat,block,optional"`
+	MySQLUser MySQLUser `alloy:"mysql.user,block,optional"`
 }
 
 // InfoSchemaProcessList configures the info_schema.processlist collector
 type InfoSchemaProcessList struct {
-	MinTime int `river:"min_time,attr,optional"`
-	ProcessesByUser bool `river:"processes_by_user,attr,optional"`
-	ProcessesByHost bool `river:"processes_by_host,attr,optional"`
+	MinTime int `alloy:"min_time,attr,optional"`
+	ProcessesByUser bool `alloy:"processes_by_user,attr,optional"`
+	ProcessesByHost bool `alloy:"processes_by_host,attr,optional"`
 }
 
 // InfoSchemaTables configures the info_schema.tables collector
 type InfoSchemaTables struct {
-	Databases string `river:"databases,attr,optional"`
+	Databases string `alloy:"databases,attr,optional"`
 }
 
 // PerfSchemaEventsStatements configures the perf_schema.eventsstatements collector
 type PerfSchemaEventsStatements struct {
-	Limit int `river:"limit,attr,optional"`
-	TimeLimit int `river:"time_limit,attr,optional"`
-	TextLimit int `river:"text_limit,attr,optional"`
+	Limit int `alloy:"limit,attr,optional"`
+	TimeLimit int `alloy:"time_limit,attr,optional"`
+	TextLimit int `alloy:"text_limit,attr,optional"`
 }
 
 // PerfSchemaFileInstances configures the perf_schema.file_instances collector
 type PerfSchemaFileInstances struct {
-	Filter string `river:"filter,attr,optional"`
-	RemovePrefix string `river:"remove_prefix,attr,optional"`
+	Filter string `alloy:"filter,attr,optional"`
+	RemovePrefix string `alloy:"remove_prefix,attr,optional"`
 }
 
 // PerfSchemaMemoryEvents configures the perf_schema.memory_events collector
 type PerfSchemaMemoryEvents struct {
-	RemovePrefix string `river:"remove_prefix,attr,optional"`
+	RemovePrefix string `alloy:"remove_prefix,attr,optional"`
 }
 
 // Heartbeat controls the heartbeat collector
 type Heartbeat struct {
-	Database string `river:"database,attr,optional"`
-	Table string `river:"table,attr,optional"`
-	UTC bool `river:"utc,attr,optional"`
+	Database string `alloy:"database,attr,optional"`
+	Table string `alloy:"table,attr,optional"`
+	UTC bool `alloy:"utc,attr,optional"`
 }
 
 // MySQLUser controls the mysql.user collector
 type MySQLUser struct {
-	Privileges bool `river:"privileges,attr,optional"`
+	Privileges bool `alloy:"privileges,attr,optional"`
 }
 
 // SetToDefault implements river.Defaulter.
diff --git a/internal/component/prometheus/exporter/oracledb/oracledb.go b/internal/component/prometheus/exporter/oracledb/oracledb.go
index 21ea2909b6..dd71616d68 100644
--- a/internal/component/prometheus/exporter/oracledb/oracledb.go
+++ b/internal/component/prometheus/exporter/oracledb/oracledb.go
@@ -44,10 +44,10 @@ var (
 
 // Arguments controls the oracledb exporter.
 type Arguments struct {
-	ConnectionString alloytypes.Secret `river:"connection_string,attr"`
-	MaxIdleConns int `river:"max_idle_conns,attr,optional"`
-	MaxOpenConns int `river:"max_open_conns,attr,optional"`
-	QueryTimeout int `river:"query_timeout,attr,optional"`
+	ConnectionString alloytypes.Secret `alloy:"connection_string,attr"`
+	MaxIdleConns int `alloy:"max_idle_conns,attr,optional"`
+	MaxOpenConns int `alloy:"max_open_conns,attr,optional"`
+	QueryTimeout int `alloy:"query_timeout,attr,optional"`
 }
 
 // SetToDefault implements river.Defaulter.
diff --git a/internal/component/prometheus/exporter/postgres/postgres.go b/internal/component/prometheus/exporter/postgres/postgres.go
index d5e2e0d1b7..ec227308a3 100644
--- a/internal/component/prometheus/exporter/postgres/postgres.go
+++ b/internal/component/prometheus/exporter/postgres/postgres.go
@@ -77,22 +77,22 @@ type Arguments struct {
 	// DataSourceNames to use to connect to Postgres. This is marked optional because it
 	// may also be supplied by the POSTGRES_EXPORTER_DATA_SOURCE_NAME env var,
 	// though it is not recommended to do so.
-	DataSourceNames []alloytypes.Secret `river:"data_source_names,attr,optional"`
+	DataSourceNames []alloytypes.Secret `alloy:"data_source_names,attr,optional"`
 
 	// Attributes
-	DisableSettingsMetrics bool `river:"disable_settings_metrics,attr,optional"`
-	DisableDefaultMetrics bool `river:"disable_default_metrics,attr,optional"`
-	CustomQueriesConfigPath string `river:"custom_queries_config_path,attr,optional"`
+	DisableSettingsMetrics bool `alloy:"disable_settings_metrics,attr,optional"`
+	DisableDefaultMetrics bool `alloy:"disable_default_metrics,attr,optional"`
+	CustomQueriesConfigPath string `alloy:"custom_queries_config_path,attr,optional"`
 
 	// Blocks
-	AutoDiscovery AutoDiscovery `river:"autodiscovery,block,optional"`
+	AutoDiscovery AutoDiscovery `alloy:"autodiscovery,block,optional"`
 }
 
 // Autodiscovery controls discovery of databases outside any specified in DataSourceNames.
 type AutoDiscovery struct {
-	Enabled bool `river:"enabled,attr,optional"`
-	DatabaseAllowlist []string `river:"database_allowlist,attr,optional"`
-	DatabaseDenylist []string `river:"database_denylist,attr,optional"`
+	Enabled bool `alloy:"enabled,attr,optional"`
+	DatabaseAllowlist []string `alloy:"database_allowlist,attr,optional"`
+	DatabaseDenylist []string `alloy:"database_denylist,attr,optional"`
 }
 
 // SetToDefault implements river.Defaulter.
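[Editor's note] A rename of this breadth is easy to leave half-finished, and the Go compiler does not catch a stale tag key. One way a reviewer might check for leftovers mechanically is a reflection-based guard like the sketch below; the Arguments type and the test name are illustrative stand-ins, not code from this patch:

package exporter_test

import (
	"reflect"
	"testing"
)

// Arguments stands in for any of the tagged structs touched by this patch.
type Arguments struct {
	ConnectionString string `alloy:"connection_string,attr"`
	QueryTimeout     int    `alloy:"query_timeout,attr,optional"`
}

func TestNoRiverTagKeysRemain(t *testing.T) {
	rt := reflect.TypeOf(Arguments{})
	for i := 0; i < rt.NumField(); i++ {
		f := rt.Field(i)
		// Fail on any field that still carries the old key.
		if _, ok := f.Tag.Lookup("river"); ok {
			t.Errorf("field %s still uses the old river tag key", f.Name)
		}
		// Fail on any field that was not migrated to the new key.
		if _, ok := f.Tag.Lookup("alloy"); !ok {
			t.Errorf("field %s is missing an alloy tag", f.Name)
		}
	}
}

(A plain `grep -rn 'river:"' internal/` over the tree would surface the same stragglers.)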
diff --git a/internal/component/prometheus/exporter/process/process.go b/internal/component/prometheus/exporter/process/process.go
index 187aee4a76..fbf27a256a 100644
--- a/internal/component/prometheus/exporter/process/process.go
+++ b/internal/component/prometheus/exporter/process/process.go
@@ -37,21 +37,21 @@ var DefaultArguments = Arguments{
 
 // Arguments configures the prometheus.exporter.process component
 type Arguments struct {
-	ProcessExporter []MatcherGroup `river:"matcher,block,optional"`
+	ProcessExporter []MatcherGroup `alloy:"matcher,block,optional"`
 
-	ProcFSPath string `river:"procfs_path,attr,optional"`
-	Children bool `river:"track_children,attr,optional"`
-	Threads bool `river:"track_threads,attr,optional"`
-	SMaps bool `river:"gather_smaps,attr,optional"`
-	Recheck bool `river:"recheck_on_scrape,attr,optional"`
+	ProcFSPath string `alloy:"procfs_path,attr,optional"`
+	Children bool `alloy:"track_children,attr,optional"`
+	Threads bool `alloy:"track_threads,attr,optional"`
+	SMaps bool `alloy:"gather_smaps,attr,optional"`
+	Recheck bool `alloy:"recheck_on_scrape,attr,optional"`
 }
 
 // MatcherGroup taken and converted to River from github.com/ncabatoff/process-exporter/config
 type MatcherGroup struct {
-	Name string `river:"name,attr,optional"`
-	CommRules []string `river:"comm,attr,optional"`
-	ExeRules []string `river:"exe,attr,optional"`
-	CmdlineRules []string `river:"cmdline,attr,optional"`
+	Name string `alloy:"name,attr,optional"`
+	CommRules []string `alloy:"comm,attr,optional"`
+	ExeRules []string `alloy:"exe,attr,optional"`
+	CmdlineRules []string `alloy:"cmdline,attr,optional"`
 }
 
 // SetToDefault implements river.Defaulter.
diff --git a/internal/component/prometheus/exporter/redis/redis.go b/internal/component/prometheus/exporter/redis/redis.go
index cfb8f7459a..61bd9977c4 100644
--- a/internal/component/prometheus/exporter/redis/redis.go
+++ b/internal/component/prometheus/exporter/redis/redis.go
@@ -44,43 +44,43 @@ var DefaultArguments = Arguments{
 }
 
 type Arguments struct {
-	IncludeExporterMetrics bool `river:"include_exporter_metrics,attr,optional"`
+	IncludeExporterMetrics bool `alloy:"include_exporter_metrics,attr,optional"`
 
 	// exporter-specific config.
 	//
 	// The exporter binary config differs to this, but these
 	// are the only fields that are relevant to the exporter struct.
-	RedisAddr string `river:"redis_addr,attr"`
-	RedisUser string `river:"redis_user,attr,optional"`
-	RedisPassword alloytypes.Secret `river:"redis_password,attr,optional"`
-	RedisPasswordFile string `river:"redis_password_file,attr,optional"`
-	RedisPasswordMapFile string `river:"redis_password_map_file,attr,optional"`
-	Namespace string `river:"namespace,attr,optional"`
-	ConfigCommand string `river:"config_command,attr,optional"`
-	CheckKeys []string `river:"check_keys,attr,optional"`
-	CheckKeyGroups []string `river:"check_key_groups,attr,optional"`
-	CheckKeyGroupsBatchSize int64 `river:"check_key_groups_batch_size,attr,optional"`
-	MaxDistinctKeyGroups int64 `river:"max_distinct_key_groups,attr,optional"`
-	CheckSingleKeys []string `river:"check_single_keys,attr,optional"`
-	CheckStreams []string `river:"check_streams,attr,optional"`
-	CheckSingleStreams []string `river:"check_single_streams,attr,optional"`
-	ExportKeyValues bool `river:"export_key_values,attr,optional"`
-	CountKeys []string `river:"count_keys,attr,optional"`
-	ScriptPath string `river:"script_path,attr,optional"`
-	ScriptPaths []string `river:"script_paths,attr,optional"`
-	ConnectionTimeout time.Duration `river:"connection_timeout,attr,optional"`
-	TLSClientKeyFile string `river:"tls_client_key_file,attr,optional"`
-	TLSClientCertFile string `river:"tls_client_cert_file,attr,optional"`
-	TLSCaCertFile string `river:"tls_ca_cert_file,attr,optional"`
-	SetClientName bool `river:"set_client_name,attr,optional"`
-	IsTile38 bool `river:"is_tile38,attr,optional"`
-	IsCluster bool `river:"is_cluster,attr,optional"`
-	ExportClientList bool `river:"export_client_list,attr,optional"`
-	ExportClientPort bool `river:"export_client_port,attr,optional"`
-	RedisMetricsOnly bool `river:"redis_metrics_only,attr,optional"`
-	PingOnConnect bool `river:"ping_on_connect,attr,optional"`
-	InclSystemMetrics bool `river:"incl_system_metrics,attr,optional"`
-	SkipTLSVerification bool `river:"skip_tls_verification,attr,optional"`
+	RedisAddr string `alloy:"redis_addr,attr"`
+	RedisUser string `alloy:"redis_user,attr,optional"`
+	RedisPassword alloytypes.Secret `alloy:"redis_password,attr,optional"`
+	RedisPasswordFile string `alloy:"redis_password_file,attr,optional"`
+	RedisPasswordMapFile string `alloy:"redis_password_map_file,attr,optional"`
+	Namespace string `alloy:"namespace,attr,optional"`
+	ConfigCommand string `alloy:"config_command,attr,optional"`
+	CheckKeys []string `alloy:"check_keys,attr,optional"`
+	CheckKeyGroups []string `alloy:"check_key_groups,attr,optional"`
+	CheckKeyGroupsBatchSize int64 `alloy:"check_key_groups_batch_size,attr,optional"`
+	MaxDistinctKeyGroups int64 `alloy:"max_distinct_key_groups,attr,optional"`
+	CheckSingleKeys []string `alloy:"check_single_keys,attr,optional"`
+	CheckStreams []string `alloy:"check_streams,attr,optional"`
+	CheckSingleStreams []string `alloy:"check_single_streams,attr,optional"`
+	ExportKeyValues bool `alloy:"export_key_values,attr,optional"`
+	CountKeys []string `alloy:"count_keys,attr,optional"`
+	ScriptPath string `alloy:"script_path,attr,optional"`
+	ScriptPaths []string `alloy:"script_paths,attr,optional"`
+	ConnectionTimeout time.Duration `alloy:"connection_timeout,attr,optional"`
+	TLSClientKeyFile string `alloy:"tls_client_key_file,attr,optional"`
+	TLSClientCertFile string `alloy:"tls_client_cert_file,attr,optional"`
+	TLSCaCertFile string `alloy:"tls_ca_cert_file,attr,optional"`
+	SetClientName bool `alloy:"set_client_name,attr,optional"`
+	IsTile38 bool `alloy:"is_tile38,attr,optional"`
+	IsCluster bool `alloy:"is_cluster,attr,optional"`
+	ExportClientList bool `alloy:"export_client_list,attr,optional"`
+	ExportClientPort bool `alloy:"export_client_port,attr,optional"`
+	RedisMetricsOnly bool `alloy:"redis_metrics_only,attr,optional"`
+	PingOnConnect bool `alloy:"ping_on_connect,attr,optional"`
+	InclSystemMetrics bool `alloy:"incl_system_metrics,attr,optional"`
+	SkipTLSVerification bool `alloy:"skip_tls_verification,attr,optional"`
 }
 
 // SetToDefault implements river.Defaulter.
diff --git a/internal/component/prometheus/exporter/snmp/snmp.go b/internal/component/prometheus/exporter/snmp/snmp.go
index 286ae7fb35..5ad5df6bfa 100644
--- a/internal/component/prometheus/exporter/snmp/snmp.go
+++ b/internal/component/prometheus/exporter/snmp/snmp.go
@@ -63,11 +63,11 @@ func buildSNMPTargets(baseTarget discovery.Target, args component.Arguments) []d
 
 // SNMPTarget defines a target to be used by the exporter.
 type SNMPTarget struct {
-	Name string `river:",label"`
-	Target string `river:"address,attr"`
-	Module string `river:"module,attr,optional"`
-	Auth string `river:"auth,attr,optional"`
-	WalkParams string `river:"walk_params,attr,optional"`
+	Name string `alloy:",label"`
+	Target string `alloy:"address,attr"`
+	Module string `alloy:"module,attr,optional"`
+	Auth string `alloy:"auth,attr,optional"`
+	WalkParams string `alloy:"walk_params,attr,optional"`
 }
 
 type TargetBlock []SNMPTarget
@@ -88,11 +88,11 @@ func (t TargetBlock) Convert() []snmp_exporter.SNMPTarget {
 }
 
 type WalkParam struct {
-	Name string `river:",label"`
-	MaxRepetitions uint32 `river:"max_repetitions,attr,optional"`
-	Retries int `river:"retries,attr,optional"`
-	Timeout time.Duration `river:"timeout,attr,optional"`
-	UseUnconnectedUDPSocket bool `river:"use_unconnected_udp_socket,attr,optional"`
+	Name string `alloy:",label"`
+	MaxRepetitions uint32 `alloy:"max_repetitions,attr,optional"`
+	Retries int `alloy:"retries,attr,optional"`
+	Timeout time.Duration `alloy:"timeout,attr,optional"`
+	UseUnconnectedUDPSocket bool `alloy:"use_unconnected_udp_socket,attr,optional"`
 }
 
 type WalkParams []WalkParam
@@ -112,10 +112,10 @@ func (w WalkParams) Convert() map[string]snmp_config.WalkParams {
 }
 
 type Arguments struct {
-	ConfigFile string `river:"config_file,attr,optional"`
-	Config alloytypes.OptionalSecret `river:"config,attr,optional"`
-	Targets TargetBlock `river:"target,block"`
-	WalkParams WalkParams `river:"walk_param,block,optional"`
+	ConfigFile string `alloy:"config_file,attr,optional"`
+	Config alloytypes.OptionalSecret `alloy:"config,attr,optional"`
+	Targets TargetBlock `alloy:"target,block"`
+	WalkParams WalkParams `alloy:"walk_param,block,optional"`
 	ConfigStruct snmp_config.Config
 }
 
diff --git a/internal/component/prometheus/exporter/snowflake/snowflake.go b/internal/component/prometheus/exporter/snowflake/snowflake.go
index f377d4edd9..72bd01e4b7 100644
--- a/internal/component/prometheus/exporter/snowflake/snowflake.go
+++ b/internal/component/prometheus/exporter/snowflake/snowflake.go
@@ -33,11 +33,11 @@ var DefaultArguments = Arguments{
 
 // Arguments controls the snowflake exporter.
 type Arguments struct {
-	AccountName string `river:"account_name,attr"`
-	Username string `river:"username,attr"`
-	Password alloytypes.Secret `river:"password,attr"`
-	Role string `river:"role,attr,optional"`
-	Warehouse string `river:"warehouse,attr"`
+	AccountName string `alloy:"account_name,attr"`
+	Username string `alloy:"username,attr"`
+	Password alloytypes.Secret `alloy:"password,attr"`
+	Role string `alloy:"role,attr,optional"`
+	Warehouse string `alloy:"warehouse,attr"`
 }
 
 // SetToDefault implements river.Defaulter.
diff --git a/internal/component/prometheus/exporter/squid/squid.go b/internal/component/prometheus/exporter/squid/squid.go
index f5ba5a133c..e08e404cbf 100644
--- a/internal/component/prometheus/exporter/squid/squid.go
+++ b/internal/component/prometheus/exporter/squid/squid.go
@@ -30,9 +30,9 @@ func createExporter(opts component.Options, args component.Arguments, defaultIns
 
 // Arguments controls the squid exporter.
 type Arguments struct {
-	SquidAddr string `river:"address,attr"`
-	SquidUser string `river:"username,attr,optional"`
-	SquidPassword alloytypes.Secret `river:"password,attr,optional"`
+	SquidAddr string `alloy:"address,attr"`
+	SquidUser string `alloy:"username,attr,optional"`
+	SquidPassword alloytypes.Secret `alloy:"password,attr,optional"`
 }
 
 // SetToDefault implements river.Defaulter.
diff --git a/internal/component/prometheus/exporter/statsd/config.go b/internal/component/prometheus/exporter/statsd/config.go
index 2b3f11e0bd..18525450a9 100644
--- a/internal/component/prometheus/exporter/statsd/config.go
+++ b/internal/component/prometheus/exporter/statsd/config.go
@@ -10,26 +10,26 @@ import (
 )
 
 type Arguments struct {
-	ListenUDP string `river:"listen_udp,attr,optional"`
-	ListenTCP string `river:"listen_tcp,attr,optional"`
-	ListenUnixgram string `river:"listen_unixgram,attr,optional"`
-	UnixSocketMode string `river:"unix_socket_mode,attr,optional"`
-	MappingConfig string `river:"mapping_config_path,attr,optional"`
-
-	ReadBuffer int `river:"read_buffer,attr,optional"`
-	CacheSize int `river:"cache_size,attr,optional"`
-	CacheType string `river:"cache_type,attr,optional"`
-	EventQueueSize int `river:"event_queue_size,attr,optional"`
-	EventFlushThreshold int `river:"event_flush_threshold,attr,optional"`
-	EventFlushInterval time.Duration `river:"event_flush_interval,attr,optional"`
-
-	ParseDogStatsd bool `river:"parse_dogstatsd_tags,attr,optional"`
-	ParseInfluxDB bool `river:"parse_influxdb_tags,attr,optional"`
-	ParseLibrato bool `river:"parse_librato_tags,attr,optional"`
-	ParseSignalFX bool `river:"parse_signalfx_tags,attr,optional"`
-
-	RelayAddr string `river:"relay_addr,attr,optional"`
-	RelayPacketLength int `river:"relay_packet_length,attr,optional"`
+	ListenUDP string `alloy:"listen_udp,attr,optional"`
+	ListenTCP string `alloy:"listen_tcp,attr,optional"`
+	ListenUnixgram string `alloy:"listen_unixgram,attr,optional"`
+	UnixSocketMode string `alloy:"unix_socket_mode,attr,optional"`
+	MappingConfig string `alloy:"mapping_config_path,attr,optional"`
+
+	ReadBuffer int `alloy:"read_buffer,attr,optional"`
+	CacheSize int `alloy:"cache_size,attr,optional"`
+	CacheType string `alloy:"cache_type,attr,optional"`
+	EventQueueSize int `alloy:"event_queue_size,attr,optional"`
+	EventFlushThreshold int `alloy:"event_flush_threshold,attr,optional"`
+	EventFlushInterval time.Duration `alloy:"event_flush_interval,attr,optional"`
+
+	ParseDogStatsd bool `alloy:"parse_dogstatsd_tags,attr,optional"`
+	ParseInfluxDB bool `alloy:"parse_influxdb_tags,attr,optional"`
+	ParseLibrato bool `alloy:"parse_librato_tags,attr,optional"`
+	ParseSignalFX bool `alloy:"parse_signalfx_tags,attr,optional"`
+
+	RelayAddr string `alloy:"relay_addr,attr,optional"`
+	RelayPacketLength int `alloy:"relay_packet_length,attr,optional"`
 }
 
 // DefaultConfig holds non-zero default options for the Config when it is
diff --git a/internal/component/prometheus/exporter/unix/config.go b/internal/component/prometheus/exporter/unix/config.go
index 96de4a5c6b..f5303566e0 100644
--- a/internal/component/prometheus/exporter/unix/config.go
+++ b/internal/component/prometheus/exporter/unix/config.go
@@ -63,42 +63,42 @@ var DefaultArguments = Arguments{
 
 // Arguments is used for controlling for this exporter.
 type Arguments struct {
-	IncludeExporterMetrics bool `river:"include_exporter_metrics,attr,optional"`
-	ProcFSPath string `river:"procfs_path,attr,optional"`
-	SysFSPath string `river:"sysfs_path,attr,optional"`
-	RootFSPath string `river:"rootfs_path,attr,optional"`
-	UdevDataPath string `river:"udev_data_path,attr,optional"`
+	IncludeExporterMetrics bool `alloy:"include_exporter_metrics,attr,optional"`
+	ProcFSPath string `alloy:"procfs_path,attr,optional"`
+	SysFSPath string `alloy:"sysfs_path,attr,optional"`
+	RootFSPath string `alloy:"rootfs_path,attr,optional"`
+	UdevDataPath string `alloy:"udev_data_path,attr,optional"`
 
 	// Collectors to mark as enabled
-	EnableCollectors flagext.StringSlice `river:"enable_collectors,attr,optional"`
+	EnableCollectors flagext.StringSlice `alloy:"enable_collectors,attr,optional"`
 
 	// Collectors to mark as disabled
-	DisableCollectors flagext.StringSlice `river:"disable_collectors,attr,optional"`
+	DisableCollectors flagext.StringSlice `alloy:"disable_collectors,attr,optional"`
 
 	// Overrides the default set of enabled collectors with the collectors
 	// listed.
-	SetCollectors flagext.StringSlice `river:"set_collectors,attr,optional"`
+	SetCollectors flagext.StringSlice `alloy:"set_collectors,attr,optional"`
 
 	// Collector-specific config options
-	BCache BCacheConfig `river:"bcache,block,optional"`
-	CPU CPUConfig `river:"cpu,block,optional"`
-	Disk DiskStatsConfig `river:"disk,block,optional"`
-	EthTool EthToolConfig `river:"ethtool,block,optional"`
-	Filesystem FilesystemConfig `river:"filesystem,block,optional"`
-	IPVS IPVSConfig `river:"ipvs,block,optional"`
-	NTP NTPConfig `river:"ntp,block,optional"`
-	Netclass NetclassConfig `river:"netclass,block,optional"`
-	Netdev NetdevConfig `river:"netdev,block,optional"`
-	Netstat NetstatConfig `river:"netstat,block,optional"`
-	Perf PerfConfig `river:"perf,block,optional"`
-	Powersupply PowersupplyConfig `river:"powersupply,block,optional"`
-	Runit RunitConfig `river:"runit,block,optional"`
-	Supervisord SupervisordConfig `river:"supervisord,block,optional"`
-	Sysctl SysctlConfig `river:"sysctl,block,optional"`
-	Systemd SystemdConfig `river:"systemd,block,optional"`
-	Tapestats TapestatsConfig `river:"tapestats,block,optional"`
-	Textfile TextfileConfig `river:"textfile,block,optional"`
-	VMStat VMStatConfig `river:"vmstat,block,optional"`
+	BCache BCacheConfig `alloy:"bcache,block,optional"`
+	CPU CPUConfig `alloy:"cpu,block,optional"`
+	Disk DiskStatsConfig `alloy:"disk,block,optional"`
+	EthTool EthToolConfig `alloy:"ethtool,block,optional"`
+	Filesystem FilesystemConfig `alloy:"filesystem,block,optional"`
+	IPVS IPVSConfig `alloy:"ipvs,block,optional"`
+	NTP NTPConfig `alloy:"ntp,block,optional"`
+	Netclass NetclassConfig `alloy:"netclass,block,optional"`
+	Netdev NetdevConfig `alloy:"netdev,block,optional"`
+	Netstat NetstatConfig `alloy:"netstat,block,optional"`
+	Perf PerfConfig `alloy:"perf,block,optional"`
+	Powersupply PowersupplyConfig `alloy:"powersupply,block,optional"`
+	Runit RunitConfig `alloy:"runit,block,optional"`
+	Supervisord SupervisordConfig `alloy:"supervisord,block,optional"`
+	Sysctl SysctlConfig `alloy:"sysctl,block,optional"`
+	Systemd SystemdConfig `alloy:"systemd,block,optional"`
+	Tapestats TapestatsConfig `alloy:"tapestats,block,optional"`
+	Textfile TextfileConfig `alloy:"textfile,block,optional"`
+	VMStat VMStatConfig `alloy:"vmstat,block,optional"`
 }
 
 // Convert gives a config suitable for use with github.com/grafana/agent/internal/static/integrations/node_exporter.
@@ -169,125 +169,125 @@ func (a *Arguments) SetToDefault() {
 
 // PowersupplyConfig contains config specific to the powersupply collector.
 type PowersupplyConfig struct {
-	IgnoredSupplies string `river:"ignored_supplies,attr,optional"`
+	IgnoredSupplies string `alloy:"ignored_supplies,attr,optional"`
 }
 
 // RunitConfig contains config specific to the runit collector.
 type RunitConfig struct {
-	ServiceDir string `river:"service_dir,attr,optional"`
+	ServiceDir string `alloy:"service_dir,attr,optional"`
 }
 
 // SupervisordConfig contains config specific to the supervisord collector.
 type SupervisordConfig struct {
-	URL string `river:"url,attr,optional"`
+	URL string `alloy:"url,attr,optional"`
 }
 
 // TapestatsConfig contains config specific to the tapestats collector.
 type TapestatsConfig struct {
-	IgnoredDevices string `river:"ignored_devices,attr,optional"`
+	IgnoredDevices string `alloy:"ignored_devices,attr,optional"`
 }
 
 // TextfileConfig contains config specific to the textfile collector.
 type TextfileConfig struct {
-	Directory string `river:"directory,attr,optional"`
+	Directory string `alloy:"directory,attr,optional"`
 }
 
 // VMStatConfig contains config specific to the vmstat collector.
 type VMStatConfig struct {
-	Fields string `river:"fields,attr,optional"`
+	Fields string `alloy:"fields,attr,optional"`
 }
 
 // NetclassConfig contains config specific to the netclass collector.
 type NetclassConfig struct {
-	IgnoreInvalidSpeedDevice bool `river:"ignore_invalid_speed_device,attr,optional"`
-	IgnoredDevices string `river:"ignored_devices,attr,optional"`
+	IgnoreInvalidSpeedDevice bool `alloy:"ignore_invalid_speed_device,attr,optional"`
+	IgnoredDevices string `alloy:"ignored_devices,attr,optional"`
 }
 
 // NetdevConfig contains config specific to the netdev collector.
 type NetdevConfig struct {
-	AddressInfo bool `river:"address_info,attr,optional"`
-	DeviceExclude string `river:"device_exclude,attr,optional"`
-	DeviceInclude string `river:"device_include,attr,optional"`
+	AddressInfo bool `alloy:"address_info,attr,optional"`
+	DeviceExclude string `alloy:"device_exclude,attr,optional"`
+	DeviceInclude string `alloy:"device_include,attr,optional"`
 }
 
 // NetstatConfig contains config specific to the netstat collector.
 type NetstatConfig struct {
-	Fields string `river:"fields,attr,optional"`
+	Fields string `alloy:"fields,attr,optional"`
 }
 
 // PerfConfig contains config specific to the perf collector.
 type PerfConfig struct {
-	CPUS string `river:"cpus,attr,optional"`
-	Tracepoint flagext.StringSlice `river:"tracepoint,attr,optional"`
+	CPUS string `alloy:"cpus,attr,optional"`
+	Tracepoint flagext.StringSlice `alloy:"tracepoint,attr,optional"`
 
-	DisableHardwareProfilers bool `river:"disable_hardware_profilers,attr,optional"`
-	DisableSoftwareProfilers bool `river:"disable_software_profilers,attr,optional"`
-	DisableCacheProfilers bool `river:"disable_cache_profilers,attr,optional"`
+	DisableHardwareProfilers bool `alloy:"disable_hardware_profilers,attr,optional"`
+	DisableSoftwareProfilers bool `alloy:"disable_software_profilers,attr,optional"`
+	DisableCacheProfilers bool `alloy:"disable_cache_profilers,attr,optional"`
 
-	HardwareProfilers flagext.StringSlice `river:"hardware_profilers,attr,optional"`
-	SoftwareProfilers flagext.StringSlice `river:"software_profilers,attr,optional"`
-	CacheProfilers flagext.StringSlice `river:"cache_profilers,attr,optional"`
+	HardwareProfilers flagext.StringSlice `alloy:"hardware_profilers,attr,optional"`
+	SoftwareProfilers flagext.StringSlice `alloy:"software_profilers,attr,optional"`
+	CacheProfilers flagext.StringSlice `alloy:"cache_profilers,attr,optional"`
 }
 
 // EthToolConfig contains config specific to the ethtool collector.
 type EthToolConfig struct {
-	DeviceExclude string `river:"device_exclude,attr,optional"`
-	DeviceInclude string `river:"device_include,attr,optional"`
-	MetricsInclude string `river:"metrics_include,attr,optional"`
+	DeviceExclude string `alloy:"device_exclude,attr,optional"`
+	DeviceInclude string `alloy:"device_include,attr,optional"`
+	MetricsInclude string `alloy:"metrics_include,attr,optional"`
 }
 
 // FilesystemConfig contains config specific to the filesystem collector.
 type FilesystemConfig struct {
-	FSTypesExclude string `river:"fs_types_exclude,attr,optional"`
-	MountPointsExclude string `river:"mount_points_exclude,attr,optional"`
-	MountTimeout time.Duration `river:"mount_timeout,attr,optional"`
+	FSTypesExclude string `alloy:"fs_types_exclude,attr,optional"`
+	MountPointsExclude string `alloy:"mount_points_exclude,attr,optional"`
+	MountTimeout time.Duration `alloy:"mount_timeout,attr,optional"`
 }
 
 // IPVSConfig contains config specific to the ipvs collector.
 type IPVSConfig struct {
-	BackendLabels []string `river:"backend_labels,attr,optional"`
+	BackendLabels []string `alloy:"backend_labels,attr,optional"`
 }
 
 // BCacheConfig contains config specific to the bcache collector.
 type BCacheConfig struct {
-	PriorityStats bool `river:"priority_stats,attr,optional"`
+	PriorityStats bool `alloy:"priority_stats,attr,optional"`
 }
 
 // CPUConfig contains config specific to the cpu collector.
 type CPUConfig struct {
-	BugsInclude string `river:"bugs_include,attr,optional"`
-	EnableCPUGuest bool `river:"guest,attr,optional"`
-	EnableCPUInfo bool `river:"info,attr,optional"`
-	FlagsInclude string `river:"flags_include,attr,optional"`
+	BugsInclude string `alloy:"bugs_include,attr,optional"`
+	EnableCPUGuest bool `alloy:"guest,attr,optional"`
+	EnableCPUInfo bool `alloy:"info,attr,optional"`
+	FlagsInclude string `alloy:"flags_include,attr,optional"`
 }
 
 // DiskStatsConfig contains config specific to the diskstats collector.
 type DiskStatsConfig struct {
-	DeviceExclude string `river:"device_exclude,attr,optional"`
-	DeviceInclude string `river:"device_include,attr,optional"`
+	DeviceExclude string `alloy:"device_exclude,attr,optional"`
+	DeviceInclude string `alloy:"device_include,attr,optional"`
 }
 
 // NTPConfig contains config specific to the ntp collector.
 type NTPConfig struct {
-	IPTTL int `river:"ip_ttl,attr,optional"`
-	LocalOffsetTolerance time.Duration `river:"local_offset_tolerance,attr,optional"`
-	MaxDistance time.Duration `river:"max_distance,attr,optional"`
-	ProtocolVersion int `river:"protocol_version,attr,optional"`
-	Server string `river:"server,attr,optional"`
-	ServerIsLocal bool `river:"server_is_local,attr,optional"`
+	IPTTL int `alloy:"ip_ttl,attr,optional"`
+	LocalOffsetTolerance time.Duration `alloy:"local_offset_tolerance,attr,optional"`
+	MaxDistance time.Duration `alloy:"max_distance,attr,optional"`
+	ProtocolVersion int `alloy:"protocol_version,attr,optional"`
+	Server string `alloy:"server,attr,optional"`
+	ServerIsLocal bool `alloy:"server_is_local,attr,optional"`
 }
 
 // SystemdConfig contains config specific to the systemd collector.
 type SystemdConfig struct {
-	EnableRestartsMetrics bool `river:"enable_restarts,attr,optional"`
-	EnableStartTimeMetrics bool `river:"start_time,attr,optional"`
-	EnableTaskMetrics bool `river:"task_metrics,attr,optional"`
-	UnitExclude string `river:"unit_exclude,attr,optional"`
-	UnitInclude string `river:"unit_include,attr,optional"`
+	EnableRestartsMetrics bool `alloy:"enable_restarts,attr,optional"`
+	EnableStartTimeMetrics bool `alloy:"start_time,attr,optional"`
+	EnableTaskMetrics bool `alloy:"task_metrics,attr,optional"`
+	UnitExclude string `alloy:"unit_exclude,attr,optional"`
+	UnitInclude string `alloy:"unit_include,attr,optional"`
 }
 
 // SysctlConfig contains config specific to the sysctl collector.
 type SysctlConfig struct {
-	Include []string `river:"include,attr,optional"`
-	IncludeInfo []string `river:"include_info,attr,optional"`
+	Include []string `alloy:"include,attr,optional"`
+	IncludeInfo []string `alloy:"include_info,attr,optional"`
 }
diff --git a/internal/component/prometheus/exporter/windows/config.go b/internal/component/prometheus/exporter/windows/config.go
index 185f6213ac..e4bff8c46c 100644
--- a/internal/component/prometheus/exporter/windows/config.go
+++ b/internal/component/prometheus/exporter/windows/config.go
@@ -9,22 +9,22 @@ import (
 
 // Arguments is used for controlling for this exporter.
 type Arguments struct {
 	// Collectors to mark as enabled
-	EnabledCollectors []string `river:"enabled_collectors,attr,optional"`
+	EnabledCollectors []string `alloy:"enabled_collectors,attr,optional"`
 
 	// Collector-specific config options
-	Dfsr DfsrConfig `river:"dfsr,block,optional"`
-	Exchange ExchangeConfig `river:"exchange,block,optional"`
-	IIS IISConfig `river:"iis,block,optional"`
-	LogicalDisk LogicalDiskConfig `river:"logical_disk,block,optional"`
-	MSMQ MSMQConfig `river:"msmq,block,optional"`
-	MSSQL MSSQLConfig `river:"mssql,block,optional"`
-	Network NetworkConfig `river:"network,block,optional"`
-	PhysicalDisk PhysicalDiskConfig `river:"physical_disk,block,optional"`
-	Process ProcessConfig `river:"process,block,optional"`
-	ScheduledTask ScheduledTaskConfig `river:"scheduled_task,block,optional"`
-	Service ServiceConfig `river:"service,block,optional"`
-	SMTP SMTPConfig `river:"smtp,block,optional"`
-	TextFile TextFileConfig `river:"text_file,block,optional"`
+	Dfsr DfsrConfig `alloy:"dfsr,block,optional"`
+	Exchange ExchangeConfig `alloy:"exchange,block,optional"`
+	IIS IISConfig `alloy:"iis,block,optional"`
+	LogicalDisk LogicalDiskConfig `alloy:"logical_disk,block,optional"`
+	MSMQ MSMQConfig `alloy:"msmq,block,optional"`
+	MSSQL MSSQLConfig `alloy:"mssql,block,optional"`
+	Network NetworkConfig `alloy:"network,block,optional"`
+	PhysicalDisk PhysicalDiskConfig `alloy:"physical_disk,block,optional"`
+	Process ProcessConfig `alloy:"process,block,optional"`
+	ScheduledTask ScheduledTaskConfig `alloy:"scheduled_task,block,optional"`
+	Service ServiceConfig `alloy:"service,block,optional"`
+	SMTP SMTPConfig `alloy:"smtp,block,optional"`
+	TextFile TextFileConfig `alloy:"text_file,block,optional"`
 }
 
 // Convert converts the component's Arguments to the integration's Config.
@@ -49,7 +49,7 @@ func (a *Arguments) Convert() *windows_integration.Config {
 
 // DfsrConfig handles settings for the windows_exporter Exchange collector
 type DfsrConfig struct {
-	SourcesEnabled []string `river:"sources_enabled,attr,optional"`
+	SourcesEnabled []string `alloy:"sources_enabled,attr,optional"`
 }
 
 // Convert converts the component's DfsrConfig to the integration's ExchangeConfig.
@@ -61,7 +61,7 @@ func (t DfsrConfig) Convert() windows_integration.DfsrConfig {
 
 // ExchangeConfig handles settings for the windows_exporter Exchange collector
 type ExchangeConfig struct {
-	EnabledList []string `river:"enabled_list,attr,optional"`
+	EnabledList []string `alloy:"enabled_list,attr,optional"`
 }
 
 // Convert converts the component's ExchangeConfig to the integration's ExchangeConfig.
@@ -73,14 +73,14 @@ func (t ExchangeConfig) Convert() windows_integration.ExchangeConfig {
 
 // IISConfig handles settings for the windows_exporter IIS collector
 type IISConfig struct {
-	AppBlackList string `river:"app_blacklist,attr,optional"`
-	AppWhiteList string `river:"app_whitelist,attr,optional"`
-	SiteBlackList string `river:"site_blacklist,attr,optional"`
-	SiteWhiteList string `river:"site_whitelist,attr,optional"`
-	AppExclude string `river:"app_exclude,attr,optional"`
-	AppInclude string `river:"app_include,attr,optional"`
-	SiteExclude string `river:"site_exclude,attr,optional"`
-	SiteInclude string `river:"site_include,attr,optional"`
+	AppBlackList string `alloy:"app_blacklist,attr,optional"`
+	AppWhiteList string `alloy:"app_whitelist,attr,optional"`
+	SiteBlackList string `alloy:"site_blacklist,attr,optional"`
+	SiteWhiteList string `alloy:"site_whitelist,attr,optional"`
+	AppExclude string `alloy:"app_exclude,attr,optional"`
+	AppInclude string `alloy:"app_include,attr,optional"`
+	SiteExclude string `alloy:"site_exclude,attr,optional"`
+	SiteInclude string `alloy:"site_include,attr,optional"`
 }
 
 // Convert converts the component's IISConfig to the integration's IISConfig.
@@ -99,7 +99,7 @@ func (t IISConfig) Convert() windows_integration.IISConfig {
 
 // TextFileConfig handles settings for the windows_exporter Text File collector
 type TextFileConfig struct {
-	TextFileDirectory string `river:"text_file_directory,attr,optional"`
+	TextFileDirectory string `alloy:"text_file_directory,attr,optional"`
 }
 
 // Convert converts the component's TextFileConfig to the integration's TextFileConfig.
@@ -111,10 +111,10 @@ func (t TextFileConfig) Convert() windows_integration.TextFileConfig {
 
 // SMTPConfig handles settings for the windows_exporter SMTP collector
 type SMTPConfig struct {
-	BlackList string `river:"blacklist,attr,optional"`
-	WhiteList string `river:"whitelist,attr,optional"`
-	Exclude string `river:"exclude,attr,optional"`
-	Include string `river:"include,attr,optional"`
+	BlackList string `alloy:"blacklist,attr,optional"`
+	WhiteList string `alloy:"whitelist,attr,optional"`
+	Exclude string `alloy:"exclude,attr,optional"`
+	Include string `alloy:"include,attr,optional"`
 }
 
 // Convert converts the component's SMTPConfig to the integration's SMTPConfig.
@@ -129,8 +129,8 @@ func (t SMTPConfig) Convert() windows_integration.SMTPConfig {
 
 // ServiceConfig handles settings for the windows_exporter service collector
 type ServiceConfig struct {
-	UseApi string `river:"use_api,attr,optional"`
-	Where string `river:"where_clause,attr,optional"`
+	UseApi string `alloy:"use_api,attr,optional"`
+	Where string `alloy:"where_clause,attr,optional"`
 }
 
 // Convert converts the component's ServiceConfig to the integration's ServiceConfig.
@@ -143,10 +143,10 @@ func (t ServiceConfig) Convert() windows_integration.ServiceConfig {
 
 // ProcessConfig handles settings for the windows_exporter process collector
 type ProcessConfig struct {
-	BlackList string `river:"blacklist,attr,optional"`
-	WhiteList string `river:"whitelist,attr,optional"`
-	Exclude string `river:"exclude,attr,optional"`
-	Include string `river:"include,attr,optional"`
+	BlackList string `alloy:"blacklist,attr,optional"`
+	WhiteList string `alloy:"whitelist,attr,optional"`
+	Exclude string `alloy:"exclude,attr,optional"`
+	Include string `alloy:"include,attr,optional"`
 }
 
 // Convert converts the component's ProcessConfig to the integration's ProcessConfig.
@@ -161,8 +161,8 @@ func (t ProcessConfig) Convert() windows_integration.ProcessConfig {
 
 // ScheduledTaskConfig handles settings for the windows_exporter process collector
 type ScheduledTaskConfig struct {
-	Exclude string `river:"exclude,attr,optional"`
-	Include string `river:"include,attr,optional"`
+	Exclude string `alloy:"exclude,attr,optional"`
+	Include string `alloy:"include,attr,optional"`
 }
 
 // Convert converts the component's ScheduledTaskConfig to the integration's ScheduledTaskConfig.
@@ -175,10 +175,10 @@ func (t ScheduledTaskConfig) Convert() windows_integration.ScheduledTaskConfig {
 
 // NetworkConfig handles settings for the windows_exporter network collector
 type NetworkConfig struct {
-	BlackList string `river:"blacklist,attr,optional"`
-	WhiteList string `river:"whitelist,attr,optional"`
-	Exclude string `river:"exclude,attr,optional"`
-	Include string `river:"include,attr,optional"`
+	BlackList string `alloy:"blacklist,attr,optional"`
+	WhiteList string `alloy:"whitelist,attr,optional"`
+	Exclude string `alloy:"exclude,attr,optional"`
+	Include string `alloy:"include,attr,optional"`
 }
 
 // Convert converts the component's NetworkConfig to the integration's NetworkConfig.
@@ -193,7 +193,7 @@ func (t NetworkConfig) Convert() windows_integration.NetworkConfig {
 
 // MSSQLConfig handles settings for the windows_exporter SQL server collector
 type MSSQLConfig struct {
-	EnabledClasses []string `river:"enabled_classes,attr,optional"`
+	EnabledClasses []string `alloy:"enabled_classes,attr,optional"`
 }
 
 // Convert converts the component's MSSQLConfig to the integration's MSSQLConfig.
@@ -205,7 +205,7 @@ func (t MSSQLConfig) Convert() windows_integration.MSSQLConfig {
 
 // MSMQConfig handles settings for the windows_exporter MSMQ collector
 type MSMQConfig struct {
-	Where string `river:"where_clause,attr,optional"`
+	Where string `alloy:"where_clause,attr,optional"`
 }
 
 // Convert converts the component's MSMQConfig to the integration's MSMQConfig.
@@ -217,10 +217,10 @@ func (t MSMQConfig) Convert() windows_integration.MSMQConfig {
 
 // LogicalDiskConfig handles settings for the windows_exporter logical disk collector
 type LogicalDiskConfig struct {
-	BlackList string `river:"blacklist,attr,optional"`
-	WhiteList string `river:"whitelist,attr,optional"`
-	Include string `river:"include,attr,optional"`
-	Exclude string `river:"exclude,attr,optional"`
+	BlackList string `alloy:"blacklist,attr,optional"`
+	WhiteList string `alloy:"whitelist,attr,optional"`
+	Include string `alloy:"include,attr,optional"`
+	Exclude string `alloy:"exclude,attr,optional"`
 }
 
 // Convert converts the component's LogicalDiskConfig to the integration's LogicalDiskConfig.
@@ -235,8 +235,8 @@ func (t LogicalDiskConfig) Convert() windows_integration.LogicalDiskConfig {
 
 // PhysicalDiskConfig handles settings for the windows_exporter physical disk collector
 type PhysicalDiskConfig struct {
-	Include string `river:"include,attr,optional"`
-	Exclude string `river:"exclude,attr,optional"`
+	Include string `alloy:"include,attr,optional"`
+	Exclude string `alloy:"exclude,attr,optional"`
 }
 
 // Convert converts the component's PhysicalDiskConfig to the integration's PhysicalDiskConfig.
diff --git a/internal/component/prometheus/operator/types.go b/internal/component/prometheus/operator/types.go
index eefd8ab7c8..7f06f2f040 100644
--- a/internal/component/prometheus/operator/types.go
+++ b/internal/component/prometheus/operator/types.go
@@ -17,30 +17,30 @@ import (
 
 type Arguments struct {
 	// Client settings to connect to Kubernetes.
-	Client kubernetes.ClientArguments `river:"client,block,optional"`
+	Client kubernetes.ClientArguments `alloy:"client,block,optional"`
 
-	ForwardTo []storage.Appendable `river:"forward_to,attr"`
+	ForwardTo []storage.Appendable `alloy:"forward_to,attr"`
 
 	// Namespaces to search for monitor resources. Empty implies All namespaces
-	Namespaces []string `river:"namespaces,attr,optional"`
+	Namespaces []string `alloy:"namespaces,attr,optional"`
 
 	// LabelSelector allows filtering discovered monitor resources by labels
-	LabelSelector *config.LabelSelector `river:"selector,block,optional"`
+	LabelSelector *config.LabelSelector `alloy:"selector,block,optional"`
 
-	Clustering cluster.ComponentBlock `river:"clustering,block,optional"`
+	Clustering cluster.ComponentBlock `alloy:"clustering,block,optional"`
 
-	RelabelConfigs []*flow_relabel.Config `river:"rule,block,optional"`
+	RelabelConfigs []*flow_relabel.Config `alloy:"rule,block,optional"`
 
-	Scrape ScrapeOptions `river:"scrape,block,optional"`
+	Scrape ScrapeOptions `alloy:"scrape,block,optional"`
 }
 
 // ScrapeOptions holds values that configure scraping behavior.
 type ScrapeOptions struct {
 	// DefaultScrapeInterval is the default interval to scrape targets.
-	DefaultScrapeInterval time.Duration `river:"default_scrape_interval,attr,optional"`
+	DefaultScrapeInterval time.Duration `alloy:"default_scrape_interval,attr,optional"`
 
 	// DefaultScrapeTimeout is the default timeout to scrape targets.
-	DefaultScrapeTimeout time.Duration `river:"default_scrape_timeout,attr,optional"`
+	DefaultScrapeTimeout time.Duration `alloy:"default_scrape_timeout,attr,optional"`
 }
 
 func (s *ScrapeOptions) GlobalConfig() promconfig.GlobalConfig {
@@ -70,14 +70,14 @@ func (args *Arguments) Validate() error {
 }
 
 type DebugInfo struct {
-	DiscoveredCRDs []*DiscoveredResource `river:"crds,block"`
-	Targets []scrape.TargetStatus `river:"targets,block,optional"`
+	DiscoveredCRDs []*DiscoveredResource `alloy:"crds,block"`
+	Targets []scrape.TargetStatus `alloy:"targets,block,optional"`
 }
 
 type DiscoveredResource struct {
-	Namespace string `river:"namespace,attr"`
-	Name string `river:"name,attr"`
-	LastReconcile time.Time `river:"last_reconcile,attr,optional"`
-	ReconcileError string `river:"reconcile_error,attr,optional"`
-	ScrapeConfigsURL string `river:"scrape_configs_url,attr,optional"`
+	Namespace string `alloy:"namespace,attr"`
+	Name string `alloy:"name,attr"`
+	LastReconcile time.Time `alloy:"last_reconcile,attr,optional"`
+	ReconcileError string `alloy:"reconcile_error,attr,optional"`
+	ScrapeConfigsURL string `alloy:"scrape_configs_url,attr,optional"`
 }
diff --git a/internal/component/prometheus/receive_http/receive_http.go b/internal/component/prometheus/receive_http/receive_http.go
index 1a4b6be4b5..54200481bd 100644
--- a/internal/component/prometheus/receive_http/receive_http.go
+++ b/internal/component/prometheus/receive_http/receive_http.go
@@ -33,8 +33,8 @@ func init() {
 }
 
 type Arguments struct {
-	Server *fnet.ServerConfig `river:",squash"`
-	ForwardTo []storage.Appendable `river:"forward_to,attr"`
+	Server *fnet.ServerConfig `alloy:",squash"`
+	ForwardTo []storage.Appendable `alloy:"forward_to,attr"`
 }
 
 // SetToDefault implements river.Defaulter.
diff --git a/internal/component/prometheus/relabel/relabel.go b/internal/component/prometheus/relabel/relabel.go
index 459de792f2..0a4a81510c 100644
--- a/internal/component/prometheus/relabel/relabel.go
+++ b/internal/component/prometheus/relabel/relabel.go
@@ -39,13 +39,13 @@ func init() {
 // component.
 type Arguments struct {
 	// Where the relabelled metrics should be forwarded to.
-	ForwardTo []storage.Appendable `river:"forward_to,attr"`
+	ForwardTo []storage.Appendable `alloy:"forward_to,attr"`
 
 	// The relabelling rules to apply to each metric before it's forwarded.
-	MetricRelabelConfigs []*flow_relabel.Config `river:"rule,block,optional"`
+	MetricRelabelConfigs []*flow_relabel.Config `alloy:"rule,block,optional"`
 
 	// Cache size to use for LRU cache.
-	CacheSize int `river:"max_cache_size,attr,optional"`
+	CacheSize int `alloy:"max_cache_size,attr,optional"`
 }
 
 // SetToDefault implements river.Defaulter.
@@ -65,8 +65,8 @@ func (arg *Arguments) Validate() error {
 
 // Exports holds values which are exported by the prometheus.relabel component.
 type Exports struct {
-	Receiver storage.Appendable `river:"receiver,attr"`
-	Rules flow_relabel.Rules `river:"rules,attr"`
+	Receiver storage.Appendable `alloy:"receiver,attr"`
+	Rules flow_relabel.Rules `alloy:"rules,attr"`
 }
 
 // Component implements the prometheus.relabel component.
diff --git a/internal/component/prometheus/remotewrite/types.go b/internal/component/prometheus/remotewrite/types.go
index 79b1649e2d..f678e0ca51 100644
--- a/internal/component/prometheus/remotewrite/types.go
+++ b/internal/component/prometheus/remotewrite/types.go
@@ -54,9 +54,9 @@ var (
 
 // Arguments represents the input state of the prometheus.remote_write
 // component.
 type Arguments struct {
-	ExternalLabels map[string]string `river:"external_labels,attr,optional"`
-	Endpoints []*EndpointOptions `river:"endpoint,block,optional"`
-	WALOptions WALOptions `river:"wal,block,optional"`
+	ExternalLabels map[string]string `alloy:"external_labels,attr,optional"`
+	Endpoints []*EndpointOptions `alloy:"endpoint,block,optional"`
+	WALOptions WALOptions `alloy:"wal,block,optional"`
 }
 
 // SetToDefault implements river.Defaulter.
@@ -67,18 +67,18 @@ func (rc *Arguments) SetToDefault() {
 
 // EndpointOptions describes an individual location for where metrics in the WAL
 // should be delivered to using the remote_write protocol.
 type EndpointOptions struct {
-	Name string `river:"name,attr,optional"`
-	URL string `river:"url,attr"`
-	RemoteTimeout time.Duration `river:"remote_timeout,attr,optional"`
-	Headers map[string]string `river:"headers,attr,optional"`
-	SendExemplars bool `river:"send_exemplars,attr,optional"`
-	SendNativeHistograms bool `river:"send_native_histograms,attr,optional"`
-	HTTPClientConfig *types.HTTPClientConfig `river:",squash"`
-	QueueOptions *QueueOptions `river:"queue_config,block,optional"`
-	MetadataOptions *MetadataOptions `river:"metadata_config,block,optional"`
-	WriteRelabelConfigs []*flow_relabel.Config `river:"write_relabel_config,block,optional"`
-	SigV4 *SigV4Config `river:"sigv4,block,optional"`
-	AzureAD *AzureADConfig `river:"azuread,block,optional"`
+	Name string `alloy:"name,attr,optional"`
+	URL string `alloy:"url,attr"`
+	RemoteTimeout time.Duration `alloy:"remote_timeout,attr,optional"`
+	Headers map[string]string `alloy:"headers,attr,optional"`
+	SendExemplars bool `alloy:"send_exemplars,attr,optional"`
+	SendNativeHistograms bool `alloy:"send_native_histograms,attr,optional"`
+	HTTPClientConfig *types.HTTPClientConfig `alloy:",squash"`
+	QueueOptions *QueueOptions `alloy:"queue_config,block,optional"`
+	MetadataOptions *MetadataOptions `alloy:"metadata_config,block,optional"`
+	WriteRelabelConfigs []*flow_relabel.Config `alloy:"write_relabel_config,block,optional"`
+	SigV4 *SigV4Config `alloy:"sigv4,block,optional"`
+	AzureAD *AzureADConfig `alloy:"azuread,block,optional"`
 }
 
 // SetToDefault implements river.Defaulter.
@@ -134,15 +134,15 @@ func (r *EndpointOptions) Validate() error {
 
 // QueueOptions handles the low level queue config options for a remote_write
 type QueueOptions struct {
-	Capacity int `river:"capacity,attr,optional"`
-	MaxShards int `river:"max_shards,attr,optional"`
-	MinShards int `river:"min_shards,attr,optional"`
-	MaxSamplesPerSend int `river:"max_samples_per_send,attr,optional"`
-	BatchSendDeadline time.Duration `river:"batch_send_deadline,attr,optional"`
-	MinBackoff time.Duration `river:"min_backoff,attr,optional"`
-	MaxBackoff time.Duration `river:"max_backoff,attr,optional"`
-	RetryOnHTTP429 bool `river:"retry_on_http_429,attr,optional"`
-	SampleAgeLimit time.Duration `river:"sample_age_limit,attr,optional"`
+	Capacity int `alloy:"capacity,attr,optional"`
+	MaxShards int `alloy:"max_shards,attr,optional"`
+	MinShards int `alloy:"min_shards,attr,optional"`
+	MaxSamplesPerSend int `alloy:"max_samples_per_send,attr,optional"`
+	BatchSendDeadline time.Duration `alloy:"batch_send_deadline,attr,optional"`
+	MinBackoff time.Duration `alloy:"min_backoff,attr,optional"`
+	MaxBackoff time.Duration `alloy:"max_backoff,attr,optional"`
+	RetryOnHTTP429 bool `alloy:"retry_on_http_429,attr,optional"`
+	SampleAgeLimit time.Duration `alloy:"sample_age_limit,attr,optional"`
 }
 
 // SetToDefault implements river.Defaulter.
@@ -173,9 +173,9 @@ func (r *QueueOptions) toPrometheusType() config.QueueConfig {
 
 // MetadataOptions configures how metadata gets sent over the remote_write
 // protocol.
 type MetadataOptions struct {
-	Send bool `river:"send,attr,optional"`
-	SendInterval time.Duration `river:"send_interval,attr,optional"`
-	MaxSamplesPerSend int `river:"max_samples_per_send,attr,optional"`
+	Send bool `alloy:"send,attr,optional"`
+	SendInterval time.Duration `alloy:"send_interval,attr,optional"`
+	MaxSamplesPerSend int `alloy:"max_samples_per_send,attr,optional"`
 }
 
 // SetToDefault implements river.Defaulter.
@@ -199,9 +199,9 @@ func (o *MetadataOptions) toPrometheusType() config.MetadataConfig {
// WALOptions configures behavior within the WAL.
type WALOptions struct {
- TruncateFrequency time.Duration `river:"truncate_frequency,attr,optional"`
- MinKeepaliveTime time.Duration `river:"min_keepalive_time,attr,optional"`
- MaxKeepaliveTime time.Duration `river:"max_keepalive_time,attr,optional"`
+ TruncateFrequency time.Duration `alloy:"truncate_frequency,attr,optional"`
+ MinKeepaliveTime time.Duration `alloy:"min_keepalive_time,attr,optional"`
+ MaxKeepaliveTime time.Duration `alloy:"max_keepalive_time,attr,optional"`
}
// SetToDefault implements river.Defaulter.
@@ -224,7 +224,7 @@ func (o *WALOptions) Validate() error {
// Exports are the set of fields exposed by the prometheus.remote_write
// component.
type Exports struct {
- Receiver storage.Appendable `river:"receiver,attr"`
+ Receiver storage.Appendable `alloy:"receiver,attr"`
}
func convertConfigs(cfg Arguments) (*config.Config, error) {
@@ -271,7 +271,7 @@ func toLabels(in map[string]string) labels.Labels {
// ManagedIdentityConfig is used to store managed identity config values
type ManagedIdentityConfig struct {
// ClientID is the clientId of the managed identity that is being used to authenticate.
- ClientID string `river:"client_id,attr"`
+ ClientID string `alloy:"client_id,attr"`
}
func (m ManagedIdentityConfig) toPrometheusType() azuread.ManagedIdentityConfig {
@@ -282,10 +282,10 @@ func (m ManagedIdentityConfig) toPrometheusType() azuread.ManagedIdentityConfig
type AzureADConfig struct {
// ManagedIdentity is the managed identity that is being used to authenticate.
- ManagedIdentity ManagedIdentityConfig `river:"managed_identity,block"`
+ ManagedIdentity ManagedIdentityConfig `alloy:"managed_identity,block"`
// Cloud is the Azure cloud in which the service is running. Example: AzurePublic/AzureGovernment/AzureChina.
- Cloud string `river:"cloud,attr,optional"`
+ Cloud string `alloy:"cloud,attr,optional"`
}
func (a *AzureADConfig) Validate() error {
@@ -321,11 +321,11 @@ func (a *AzureADConfig) toPrometheusType() *azuread.AzureADConfig {
}
type SigV4Config struct {
- Region string `river:"region,attr,optional"`
- AccessKey string `river:"access_key,attr,optional"`
- SecretKey alloytypes.Secret `river:"secret_key,attr,optional"`
- Profile string `river:"profile,attr,optional"`
- RoleARN string `river:"role_arn,attr,optional"`
+ Region string `alloy:"region,attr,optional"`
+ AccessKey string `alloy:"access_key,attr,optional"`
+ SecretKey alloytypes.Secret `alloy:"secret_key,attr,optional"`
+ Profile string `alloy:"profile,attr,optional"`
+ RoleARN string `alloy:"role_arn,attr,optional"`
}
func (s *SigV4Config) Validate() error {
diff --git a/internal/component/prometheus/scrape/scrape.go b/internal/component/prometheus/scrape/scrape.go
index e87dc5e108..31ea701251 100644
--- a/internal/component/prometheus/scrape/scrape.go
+++ b/internal/component/prometheus/scrape/scrape.go
@@ -44,55 +44,55 @@ func init() {
// Arguments holds values which are used to configure the prometheus.scrape
// component.
type Arguments struct {
- Targets []discovery.Target `river:"targets,attr"`
- ForwardTo []storage.Appendable `river:"forward_to,attr"`
+ Targets []discovery.Target `alloy:"targets,attr"`
+ ForwardTo []storage.Appendable `alloy:"forward_to,attr"`
// The job name to override the job label with.
- JobName string `river:"job_name,attr,optional"`
+ JobName string `alloy:"job_name,attr,optional"`
// Indicator whether the scraped metrics should remain unmodified.
- HonorLabels bool `river:"honor_labels,attr,optional"`
+ HonorLabels bool `alloy:"honor_labels,attr,optional"`
// Indicator whether the scraped timestamps should be respected.
- HonorTimestamps bool `river:"honor_timestamps,attr,optional"`
+ HonorTimestamps bool `alloy:"honor_timestamps,attr,optional"`
// Indicator whether to track the staleness of the scraped timestamps.
- TrackTimestampsStaleness bool `river:"track_timestamps_staleness,attr,optional"`
+ TrackTimestampsStaleness bool `alloy:"track_timestamps_staleness,attr,optional"`
// A set of query parameters with which the target is scraped.
- Params url.Values `river:"params,attr,optional"`
+ Params url.Values `alloy:"params,attr,optional"`
// Whether to scrape a classic histogram that is also exposed as a native histogram.
- ScrapeClassicHistograms bool `river:"scrape_classic_histograms,attr,optional"`
+ ScrapeClassicHistograms bool `alloy:"scrape_classic_histograms,attr,optional"`
// How frequently to scrape the targets of this scrape config.
- ScrapeInterval time.Duration `river:"scrape_interval,attr,optional"`
+ ScrapeInterval time.Duration `alloy:"scrape_interval,attr,optional"`
// The timeout for scraping targets of this config.
- ScrapeTimeout time.Duration `river:"scrape_timeout,attr,optional"`
+ ScrapeTimeout time.Duration `alloy:"scrape_timeout,attr,optional"`
// The HTTP resource path on which to fetch metrics from targets.
- MetricsPath string `river:"metrics_path,attr,optional"`
+ MetricsPath string `alloy:"metrics_path,attr,optional"`
// The URL scheme with which to fetch metrics from targets.
- Scheme string `river:"scheme,attr,optional"`
+ Scheme string `alloy:"scheme,attr,optional"`
// An uncompressed response body larger than this many bytes will cause the
// scrape to fail. 0 means no limit.
- BodySizeLimit units.Base2Bytes `river:"body_size_limit,attr,optional"`
+ BodySizeLimit units.Base2Bytes `alloy:"body_size_limit,attr,optional"`
// More than this many samples post metric-relabeling will cause the scrape
// to fail.
- SampleLimit uint `river:"sample_limit,attr,optional"`
+ SampleLimit uint `alloy:"sample_limit,attr,optional"`
// More than this many targets after the target relabeling will cause the
// scrapes to fail.
- TargetLimit uint `river:"target_limit,attr,optional"`
+ TargetLimit uint `alloy:"target_limit,attr,optional"`
// More than this many labels post metric-relabeling will cause the scrape
// to fail.
- LabelLimit uint `river:"label_limit,attr,optional"`
+ LabelLimit uint `alloy:"label_limit,attr,optional"`
// More than this label name length post metric-relabeling will cause the
// scrape to fail.
- LabelNameLengthLimit uint `river:"label_name_length_limit,attr,optional"`
+ LabelNameLengthLimit uint `alloy:"label_name_length_limit,attr,optional"`
// More than this label value length post metric-relabeling will cause the
// scrape to fail.
- LabelValueLengthLimit uint `river:"label_value_length_limit,attr,optional"`
+ LabelValueLengthLimit uint `alloy:"label_value_length_limit,attr,optional"`
- HTTPClientConfig component_config.HTTPClientConfig `river:",squash"`
+ HTTPClientConfig component_config.HTTPClientConfig `alloy:",squash"`
// Scrape Options
- ExtraMetrics bool `river:"extra_metrics,attr,optional"`
- EnableProtobufNegotiation bool `river:"enable_protobuf_negotiation,attr,optional"`
+ ExtraMetrics bool `alloy:"extra_metrics,attr,optional"`
+ EnableProtobufNegotiation bool `alloy:"enable_protobuf_negotiation,attr,optional"`
- Clustering cluster.ComponentBlock `river:"clustering,block,optional"`
+ Clustering cluster.ComponentBlock `alloy:"clustering,block,optional"`
}
// SetToDefault implements river.Defaulter.
@@ -327,18 +327,18 @@ func (c *Component) distTargets(
// ScraperStatus reports the status of the scraper's jobs.
type ScraperStatus struct {
- TargetStatus []TargetStatus `river:"target,block,optional"`
+ TargetStatus []TargetStatus `alloy:"target,block,optional"`
}
// TargetStatus reports on the status of the latest scrape for a target.
type TargetStatus struct {
- JobName string `river:"job,attr"`
- URL string `river:"url,attr"`
- Health string `river:"health,attr"`
- Labels map[string]string `river:"labels,attr"`
- LastError string `river:"last_error,attr,optional"`
- LastScrape time.Time `river:"last_scrape,attr"`
- LastScrapeDuration time.Duration `river:"last_scrape_duration,attr,optional"`
+ JobName string `alloy:"job,attr"`
+ URL string `alloy:"url,attr"`
+ Health string `alloy:"health,attr"`
+ Labels map[string]string `alloy:"labels,attr"`
+ LastError string `alloy:"last_error,attr,optional"`
+ LastScrape time.Time `alloy:"last_scrape,attr"`
+ LastScrapeDuration time.Duration `alloy:"last_scrape_duration,attr,optional"`
}
// BuildTargetStatuses transforms the targets from a scrape manager into our internal status type for debug info.
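[Editor's note: the mechanics behind this sweep are worth spelling out. Go struct tags are plain strings looked up by key, so a decoder that reads `alloy:` tags sees nothing at all on a field that still carries only a `river:` tag; the rename therefore has to land on every field in one pass. A minimal, self-contained sketch, where the Arguments shape is invented for illustration and not taken from the patch:

```go
package main

import (
	"fmt"
	"reflect"
)

// Hypothetical Arguments struct mirroring the shape of the fields renamed
// in this patch.
type Arguments struct {
	ScrapeInterval string `alloy:"scrape_interval,attr,optional"`
}

func main() {
	f, _ := reflect.TypeOf(Arguments{}).FieldByName("ScrapeInterval")
	fmt.Println(f.Tag.Get("alloy")) // "scrape_interval,attr,optional"
	// Before this patch the lookup key was "river"; after the rename that
	// key simply returns the empty string, so a half-renamed tree would
	// silently drop configuration fields.
	fmt.Println(f.Tag.Get("river") == "") // true
}
```

Note that only the tag key changes; the value grammar (name, then attr/block/enum/label, then flags such as optional or squash) is untouched throughout the diff.]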
diff --git a/internal/component/pyroscope/ebpf/args.go b/internal/component/pyroscope/ebpf/args.go
index f2620fedd4..808a121d82 100644
--- a/internal/component/pyroscope/ebpf/args.go
+++ b/internal/component/pyroscope/ebpf/args.go
@@ -8,17 +8,17 @@ import (
)
type Arguments struct {
- ForwardTo []pyroscope.Appendable `river:"forward_to,attr"`
- Targets []discovery.Target `river:"targets,attr,optional"`
- CollectInterval time.Duration `river:"collect_interval,attr,optional"`
- SampleRate int `river:"sample_rate,attr,optional"`
- PidCacheSize int `river:"pid_cache_size,attr,optional"`
- BuildIDCacheSize int `river:"build_id_cache_size,attr,optional"`
- SameFileCacheSize int `river:"same_file_cache_size,attr,optional"`
- ContainerIDCacheSize int `river:"container_id_cache_size,attr,optional"`
- CacheRounds int `river:"cache_rounds,attr,optional"`
- CollectUserProfile bool `river:"collect_user_profile,attr,optional"`
- CollectKernelProfile bool `river:"collect_kernel_profile,attr,optional"`
- Demangle string `river:"demangle,attr,optional"`
- PythonEnabled bool `river:"python_enabled,attr,optional"`
+ ForwardTo []pyroscope.Appendable `alloy:"forward_to,attr"`
+ Targets []discovery.Target `alloy:"targets,attr,optional"`
+ CollectInterval time.Duration `alloy:"collect_interval,attr,optional"`
+ SampleRate int `alloy:"sample_rate,attr,optional"`
+ PidCacheSize int `alloy:"pid_cache_size,attr,optional"`
+ BuildIDCacheSize int `alloy:"build_id_cache_size,attr,optional"`
+ SameFileCacheSize int `alloy:"same_file_cache_size,attr,optional"`
+ ContainerIDCacheSize int `alloy:"container_id_cache_size,attr,optional"`
+ CacheRounds int `alloy:"cache_rounds,attr,optional"`
+ CollectUserProfile bool `alloy:"collect_user_profile,attr,optional"`
+ CollectKernelProfile bool `alloy:"collect_kernel_profile,attr,optional"`
+ Demangle string `alloy:"demangle,attr,optional"`
+ PythonEnabled bool `alloy:"python_enabled,attr,optional"`
}
diff --git a/internal/component/pyroscope/ebpf/ebpf_linux.go b/internal/component/pyroscope/ebpf/ebpf_linux.go
index 41f48431f5..7416ee8854 100644
--- a/internal/component/pyroscope/ebpf/ebpf_linux.go
+++ b/internal/component/pyroscope/ebpf/ebpf_linux.go
@@ -200,8 +200,8 @@ func (c *Component) collectProfiles() error {
}
type DebugInfo struct {
- Targets interface{} `river:"targets,attr,optional"`
- Session interface{} `river:"session,attr,optional"`
+ Targets interface{} `alloy:"targets,attr,optional"`
+ Session interface{} `alloy:"session,attr,optional"`
}
func (c *Component) updateDebugInfo() {
diff --git a/internal/component/pyroscope/java/args.go b/internal/component/pyroscope/java/args.go
index 0eba7ff2df..b6529ef368 100644
--- a/internal/component/pyroscope/java/args.go
+++ b/internal/component/pyroscope/java/args.go
@@ -8,19 +8,19 @@ import (
)
type Arguments struct {
- Targets []discovery.Target `river:"targets,attr"`
- ForwardTo []pyroscope.Appendable `river:"forward_to,attr"`
+ Targets []discovery.Target `alloy:"targets,attr"`
+ ForwardTo []pyroscope.Appendable `alloy:"forward_to,attr"`
- TmpDir string `river:"tmp_dir,attr,optional"`
- ProfilingConfig ProfilingConfig `river:"profiling_config,block,optional"`
+ TmpDir string `alloy:"tmp_dir,attr,optional"`
+ ProfilingConfig ProfilingConfig `alloy:"profiling_config,block,optional"`
}
type ProfilingConfig struct {
- Interval time.Duration `river:"interval,attr,optional"`
- SampleRate int `river:"sample_rate,attr,optional"`
- Alloc string `river:"alloc,attr,optional"`
- Lock string `river:"lock,attr,optional"`
- CPU bool `river:"cpu,attr,optional"`
+ Interval time.Duration `alloy:"interval,attr,optional"`
+ SampleRate int `alloy:"sample_rate,attr,optional"`
+ Alloc string `alloy:"alloc,attr,optional"`
+ Lock string `alloy:"lock,attr,optional"`
+ CPU bool `alloy:"cpu,attr,optional"`
}
func (rc *Arguments) UnmarshalRiver(f func(interface{}) error) error {
diff --git a/internal/component/pyroscope/scrape/scrape.go b/internal/component/pyroscope/scrape/scrape.go
index dbb25884b7..9507eced24 100644
--- a/internal/component/pyroscope/scrape/scrape.go
+++ b/internal/component/pyroscope/scrape/scrape.go
@@ -47,57 +47,57 @@ func init() {
// Arguments holds values which are used to configure the pprof.scrape
// component.
type Arguments struct {
- Targets []discovery.Target `river:"targets,attr"`
- ForwardTo []pyroscope.Appendable `river:"forward_to,attr"`
+ Targets []discovery.Target `alloy:"targets,attr"`
+ ForwardTo []pyroscope.Appendable `alloy:"forward_to,attr"`
// The job name to override the job label with.
- JobName string `river:"job_name,attr,optional"`
+ JobName string `alloy:"job_name,attr,optional"`
// A set of query parameters with which the target is scraped.
- Params url.Values `river:"params,attr,optional"`
+ Params url.Values `alloy:"params,attr,optional"`
// How frequently to scrape the targets of this scrape config.
- ScrapeInterval time.Duration `river:"scrape_interval,attr,optional"`
+ ScrapeInterval time.Duration `alloy:"scrape_interval,attr,optional"`
// The timeout for scraping targets of this config.
- ScrapeTimeout time.Duration `river:"scrape_timeout,attr,optional"`
+ ScrapeTimeout time.Duration `alloy:"scrape_timeout,attr,optional"`
// The URL scheme with which to fetch metrics from targets.
- Scheme string `river:"scheme,attr,optional"`
+ Scheme string `alloy:"scheme,attr,optional"`
// todo(ctovena): add support for limits.
// // An uncompressed response body larger than this many bytes will cause the
// // scrape to fail. 0 means no limit.
- // BodySizeLimit units.Base2Bytes `river:"body_size_limit,attr,optional"`
+ // BodySizeLimit units.Base2Bytes `alloy:"body_size_limit,attr,optional"`
// // More than this many targets after the target relabeling will cause the
// // scrapes to fail.
- // TargetLimit uint `river:"target_limit,attr,optional"`
+ // TargetLimit uint `alloy:"target_limit,attr,optional"`
// // More than this many labels post metric-relabeling will cause the scrape
// // to fail.
- // LabelLimit uint `river:"label_limit,attr,optional"`
+ // LabelLimit uint `alloy:"label_limit,attr,optional"`
// // More than this label name length post metric-relabeling will cause the
// // scrape to fail.
- // LabelNameLengthLimit uint `river:"label_name_length_limit,attr,optional"`
+ // LabelNameLengthLimit uint `alloy:"label_name_length_limit,attr,optional"`
// // More than this label value length post metric-relabeling will cause the
// // scrape to fail.
- // LabelValueLengthLimit uint `river:"label_value_length_limit,attr,optional"`
+ // LabelValueLengthLimit uint `alloy:"label_value_length_limit,attr,optional"`
- HTTPClientConfig component_config.HTTPClientConfig `river:",squash"`
+ HTTPClientConfig component_config.HTTPClientConfig `alloy:",squash"`
- ProfilingConfig ProfilingConfig `river:"profiling_config,block,optional"`
+ ProfilingConfig ProfilingConfig `alloy:"profiling_config,block,optional"`
- Clustering cluster.ComponentBlock `river:"clustering,block,optional"`
+ Clustering cluster.ComponentBlock `alloy:"clustering,block,optional"`
}
type ProfilingConfig struct {
- Memory ProfilingTarget `river:"profile.memory,block,optional"`
- Block ProfilingTarget `river:"profile.block,block,optional"`
- Goroutine ProfilingTarget `river:"profile.goroutine,block,optional"`
- Mutex ProfilingTarget `river:"profile.mutex,block,optional"`
- ProcessCPU ProfilingTarget `river:"profile.process_cpu,block,optional"`
- FGProf ProfilingTarget `river:"profile.fgprof,block,optional"`
- GoDeltaProfMemory ProfilingTarget `river:"profile.godeltaprof_memory,block,optional"`
- GoDeltaProfMutex ProfilingTarget `river:"profile.godeltaprof_mutex,block,optional"`
- GoDeltaProfBlock ProfilingTarget `river:"profile.godeltaprof_block,block,optional"`
- Custom []CustomProfilingTarget `river:"profile.custom,block,optional"`
-
- PprofPrefix string `river:"path_prefix,attr,optional"`
+ Memory ProfilingTarget `alloy:"profile.memory,block,optional"`
+ Block ProfilingTarget `alloy:"profile.block,block,optional"`
+ Goroutine ProfilingTarget `alloy:"profile.goroutine,block,optional"`
+ Mutex ProfilingTarget `alloy:"profile.mutex,block,optional"`
+ ProcessCPU ProfilingTarget `alloy:"profile.process_cpu,block,optional"`
+ FGProf ProfilingTarget `alloy:"profile.fgprof,block,optional"`
+ GoDeltaProfMemory ProfilingTarget `alloy:"profile.godeltaprof_memory,block,optional"`
+ GoDeltaProfMutex ProfilingTarget `alloy:"profile.godeltaprof_mutex,block,optional"`
+ GoDeltaProfBlock ProfilingTarget `alloy:"profile.godeltaprof_block,block,optional"`
+ Custom []CustomProfilingTarget `alloy:"profile.custom,block,optional"`
+
+ PprofPrefix string `alloy:"path_prefix,attr,optional"`
}
// AllTargets returns the set of all standard and custom profiling targets,
@@ -175,16 +175,16 @@ func (cfg *ProfilingConfig) SetToDefault() {
}
type ProfilingTarget struct {
- Enabled bool `river:"enabled,attr,optional"`
- Path string `river:"path,attr,optional"`
- Delta bool `river:"delta,attr,optional"`
+ Enabled bool `alloy:"enabled,attr,optional"`
+ Path string `alloy:"path,attr,optional"`
+ Delta bool `alloy:"delta,attr,optional"`
}
type CustomProfilingTarget struct {
- Enabled bool `river:"enabled,attr"`
- Path string `river:"path,attr"`
- Delta bool `river:"delta,attr,optional"`
- Name string `river:",label"`
+ Enabled bool `alloy:"enabled,attr"`
+ Path string `alloy:"path,attr"`
+ Delta bool `alloy:"delta,attr,optional"`
+ Name string `alloy:",label"`
}
var DefaultArguments = NewDefaultArguments()
diff --git a/internal/component/pyroscope/write/write.go b/internal/component/pyroscope/write/write.go
index d50cec9bd1..7ecd1b20fe 100644
--- a/internal/component/pyroscope/write/write.go
+++ b/internal/component/pyroscope/write/write.go
@@ -49,8 +49,8 @@ func init() {
// Arguments represents the input state of the pyroscope.write
// component.
type Arguments struct {
- ExternalLabels map[string]string `river:"external_labels,attr,optional"`
- Endpoints []*EndpointOptions `river:"endpoint,block,optional"`
+ ExternalLabels map[string]string `alloy:"external_labels,attr,optional"`
+ Endpoints []*EndpointOptions `alloy:"endpoint,block,optional"`
}
// SetToDefault implements river.Defaulter.
@@ -61,14 +61,14 @@ func (rc *Arguments) SetToDefault() {
// EndpointOptions describes an individual location for where profiles
// should be delivered to using the Pyroscope push API.
type EndpointOptions struct {
- Name string `river:"name,attr,optional"`
- URL string `river:"url,attr"`
- RemoteTimeout time.Duration `river:"remote_timeout,attr,optional"`
- Headers map[string]string `river:"headers,attr,optional"`
- HTTPClientConfig *config.HTTPClientConfig `river:",squash"`
- MinBackoff time.Duration `river:"min_backoff_period,attr,optional"` // start backoff at this level
- MaxBackoff time.Duration `river:"max_backoff_period,attr,optional"` // increase exponentially to this level
- MaxBackoffRetries int `river:"max_backoff_retries,attr,optional"` // give up after this many; zero means infinite retries
+ Name string `alloy:"name,attr,optional"`
+ URL string `alloy:"url,attr"`
+ RemoteTimeout time.Duration `alloy:"remote_timeout,attr,optional"`
+ Headers map[string]string `alloy:"headers,attr,optional"`
+ HTTPClientConfig *config.HTTPClientConfig `alloy:",squash"`
+ MinBackoff time.Duration `alloy:"min_backoff_period,attr,optional"` // start backoff at this level
+ MaxBackoff time.Duration `alloy:"max_backoff_period,attr,optional"` // increase exponentially to this level
+ MaxBackoffRetries int `alloy:"max_backoff_retries,attr,optional"` // give up after this many; zero means infinite retries
}
func GetDefaultEndpointOptions() EndpointOptions {
@@ -107,7 +107,7 @@ type Component struct {
// Exports are the set of fields exposed by the pyroscope.write component.
type Exports struct {
- Receiver pyroscope.Appendable `river:"receiver,attr"`
+ Receiver pyroscope.Appendable `alloy:"receiver,attr"`
}
// New creates a new pyroscope.write component.
diff --git a/internal/component/remote/http/http.go b/internal/component/remote/http/http.go
index 2085f054fc..3f9669bbc8 100644
--- a/internal/component/remote/http/http.go
+++ b/internal/component/remote/http/http.go
@@ -36,16 +36,16 @@ func init() {
// Arguments control the remote.http component.
type Arguments struct {
- URL string `river:"url,attr"`
- PollFrequency time.Duration `river:"poll_frequency,attr,optional"`
- PollTimeout time.Duration `river:"poll_timeout,attr,optional"`
- IsSecret bool `river:"is_secret,attr,optional"`
+ URL string `alloy:"url,attr"`
+ PollFrequency time.Duration `alloy:"poll_frequency,attr,optional"`
+ PollTimeout time.Duration `alloy:"poll_timeout,attr,optional"`
+ IsSecret bool `alloy:"is_secret,attr,optional"`
- Method string `river:"method,attr,optional"`
- Headers map[string]string `river:"headers,attr,optional"`
- Body string `river:"body,attr,optional"`
+ Method string `alloy:"method,attr,optional"`
+ Headers map[string]string `alloy:"headers,attr,optional"`
+ Body string `alloy:"body,attr,optional"`
- Client common_config.HTTPClientConfig `river:"client,block,optional"`
+ Client common_config.HTTPClientConfig `alloy:"client,block,optional"`
}
// DefaultArguments holds default settings for Arguments.
@@ -82,7 +82,7 @@ func (args *Arguments) Validate() error {
// Exports holds settings exported by remote.http.
type Exports struct {
- Content alloytypes.OptionalSecret `river:"content,attr"`
+ Content alloytypes.OptionalSecret `alloy:"content,attr"`
}
// Component implements the remote.http component.
diff --git a/internal/component/remote/kubernetes/kubernetes.go b/internal/component/remote/kubernetes/kubernetes.go
index 42761a02fc..251cc2d9ae 100644
--- a/internal/component/remote/kubernetes/kubernetes.go
+++ b/internal/component/remote/kubernetes/kubernetes.go
@@ -27,13 +27,13 @@ const (
// Arguments control the component.
type Arguments struct {
- Namespace string `river:"namespace,attr"`
- Name string `river:"name,attr"`
- PollFrequency time.Duration `river:"poll_frequency,attr,optional"`
- PollTimeout time.Duration `river:"poll_timeout,attr,optional"`
+ Namespace string `alloy:"namespace,attr"`
+ Name string `alloy:"name,attr"`
+ PollFrequency time.Duration `alloy:"poll_frequency,attr,optional"`
+ PollTimeout time.Duration `alloy:"poll_timeout,attr,optional"`
// Client settings to connect to Kubernetes.
- Client kubernetes.ClientArguments `river:"client,block,optional"`
+ Client kubernetes.ClientArguments `alloy:"client,block,optional"`
}
// DefaultArguments holds default settings for Arguments.
@@ -60,7 +60,7 @@ func (args *Arguments) Validate() error {
// Exports holds settings exported by this component.
type Exports struct {
- Data map[string]alloytypes.OptionalSecret `river:"data,attr"`
+ Data map[string]alloytypes.OptionalSecret `alloy:"data,attr"`
}
// Component implements the remote.kubernetes.* component.
diff --git a/internal/component/remote/s3/types.go b/internal/component/remote/s3/types.go
index 2722e772af..5e64ee55fd 100644
--- a/internal/component/remote/s3/types.go
+++ b/internal/component/remote/s3/types.go
@@ -9,25 +9,25 @@ import (
// Arguments implements the input for the S3 component.
type Arguments struct {
- Path string `river:"path,attr"`
+ Path string `alloy:"path,attr"`
// PollFrequency determines the frequency to check for changes
// defaults to 10m.
- PollFrequency time.Duration `river:"poll_frequency,attr,optional"`
+ PollFrequency time.Duration `alloy:"poll_frequency,attr,optional"`
// IsSecret determines if the content should be displayed to the user.
- IsSecret bool `river:"is_secret,attr,optional"`
+ IsSecret bool `alloy:"is_secret,attr,optional"`
// Options allows the overriding of default settings.
- Options Client `river:"client,block,optional"`
+ Options Client `alloy:"client,block,optional"`
}
// Client implements specific AWS configuration options
type Client struct {
- AccessKey string `river:"key,attr,optional"`
- Secret alloytypes.Secret `river:"secret,attr,optional"`
- Endpoint string `river:"endpoint,attr,optional"`
- DisableSSL bool `river:"disable_ssl,attr,optional"`
- UsePathStyle bool `river:"use_path_style,attr,optional"`
- Region string `river:"region,attr,optional"`
- SigningRegion string `river:"signing_region,attr,optional"`
+ AccessKey string `alloy:"key,attr,optional"`
+ Secret alloytypes.Secret `alloy:"secret,attr,optional"`
+ Endpoint string `alloy:"endpoint,attr,optional"`
+ DisableSSL bool `alloy:"disable_ssl,attr,optional"`
+ UsePathStyle bool `alloy:"use_path_style,attr,optional"`
+ Region string `alloy:"region,attr,optional"`
+ SigningRegion string `alloy:"signing_region,attr,optional"`
}
const minimumPollFrequency = 30 * time.Second
@@ -52,5 +52,5 @@ func (a *Arguments) Validate() error {
// Exports implements the file content
type Exports struct {
- Content alloytypes.OptionalSecret `river:"content,attr"`
+ Content alloytypes.OptionalSecret `alloy:"content,attr"`
}
diff --git a/internal/component/remote/vault/auth.go b/internal/component/remote/vault/auth.go
index 9d27fe26b9..e494ff9aef 100644
--- a/internal/component/remote/vault/auth.go
+++ b/internal/component/remote/vault/auth.go
@@ -29,15 +29,15 @@ type authMethod interface {
// component instance. These are embedded as an enum field so only one may be
// set per AuthArguments.
type AuthArguments struct {
- AuthToken *AuthToken `river:"token,block,optional"`
- AuthAppRole *AuthAppRole `river:"approle,block,optional"`
- AuthAWS *AuthAWS `river:"aws,block,optional"`
- AuthAzure *AuthAzure `river:"azure,block,optional"`
- AuthGCP *AuthGCP `river:"gcp,block,optional"`
- AuthKubernetes *AuthKubernetes `river:"kubernetes,block,optional"`
- AuthLDAP *AuthLDAP `river:"ldap,block,optional"`
- AuthUserPass *AuthUserPass `river:"userpass,block,optional"`
- AuthCustom *AuthCustom `river:"custom,block,optional"`
+ AuthToken *AuthToken `alloy:"token,block,optional"`
+ AuthAppRole *AuthAppRole `alloy:"approle,block,optional"`
+ AuthAWS *AuthAWS `alloy:"aws,block,optional"`
+ AuthAzure *AuthAzure `alloy:"azure,block,optional"`
+ AuthGCP *AuthGCP `alloy:"gcp,block,optional"`
+ AuthKubernetes *AuthKubernetes `alloy:"kubernetes,block,optional"`
+ AuthLDAP *AuthLDAP `alloy:"ldap,block,optional"`
+ AuthUserPass *AuthUserPass `alloy:"userpass,block,optional"`
+ AuthCustom *AuthCustom `alloy:"custom,block,optional"`
}
func (a *AuthArguments) authMethod() authMethod {
@@ -70,7 +70,7 @@ func (a *AuthArguments) authMethod() authMethod {
// AuthToken authenticates against Vault with a token.
type AuthToken struct {
- Token alloytypes.Secret `river:"token,attr"`
+ Token alloytypes.Secret `alloy:"token,attr"`
}
func (a *AuthToken) vaultAuthenticate(ctx context.Context, cli *vault.Client) (*vault.Secret, error) {
@@ -80,10 +80,10 @@ func (a *AuthToken) vaultAuthenticate(ctx context.Context, cli *vault.Client) (*
// AuthAppRole authenticates against Vault with AppRole.
type AuthAppRole struct {
- RoleID string `river:"role_id,attr"`
- Secret alloytypes.Secret `river:"secret,attr"`
- WrappingToken bool `river:"wrapping_token,attr,optional"`
- MountPath string `river:"mount_path,attr,optional"`
+ RoleID string `alloy:"role_id,attr"`
+ Secret alloytypes.Secret `alloy:"secret,attr"`
+ WrappingToken bool `alloy:"wrapping_token,attr,optional"`
+ MountPath string `alloy:"mount_path,attr,optional"`
}
// DefaultAuthAppRole provides default settings for AuthAppRole.
@@ -122,14 +122,14 @@ func (a *AuthAppRole) vaultAuthenticate(ctx context.Context, cli *vault.Client)
type AuthAWS struct {
// Type specifies the mechanism used to authenticate with AWS. Should be
// either ec2 or iam.
- Type string `river:"type,attr"`
- Region string `river:"region,attr,optional"`
- Role string `river:"role,attr,optional"`
- IAMServerIDHeader string `river:"iam_server_id_header,attr,optional"`
+ Type string `alloy:"type,attr"`
+ Region string `alloy:"region,attr,optional"`
+ Role string `alloy:"role,attr,optional"`
+ IAMServerIDHeader string `alloy:"iam_server_id_header,attr,optional"`
// EC2SignatureType specifies the signature to use against EC2. Only used
// when Type is ec2. Valid options are identity and pkcs7 (default).
- EC2SignatureType string `river:"ec2_signature_type,attr,optional"`
- MountPath string `river:"mount_path,attr,optional"`
+ EC2SignatureType string `alloy:"ec2_signature_type,attr,optional"`
+ MountPath string `alloy:"mount_path,attr,optional"`
}
const (
@@ -219,9 +219,9 @@ func (a *AuthAWS) vaultAuthenticate(ctx context.Context, cli *vault.Client) (*va
// AuthAzure authenticates against Vault with Azure.
type AuthAzure struct {
- Role string `river:"role,attr"`
- ResourceURL string `river:"resource_url,attr,optional"`
- MountPath string `river:"mount_path,attr,optional"`
+ Role string `alloy:"role,attr"`
+ ResourceURL string `alloy:"resource_url,attr,optional"`
+ MountPath string `alloy:"mount_path,attr,optional"`
}
// DefaultAuthAzure provides default settings for AuthAzure.
@@ -258,12 +258,12 @@ func (a *AuthAzure) vaultAuthenticate(ctx context.Context, cli *vault.Client) (*
// AuthGCP authenticates against Vault with GCP.
type AuthGCP struct {
- Role string `river:"role,attr"`
+ Role string `alloy:"role,attr"`
// Type specifies the mechanism used to authenticate with GCS. Should be
// either gce or iam.
- Type string `river:"type,attr"`
- IAMServiceAccount string `river:"iam_service_account,attr,optional"`
- MountPath string `river:"mount_path,attr,optional"`
+ Type string `alloy:"type,attr"`
+ IAMServiceAccount string `alloy:"iam_service_account,attr,optional"`
+ MountPath string `alloy:"mount_path,attr,optional"`
}
const (
@@ -330,9 +330,9 @@ func (a *AuthGCP) vaultAuthenticate(ctx context.Context, cli *vault.Client) (*va
// AuthKubernetes authenticates against Vault with Kubernetes.
type AuthKubernetes struct {
- Role string `river:"role,attr"`
- ServiceAccountTokenFile string `river:"service_account_file,attr,optional"`
- MountPath string `river:"mount_path,attr,optional"`
+ Role string `alloy:"role,attr"`
+ ServiceAccountTokenFile string `alloy:"service_account_file,attr,optional"`
+ MountPath string `alloy:"mount_path,attr,optional"`
}
// DefaultAuthKubernetes provides default settings for AuthKubernetes.
@@ -369,9 +369,9 @@ func (a *AuthKubernetes) vaultAuthenticate(ctx context.Context, cli *vault.Clien
// AuthLDAP authenticates against Vault with LDAP.
type AuthLDAP struct {
- Username string `river:"username,attr"`
- Password alloytypes.Secret `river:"password,attr"`
- MountPath string `river:"mount_path,attr,optional"`
+ Username string `alloy:"username,attr"`
+ Password alloytypes.Secret `alloy:"password,attr"`
+ MountPath string `alloy:"mount_path,attr,optional"`
}
// DefaultAuthLDAP provides default settings for AuthLDAP.
@@ -406,9 +406,9 @@ func (a *AuthLDAP) vaultAuthenticate(ctx context.Context, cli *vault.Client) (*v
// AuthUserPass authenticates against Vault with a username and password.
type AuthUserPass struct {
- Username string `river:"username,attr"`
- Password alloytypes.Secret `river:"password,attr"`
- MountPath string `river:"mount_path,attr,optional"`
+ Username string `alloy:"username,attr"`
+ Password alloytypes.Secret `alloy:"password,attr"`
+ MountPath string `alloy:"mount_path,attr,optional"`
}
// DefaultAuthUserPass provides default settings for AuthUserPass.
@@ -444,8 +444,8 @@ func (a *AuthUserPass) vaultAuthenticate(ctx context.Context, cli *vault.Client)
// AuthCustom provides a custom authentication method.
type AuthCustom struct {
// Path to use for logging in (e.g., auth/kubernetes/login, etc.)
- Path string `river:"path,attr"`
- Data map[string]alloytypes.Secret `river:"data,attr"`
+ Path string `alloy:"path,attr"`
+ Data map[string]alloytypes.Secret `alloy:"data,attr"`
}
// Login implements vault.AuthMethod.
diff --git a/internal/component/remote/vault/refresher.go b/internal/component/remote/vault/refresher.go
index e752e1ccc7..479e7184eb 100644
--- a/internal/component/remote/vault/refresher.go
+++ b/internal/component/remote/vault/refresher.go
@@ -258,11 +258,11 @@ func (tm *tokenManager) DebugInfo() secretInfo {
}
type secretInfo struct {
- LatestRequestID string `river:"latest_request_id,attr"`
- LastUpdateTime time.Time `river:"last_update_time,attr"`
- SecretExpireTime time.Time `river:"secret_expire_time,attr"`
- Renewable bool `river:"renewable,attr"`
- Warnings []string `river:"warnings,attr"`
+ LatestRequestID string `alloy:"latest_request_id,attr"`
+ LastUpdateTime time.Time `alloy:"last_update_time,attr"`
+ SecretExpireTime time.Time `alloy:"secret_expire_time,attr"`
+ Renewable bool `alloy:"renewable,attr"`
+ Warnings []string `alloy:"warnings,attr"`
}
func getSecretInfo(secret *vault.Secret, updateTime time.Time) secretInfo {
diff --git a/internal/component/remote/vault/vault.go b/internal/component/remote/vault/vault.go
index ccc06173e5..cfe4fe85ee 100644
--- a/internal/component/remote/vault/vault.go
+++ b/internal/component/remote/vault/vault.go
@@ -31,21 +31,21 @@ func init() {
// Arguments configures remote.vault.
type Arguments struct {
- Server string `river:"server,attr"`
- Namespace string `river:"namespace,attr,optional"`
+ Server string `alloy:"server,attr"`
+ Namespace string `alloy:"namespace,attr,optional"`
- Path string `river:"path,attr"`
+ Path string `alloy:"path,attr"`
- RereadFrequency time.Duration `river:"reread_frequency,attr,optional"`
+ RereadFrequency time.Duration `alloy:"reread_frequency,attr,optional"`
- ClientOptions ClientOptions `river:"client_options,block,optional"`
+ ClientOptions ClientOptions `alloy:"client_options,block,optional"`
// The user *must* provide exactly one Auth blocks. This must be a slice
// because the enum flag requires a slice and being tagged as optional.
//
// TODO(rfratto): allow the enum flag to be used with a non-slice type.
- Auth []AuthArguments `river:"auth,enum,optional"`
+ Auth []AuthArguments `alloy:"auth,enum,optional"`
}
// DefaultArguments holds default settings for Arguments.
@@ -112,10 +112,10 @@ func (a *Arguments) secretStore(cli *vault.Client) secretStore {
// ClientOptions sets extra options on the Client.
type ClientOptions struct {
- MinRetryWait time.Duration `river:"min_retry_wait,attr,optional"`
- MaxRetryWait time.Duration `river:"max_retry_wait,attr,optional"`
- MaxRetries int `river:"max_retries,attr,optional"`
- Timeout time.Duration `river:"timeout,attr,optional"`
+ MinRetryWait time.Duration `alloy:"min_retry_wait,attr,optional"`
+ MaxRetryWait time.Duration `alloy:"max_retry_wait,attr,optional"`
+ MaxRetries int `alloy:"max_retries,attr,optional"`
+ Timeout time.Duration `alloy:"timeout,attr,optional"`
}
// Exports is the values exported by remote.vault.
@@ -126,7 +126,7 @@ type Exports struct {
//
// However, it seems that most secrets engines don't actually return
// arbitrary data, so this limitation shouldn't cause any issues in practice.
- Data map[string]alloytypes.Secret `river:"data,attr"`
+ Data map[string]alloytypes.Secret `alloy:"data,attr"`
}
// Component implements the remote.vault component.
@@ -318,6 +318,6 @@ func (c *Component) DebugInfo() interface{} {
}
type debugInfo struct {
- AuthToken secretInfo `river:"auth_token,block"`
- Secret secretInfo `river:"secret,block"`
+ AuthToken secretInfo `alloy:"auth_token,block"`
+ Secret secretInfo `alloy:"secret,block"`
}
diff --git a/internal/flow/componenttest/testfailmodule.go b/internal/flow/componenttest/testfailmodule.go
index b5f4de1757..ddd5c4ae18 100644
--- a/internal/flow/componenttest/testfailmodule.go
+++ b/internal/flow/componenttest/testfailmodule.go
@@ -40,8 +40,8 @@ func init() {
}
type TestFailArguments struct {
- Content string `river:"content,attr"`
- Fail bool `river:"fail,attr,optional"`
+ Content string `alloy:"content,attr"`
+ Fail bool `alloy:"fail,attr,optional"`
}
type TestFailModule struct {
diff --git a/internal/flow/flow_services_test.go b/internal/flow/flow_services_test.go
index 80404b80f4..b6674ad9dc 100644
--- a/internal/flow/flow_services_test.go
+++ b/internal/flow/flow_services_test.go
@@ -49,7 +49,7 @@ func TestServices(t *testing.T) {
func TestServices_Configurable(t *testing.T) {
defer verifyNoGoroutineLeaks(t)
type ServiceOptions struct {
- Name string `river:"name,attr"`
+ Name string `alloy:"name,attr"`
}
ctx, cancel := context.WithCancel(context.Background())
@@ -104,7 +104,7 @@ func TestServices_Configurable(t *testing.T) {
func TestServices_Configurable_Optional(t *testing.T) {
defer verifyNoGoroutineLeaks(t)
type ServiceOptions struct {
- Name string `river:"name,attr,optional"`
+ Name string `alloy:"name,attr,optional"`
}
ctx, cancel := context.WithCancel(context.Background())
diff --git a/internal/flow/internal/controller/loader_test.go b/internal/flow/internal/controller/loader_test.go
index b0eb2ac884..1fdd42b0ee 100644
--- a/internal/flow/internal/controller/loader_test.go
+++ b/internal/flow/internal/controller/loader_test.go
@@ -328,7 +328,7 @@ func TestLoader_Services(t *testing.T) {
return service.Definition{
Name: "testsvc",
ConfigType: struct {
- Name string `river:"name,attr,optional"`
+ Name string `alloy:"name,attr,optional"`
}{},
Stability: featuregate.StabilityBeta,
}
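[Editor's note: the `alloy:"auth,enum,optional"` tag on remote.vault's Arguments above is the one non-obvious tag kind in this patch. Per the surrounding comments, the slice carries at most one element, and each pointer field of AuthArguments is one variant block. Assuming the variants surface in configuration as `auth.<name>` blocks (my reading of the enum convention; treat the snippet as illustrative, not as this repository's code), the shape is roughly:

```go
package config

// Sketch of an enum-tagged field. Token is a plain string here to keep the
// example self-contained; the real field is an alloytypes.Secret.
type Arguments struct {
	// Exactly one variant may be set; a slice is used only because the
	// enum tag requires one.
	Auth []AuthArguments `alloy:"auth,enum,optional"`
}

type AuthArguments struct {
	AuthToken *AuthToken `alloy:"token,block,optional"`
}

type AuthToken struct {
	Token string `alloy:"token,attr"`
}

// The corresponding configuration would then look like:
//
//	remote.vault "default" {
//	  server = "https://vault.example.com"
//	  path   = "secret"
//
//	  auth.token {
//	    token = "..."
//	  }
//	}
```
]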
diff --git a/internal/flow/internal/controller/node_config_argument.go b/internal/flow/internal/controller/node_config_argument.go
index 75d8b7e6a2..fe85dcac60 100644
--- a/internal/flow/internal/controller/node_config_argument.go
+++ b/internal/flow/internal/controller/node_config_argument.go
@@ -37,9 +37,9 @@ func NewArgumentConfigNode(block *ast.BlockStmt, globals ComponentGlobals) *Argu
}
type argumentBlock struct {
- Optional bool `river:"optional,attr,optional"`
- Default any `river:"default,attr,optional"`
- Comment string `river:"comment,attr,optional"`
+ Optional bool `alloy:"optional,attr,optional"`
+ Default any `alloy:"default,attr,optional"`
+ Comment string `alloy:"comment,attr,optional"`
}
// Evaluate implements BlockNode and updates the arguments for the managed config block
diff --git a/internal/flow/internal/controller/node_config_export.go b/internal/flow/internal/controller/node_config_export.go
index 8983c0e33c..eb6df4ce45 100644
--- a/internal/flow/internal/controller/node_config_export.go
+++ b/internal/flow/internal/controller/node_config_export.go
@@ -36,7 +36,7 @@ func NewExportConfigNode(block *ast.BlockStmt, globals ComponentGlobals) *Export
}
type exportBlock struct {
- Value any `river:"value,attr"`
+ Value any `alloy:"value,attr"`
}
// Evaluate implements BlockNode and updates the arguments for the managed config block
diff --git a/internal/flow/internal/controller/value_cache_test.go b/internal/flow/internal/controller/value_cache_test.go
index 7b4daa252a..2cbc92d2a4 100644
--- a/internal/flow/internal/controller/value_cache_test.go
+++ b/internal/flow/internal/controller/value_cache_test.go
@@ -7,14 +7,14 @@ import (
)
type fooArgs struct {
- Something bool `river:"something,attr"`
+ Something bool `alloy:"something,attr"`
}
type fooExports struct {
- SomethingElse bool `river:"something_else,attr"`
+ SomethingElse bool `alloy:"something_else,attr"`
}
type barArgs struct {
- Number int `river:"number,attr"`
+ Number int `alloy:"number,attr"`
}
func TestValueCache(t *testing.T) {
diff --git a/internal/flow/internal/importsource/import_file.go b/internal/flow/internal/importsource/import_file.go
index 9887a38410..06e83bf166 100644
--- a/internal/flow/internal/importsource/import_file.go
+++ b/internal/flow/internal/importsource/import_file.go
@@ -57,11 +57,11 @@ func NewImportFile(managedOpts component.Options, eval *vm.Evaluator, onContentC
type FileArguments struct {
// Filename indicates the file to watch.
- Filename string `river:"filename,attr"`
+ Filename string `alloy:"filename,attr"`
// Type indicates how to detect changes to the file.
- Type filedetector.Detector `river:"detector,attr,optional"`
+ Type filedetector.Detector `alloy:"detector,attr,optional"`
// PollFrequency determines the frequency to check for changes when Type is Poll.
- PollFrequency time.Duration `river:"poll_frequency,attr,optional"`
+ PollFrequency time.Duration `alloy:"poll_frequency,attr,optional"`
}
var DefaultFileArguments = FileArguments{
diff --git a/internal/flow/internal/importsource/import_git.go b/internal/flow/internal/importsource/import_git.go
index 93a5da668d..d0eec75a5e 100644
--- a/internal/flow/internal/importsource/import_git.go
+++ b/internal/flow/internal/importsource/import_git.go
@@ -43,11 +43,11 @@ var (
)
type GitArguments struct {
- Repository string `river:"repository,attr"`
- Revision string `river:"revision,attr,optional"`
- Path string `river:"path,attr"`
- PullFrequency time.Duration `river:"pull_frequency,attr,optional"`
- GitAuthConfig vcs.GitAuthConfig `river:",squash"`
+ Repository string `alloy:"repository,attr"`
+ Revision string `alloy:"revision,attr,optional"`
+ Path string `alloy:"path,attr"`
+ PullFrequency time.Duration `alloy:"pull_frequency,attr,optional"`
+ GitAuthConfig vcs.GitAuthConfig `alloy:",squash"`
}
var DefaultGitArguments = GitArguments{
diff --git a/internal/flow/internal/importsource/import_http.go b/internal/flow/internal/importsource/import_http.go
index 15e29a3d67..30c2e19338 100644
--- a/internal/flow/internal/importsource/import_http.go
+++ b/internal/flow/internal/importsource/import_http.go
@@ -36,15 +36,15 @@ func NewImportHTTP(managedOpts component.Options, eval *vm.Evaluator, onContentC
// HTTPArguments holds values which are used to configure the remote.http component.
type HTTPArguments struct {
- URL string `river:"url,attr"`
- PollFrequency time.Duration `river:"poll_frequency,attr,optional"`
- PollTimeout time.Duration `river:"poll_timeout,attr,optional"`
+ URL string `alloy:"url,attr"`
+ PollFrequency time.Duration `alloy:"poll_frequency,attr,optional"`
+ PollTimeout time.Duration `alloy:"poll_timeout,attr,optional"`
- Method string `river:"method,attr,optional"`
- Headers map[string]string `river:"headers,attr,optional"`
- Body string `river:"body,attr,optional"`
+ Method string `alloy:"method,attr,optional"`
+ Headers map[string]string `alloy:"headers,attr,optional"`
+ Body string `alloy:"body,attr,optional"`
- Client common_config.HTTPClientConfig `river:"client,block,optional"`
+ Client common_config.HTTPClientConfig `alloy:"client,block,optional"`
}
// DefaultHTTPArguments holds default settings for HTTPArguments.
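[Editor's note: several structs above (GitArguments here, and the various HTTPClientConfig fields earlier) use the bare `alloy:",squash"` form. Squash inlines the embedded struct's attributes and blocks directly into the parent block instead of introducing a nested wrapper. A rough sketch under that reading, with invented stand-in types rather than the repository's own:

```go
package config

// Stand-ins for vcs.GitAuthConfig and its basic_auth block.
type BasicAuth struct {
	Username string `alloy:"username,attr"`
	Password string `alloy:"password,attr"`
}

type GitAuthConfig struct {
	BasicAuth *BasicAuth `alloy:"basic_auth,block,optional"`
}

type GitArguments struct {
	Repository    string        `alloy:"repository,attr"`
	GitAuthConfig GitAuthConfig `alloy:",squash"` // no wrapper block in config
}

// Because of the squash, users write basic_auth directly inside the import
// block rather than inside an intermediate wrapper block:
//
//	import.git "default" {
//	  repository = "https://example.com/repo.git"
//
//	  basic_auth {
//	    username = "user"
//	    password = "pass"
//	  }
//	}
```
]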
diff --git a/internal/flow/internal/importsource/import_string.go b/internal/flow/internal/importsource/import_string.go
index 38b52297f7..b66970821f 100644
--- a/internal/flow/internal/importsource/import_string.go
+++ b/internal/flow/internal/importsource/import_string.go
@@ -27,7 +27,7 @@ func NewImportString(eval *vm.Evaluator, onContentChange func(map[string]string)
}
type importStringConfigBlock struct {
- Content alloytypes.OptionalSecret `river:"content,attr"`
+ Content alloytypes.OptionalSecret `alloy:"content,attr"`
}
func (im *ImportString) Evaluate(scope *vm.Scope) error {
diff --git a/internal/flow/internal/testcomponents/count.go b/internal/flow/internal/testcomponents/count.go
index c284136f0d..fb7d81a87f 100644
--- a/internal/flow/internal/testcomponents/count.go
+++ b/internal/flow/internal/testcomponents/count.go
@@ -27,12 +27,12 @@ func init() {
}
type CountConfig struct {
- Frequency time.Duration `river:"frequency,attr"`
- Max int `river:"max,attr"`
+ Frequency time.Duration `alloy:"frequency,attr"`
+ Max int `alloy:"max,attr"`
}
type CountExports struct {
- Count int `river:"count,attr,optional"`
+ Count int `alloy:"count,attr,optional"`
}
type Count struct {
diff --git a/internal/flow/internal/testcomponents/module/file/file.go b/internal/flow/internal/testcomponents/module/file/file.go
index c211d56519..bb223f53be 100644
--- a/internal/flow/internal/testcomponents/module/file/file.go
+++ b/internal/flow/internal/testcomponents/module/file/file.go
@@ -28,10 +28,10 @@ func init() {
// Arguments holds values which are used to configure the module.file component.
type Arguments struct {
- LocalFileArguments file.Arguments `river:",squash"`
+ LocalFileArguments file.Arguments `alloy:",squash"`
// Arguments to pass into the module.
- Arguments map[string]any `river:"arguments,block,optional"`
+ Arguments map[string]any `alloy:"arguments,block,optional"`
}
// SetToDefault implements river.Defaulter.
diff --git a/internal/flow/internal/testcomponents/module/git/git.go b/internal/flow/internal/testcomponents/module/git/git.go
index 2a57a21278..5fafe6894a 100644
--- a/internal/flow/internal/testcomponents/module/git/git.go
+++ b/internal/flow/internal/testcomponents/module/git/git.go
@@ -32,13 +32,13 @@ func init() {
// Arguments configures the module.git component.
type Arguments struct {
- Repository string `river:"repository,attr"`
- Revision string `river:"revision,attr,optional"`
- Path string `river:"path,attr"`
- PullFrequency time.Duration `river:"pull_frequency,attr,optional"`
+ Repository string `alloy:"repository,attr"`
+ Revision string `alloy:"revision,attr,optional"`
+ Path string `alloy:"path,attr"`
+ PullFrequency time.Duration `alloy:"pull_frequency,attr,optional"`
- Arguments map[string]any `river:"arguments,block,optional"`
- GitAuthConfig vcs.GitAuthConfig `river:",squash"`
+ Arguments map[string]any `alloy:"arguments,block,optional"`
+ GitAuthConfig vcs.GitAuthConfig `alloy:",squash"`
}
// DefaultArguments holds default settings for Arguments.
@@ -255,8 +255,8 @@ func (c *Component) CurrentHealth() component.Health {
// DebugInfo implements component.DebugComponent.
func (c *Component) DebugInfo() interface{} {
type DebugInfo struct {
- SHA string `river:"sha,attr"`
- RepoError string `river:"repo_error,attr,optional"`
+ SHA string `alloy:"sha,attr"`
+ RepoError string `alloy:"repo_error,attr,optional"`
}
c.mut.RLock()
diff --git a/internal/flow/internal/testcomponents/module/http/http.go b/internal/flow/internal/testcomponents/module/http/http.go
index 5bfc76b79a..fd69a6cd64 100644
--- a/internal/flow/internal/testcomponents/module/http/http.go
+++ b/internal/flow/internal/testcomponents/module/http/http.go
@@ -28,9 +28,9 @@ func init() {
// Arguments holds values which are used to configure the module.http component.
type Arguments struct {
- RemoteHTTPArguments remote_http.Arguments `river:",squash"`
+ RemoteHTTPArguments remote_http.Arguments `alloy:",squash"`
- Arguments map[string]any `river:"arguments,block,optional"`
+ Arguments map[string]any `alloy:"arguments,block,optional"`
}
// SetToDefault implements river.Defaulter.
diff --git a/internal/flow/internal/testcomponents/module/module.go b/internal/flow/internal/testcomponents/module/module.go
index 1357d7c026..e83df417a8 100644
--- a/internal/flow/internal/testcomponents/module/module.go
+++ b/internal/flow/internal/testcomponents/module/module.go
@@ -25,7 +25,7 @@ type ModuleComponent struct {
// Exports holds values which are exported from the run module.
type Exports struct {
// Exports exported from the running module.
- Exports map[string]any `river:"exports,block"`
+ Exports map[string]any `alloy:"exports,block"`
}
// NewModuleComponent initializes a new ModuleComponent.
diff --git a/internal/flow/internal/testcomponents/module/string/string.go b/internal/flow/internal/testcomponents/module/string/string.go
index 65c25dd9dc..e5b92416d8 100644
--- a/internal/flow/internal/testcomponents/module/string/string.go
+++ b/internal/flow/internal/testcomponents/module/string/string.go
@@ -26,10 +26,10 @@ func init() {
// component.
type Arguments struct {
// Content to load for the module.
- Content alloytypes.OptionalSecret `river:"content,attr"`
+ Content alloytypes.OptionalSecret `alloy:"content,attr"`
// Arguments to pass into the module.
- Arguments map[string]any `river:"arguments,block,optional"`
+ Arguments map[string]any `alloy:"arguments,block,optional"`
}
// Component implements the module.string component.
diff --git a/internal/flow/internal/testcomponents/passthrough.go b/internal/flow/internal/testcomponents/passthrough.go
index 108bfd61f8..4a079a4f53 100644
--- a/internal/flow/internal/testcomponents/passthrough.go
+++ b/internal/flow/internal/testcomponents/passthrough.go
@@ -25,14 +25,14 @@ func init() {
// PassthroughConfig configures the testcomponents.passthrough component.
type PassthroughConfig struct {
- Input string `river:"input,attr"`
- Lag time.Duration `river:"lag,attr,optional"`
+ Input string `alloy:"input,attr"`
+ Lag time.Duration `alloy:"lag,attr,optional"`
}
// PassthroughExports describes exported fields for the
// testcomponents.passthrough component.
type PassthroughExports struct {
- Output string `river:"output,attr,optional"`
+ Output string `alloy:"output,attr,optional"`
}
// Passthrough implements the testcomponents.passthrough component, where it
@@ -88,5 +88,5 @@ func (t *Passthrough) DebugInfo() interface{} {
}
type passthroughDebugInfo struct {
- ComponentVersion string `river:"component_version,attr"`
+ ComponentVersion string `alloy:"component_version,attr"`
}
diff --git a/internal/flow/internal/testcomponents/sumation.go b/internal/flow/internal/testcomponents/sumation.go
index 8318ff070e..7df8fb0f2f 100644
--- a/internal/flow/internal/testcomponents/sumation.go
+++ b/internal/flow/internal/testcomponents/sumation.go
@@ -24,12 +24,12 @@ func init() {
}
type SummationConfig struct {
- Input int `river:"input,attr"`
+ Input int `alloy:"input,attr"`
}
type SummationExports struct {
- Sum int `river:"sum,attr"`
- LastAdded int `river:"last_added,attr"`
+ Sum int `alloy:"sum,attr"`
+ LastAdded int `alloy:"last_added,attr"`
}
type Summation struct {
diff --git a/internal/flow/internal/testcomponents/tick.go b/internal/flow/internal/testcomponents/tick.go
index e75622644e..1e16f60cab 100644
--- a/internal/flow/internal/testcomponents/tick.go
+++ b/internal/flow/internal/testcomponents/tick.go
@@ -27,12 +27,12 @@ func init() {
// TickConfig configures the testcomponents.tick component.
type TickConfig struct {
- Frequency time.Duration `river:"frequency,attr"`
+ Frequency time.Duration `alloy:"frequency,attr"`
}
// TickExports describes exported fields for the testcomponents.tick component.
type TickExports struct {
- Time time.Time `river:"tick_time,attr,optional"`
+ Time time.Time `alloy:"tick_time,attr,optional"`
}
// Tick implements the testcomponents.tick component, where the wallclock time
diff --git a/internal/flow/logging/options.go b/internal/flow/logging/options.go
index 1cde94fffa..0fbb576456 100644
--- a/internal/flow/logging/options.go
+++ b/internal/flow/logging/options.go
@@ -12,10 +12,10 @@ import (
// Options is a set of options used to construct and configure a Logger.
type Options struct {
- Level Level `river:"level,attr,optional"`
- Format Format `river:"format,attr,optional"`
+ Level Level `alloy:"level,attr,optional"`
+ Format Format `alloy:"format,attr,optional"`
- WriteTo []loki.LogsReceiver `river:"write_to,attr,optional"`
+ WriteTo []loki.LogsReceiver `alloy:"write_to,attr,optional"`
}
// DefaultOptions holds defaults for creating a Logger.
diff --git a/internal/flow/module_test.go b/internal/flow/module_test.go
index 663d058757..6b2b25981c 100644
--- a/internal/flow/module_test.go
+++ b/internal/flow/module_test.go
@@ -291,11 +291,11 @@ func init() {
}
type TestArguments struct {
- Content string `river:"content,attr"`
+ Content string `alloy:"content,attr"`
}
type TestExports struct {
- Exports map[string]interface{} `river:"exports,attr"`
+ Exports map[string]interface{} `alloy:"exports,attr"`
}
type testModule struct {
diff --git a/internal/flow/tracing/tracing.go b/internal/flow/tracing/tracing.go
index 9d1a174e33..1da4c26d1b 100644
--- a/internal/flow/tracing/tracing.go
+++ b/internal/flow/tracing/tracing.go
@@ -38,28 +38,28 @@ var (
type Options struct {
// SamplingFraction determines which rate of traces to sample. A value of 1
// means to keep 100% of traces. A value of 0 means to keep 0% of traces.
- SamplingFraction float64 `river:"sampling_fraction,attr,optional"`
+ SamplingFraction float64 `alloy:"sampling_fraction,attr,optional"`
// Sampler holds optional samplers to configure on top of the sampling
// fraction.
- Sampler SamplerOptions `river:"sampler,block,optional"`
+ Sampler SamplerOptions `alloy:"sampler,block,optional"`
// WriteTo holds a set of OpenTelemetry Collector consumers where internal
// traces should be sent.
- WriteTo []otelcol.Consumer `river:"write_to,attr,optional"`
+ WriteTo []otelcol.Consumer `alloy:"write_to,attr,optional"`
}
type SamplerOptions struct {
- JaegerRemote *JaegerRemoteSamplerOptions `river:"jaeger_remote,block,optional"`
+ JaegerRemote *JaegerRemoteSamplerOptions `alloy:"jaeger_remote,block,optional"`
// TODO(rfratto): if support for another sampler is added, SamplerOptions
// must enforce that only one inner block is provided.
}
type JaegerRemoteSamplerOptions struct {
- URL string `river:"url,attr,optional"`
- MaxOperations int `river:"max_operations,attr,optional"`
- RefreshInterval time.Duration `river:"refresh_interval,attr,optional"`
+ URL string `alloy:"url,attr,optional"`
+ MaxOperations int `alloy:"max_operations,attr,optional"`
+ RefreshInterval time.Duration `alloy:"refresh_interval,attr,optional"`
}
// SetToDefault implements river.Defaulter.
diff --git a/internal/service/cluster/cluster.go b/internal/service/cluster/cluster.go
index 30c7a1cc45..d98b5faf69 100644
--- a/internal/service/cluster/cluster.go
+++ b/internal/service/cluster/cluster.go
@@ -363,7 +363,7 @@ type Component interface {
// component. ComponentBlock is intended to be exposed as a block called
// "clustering".
type ComponentBlock struct {
- Enabled bool `river:"enabled,attr"`
+ Enabled bool `alloy:"enabled,attr"`
}
// Cluster is a read-only view of a cluster.
diff --git a/internal/service/http/http.go b/internal/service/http/http.go
index 26c93e3911..516d037a7a 100644
--- a/internal/service/http/http.go
+++ b/internal/service/http/http.go
@@ -52,7 +52,7 @@ type Options struct {
// Arguments holds runtime settings for the HTTP service.
type Arguments struct {
- TLS *TLSArguments `river:"tls,block,optional"`
+ TLS *TLSArguments `alloy:"tls,block,optional"`
}
type Service struct {
diff --git a/internal/service/http/tls.go b/internal/service/http/tls.go
index c46b8f3fcd..97c181721e 100644
--- a/internal/service/http/tls.go
+++ b/internal/service/http/tls.go
@@ -15,42 +15,42 @@ import (
// TLSArguments configures TLS settings for the HTTP service.
type TLSArguments struct {
- Cert string `river:"cert_pem,attr,optional"`
- CertFile string `river:"cert_file,attr,optional"`
- Key alloytypes.Secret `river:"key_pem,attr,optional"`
- KeyFile string `river:"key_file,attr,optional"`
- ClientCA string `river:"client_ca_pem,attr,optional"`
- ClientCAFile string `river:"client_ca_file,attr,optional"`
- ClientAuth ClientAuth `river:"client_auth_type,attr,optional"`
- CipherSuites []TLSCipher `river:"cipher_suites,attr,optional"`
- CurvePreferences []TLSCurve `river:"curve_preferences,attr,optional"`
- MinVersion TLSVersion `river:"min_version,attr,optional"`
- MaxVersion TLSVersion `river:"max_version,attr,optional"`
+ Cert string `alloy:"cert_pem,attr,optional"`
+ CertFile string `alloy:"cert_file,attr,optional"`
+ Key alloytypes.Secret `alloy:"key_pem,attr,optional"`
+ KeyFile string `alloy:"key_file,attr,optional"`
+ ClientCA string `alloy:"client_ca_pem,attr,optional"`
+ ClientCAFile string `alloy:"client_ca_file,attr,optional"`
+ ClientAuth ClientAuth `alloy:"client_auth_type,attr,optional"`
+ CipherSuites []TLSCipher `alloy:"cipher_suites,attr,optional"`
+ CurvePreferences []TLSCurve `alloy:"curve_preferences,attr,optional"`
+ MinVersion TLSVersion `alloy:"min_version,attr,optional"`
+ MaxVersion TLSVersion `alloy:"max_version,attr,optional"`
// Windows Certificate Filter
- WindowsFilter *WindowsCertificateFilter `river:"windows_certificate_filter,block,optional"`
+ WindowsFilter *WindowsCertificateFilter `alloy:"windows_certificate_filter,block,optional"`
}
// WindowsCertificateFilter represents the configuration for accessing the Windows store
type WindowsCertificateFilter struct {
- Server *WindowsServerFilter `river:"server,block"`
- Client *WindowsClientFilter `river:"client,block"`
+ Server *WindowsServerFilter `alloy:"server,block"`
+ Client *WindowsClientFilter `alloy:"client,block"`
}
// WindowsClientFilter is used to select a client root CA certificate
type WindowsClientFilter struct {
- IssuerCommonNames []string `river:"issuer_common_names,attr,optional"`
- SubjectRegEx string `river:"subject_regex,attr,optional"`
- TemplateID string `river:"template_id,attr,optional"`
+ IssuerCommonNames []string `alloy:"issuer_common_names,attr,optional"`
+ SubjectRegEx string `alloy:"subject_regex,attr,optional"`
+ TemplateID string `alloy:"template_id,attr,optional"`
}
// WindowsServerFilter is used to select a server certificate
type WindowsServerFilter struct {
- Store string `river:"store,attr,optional"`
- SystemStore string `river:"system_store,attr,optional"`
- IssuerCommonNames []string `river:"issuer_common_names,attr,optional"`
- TemplateID string `river:"template_id,attr,optional"`
- RefreshInterval time.Duration `river:"refresh_interval,attr,optional"`
+ Store string `alloy:"store,attr,optional"`
+ SystemStore string `alloy:"system_store,attr,optional"`
+ IssuerCommonNames []string `alloy:"issuer_common_names,attr,optional"`
+ TemplateID string `alloy:"template_id,attr,optional"`
+ RefreshInterval time.Duration `alloy:"refresh_interval,attr,optional"`
}
var _ syntax.Defaulter = (*WindowsServerFilter)(nil)
diff --git a/internal/service/remotecfg/remotecfg.go b/internal/service/remotecfg/remotecfg.go
index 2d2365d912..75bafd8071 100644
--- a/internal/service/remotecfg/remotecfg.go
+++ b/internal/service/remotecfg/remotecfg.go
@@ -62,11 +62,11 @@ type Options struct {
// Arguments holds runtime settings for the remotecfg service.
 type Arguments struct {
-	URL              string                   `river:"url,attr,optional"`
-	ID               string                   `river:"id,attr,optional"`
-	Metadata         map[string]string        `river:"metadata,attr,optional"`
-	PollFrequency    time.Duration            `river:"poll_frequency,attr,optional"`
-	HTTPClientConfig *config.HTTPClientConfig `river:",squash"`
+	URL              string                   `alloy:"url,attr,optional"`
+	ID               string                   `alloy:"id,attr,optional"`
+	Metadata         map[string]string        `alloy:"metadata,attr,optional"`
+	PollFrequency    time.Duration            `alloy:"poll_frequency,attr,optional"`
+	HTTPClientConfig *config.HTTPClientConfig `alloy:",squash"`
 }
 
 // GetDefaultArguments populates the default values for the Arguments struct.
diff --git a/internal/vcs/auth.go b/internal/vcs/auth.go
index 68b1646e64..b9d0610734 100644
--- a/internal/vcs/auth.go
+++ b/internal/vcs/auth.go
@@ -10,8 +10,8 @@ import (
 )
 
 type GitAuthConfig struct {
-	BasicAuth *BasicAuth `river:"basic_auth,block,optional"`
-	SSHKey    *SSHKey    `river:"ssh_key,block,optional"`
+	BasicAuth *BasicAuth `alloy:"basic_auth,block,optional"`
+	SSHKey    *SSHKey    `alloy:"ssh_key,block,optional"`
 }
 
 // Convert converts HTTPClientConfig to the native Prometheus type. If h is
@@ -33,8 +33,8 @@ func (h *GitAuthConfig) Convert() transport.AuthMethod {
 }
 
 type BasicAuth struct {
-	Username string            `river:"username,attr"`
-	Password alloytypes.Secret `river:"password,attr"`
+	Username string            `alloy:"username,attr"`
+	Password alloytypes.Secret `alloy:"password,attr"`
 }
 
 // Convert converts our type to the native prometheus type
@@ -49,10 +49,10 @@ func (b *BasicAuth) Convert() (t transport.AuthMethod) {
 }
 
 type SSHKey struct {
-	Username   string            `river:"username,attr"`
-	Key        alloytypes.Secret `river:"key,attr,optional"`
-	Keyfile    string            `river:"key_file,attr,optional"`
-	Passphrase alloytypes.Secret `river:"passphrase,attr,optional"`
+	Username   string            `alloy:"username,attr"`
+	Key        alloytypes.Secret `alloy:"key,attr,optional"`
+	Keyfile    string            `alloy:"key_file,attr,optional"`
+	Passphrase alloytypes.Secret `alloy:"passphrase,attr,optional"`
 }
 
 // Convert converts our type to the native prometheus type

From 25ef584931583659babb924f11c5f2e8f0a3115d Mon Sep 17 00:00:00 2001
From: Robert Fratto
Date: Mon, 25 Mar 2024 15:18:47 -0400
Subject: [PATCH 051/136] tools/packaging_test: fix malformed test command (#70)

---
 ...gent_linux_packages_test.go => alloy_linux_packages_test.go} | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)
 rename internal/tools/packaging_test/{agent_linux_packages_test.go => alloy_linux_packages_test.go} (98%)

diff --git a/internal/tools/packaging_test/agent_linux_packages_test.go b/internal/tools/packaging_test/alloy_linux_packages_test.go
similarity index 98%
rename from internal/tools/packaging_test/agent_linux_packages_test.go
rename to internal/tools/packaging_test/alloy_linux_packages_test.go
index d9552b0a8f..ecd2ba282b 100644
--- a/internal/tools/packaging_test/agent_linux_packages_test.go
+++ b/internal/tools/packaging_test/alloy_linux_packages_test.go
@@ -79,7 +79,7 @@ func (env *AlloyEnvironment) TestInstall(t *testing.T) {
 	res := env.Install()
 	require.Equal(t, 0, res.ExitCode, "installing failed")
 
-	res = env.ExecScript(`[ -f /usr/bin/alloy]`)
+	res = env.ExecScript(`[ -f /usr/bin/alloy ]`)
 	require.Equal(t, 0, res.ExitCode, "expected Alloy to be installed")
 	res = env.ExecScript(`[ -f /etc/alloy.river ]`)
 	require.Equal(t, 0, res.ExitCode, "expected Alloy configuration file to exist")
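The one-character fix in patch 051 deserves a note, because the failure mode is easy to miss: in POSIX shell, `[` is an ordinary command rather than syntax, and its closing `]` must be a separate word. `[ -f /usr/bin/alloy]` passes the single argument `/usr/bin/alloy]`, so `[` exits non-zero complaining about a missing `]` even when the binary exists. A minimal standalone Go sketch of the failure mode follows; the exitCode helper and the /bin/sh path are illustrative stand-ins for the test's ExecScript, not code from the repository.

package main

import (
	"errors"
	"fmt"
	"os/exec"
)

// exitCode runs a shell snippet the way a helper like ExecScript plausibly
// does, and returns the command's exit code.
func exitCode(script string) int {
	cmd := exec.Command("sh", "-c", script)
	if err := cmd.Run(); err != nil {
		var exitErr *exec.ExitError
		if errors.As(err, &exitErr) {
			return exitErr.ExitCode()
		}
		return -1 // the command could not be started at all
	}
	return 0
}

func main() {
	fmt.Println(exitCode(`[ -f /bin/sh ]`)) // 0: well-formed test, file exists
	fmt.Println(exitCode(`[ -f /bin/sh]`))  // non-zero: `[` never sees a lone `]`
}

Running this prints 0 for the well-formed test and a non-zero status (2 in common shells) for the malformed one, which is exactly why the install assertion above failed before the space was added.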
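Stepping back to the hunks above patch 051: the `river:` to `alloy:` struct-tag rename is purely mechanical, but only because Go struct tags are looked up by key. The value grammar, "<name>,<kind>[,optional]" for attributes and blocks plus ",squash" for inlined structs, is unchanged; a decoder that asks for the `river` key simply finds nothing once the key is renamed. A self-contained sketch using only reflection, mirroring three fields of the remotecfg Arguments struct from the diff and assuming nothing about the real syntax package:

package main

import (
	"fmt"
	"reflect"
	"time"
)

// Arguments mirrors three fields of the remotecfg Arguments struct above.
type Arguments struct {
	URL           string            `alloy:"url,attr,optional"`
	Metadata      map[string]string `alloy:"metadata,attr,optional"`
	PollFrequency time.Duration     `alloy:"poll_frequency,attr,optional"`
}

func main() {
	t := reflect.TypeOf(Arguments{})
	for i := 0; i < t.NumField(); i++ {
		f := t.Field(i)
		// After the rename, only the "alloy" key resolves; "river" is empty.
		fmt.Printf("%-14s alloy=%q river=%q\n",
			f.Name, f.Tag.Get("alloy"), f.Tag.Get("river"))
	}
}

Patch 052 below applies the same key-rename idea at module scale: go.mod switches from module github.com/grafana/agent to module github.com/grafana/alloy, and every internal import path is rewritten to the new prefix, which is why its diffstat touches 776 files with insertions exactly matching deletions.

From 8c2c7a0f69e3936596df24ae57b7d21b9840c702 Mon Sep 17 00:00:00 2001
From: Robert Fratto
Date: Mon, 25 Mar 2024 15:29:36 -0400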
Subject: [PATCH 052/136] all: rename main module to github.com/grafana/alloy --- cmd/alloy-service/service_test.go | 4 +- cmd/alloy/main.go | 6 +- go.mod | 2 +- internal/agentseed/agentseed.go | 2 +- internal/cmd/agentlint/go.mod | 2 +- .../internal/findcomponents/findcomponents.go | 6 +- internal/cmd/agentlint/main.go | 4 +- .../tests/otlp-metrics/otlp_metrics_test.go | 2 +- .../otlp-metrics/otlp_to_prom_metrics_test.go | 2 +- .../tests/read-log-file/read_log_file_test.go | 2 +- .../tests/redis/redis_metrics_test.go | 2 +- .../scrape_prom_metrics_test.go | 2 +- .../tests/unix/unix_metrics_test.go | 2 +- internal/component/all/all.go | 264 +++++++++--------- internal/component/all/all_test.go | 2 +- internal/component/common/kubernetes/event.go | 2 +- .../component/common/kubernetes/kubernetes.go | 6 +- .../component/common/loki/client/batch.go | 2 +- .../common/loki/client/batch_test.go | 2 +- .../component/common/loki/client/client.go | 8 +- .../common/loki/client/client_test.go | 2 +- .../common/loki/client/fake/client.go | 2 +- .../client/internal/marker_file_handler.go | 2 +- .../loki/client/internal/marker_handler.go | 4 +- .../common/loki/client/internal/metrics.go | 2 +- .../component/common/loki/client/logger.go | 6 +- .../common/loki/client/logger_test.go | 2 +- .../component/common/loki/client/manager.go | 10 +- .../common/loki/client/manager_test.go | 8 +- .../component/common/loki/client/metrics.go | 2 +- .../common/loki/client/queue_client.go | 2 +- .../common/loki/client/queue_client_test.go | 6 +- .../common/loki/positions/positions.go | 2 +- .../common/loki/wal/internal/watcher_state.go | 2 +- internal/component/common/loki/wal/reader.go | 2 +- internal/component/common/loki/wal/wal.go | 2 +- internal/component/common/loki/wal/watcher.go | 4 +- .../common/loki/wal/watcher_metrics.go | 2 +- .../component/common/loki/wal/watcher_test.go | 6 +- internal/component/common/loki/wal/writer.go | 4 +- .../component/common/loki/wal/writer_test.go | 4 +- internal/component/common/net/server.go | 2 +- internal/component/common/net/server_test.go | 2 +- internal/component/component_health_test.go | 2 +- internal/component/discovery/aws/ec2.go | 8 +- internal/component/discovery/aws/ec2_test.go | 2 +- internal/component/discovery/aws/lightsail.go | 8 +- internal/component/discovery/azure/azure.go | 8 +- .../component/discovery/azure/azure_test.go | 2 +- internal/component/discovery/consul/consul.go | 8 +- .../discovery/consulagent/consulagent.go | 8 +- .../consulagent/promtail_consulagent.go | 2 +- .../discovery/digitalocean/digitalocean.go | 8 +- .../digitalocean/digitalocean_test.go | 2 +- internal/component/discovery/discovery.go | 4 +- internal/component/discovery/dns/dns.go | 6 +- internal/component/discovery/docker/docker.go | 8 +- .../discovery/dockerswarm/dockerswarm.go | 8 +- .../discovery/dockerswarm/dockerswarm_test.go | 2 +- internal/component/discovery/eureka/eureka.go | 8 +- .../component/discovery/eureka/eureka_test.go | 2 +- internal/component/discovery/file/file.go | 6 +- internal/component/discovery/gce/gce.go | 6 +- .../component/discovery/hetzner/hetzner.go | 8 +- internal/component/discovery/http/http.go | 8 +- .../component/discovery/http/http_test.go | 6 +- internal/component/discovery/ionos/ionos.go | 8 +- .../component/discovery/ionos/ionos_test.go | 2 +- .../component/discovery/kubelet/kubelet.go | 8 +- .../discovery/kubelet/kubelet_test.go | 2 +- .../discovery/kubernetes/kubernetes.go | 8 +- internal/component/discovery/kuma/kuma.go | 8 +- 
.../component/discovery/kuma/kuma_test.go | 2 +- internal/component/discovery/linode/linode.go | 8 +- .../component/discovery/linode/linode_test.go | 2 +- .../component/discovery/marathon/marathon.go | 8 +- .../discovery/marathon/marathon_test.go | 2 +- internal/component/discovery/nerve/nerve.go | 6 +- internal/component/discovery/nomad/nomad.go | 8 +- .../discovery/openstack/openstack.go | 8 +- .../discovery/openstack/openstack_test.go | 2 +- .../component/discovery/ovhcloud/ovhcloud.go | 6 +- .../discovery/ovhcloud/ovhcloud_test.go | 2 +- internal/component/discovery/process/args.go | 2 +- .../component/discovery/process/container.go | 2 +- .../component/discovery/process/discover.go | 2 +- internal/component/discovery/process/join.go | 2 +- .../component/discovery/process/join_test.go | 2 +- .../component/discovery/process/process.go | 6 +- .../discovery/process/process_stub.go | 8 +- .../component/discovery/puppetdb/puppetdb.go | 8 +- .../component/discovery/relabel/relabel.go | 8 +- .../discovery/relabel/relabel_test.go | 8 +- .../component/discovery/scaleway/scaleway.go | 8 +- .../discovery/serverset/serverset.go | 6 +- internal/component/discovery/triton/triton.go | 8 +- internal/component/discovery/uyuni/uyuni.go | 8 +- .../component/discovery/uyuni/uyuni_test.go | 2 +- internal/component/faro/receiver/arguments.go | 4 +- internal/component/faro/receiver/exporters.go | 8 +- .../component/faro/receiver/exporters_test.go | 2 +- internal/component/faro/receiver/handler.go | 4 +- .../component/faro/receiver/handler_test.go | 4 +- internal/component/faro/receiver/receiver.go | 6 +- .../component/faro/receiver/receiver_test.go | 8 +- internal/component/faro/receiver/server.go | 2 +- .../component/faro/receiver/sourcemaps.go | 6 +- .../faro/receiver/sourcemaps_test.go | 4 +- internal/component/local/file/file.go | 8 +- internal/component/local/file/file_test.go | 6 +- internal/component/local/file_match/file.go | 8 +- .../component/local/file_match/file_test.go | 6 +- internal/component/local/file_match/watch.go | 4 +- internal/component/loki/echo/echo.go | 8 +- internal/component/loki/process/process.go | 8 +- .../component/loki/process/process_test.go | 14 +- .../component/loki/process/stages/drop.go | 2 +- .../loki/process/stages/drop_test.go | 2 +- .../loki/process/stages/eventlogmessage.go | 2 +- .../loki/process/stages/extensions.go | 2 +- .../component/loki/process/stages/geoip.go | 2 +- .../component/loki/process/stages/json.go | 2 +- .../loki/process/stages/json_test.go | 2 +- .../component/loki/process/stages/labels.go | 2 +- .../component/loki/process/stages/limit.go | 2 +- .../component/loki/process/stages/logfmt.go | 2 +- .../loki/process/stages/logfmt_test.go | 2 +- .../loki/process/stages/match_test.go | 2 +- .../component/loki/process/stages/metric.go | 4 +- .../loki/process/stages/metric_test.go | 2 +- .../loki/process/stages/multiline.go | 4 +- .../loki/process/stages/multiline_test.go | 4 +- .../component/loki/process/stages/output.go | 2 +- .../loki/process/stages/output_test.go | 2 +- .../component/loki/process/stages/pack.go | 2 +- .../loki/process/stages/pack_test.go | 4 +- .../component/loki/process/stages/pipeline.go | 2 +- .../loki/process/stages/pipeline_test.go | 6 +- .../component/loki/process/stages/regex.go | 2 +- .../loki/process/stages/regex_test.go | 2 +- .../component/loki/process/stages/replace.go | 2 +- .../loki/process/stages/replace_test.go | 2 +- .../component/loki/process/stages/stage.go | 2 +- .../loki/process/stages/static_labels.go | 2 +- 
.../component/loki/process/stages/template.go | 2 +- .../component/loki/process/stages/tenant.go | 2 +- .../loki/process/stages/timestamp.go | 2 +- .../loki/process/stages/timestamp_test.go | 2 +- .../loki/process/stages/util_test.go | 2 +- internal/component/loki/relabel/relabel.go | 10 +- .../component/loki/relabel/relabel_test.go | 14 +- .../component/loki/rules/kubernetes/events.go | 4 +- .../loki/rules/kubernetes/events_test.go | 4 +- .../component/loki/rules/kubernetes/health.go | 2 +- .../component/loki/rules/kubernetes/rules.go | 10 +- .../component/loki/rules/kubernetes/types.go | 4 +- internal/component/loki/source/api/api.go | 14 +- .../component/loki/source/api/api_test.go | 14 +- .../api/internal/lokipush/push_api_server.go | 8 +- .../internal/lokipush/push_api_server_test.go | 10 +- .../loki/source/aws_firehose/component.go | 14 +- .../source/aws_firehose/component_test.go | 10 +- .../source/aws_firehose/internal/handler.go | 6 +- .../aws_firehose/internal/handler_test.go | 2 +- .../azure_event_hubs/azure_event_hubs.go | 14 +- .../internal/parser/parser.go | 2 +- .../loki/source/cloudflare/cloudflare.go | 12 +- .../internal/cloudflaretarget/target.go | 6 +- .../internal/cloudflaretarget/target_test.go | 4 +- .../component/loki/source/docker/docker.go | 20 +- .../loki/source/docker/docker_test.go | 6 +- .../docker/internal/dockertarget/target.go | 6 +- .../internal/dockertarget/target_test.go | 4 +- .../component/loki/source/docker/runner.go | 10 +- .../loki/source/file/decompresser.go | 6 +- .../loki/source/file/decompresser_test.go | 4 +- internal/component/loki/source/file/file.go | 12 +- .../component/loki/source/file/file_test.go | 10 +- internal/component/loki/source/file/tailer.go | 6 +- .../component/loki/source/gcplog/gcplog.go | 16 +- .../loki/source/gcplog/gcplog_test.go | 12 +- .../loki/source/gcplog/gcptypes/gcptypes.go | 2 +- .../gcplog/internal/gcplogtarget/formatter.go | 2 +- .../internal/gcplogtarget/pull_target.go | 6 +- .../internal/gcplogtarget/pull_target_test.go | 4 +- .../internal/gcplogtarget/push_target.go | 8 +- .../internal/gcplogtarget/push_target_test.go | 8 +- .../internal/gcplogtarget/push_translation.go | 2 +- internal/component/loki/source/gelf/gelf.go | 10 +- .../component/loki/source/gelf/gelf_test.go | 6 +- .../source/gelf/internal/target/gelftarget.go | 4 +- .../component/loki/source/heroku/heroku.go | 16 +- .../loki/source/heroku/heroku_test.go | 12 +- .../internal/herokutarget/herokutarget.go | 6 +- .../internal/herokutarget/target_test.go | 4 +- .../source/internal/kafkatarget/config.go | 2 +- .../source/internal/kafkatarget/consumer.go | 2 +- .../internal/kafkatarget/kafkatarget.go | 4 +- .../internal/kafkatarget/kafkatarget_test.go | 2 +- .../source/internal/kafkatarget/parser.go | 2 +- .../internal/kafkatarget/target_syncer.go | 4 +- .../kafkatarget/target_syncer_test.go | 2 +- .../journal/internal/target/journaltarget.go | 6 +- .../internal/target/journaltarget_test.go | 4 +- .../component/loki/source/journal/journal.go | 12 +- .../loki/source/journal/journal_stub.go | 6 +- .../loki/source/journal/journal_test.go | 6 +- .../component/loki/source/journal/types.go | 4 +- internal/component/loki/source/kafka/kafka.go | 14 +- .../loki/source/kubernetes/kubernetes.go | 18 +- .../source/kubernetes/kubetail/kubetail.go | 8 +- .../loki/source/kubernetes/kubetail/tailer.go | 6 +- .../kubernetes_events/event_controller.go | 8 +- .../kubernetes_events/kubernetes_events.go | 14 +- .../loki/source/podlogs/controller.go | 4 +- 
.../component/loki/source/podlogs/podlogs.go | 20 +- .../loki/source/podlogs/reconciler.go | 8 +- .../internal/syslogtarget/syslogtarget.go | 4 +- .../syslogtarget/syslogtarget_test.go | 2 +- .../syslog/internal/syslogtarget/transport.go | 2 +- .../component/loki/source/syslog/syslog.go | 12 +- .../loki/source/syslog/syslog_test.go | 8 +- .../component/loki/source/syslog/types.go | 4 +- .../loki/source/windowsevent/arguments.go | 2 +- .../source/windowsevent/component_stub.go | 6 +- .../source/windowsevent/component_test.go | 6 +- .../source/windowsevent/component_windows.go | 8 +- internal/component/loki/write/types.go | 6 +- internal/component/loki/write/write.go | 14 +- internal/component/loki/write/write_test.go | 12 +- internal/component/metadata/metadata.go | 12 +- .../mimir/rules/kubernetes/events.go | 4 +- .../mimir/rules/kubernetes/events_test.go | 4 +- .../mimir/rules/kubernetes/health.go | 2 +- .../component/mimir/rules/kubernetes/rules.go | 10 +- .../component/mimir/rules/kubernetes/types.go | 4 +- internal/component/otelcol/auth/auth.go | 10 +- internal/component/otelcol/auth/auth_test.go | 10 +- .../component/otelcol/auth/basic/basic.go | 6 +- .../otelcol/auth/basic/basic_test.go | 8 +- .../component/otelcol/auth/bearer/bearer.go | 6 +- .../otelcol/auth/bearer/bearer_test.go | 8 +- .../component/otelcol/auth/headers/headers.go | 6 +- .../otelcol/auth/headers/headers_test.go | 8 +- .../component/otelcol/auth/oauth2/oauth2.go | 8 +- .../otelcol/auth/oauth2/oauth2_test.go | 8 +- .../component/otelcol/auth/sigv4/sigv4.go | 6 +- .../otelcol/auth/sigv4/sigv4_test.go | 8 +- .../otelcol/config_attraction_test.go | 2 +- .../component/otelcol/config_filter_test.go | 2 +- internal/component/otelcol/config_grpc.go | 2 +- internal/component/otelcol/config_http.go | 2 +- .../component/otelcol/connector/connector.go | 16 +- .../otelcol/connector/host_info/host_info.go | 8 +- .../connector/servicegraph/servicegraph.go | 8 +- .../servicegraph/servicegraph_test.go | 2 +- .../otelcol/connector/spanlogs/spanlogs.go | 12 +- .../connector/spanlogs/spanlogs_test.go | 10 +- .../connector/spanmetrics/spanmetrics.go | 8 +- .../connector/spanmetrics/spanmetrics_test.go | 8 +- .../component/otelcol/exporter/exporter.go | 16 +- .../otelcol/exporter/exporter_test.go | 10 +- .../exporter/loadbalancing/loadbalancing.go | 10 +- .../loadbalancing/loadbalancing_test.go | 4 +- .../otelcol/exporter/logging/logging.go | 8 +- .../exporter/loki/internal/convert/convert.go | 4 +- .../loki/internal/convert/convert_test.go | 8 +- .../component/otelcol/exporter/loki/loki.go | 12 +- .../component/otelcol/exporter/otlp/otlp.go | 8 +- .../otelcol/exporter/otlp/otlp_test.go | 10 +- .../otelcol/exporter/otlphttp/otlphttp.go | 8 +- .../exporter/otlphttp/otlphttp_test.go | 10 +- .../prometheus/internal/convert/convert.go | 2 +- .../internal/convert/convert_test.go | 6 +- .../otelcol/exporter/prometheus/prometheus.go | 14 +- .../exporter/prometheus/prometheus_test.go | 2 +- .../component/otelcol/extension/extension.go | 10 +- .../otelcol/extension/extension_test.go | 10 +- .../jaegerremotesampling/extension.go | 4 +- .../jaeger_remote_sampling.go | 10 +- .../jaeger_remote_sampling_test.go | 8 +- .../otelcol/internal/fakeconsumer/fake.go | 2 +- .../otelcol/internal/fanoutconsumer/logs.go | 2 +- .../internal/fanoutconsumer/metrics.go | 2 +- .../otelcol/internal/fanoutconsumer/traces.go | 2 +- .../otelcol/internal/scheduler/host.go | 2 +- .../otelcol/internal/scheduler/scheduler.go | 4 +- .../internal/scheduler/scheduler_test.go 
| 6 +- .../processor/attributes/attributes.go | 8 +- .../processor/attributes/attributes_test.go | 10 +- .../otelcol/processor/batch/batch.go | 8 +- .../otelcol/processor/batch/batch_test.go | 12 +- .../otelcol/processor/discovery/discovery.go | 16 +- .../processor/discovery/discovery_test.go | 10 +- .../otelcol/processor/filter/filter.go | 8 +- .../otelcol/processor/filter/filter_test.go | 2 +- .../processor/k8sattributes/k8sattributes.go | 8 +- .../k8sattributes/k8sattributes_test.go | 2 +- .../processor/memorylimiter/memorylimiter.go | 8 +- .../memorylimiter/memorylimiter_test.go | 12 +- .../probabilistic_sampler.go | 8 +- .../probabilistic_sampler_test.go | 8 +- .../component/otelcol/processor/processor.go | 16 +- .../otelcol/processor/processor_test.go | 12 +- .../processor/processortest/processortest.go | 10 +- .../internal/aws/ec2/config.go | 2 +- .../internal/aws/ecs/config.go | 2 +- .../internal/aws/eks/config.go | 2 +- .../internal/aws/elasticbeanstalk/config.go | 2 +- .../internal/aws/lambda/config.go | 2 +- .../internal/azure/aks/config.go | 2 +- .../internal/azure/config.go | 2 +- .../internal/consul/config.go | 2 +- .../internal/docker/config.go | 2 +- .../resourcedetection/internal/gcp/config.go | 2 +- .../internal/heroku/config.go | 2 +- .../internal/k8snode/config.go | 4 +- .../internal/openshift/config.go | 4 +- .../internal/system/config.go | 2 +- .../resourcedetection/resourcedetection.go | 36 +-- .../resourcedetection_test.go | 30 +- .../component/otelcol/processor/span/span.go | 8 +- .../otelcol/processor/span/span_test.go | 8 +- .../processor/tail_sampling/tail_sampling.go | 8 +- .../tail_sampling/tail_sampling_test.go | 10 +- .../otelcol/processor/transform/transform.go | 8 +- .../processor/transform/transform_test.go | 2 +- .../otelcol/receiver/jaeger/jaeger.go | 8 +- .../otelcol/receiver/jaeger/jaeger_test.go | 8 +- .../component/otelcol/receiver/kafka/kafka.go | 8 +- .../otelcol/receiver/kafka/kafka_test.go | 4 +- .../component/otelcol/receiver/loki/loki.go | 12 +- .../otelcol/receiver/loki/loki_test.go | 10 +- .../otelcol/receiver/opencensus/opencensus.go | 8 +- .../receiver/opencensus/opencensus_test.go | 8 +- .../component/otelcol/receiver/otlp/otlp.go | 8 +- .../otelcol/receiver/otlp/otlp_test.go | 12 +- .../otelcol/receiver/prometheus/prometheus.go | 14 +- .../receiver/prometheus/prometheus_test.go | 12 +- .../component/otelcol/receiver/receiver.go | 16 +- .../otelcol/receiver/receiver_test.go | 12 +- .../otelcol/receiver/vcenter/vcenter.go | 8 +- .../otelcol/receiver/vcenter/vcenter_test.go | 4 +- .../otelcol/receiver/zipkin/zipkin.go | 8 +- .../otelcol/receiver/zipkin/zipkin_test.go | 8 +- .../prometheus/exporter/apache/apache.go | 10 +- .../prometheus/exporter/azure/azure.go | 10 +- .../prometheus/exporter/blackbox/blackbox.go | 14 +- .../exporter/blackbox/blackbox_test.go | 4 +- .../prometheus/exporter/cadvisor/cadvisor.go | 10 +- .../exporter/cadvisor/cadvisor_test.go | 2 +- .../exporter/cloudwatch/cloudwatch.go | 10 +- .../prometheus/exporter/cloudwatch/config.go | 2 +- .../prometheus/exporter/consul/consul.go | 10 +- .../prometheus/exporter/dnsmasq/dnsmasq.go | 10 +- .../exporter/dnsmasq/dnsmasq_test.go | 2 +- .../exporter/elasticsearch/elasticsearch.go | 12 +- .../elasticsearch/elasticsearch_test.go | 4 +- .../component/prometheus/exporter/exporter.go | 10 +- .../component/prometheus/exporter/gcp/gcp.go | 10 +- .../prometheus/exporter/github/github.go | 10 +- .../prometheus/exporter/kafka/kafka.go | 12 +- .../prometheus/exporter/kafka/kafka_test.go | 
4 +- .../exporter/memcached/memcached.go | 12 +- .../exporter/memcached/memcached_test.go | 4 +- .../prometheus/exporter/mongodb/mongodb.go | 10 +- .../exporter/mongodb/mongodb_test.go | 2 +- .../prometheus/exporter/mssql/mssql.go | 12 +- .../prometheus/exporter/mssql/mssql_test.go | 2 +- .../prometheus/exporter/mysql/mysql.go | 10 +- .../prometheus/exporter/mysql/mysql_test.go | 2 +- .../prometheus/exporter/oracledb/oracledb.go | 10 +- .../exporter/oracledb/oracledb_test.go | 2 +- .../prometheus/exporter/postgres/postgres.go | 10 +- .../exporter/postgres/postgres_test.go | 2 +- .../prometheus/exporter/process/process.go | 10 +- .../prometheus/exporter/redis/redis.go | 10 +- .../prometheus/exporter/redis/redis_test.go | 2 +- .../prometheus/exporter/self/self.go | 10 +- .../prometheus/exporter/snmp/snmp.go | 12 +- .../prometheus/exporter/snmp/snmp_test.go | 4 +- .../exporter/snowflake/snowflake.go | 10 +- .../exporter/snowflake/snowflake_test.go | 2 +- .../prometheus/exporter/squid/squid.go | 10 +- .../prometheus/exporter/squid/squid_test.go | 2 +- .../prometheus/exporter/statsd/config.go | 2 +- .../prometheus/exporter/statsd/statsd.go | 8 +- .../prometheus/exporter/unix/config.go | 2 +- .../prometheus/exporter/unix/unix.go | 8 +- .../prometheus/exporter/windows/config.go | 2 +- .../exporter/windows/config_windows.go | 2 +- .../prometheus/exporter/windows/windows.go | 8 +- internal/component/prometheus/fanout.go | 2 +- internal/component/prometheus/fanout_test.go | 2 +- internal/component/prometheus/interceptor.go | 2 +- .../prometheus/operator/common/component.go | 10 +- .../prometheus/operator/common/crdmanager.go | 18 +- .../operator/common/crdmanager_test.go | 8 +- .../operator/configgen/config_gen.go | 6 +- .../configgen/config_gen_podmonitor_test.go | 8 +- .../configgen/config_gen_probe_test.go | 6 +- .../config_gen_servicemonitor_test.go | 6 +- .../operator/configgen/config_gen_test.go | 8 +- .../operator/podmonitors/operator.go | 8 +- .../prometheus/operator/probes/probes.go | 8 +- .../servicemonitors/servicemonitors.go | 8 +- .../component/prometheus/operator/types.go | 10 +- .../prometheus/receive_http/receive_http.go | 14 +- .../receive_http/receive_http_test.go | 10 +- .../component/prometheus/relabel/relabel.go | 10 +- .../prometheus/relabel/relabel_test.go | 12 +- .../component/prometheus/remotewrite/cli.go | 2 +- .../prometheus/remotewrite/remote_write.go | 16 +- .../remotewrite/remote_write_test.go | 6 +- .../component/prometheus/remotewrite/types.go | 4 +- .../component/prometheus/scrape/scrape.go | 20 +- .../prometheus/scrape/scrape_test.go | 12 +- internal/component/pyroscope/ebpf/args.go | 4 +- .../component/pyroscope/ebpf/ebpf_linux.go | 8 +- .../pyroscope/ebpf/ebpf_linux_test.go | 6 +- .../pyroscope/ebpf/ebpf_placeholder.go | 6 +- internal/component/pyroscope/java/args.go | 4 +- internal/component/pyroscope/java/java.go | 10 +- .../component/pyroscope/java/java_stub.go | 6 +- internal/component/pyroscope/java/loop.go | 8 +- internal/component/pyroscope/java/target.go | 2 +- .../pyroscope/scrape/delta_profiles.go | 4 +- .../pyroscope/scrape/delta_profiles_test.go | 2 +- .../scrape/internal/fastdelta/delta_map.go | 2 +- .../pyroscope/scrape/internal/fastdelta/fd.go | 2 +- .../scrape/internal/fastdelta/fuzz_test.go | 2 +- .../scrape/internal/fastdelta/hasher.go | 2 +- .../internal/pproflite/pproflite_test.go | 2 +- .../component/pyroscope/scrape/manager.go | 4 +- .../pyroscope/scrape/manager_test.go | 4 +- internal/component/pyroscope/scrape/scrape.go | 16 +- 
.../component/pyroscope/scrape/scrape_loop.go | 6 +- .../pyroscope/scrape/scrape_loop_test.go | 6 +- .../component/pyroscope/scrape/scrape_test.go | 12 +- internal/component/pyroscope/write/write.go | 14 +- .../component/pyroscope/write/write_test.go | 6 +- internal/component/registry.go | 2 +- internal/component/remote/http/http.go | 10 +- internal/component/remote/http/http_test.go | 8 +- .../remote/kubernetes/configmap/configmap.go | 6 +- .../component/remote/kubernetes/kubernetes.go | 4 +- .../remote/kubernetes/secret/secret.go | 6 +- internal/component/remote/s3/s3.go | 4 +- internal/component/remote/s3/s3_test.go | 2 +- internal/component/remote/vault/refresher.go | 4 +- internal/component/remote/vault/vault.go | 6 +- internal/component/remote/vault/vault_test.go | 4 +- internal/converter/converter.go | 8 +- .../internal/common/convert_logs_receiver.go | 2 +- .../internal/common/convert_targets.go | 2 +- .../internal/common/convert_targets_test.go | 4 +- .../internal/common/http_client_config.go | 4 +- .../converter/internal/common/river_utils.go | 8 +- .../internal/common/river_utils_test.go | 2 +- .../converter/internal/common/validate.go | 2 +- .../internal/common/validate_test.go | 4 +- .../internal/common/weaveworks_server.go | 4 +- .../internal/otelcolconvert/converter.go | 4 +- .../converter_attributesprocessor.go | 8 +- .../converter_basicauthextension.go | 6 +- .../converter_batchprocessor.go | 8 +- .../converter_bearertokenauthextension.go | 8 +- .../converter_filterprocessor.go | 8 +- .../converter_headerssetterextension.go | 6 +- .../otelcolconvert/converter_helpers.go | 2 +- .../converter_jaegerreceiver.go | 8 +- ...converter_jaegerremotesamplingextension.go | 6 +- .../converter_k8sattributesprocessor.go | 8 +- .../otelcolconvert/converter_kafkareceiver.go | 8 +- .../converter_loadbalancingexporter.go | 10 +- .../converter_loggingexporter.go | 6 +- .../converter_memorylimiterprocessor.go | 8 +- .../converter_oauth2clientauthextension.go | 6 +- .../converter_opencensusreceiver.go | 8 +- .../otelcolconvert/converter_otlpexporter.go | 10 +- .../converter_otlphttpexporter.go | 10 +- .../otelcolconvert/converter_otlpreceiver.go | 8 +- ...converter_probabilisticsamplerprocessor.go | 8 +- .../converter_spanmetricsconnector.go | 8 +- .../otelcolconvert/converter_spanprocessor.go | 8 +- .../converter_tailsamplingprocessor.go | 8 +- .../converter_transformprocessor.go | 8 +- .../converter_zipkinreceiver.go | 8 +- .../internal/otelcolconvert/otelcolconvert.go | 4 +- .../otelcolconvert/otelcolconvert_test.go | 4 +- .../build/prometheus_blocks.go | 2 +- .../prometheusconvert/component/azure.go | 10 +- .../prometheusconvert/component/consul.go | 10 +- .../component/digitalocean.go | 10 +- .../prometheusconvert/component/dns.go | 10 +- .../prometheusconvert/component/docker.go | 10 +- .../component/dockerswarm.go | 10 +- .../prometheusconvert/component/ec2.go | 10 +- .../prometheusconvert/component/file.go | 10 +- .../prometheusconvert/component/gce.go | 10 +- .../prometheusconvert/component/http.go | 12 +- .../prometheusconvert/component/ionos.go | 10 +- .../prometheusconvert/component/kubernetes.go | 12 +- .../prometheusconvert/component/kuma.go | 10 +- .../prometheusconvert/component/lightsail.go | 10 +- .../prometheusconvert/component/linode.go | 10 +- .../prometheusconvert/component/marathon.go | 10 +- .../prometheusconvert/component/nerve.go | 10 +- .../prometheusconvert/component/openstack.go | 10 +- .../prometheusconvert/component/ovhcloud.go | 10 +- 
.../prometheusconvert/component/relabel.go | 12 +- .../component/remote_write.go | 8 +- .../prometheusconvert/component/scaleway.go | 10 +- .../prometheusconvert/component/scrape.go | 12 +- .../prometheusconvert/component/serverset.go | 10 +- .../component/service_discovery.go | 8 +- .../prometheusconvert/component/triton.go | 10 +- .../prometheusconvert/prometheusconvert.go | 12 +- .../prometheusconvert_test.go | 6 +- .../internal/prometheusconvert/validate.go | 6 +- .../internal/build/azure_event_hub.go | 6 +- .../internal/build/cloudflare.go | 6 +- .../internal/build/consul_agent.go | 6 +- .../internal/build/docker_sd.go | 12 +- .../promtailconvert/internal/build/gcplog.go | 10 +- .../promtailconvert/internal/build/gelf.go | 6 +- .../internal/build/global_context.go | 2 +- .../internal/build/herokudrain.go | 6 +- .../promtailconvert/internal/build/journal.go | 8 +- .../promtailconvert/internal/build/kafka.go | 6 +- .../internal/build/loki_write.go | 8 +- .../internal/build/push_api.go | 8 +- .../internal/build/scrape_builder.go | 22 +- .../internal/build/service_discovery.go | 10 +- .../promtailconvert/internal/build/stages.go | 6 +- .../promtailconvert/internal/build/syslog.go | 6 +- .../internal/build/windows_events.go | 6 +- .../promtailconvert/promtailconvert.go | 8 +- .../promtailconvert/promtailconvert_test.go | 6 +- .../internal/promtailconvert/validate.go | 2 +- .../internal/build/apache_exporter.go | 8 +- .../internal/build/app_agent_receiver.go | 12 +- .../internal/build/azure_exporter.go | 6 +- .../internal/build/blackbox_exporter.go | 8 +- .../staticconvert/internal/build/builder.go | 4 +- .../internal/build/builder_integrations.go | 84 +++--- .../internal/build/builder_logging.go | 6 +- .../internal/build/builder_server.go | 6 +- .../internal/build/builder_traces.go | 6 +- .../internal/build/cadvisor_exporter.go | 6 +- .../internal/build/cloudwatch_exporter.go | 6 +- .../internal/build/consul_exporter.go | 6 +- .../internal/build/dnsmasq_exporter.go | 6 +- .../internal/build/elasticsearch_exporter.go | 8 +- .../internal/build/eventhandler.go | 14 +- .../internal/build/gcp_exporter.go | 6 +- .../internal/build/github_exporter.go | 6 +- .../internal/build/global_context.go | 4 +- .../internal/build/kafka_exporter.go | 6 +- .../internal/build/memcached_exporter.go | 8 +- .../internal/build/mongodb_exporter.go | 6 +- .../internal/build/mssql_exporter.go | 6 +- .../internal/build/mysqld_exporter.go | 6 +- .../internal/build/node_exporter.go | 6 +- .../internal/build/oracledb_exporter.go | 6 +- .../internal/build/postgres_exporter.go | 6 +- .../internal/build/process_exporter.go | 6 +- .../internal/build/redis_exporter.go | 6 +- .../internal/build/self_exporter.go | 8 +- .../internal/build/snmp_exporter.go | 10 +- .../internal/build/snowflake_exporter.go | 6 +- .../internal/build/squid_exporter.go | 6 +- .../internal/build/statsd_exporter.go | 8 +- .../internal/build/windows_exporter.go | 6 +- .../internal/staticconvert/staticconvert.go | 18 +- .../staticconvert/staticconvert_test.go | 6 +- .../internal/staticconvert/validate.go | 86 +++--- .../converter/internal/test_common/testing.go | 16 +- internal/filedetector/detector.go | 2 +- internal/flow/componenttest/componenttest.go | 6 +- internal/flow/componenttest/testfailmodule.go | 6 +- internal/flow/declare_test.go | 10 +- internal/flow/flow.go | 14 +- internal/flow/flow_components.go | 6 +- internal/flow/flow_services.go | 8 +- internal/flow/flow_services_test.go | 14 +- internal/flow/flow_test.go | 12 +- 
internal/flow/flow_updates_test.go | 4 +- internal/flow/import_test.go | 12 +- .../flow/internal/controller/block_node.go | 2 +- .../internal/controller/component_node.go | 2 +- .../controller/component_references.go | 2 +- .../internal/controller/component_registry.go | 4 +- internal/flow/internal/controller/loader.go | 12 +- .../flow/internal/controller/loader_test.go | 14 +- internal/flow/internal/controller/module.go | 2 +- .../controller/node_builtin_component.go | 10 +- .../controller/node_builtin_component_test.go | 2 +- .../flow/internal/controller/node_config.go | 2 +- .../internal/controller/node_config_import.go | 10 +- .../controller/node_config_logging.go | 2 +- .../controller/node_config_tracing.go | 2 +- .../controller/node_custom_component.go | 4 +- .../flow/internal/controller/node_service.go | 4 +- .../internal/controller/scheduler_test.go | 4 +- .../flow/internal/controller/service_map.go | 2 +- .../flow/internal/controller/value_cache.go | 2 +- .../flow/internal/importsource/import_file.go | 6 +- .../flow/internal/importsource/import_git.go | 6 +- .../flow/internal/importsource/import_http.go | 6 +- .../internal/importsource/import_source.go | 2 +- .../internal/importsource/import_string.go | 2 +- .../flow/internal/testcomponents/count.go | 6 +- .../internal/testcomponents/experimental.go | 4 +- internal/flow/internal/testcomponents/fake.go | 2 +- .../testcomponents/module/file/file.go | 8 +- .../internal/testcomponents/module/git/git.go | 10 +- .../testcomponents/module/http/http.go | 8 +- .../internal/testcomponents/module/module.go | 4 +- .../testcomponents/module/string/string.go | 6 +- .../internal/testcomponents/passthrough.go | 6 +- .../flow/internal/testcomponents/sumation.go | 6 +- internal/flow/internal/testcomponents/tick.go | 6 +- internal/flow/internal/testservices/fake.go | 2 +- internal/flow/logging/level/level.go | 2 +- internal/flow/logging/logger.go | 4 +- internal/flow/logging/logger_test.go | 6 +- internal/flow/logging/options.go | 2 +- internal/flow/module.go | 14 +- internal/flow/module_eval_test.go | 22 +- internal/flow/module_fail_test.go | 4 +- internal/flow/module_test.go | 12 +- internal/flow/source.go | 2 +- internal/flow/source_test.go | 2 +- .../tracing/internal/jaegerremote/sampler.go | 2 +- internal/flow/tracing/otelcol_client.go | 2 +- internal/flow/tracing/tracing.go | 6 +- internal/flowmode/cluster_builder.go | 4 +- internal/flowmode/cmd_convert.go | 4 +- internal/flowmode/cmd_run.go | 38 +-- internal/flowmode/cmd_tools.go | 2 +- internal/flowmode/flowmode.go | 2 +- internal/flowmode/resources_collector.go | 2 +- internal/loki/client/client.go | 2 +- internal/mimir/client/client.go | 2 +- internal/runner/runner_test.go | 4 +- internal/service/cluster/cluster.go | 10 +- internal/service/http/handler.go | 2 +- internal/service/http/handler_windows.go | 2 +- internal/service/http/http.go | 12 +- internal/service/http/http_test.go | 10 +- internal/service/http/split_path.go | 4 +- internal/service/http/split_path_test.go | 4 +- internal/service/labelstore/service.go | 8 +- internal/service/otel/otel.go | 6 +- internal/service/remotecfg/remotecfg.go | 10 +- internal/service/remotecfg/remotecfg_test.go | 16 +- internal/service/service.go | 4 +- internal/service/ui/ui.go | 10 +- internal/static/config/config.go | 16 +- internal/static/config/config_test.go | 8 +- internal/static/config/integrations.go | 10 +- internal/static/config/integrations_test.go | 4 +- internal/static/integrations/agent/agent.go | 4 +- 
.../integrations/apache_http/apache_http.go | 2 +- .../azure_exporter/azure_exporter.go | 2 +- .../integrations/azure_exporter/config.go | 8 +- .../azure_exporter/config_test.go | 2 +- .../blackbox_exporter/blackbox_exporter.go | 6 +- .../blackbox_exporter_test.go | 2 +- .../static/integrations/cadvisor/cadvisor.go | 2 +- .../integrations/cadvisor/cadvisor_stub.go | 4 +- .../integrations/cadvisor/cadvisor_test.go | 2 +- .../static/integrations/cadvisor/common.go | 6 +- .../cloudwatch_exporter.go | 2 +- .../cloudwatch_exporter_decoupled.go | 2 +- .../cloudwatch_exporter/config.go | 6 +- .../integrations/collector_integration.go | 4 +- .../consul_exporter/consul_exporter.go | 6 +- .../dnsmasq_exporter/dnsmasq_exporter.go | 6 +- .../elasticsearch_exporter.go | 6 +- .../integrations/gcp_exporter/gcp_exporter.go | 6 +- .../gcp_exporter/gcp_exporter_test.go | 2 +- .../github_exporter/github_exporter.go | 6 +- .../github_exporter/github_test.go | 2 +- .../integrations/handler_integration.go | 2 +- .../static/integrations/install/install.go | 68 ++--- .../integrations/install/install_test.go | 4 +- internal/static/integrations/integration.go | 2 +- .../kafka_exporter/kafka_exporter.go | 6 +- .../integrations/kafka_exporter/kafka_test.go | 2 +- internal/static/integrations/manager.go | 4 +- .../memcached_exporter/memcached_exporter.go | 6 +- .../mongodb_exporter/mongodb_exporter.go | 6 +- .../mongodb_exporter/mongodb_test.go | 2 +- .../static/integrations/mssql/sql_exporter.go | 8 +- .../mysqld_exporter/mysqld-exporter.go | 6 +- .../mysqld_exporter/mysqld_test.go | 2 +- .../integrations/node_exporter/config.go | 6 +- .../node_exporter/node_exporter.go | 4 +- .../node_exporter/node_exporter_windows.go | 2 +- .../oracledb_exporter/oracledb_exporter.go | 6 +- .../postgres_exporter/postgres_exporter.go | 6 +- .../postgres_exporter/postgres_test.go | 2 +- .../integrations/process_exporter/config.go | 6 +- .../process_exporter/process-exporter.go | 2 +- .../process-exporter_linux.go | 4 +- .../redis_exporter/redis_exporter.go | 6 +- .../redis_exporter/redis_exporter_test.go | 2 +- internal/static/integrations/register.go | 4 +- .../snmp_exporter/snmp_exporter.go | 6 +- .../snowflake_exporter/snowflake_exporter.go | 6 +- .../squid_exporter/squid_exporter.go | 6 +- .../statsd_exporter/statsd_exporter.go | 10 +- .../static/integrations/v2/agent/agent.go | 6 +- .../v2/apache_http/apache_http.go | 6 +- .../v2/apache_http/apache_http_test.go | 2 +- .../app_agent_receiver/app_agent_receiver.go | 2 +- .../v2/app_agent_receiver/config.go | 4 +- .../v2/blackbox_exporter/blackbox.go | 8 +- .../v2/blackbox_exporter/blackbox_exporter.go | 8 +- .../v2/blackbox_exporter/blackbox_test.go | 10 +- .../static/integrations/v2/common/metrics.go | 2 +- .../v2/eventhandler/integration.go | 2 +- .../static/integrations/v2/integrations.go | 4 +- .../metricshandler_integration.go | 6 +- .../metricshandler_integration_test.go | 4 +- .../v2/metricsutils/versionshim.go | 8 +- internal/static/integrations/v2/register.go | 6 +- .../static/integrations/v2/register_test.go | 4 +- .../integrations/v2/snmp_exporter/snmp.go | 8 +- .../v2/snmp_exporter/snmp_exporter.go | 6 +- internal/static/integrations/v2/subsystem.go | 4 +- .../static/integrations/v2/subsystem_test.go | 4 +- internal/static/integrations/v2/utils.go | 2 +- .../v2/vmware_exporter/vmware_exporter.go | 6 +- .../vmware_exporter/vmware_exporter.go | 2 +- .../integrations/windows_exporter/config.go | 6 +- .../windows_exporter/windows_exporter.go | 2 +- 
.../windows_exporter_windows.go | 2 +- internal/static/logs/logs.go | 2 +- internal/static/metrics/agent.go | 8 +- internal/static/metrics/agent_test.go | 2 +- .../static/metrics/cluster/client/client.go | 2 +- internal/static/metrics/cluster/config.go | 6 +- internal/static/metrics/instance/instance.go | 2 +- internal/static/metrics/wal/wal_test.go | 2 +- internal/static/server/config.go | 2 +- .../automaticloggingprocessor.go | 2 +- .../automaticloggingprocessor_test.go | 4 +- .../automaticloggingprocessor/factory.go | 2 +- internal/static/traces/config.go | 18 +- internal/static/traces/config_test.go | 2 +- .../promsdprocessor/consumer/consumer.go | 2 +- .../promsdprocessor/consumer/consumer_test.go | 4 +- .../promsdprocessor/prom_sd_processor.go | 6 +- .../promsdprocessor/prom_sd_processor_test.go | 2 +- .../traces/servicegraphprocessor/processor.go | 2 +- .../servicegraphprocessor/processor_test.go | 2 +- internal/static/traces/traceutils/server.go | 2 +- .../compatible_components_page.go | 2 +- .../tools/docs_generator/docs_generator.go | 4 +- .../tools/docs_generator/docs_updated_test.go | 8 +- .../tools/docs_generator/links_to_types.go | 2 +- internal/usagestats/reporter.go | 2 +- internal/usagestats/stats.go | 4 +- internal/useragent/useragent.go | 2 +- internal/useragent/useragent_test.go | 2 +- internal/util/test_logger.go | 2 +- internal/util/testappender/testappender.go | 2 +- .../util/testappender/testappender_test.go | 2 +- internal/util/wildcard/match_test.go | 2 +- internal/util/zapadapter/zapadapter_test.go | 2 +- internal/vcs/git_test.go | 2 +- internal/web/api/api.go | 6 +- internal/web/ui/ui.go | 2 +- 776 files changed, 2647 insertions(+), 2647 deletions(-) diff --git a/cmd/alloy-service/service_test.go b/cmd/alloy-service/service_test.go index 145879a243..561db0ce54 100644 --- a/cmd/alloy-service/service_test.go +++ b/cmd/alloy-service/service_test.go @@ -13,8 +13,8 @@ import ( "testing" "github.com/go-kit/log" - "github.com/grafana/agent/internal/flow/componenttest" - "github.com/grafana/agent/internal/util" + "github.com/grafana/alloy/internal/flow/componenttest" + "github.com/grafana/alloy/internal/util" "github.com/phayes/freeport" "github.com/stretchr/testify/require" ) diff --git a/cmd/alloy/main.go b/cmd/alloy/main.go index 5bedc67367..ec6bee2895 100644 --- a/cmd/alloy/main.go +++ b/cmd/alloy/main.go @@ -1,8 +1,8 @@ package main import ( - "github.com/grafana/agent/internal/build" - "github.com/grafana/agent/internal/flowmode" + "github.com/grafana/alloy/internal/build" + "github.com/grafana/alloy/internal/flowmode" "github.com/prometheus/client_golang/prometheus" // Register Prometheus SD components @@ -10,7 +10,7 @@ import ( _ "github.com/prometheus/prometheus/discovery/install" // Register integrations - _ "github.com/grafana/agent/internal/static/integrations/install" + _ "github.com/grafana/alloy/internal/static/integrations/install" // Embed a set of fallback X.509 trusted roots // Allows the app to work correctly even when the OS does not provide a verifier or systems roots pool diff --git a/go.mod b/go.mod index 5b2eb5eacf..496c17a647 100644 --- a/go.mod +++ b/go.mod @@ -1,4 +1,4 @@ -module github.com/grafana/agent +module github.com/grafana/alloy go 1.22.1 diff --git a/internal/agentseed/agentseed.go b/internal/agentseed/agentseed.go index 0275e81c33..8ee9140fb0 100644 --- a/internal/agentseed/agentseed.go +++ b/internal/agentseed/agentseed.go @@ -11,7 +11,7 @@ import ( "github.com/go-kit/log" "github.com/google/uuid" - 
"github.com/grafana/agent/internal/flow/logging/level" + "github.com/grafana/alloy/internal/flow/logging/level" "github.com/prometheus/common/version" ) diff --git a/internal/cmd/agentlint/go.mod b/internal/cmd/agentlint/go.mod index 8c68e5a43b..31cf906db8 100644 --- a/internal/cmd/agentlint/go.mod +++ b/internal/cmd/agentlint/go.mod @@ -1,4 +1,4 @@ -module github.com/grafana/agent/internal/cmd/agentlint +module github.com/grafana/alloy/internal/cmd/agentlint go 1.21 diff --git a/internal/cmd/agentlint/internal/findcomponents/findcomponents.go b/internal/cmd/agentlint/internal/findcomponents/findcomponents.go index 922798a89d..76d9386e9f 100644 --- a/internal/cmd/agentlint/internal/findcomponents/findcomponents.go +++ b/internal/cmd/agentlint/internal/findcomponents/findcomponents.go @@ -19,7 +19,7 @@ var Analyzer = &analysis.Analyzer{ var ( componentPattern = "./internal/component/..." - checkPackage = "github.com/grafana/agent/internal/component/all" + checkPackage = "github.com/grafana/alloy/internal/component/all" ) func init() { @@ -129,9 +129,9 @@ func declaresComponent(pkg *packages.Package, file *ast.File) bool { } // Check to see if the ident refers to - // github.com/grafana/agent/component. + // github.com/grafana/alloy/internal/component. if pkgName, ok := pkg.TypesInfo.Uses[ident].(*types.PkgName); ok { - if pkgName.Imported().Path() == "github.com/grafana/agent/internal/component" && + if pkgName.Imported().Path() == "github.com/grafana/alloy/internal/component" && sel.Sel.Name == "Register" { foundComponentDecl = true diff --git a/internal/cmd/agentlint/main.go b/internal/cmd/agentlint/main.go index 551642f3ae..01156d5ecb 100644 --- a/internal/cmd/agentlint/main.go +++ b/internal/cmd/agentlint/main.go @@ -3,8 +3,8 @@ package main import ( - "github.com/grafana/agent/internal/cmd/agentlint/internal/findcomponents" - "github.com/grafana/agent/internal/cmd/agentlint/internal/syntaxtags" + "github.com/grafana/alloy/internal/cmd/agentlint/internal/findcomponents" + "github.com/grafana/alloy/internal/cmd/agentlint/internal/syntaxtags" "golang.org/x/tools/go/analysis/multichecker" ) diff --git a/internal/cmd/integration-tests/tests/otlp-metrics/otlp_metrics_test.go b/internal/cmd/integration-tests/tests/otlp-metrics/otlp_metrics_test.go index 157f334448..4c4f4de2e4 100644 --- a/internal/cmd/integration-tests/tests/otlp-metrics/otlp_metrics_test.go +++ b/internal/cmd/integration-tests/tests/otlp-metrics/otlp_metrics_test.go @@ -5,7 +5,7 @@ package main import ( "testing" - "github.com/grafana/agent/internal/cmd/integration-tests/common" + "github.com/grafana/alloy/internal/cmd/integration-tests/common" ) func TestOTLPMetrics(t *testing.T) { diff --git a/internal/cmd/integration-tests/tests/otlp-metrics/otlp_to_prom_metrics_test.go b/internal/cmd/integration-tests/tests/otlp-metrics/otlp_to_prom_metrics_test.go index beca3f9ccf..42d0ca5d6d 100644 --- a/internal/cmd/integration-tests/tests/otlp-metrics/otlp_to_prom_metrics_test.go +++ b/internal/cmd/integration-tests/tests/otlp-metrics/otlp_to_prom_metrics_test.go @@ -5,7 +5,7 @@ package main import ( "testing" - "github.com/grafana/agent/internal/cmd/integration-tests/common" + "github.com/grafana/alloy/internal/cmd/integration-tests/common" ) func TestOTLPToPromMetrics(t *testing.T) { diff --git a/internal/cmd/integration-tests/tests/read-log-file/read_log_file_test.go b/internal/cmd/integration-tests/tests/read-log-file/read_log_file_test.go index 0549fcbb1e..b5095e5496 100644 --- 
a/internal/cmd/integration-tests/tests/read-log-file/read_log_file_test.go +++ b/internal/cmd/integration-tests/tests/read-log-file/read_log_file_test.go @@ -5,7 +5,7 @@ package main import ( "testing" - "github.com/grafana/agent/internal/cmd/integration-tests/common" + "github.com/grafana/alloy/internal/cmd/integration-tests/common" "github.com/stretchr/testify/assert" ) diff --git a/internal/cmd/integration-tests/tests/redis/redis_metrics_test.go b/internal/cmd/integration-tests/tests/redis/redis_metrics_test.go index 3063d8fdf1..296076e3f6 100644 --- a/internal/cmd/integration-tests/tests/redis/redis_metrics_test.go +++ b/internal/cmd/integration-tests/tests/redis/redis_metrics_test.go @@ -5,7 +5,7 @@ package main import ( "testing" - "github.com/grafana/agent/internal/cmd/integration-tests/common" + "github.com/grafana/alloy/internal/cmd/integration-tests/common" ) func TestRedisMetrics(t *testing.T) { diff --git a/internal/cmd/integration-tests/tests/scrape-prom-metrics/scrape_prom_metrics_test.go b/internal/cmd/integration-tests/tests/scrape-prom-metrics/scrape_prom_metrics_test.go index dbac235d18..4bd6a71aa8 100644 --- a/internal/cmd/integration-tests/tests/scrape-prom-metrics/scrape_prom_metrics_test.go +++ b/internal/cmd/integration-tests/tests/scrape-prom-metrics/scrape_prom_metrics_test.go @@ -5,7 +5,7 @@ package main import ( "testing" - "github.com/grafana/agent/internal/cmd/integration-tests/common" + "github.com/grafana/alloy/internal/cmd/integration-tests/common" ) func TestScrapePromMetrics(t *testing.T) { diff --git a/internal/cmd/integration-tests/tests/unix/unix_metrics_test.go b/internal/cmd/integration-tests/tests/unix/unix_metrics_test.go index 1611a47959..22237bef1d 100644 --- a/internal/cmd/integration-tests/tests/unix/unix_metrics_test.go +++ b/internal/cmd/integration-tests/tests/unix/unix_metrics_test.go @@ -5,7 +5,7 @@ package main import ( "testing" - "github.com/grafana/agent/internal/cmd/integration-tests/common" + "github.com/grafana/alloy/internal/cmd/integration-tests/common" ) func TestUnixMetrics(t *testing.T) { diff --git a/internal/component/all/all.go b/internal/component/all/all.go index 3369357d50..1f34796452 100644 --- a/internal/component/all/all.go +++ b/internal/component/all/all.go @@ -2,136 +2,136 @@ package all import ( - _ "github.com/grafana/agent/internal/component/discovery/aws" // Import discovery.aws.ec2 and discovery.aws.lightsail - _ "github.com/grafana/agent/internal/component/discovery/azure" // Import discovery.azure - _ "github.com/grafana/agent/internal/component/discovery/consul" // Import discovery.consul - _ "github.com/grafana/agent/internal/component/discovery/consulagent" // Import discovery.consulagent - _ "github.com/grafana/agent/internal/component/discovery/digitalocean" // Import discovery.digitalocean - _ "github.com/grafana/agent/internal/component/discovery/dns" // Import discovery.dns - _ "github.com/grafana/agent/internal/component/discovery/docker" // Import discovery.docker - _ "github.com/grafana/agent/internal/component/discovery/dockerswarm" // Import discovery.dockerswarm - _ "github.com/grafana/agent/internal/component/discovery/eureka" // Import discovery.eureka - _ "github.com/grafana/agent/internal/component/discovery/file" // Import discovery.file - _ "github.com/grafana/agent/internal/component/discovery/gce" // Import discovery.gce - _ "github.com/grafana/agent/internal/component/discovery/hetzner" // Import discovery.hetzner - _ "github.com/grafana/agent/internal/component/discovery/http" // Import 
discovery.http - _ "github.com/grafana/agent/internal/component/discovery/ionos" // Import discovery.ionos - _ "github.com/grafana/agent/internal/component/discovery/kubelet" // Import discovery.kubelet - _ "github.com/grafana/agent/internal/component/discovery/kubernetes" // Import discovery.kubernetes - _ "github.com/grafana/agent/internal/component/discovery/kuma" // Import discovery.kuma - _ "github.com/grafana/agent/internal/component/discovery/linode" // Import discovery.linode - _ "github.com/grafana/agent/internal/component/discovery/marathon" // Import discovery.marathon - _ "github.com/grafana/agent/internal/component/discovery/nerve" // Import discovery.nerve - _ "github.com/grafana/agent/internal/component/discovery/nomad" // Import discovery.nomad - _ "github.com/grafana/agent/internal/component/discovery/openstack" // Import discovery.openstack - _ "github.com/grafana/agent/internal/component/discovery/ovhcloud" // Import discovery.ovhcloud - _ "github.com/grafana/agent/internal/component/discovery/process" // Import discovery.process - _ "github.com/grafana/agent/internal/component/discovery/puppetdb" // Import discovery.puppetdb - _ "github.com/grafana/agent/internal/component/discovery/relabel" // Import discovery.relabel - _ "github.com/grafana/agent/internal/component/discovery/scaleway" // Import discovery.scaleway - _ "github.com/grafana/agent/internal/component/discovery/serverset" // Import discovery.serverset - _ "github.com/grafana/agent/internal/component/discovery/triton" // Import discovery.triton - _ "github.com/grafana/agent/internal/component/discovery/uyuni" // Import discovery.uyuni - _ "github.com/grafana/agent/internal/component/faro/receiver" // Import faro.receiver - _ "github.com/grafana/agent/internal/component/local/file" // Import local.file - _ "github.com/grafana/agent/internal/component/local/file_match" // Import local.file_match - _ "github.com/grafana/agent/internal/component/loki/echo" // Import loki.echo - _ "github.com/grafana/agent/internal/component/loki/process" // Import loki.process - _ "github.com/grafana/agent/internal/component/loki/relabel" // Import loki.relabel - _ "github.com/grafana/agent/internal/component/loki/rules/kubernetes" // Import loki.rules.kubernetes - _ "github.com/grafana/agent/internal/component/loki/source/api" // Import loki.source.api - _ "github.com/grafana/agent/internal/component/loki/source/aws_firehose" // Import loki.source.awsfirehose - _ "github.com/grafana/agent/internal/component/loki/source/azure_event_hubs" // Import loki.source.azure_event_hubs - _ "github.com/grafana/agent/internal/component/loki/source/cloudflare" // Import loki.source.cloudflare - _ "github.com/grafana/agent/internal/component/loki/source/docker" // Import loki.source.docker - _ "github.com/grafana/agent/internal/component/loki/source/file" // Import loki.source.file - _ "github.com/grafana/agent/internal/component/loki/source/gcplog" // Import loki.source.gcplog - _ "github.com/grafana/agent/internal/component/loki/source/gelf" // Import loki.source.gelf - _ "github.com/grafana/agent/internal/component/loki/source/heroku" // Import loki.source.heroku - _ "github.com/grafana/agent/internal/component/loki/source/journal" // Import loki.source.journal - _ "github.com/grafana/agent/internal/component/loki/source/kafka" // Import loki.source.kafka - _ "github.com/grafana/agent/internal/component/loki/source/kubernetes" // Import loki.source.kubernetes - _ "github.com/grafana/agent/internal/component/loki/source/kubernetes_events" // 
Import loki.source.kubernetes_events
- _ "github.com/grafana/agent/internal/component/loki/source/podlogs" // Import loki.source.podlogs
- _ "github.com/grafana/agent/internal/component/loki/source/syslog" // Import loki.source.syslog
- _ "github.com/grafana/agent/internal/component/loki/source/windowsevent" // Import loki.source.windowsevent
- _ "github.com/grafana/agent/internal/component/loki/write" // Import loki.write
- _ "github.com/grafana/agent/internal/component/mimir/rules/kubernetes" // Import mimir.rules.kubernetes
- _ "github.com/grafana/agent/internal/component/otelcol/auth/basic" // Import otelcol.auth.basic
- _ "github.com/grafana/agent/internal/component/otelcol/auth/bearer" // Import otelcol.auth.bearer
- _ "github.com/grafana/agent/internal/component/otelcol/auth/headers" // Import otelcol.auth.headers
- _ "github.com/grafana/agent/internal/component/otelcol/auth/oauth2" // Import otelcol.auth.oauth2
- _ "github.com/grafana/agent/internal/component/otelcol/auth/sigv4" // Import otelcol.auth.sigv4
- _ "github.com/grafana/agent/internal/component/otelcol/connector/host_info" // Import otelcol.connector.host_info
- _ "github.com/grafana/agent/internal/component/otelcol/connector/servicegraph" // Import otelcol.connector.servicegraph
- _ "github.com/grafana/agent/internal/component/otelcol/connector/spanlogs" // Import otelcol.connector.spanlogs
- _ "github.com/grafana/agent/internal/component/otelcol/connector/spanmetrics" // Import otelcol.connector.spanmetrics
- _ "github.com/grafana/agent/internal/component/otelcol/exporter/loadbalancing" // Import otelcol.exporter.loadbalancing
- _ "github.com/grafana/agent/internal/component/otelcol/exporter/logging" // Import otelcol.exporter.logging
- _ "github.com/grafana/agent/internal/component/otelcol/exporter/loki" // Import otelcol.exporter.loki
- _ "github.com/grafana/agent/internal/component/otelcol/exporter/otlp" // Import otelcol.exporter.otlp
- _ "github.com/grafana/agent/internal/component/otelcol/exporter/otlphttp" // Import otelcol.exporter.otlphttp
- _ "github.com/grafana/agent/internal/component/otelcol/exporter/prometheus" // Import otelcol.exporter.prometheus
- _ "github.com/grafana/agent/internal/component/otelcol/extension/jaeger_remote_sampling" // Import otelcol.extension.jaeger_remote_sampling
- _ "github.com/grafana/agent/internal/component/otelcol/processor/attributes" // Import otelcol.processor.attributes
- _ "github.com/grafana/agent/internal/component/otelcol/processor/batch" // Import otelcol.processor.batch
- _ "github.com/grafana/agent/internal/component/otelcol/processor/discovery" // Import otelcol.processor.discovery
- _ "github.com/grafana/agent/internal/component/otelcol/processor/filter" // Import otelcol.processor.filter
- _ "github.com/grafana/agent/internal/component/otelcol/processor/k8sattributes" // Import otelcol.processor.k8sattributes
- _ "github.com/grafana/agent/internal/component/otelcol/processor/memorylimiter" // Import otelcol.processor.memory_limiter
- _ "github.com/grafana/agent/internal/component/otelcol/processor/probabilistic_sampler" // Import otelcol.processor.probabilistic_sampler
- _ "github.com/grafana/agent/internal/component/otelcol/processor/resourcedetection" // Import otelcol.processor.resourcedetection
- _ "github.com/grafana/agent/internal/component/otelcol/processor/span" // Import otelcol.processor.span
- _ "github.com/grafana/agent/internal/component/otelcol/processor/tail_sampling" // Import otelcol.processor.tail_sampling
- _ "github.com/grafana/agent/internal/component/otelcol/processor/transform" // Import otelcol.processor.transform
- _ "github.com/grafana/agent/internal/component/otelcol/receiver/jaeger" // Import otelcol.receiver.jaeger
- _ "github.com/grafana/agent/internal/component/otelcol/receiver/kafka" // Import otelcol.receiver.kafka
- _ "github.com/grafana/agent/internal/component/otelcol/receiver/loki" // Import otelcol.receiver.loki
- _ "github.com/grafana/agent/internal/component/otelcol/receiver/opencensus" // Import otelcol.receiver.opencensus
- _ "github.com/grafana/agent/internal/component/otelcol/receiver/otlp" // Import otelcol.receiver.otlp
- _ "github.com/grafana/agent/internal/component/otelcol/receiver/prometheus" // Import otelcol.receiver.prometheus
- _ "github.com/grafana/agent/internal/component/otelcol/receiver/vcenter" // Import otelcol.receiver.vcenter
- _ "github.com/grafana/agent/internal/component/otelcol/receiver/zipkin" // Import otelcol.receiver.zipkin
- _ "github.com/grafana/agent/internal/component/prometheus/exporter/apache" // Import prometheus.exporter.apache
- _ "github.com/grafana/agent/internal/component/prometheus/exporter/azure" // Import prometheus.exporter.azure
- _ "github.com/grafana/agent/internal/component/prometheus/exporter/blackbox" // Import prometheus.exporter.blackbox
- _ "github.com/grafana/agent/internal/component/prometheus/exporter/cadvisor" // Import prometheus.exporter.cadvisor
- _ "github.com/grafana/agent/internal/component/prometheus/exporter/cloudwatch" // Import prometheus.exporter.cloudwatch
- _ "github.com/grafana/agent/internal/component/prometheus/exporter/consul" // Import prometheus.exporter.consul
- _ "github.com/grafana/agent/internal/component/prometheus/exporter/dnsmasq" // Import prometheus.exporter.dnsmasq
- _ "github.com/grafana/agent/internal/component/prometheus/exporter/elasticsearch" // Import prometheus.exporter.elasticsearch
- _ "github.com/grafana/agent/internal/component/prometheus/exporter/gcp" // Import prometheus.exporter.gcp
- _ "github.com/grafana/agent/internal/component/prometheus/exporter/github" // Import prometheus.exporter.github
- _ "github.com/grafana/agent/internal/component/prometheus/exporter/kafka" // Import prometheus.exporter.kafka
- _ "github.com/grafana/agent/internal/component/prometheus/exporter/memcached" // Import prometheus.exporter.memcached
- _ "github.com/grafana/agent/internal/component/prometheus/exporter/mongodb" // Import prometheus.exporter.mongodb
- _ "github.com/grafana/agent/internal/component/prometheus/exporter/mssql" // Import prometheus.exporter.mssql
- _ "github.com/grafana/agent/internal/component/prometheus/exporter/mysql" // Import prometheus.exporter.mysql
- _ "github.com/grafana/agent/internal/component/prometheus/exporter/oracledb" // Import prometheus.exporter.oracledb
- _ "github.com/grafana/agent/internal/component/prometheus/exporter/postgres" // Import prometheus.exporter.postgres
- _ "github.com/grafana/agent/internal/component/prometheus/exporter/process" // Import prometheus.exporter.process
- _ "github.com/grafana/agent/internal/component/prometheus/exporter/redis" // Import prometheus.exporter.redis
- _ "github.com/grafana/agent/internal/component/prometheus/exporter/self" // Import prometheus.exporter.self
- _ "github.com/grafana/agent/internal/component/prometheus/exporter/snmp" // Import prometheus.exporter.snmp
- _ "github.com/grafana/agent/internal/component/prometheus/exporter/snowflake" // Import prometheus.exporter.snowflake
- _ "github.com/grafana/agent/internal/component/prometheus/exporter/squid" // Import prometheus.exporter.squid
- _ "github.com/grafana/agent/internal/component/prometheus/exporter/statsd" // Import prometheus.exporter.statsd
- _ "github.com/grafana/agent/internal/component/prometheus/exporter/unix" // Import prometheus.exporter.unix
- _ "github.com/grafana/agent/internal/component/prometheus/exporter/windows" // Import prometheus.exporter.windows
- _ "github.com/grafana/agent/internal/component/prometheus/operator/podmonitors" // Import prometheus.operator.podmonitors
- _ "github.com/grafana/agent/internal/component/prometheus/operator/probes" // Import prometheus.operator.probes
- _ "github.com/grafana/agent/internal/component/prometheus/operator/servicemonitors" // Import prometheus.operator.servicemonitors
- _ "github.com/grafana/agent/internal/component/prometheus/receive_http" // Import prometheus.receive_http
- _ "github.com/grafana/agent/internal/component/prometheus/relabel" // Import prometheus.relabel
- _ "github.com/grafana/agent/internal/component/prometheus/remotewrite" // Import prometheus.remote_write
- _ "github.com/grafana/agent/internal/component/prometheus/scrape" // Import prometheus.scrape
- _ "github.com/grafana/agent/internal/component/pyroscope/ebpf" // Import pyroscope.ebpf
- _ "github.com/grafana/agent/internal/component/pyroscope/java" // Import pyroscope.java
- _ "github.com/grafana/agent/internal/component/pyroscope/scrape" // Import pyroscope.scrape
- _ "github.com/grafana/agent/internal/component/pyroscope/write" // Import pyroscope.write
- _ "github.com/grafana/agent/internal/component/remote/http" // Import remote.http
- _ "github.com/grafana/agent/internal/component/remote/kubernetes/configmap" // Import remote.kubernetes.configmap
- _ "github.com/grafana/agent/internal/component/remote/kubernetes/secret" // Import remote.kubernetes.secret
- _ "github.com/grafana/agent/internal/component/remote/s3" // Import remote.s3
- _ "github.com/grafana/agent/internal/component/remote/vault" // Import remote.vault
"github.com/grafana/alloy/internal/component/discovery/kubernetes" // Import discovery.kubernetes + _ "github.com/grafana/alloy/internal/component/discovery/kuma" // Import discovery.kuma + _ "github.com/grafana/alloy/internal/component/discovery/linode" // Import discovery.linode + _ "github.com/grafana/alloy/internal/component/discovery/marathon" // Import discovery.marathon + _ "github.com/grafana/alloy/internal/component/discovery/nerve" // Import discovery.nerve + _ "github.com/grafana/alloy/internal/component/discovery/nomad" // Import discovery.nomad + _ "github.com/grafana/alloy/internal/component/discovery/openstack" // Import discovery.openstack + _ "github.com/grafana/alloy/internal/component/discovery/ovhcloud" // Import discovery.ovhcloud + _ "github.com/grafana/alloy/internal/component/discovery/process" // Import discovery.process + _ "github.com/grafana/alloy/internal/component/discovery/puppetdb" // Import discovery.puppetdb + _ "github.com/grafana/alloy/internal/component/discovery/relabel" // Import discovery.relabel + _ "github.com/grafana/alloy/internal/component/discovery/scaleway" // Import discovery.scaleway + _ "github.com/grafana/alloy/internal/component/discovery/serverset" // Import discovery.serverset + _ "github.com/grafana/alloy/internal/component/discovery/triton" // Import discovery.triton + _ "github.com/grafana/alloy/internal/component/discovery/uyuni" // Import discovery.uyuni + _ "github.com/grafana/alloy/internal/component/faro/receiver" // Import faro.receiver + _ "github.com/grafana/alloy/internal/component/local/file" // Import local.file + _ "github.com/grafana/alloy/internal/component/local/file_match" // Import local.file_match + _ "github.com/grafana/alloy/internal/component/loki/echo" // Import loki.echo + _ "github.com/grafana/alloy/internal/component/loki/process" // Import loki.process + _ "github.com/grafana/alloy/internal/component/loki/relabel" // Import loki.relabel + _ "github.com/grafana/alloy/internal/component/loki/rules/kubernetes" // Import loki.rules.kubernetes + _ "github.com/grafana/alloy/internal/component/loki/source/api" // Import loki.source.api + _ "github.com/grafana/alloy/internal/component/loki/source/aws_firehose" // Import loki.source.awsfirehose + _ "github.com/grafana/alloy/internal/component/loki/source/azure_event_hubs" // Import loki.source.azure_event_hubs + _ "github.com/grafana/alloy/internal/component/loki/source/cloudflare" // Import loki.source.cloudflare + _ "github.com/grafana/alloy/internal/component/loki/source/docker" // Import loki.source.docker + _ "github.com/grafana/alloy/internal/component/loki/source/file" // Import loki.source.file + _ "github.com/grafana/alloy/internal/component/loki/source/gcplog" // Import loki.source.gcplog + _ "github.com/grafana/alloy/internal/component/loki/source/gelf" // Import loki.source.gelf + _ "github.com/grafana/alloy/internal/component/loki/source/heroku" // Import loki.source.heroku + _ "github.com/grafana/alloy/internal/component/loki/source/journal" // Import loki.source.journal + _ "github.com/grafana/alloy/internal/component/loki/source/kafka" // Import loki.source.kafka + _ "github.com/grafana/alloy/internal/component/loki/source/kubernetes" // Import loki.source.kubernetes + _ "github.com/grafana/alloy/internal/component/loki/source/kubernetes_events" // Import loki.source.kubernetes_events + _ "github.com/grafana/alloy/internal/component/loki/source/podlogs" // Import loki.source.podlogs + _ "github.com/grafana/alloy/internal/component/loki/source/syslog" 
// Import loki.source.syslog + _ "github.com/grafana/alloy/internal/component/loki/source/windowsevent" // Import loki.source.windowsevent + _ "github.com/grafana/alloy/internal/component/loki/write" // Import loki.write + _ "github.com/grafana/alloy/internal/component/mimir/rules/kubernetes" // Import mimir.rules.kubernetes + _ "github.com/grafana/alloy/internal/component/otelcol/auth/basic" // Import otelcol.auth.basic + _ "github.com/grafana/alloy/internal/component/otelcol/auth/bearer" // Import otelcol.auth.bearer + _ "github.com/grafana/alloy/internal/component/otelcol/auth/headers" // Import otelcol.auth.headers + _ "github.com/grafana/alloy/internal/component/otelcol/auth/oauth2" // Import otelcol.auth.oauth2 + _ "github.com/grafana/alloy/internal/component/otelcol/auth/sigv4" // Import otelcol.auth.sigv4 + _ "github.com/grafana/alloy/internal/component/otelcol/connector/host_info" // Import otelcol.connector.host_info + _ "github.com/grafana/alloy/internal/component/otelcol/connector/servicegraph" // Import otelcol.connector.servicegraph + _ "github.com/grafana/alloy/internal/component/otelcol/connector/spanlogs" // Import otelcol.connector.spanlogs + _ "github.com/grafana/alloy/internal/component/otelcol/connector/spanmetrics" // Import otelcol.connector.spanmetrics + _ "github.com/grafana/alloy/internal/component/otelcol/exporter/loadbalancing" // Import otelcol.exporter.loadbalancing + _ "github.com/grafana/alloy/internal/component/otelcol/exporter/logging" // Import otelcol.exporter.logging + _ "github.com/grafana/alloy/internal/component/otelcol/exporter/loki" // Import otelcol.exporter.loki + _ "github.com/grafana/alloy/internal/component/otelcol/exporter/otlp" // Import otelcol.exporter.otlp + _ "github.com/grafana/alloy/internal/component/otelcol/exporter/otlphttp" // Import otelcol.exporter.otlphttp + _ "github.com/grafana/alloy/internal/component/otelcol/exporter/prometheus" // Import otelcol.exporter.prometheus + _ "github.com/grafana/alloy/internal/component/otelcol/extension/jaeger_remote_sampling" // Import otelcol.extension.jaeger_remote_sampling + _ "github.com/grafana/alloy/internal/component/otelcol/processor/attributes" // Import otelcol.processor.attributes + _ "github.com/grafana/alloy/internal/component/otelcol/processor/batch" // Import otelcol.processor.batch + _ "github.com/grafana/alloy/internal/component/otelcol/processor/discovery" // Import otelcol.processor.discovery + _ "github.com/grafana/alloy/internal/component/otelcol/processor/filter" // Import otelcol.processor.filter + _ "github.com/grafana/alloy/internal/component/otelcol/processor/k8sattributes" // Import otelcol.processor.k8sattributes + _ "github.com/grafana/alloy/internal/component/otelcol/processor/memorylimiter" // Import otelcol.processor.memory_limiter + _ "github.com/grafana/alloy/internal/component/otelcol/processor/probabilistic_sampler" // Import otelcol.processor.probabilistic_sampler + _ "github.com/grafana/alloy/internal/component/otelcol/processor/resourcedetection" // Import otelcol.processor.resourcedetection + _ "github.com/grafana/alloy/internal/component/otelcol/processor/span" // Import otelcol.processor.span + _ "github.com/grafana/alloy/internal/component/otelcol/processor/tail_sampling" // Import otelcol.processor.tail_sampling + _ "github.com/grafana/alloy/internal/component/otelcol/processor/transform" // Import otelcol.processor.transform + _ "github.com/grafana/alloy/internal/component/otelcol/receiver/jaeger" // Import otelcol.receiver.jaeger + _ 
"github.com/grafana/alloy/internal/component/otelcol/receiver/kafka" // Import otelcol.receiver.kafka + _ "github.com/grafana/alloy/internal/component/otelcol/receiver/loki" // Import otelcol.receiver.loki + _ "github.com/grafana/alloy/internal/component/otelcol/receiver/opencensus" // Import otelcol.receiver.opencensus + _ "github.com/grafana/alloy/internal/component/otelcol/receiver/otlp" // Import otelcol.receiver.otlp + _ "github.com/grafana/alloy/internal/component/otelcol/receiver/prometheus" // Import otelcol.receiver.prometheus + _ "github.com/grafana/alloy/internal/component/otelcol/receiver/vcenter" // Import otelcol.receiver.vcenter + _ "github.com/grafana/alloy/internal/component/otelcol/receiver/zipkin" // Import otelcol.receiver.zipkin + _ "github.com/grafana/alloy/internal/component/prometheus/exporter/apache" // Import prometheus.exporter.apache + _ "github.com/grafana/alloy/internal/component/prometheus/exporter/azure" // Import prometheus.exporter.azure + _ "github.com/grafana/alloy/internal/component/prometheus/exporter/blackbox" // Import prometheus.exporter.blackbox + _ "github.com/grafana/alloy/internal/component/prometheus/exporter/cadvisor" // Import prometheus.exporter.cadvisor + _ "github.com/grafana/alloy/internal/component/prometheus/exporter/cloudwatch" // Import prometheus.exporter.cloudwatch + _ "github.com/grafana/alloy/internal/component/prometheus/exporter/consul" // Import prometheus.exporter.consul + _ "github.com/grafana/alloy/internal/component/prometheus/exporter/dnsmasq" // Import prometheus.exporter.dnsmasq + _ "github.com/grafana/alloy/internal/component/prometheus/exporter/elasticsearch" // Import prometheus.exporter.elasticsearch + _ "github.com/grafana/alloy/internal/component/prometheus/exporter/gcp" // Import prometheus.exporter.gcp + _ "github.com/grafana/alloy/internal/component/prometheus/exporter/github" // Import prometheus.exporter.github + _ "github.com/grafana/alloy/internal/component/prometheus/exporter/kafka" // Import prometheus.exporter.kafka + _ "github.com/grafana/alloy/internal/component/prometheus/exporter/memcached" // Import prometheus.exporter.memcached + _ "github.com/grafana/alloy/internal/component/prometheus/exporter/mongodb" // Import prometheus.exporter.mongodb + _ "github.com/grafana/alloy/internal/component/prometheus/exporter/mssql" // Import prometheus.exporter.mssql + _ "github.com/grafana/alloy/internal/component/prometheus/exporter/mysql" // Import prometheus.exporter.mysql + _ "github.com/grafana/alloy/internal/component/prometheus/exporter/oracledb" // Import prometheus.exporter.oracledb + _ "github.com/grafana/alloy/internal/component/prometheus/exporter/postgres" // Import prometheus.exporter.postgres + _ "github.com/grafana/alloy/internal/component/prometheus/exporter/process" // Import prometheus.exporter.process + _ "github.com/grafana/alloy/internal/component/prometheus/exporter/redis" // Import prometheus.exporter.redis + _ "github.com/grafana/alloy/internal/component/prometheus/exporter/self" // Import prometheus.exporter.self + _ "github.com/grafana/alloy/internal/component/prometheus/exporter/snmp" // Import prometheus.exporter.snmp + _ "github.com/grafana/alloy/internal/component/prometheus/exporter/snowflake" // Import prometheus.exporter.snowflake + _ "github.com/grafana/alloy/internal/component/prometheus/exporter/squid" // Import prometheus.exporter.squid + _ "github.com/grafana/alloy/internal/component/prometheus/exporter/statsd" // Import prometheus.exporter.statsd + _ 
"github.com/grafana/alloy/internal/component/prometheus/exporter/unix" // Import prometheus.exporter.unix + _ "github.com/grafana/alloy/internal/component/prometheus/exporter/windows" // Import prometheus.exporter.windows + _ "github.com/grafana/alloy/internal/component/prometheus/operator/podmonitors" // Import prometheus.operator.podmonitors + _ "github.com/grafana/alloy/internal/component/prometheus/operator/probes" // Import prometheus.operator.probes + _ "github.com/grafana/alloy/internal/component/prometheus/operator/servicemonitors" // Import prometheus.operator.servicemonitors + _ "github.com/grafana/alloy/internal/component/prometheus/receive_http" // Import prometheus.receive_http + _ "github.com/grafana/alloy/internal/component/prometheus/relabel" // Import prometheus.relabel + _ "github.com/grafana/alloy/internal/component/prometheus/remotewrite" // Import prometheus.remote_write + _ "github.com/grafana/alloy/internal/component/prometheus/scrape" // Import prometheus.scrape + _ "github.com/grafana/alloy/internal/component/pyroscope/ebpf" // Import pyroscope.ebpf + _ "github.com/grafana/alloy/internal/component/pyroscope/java" // Import pyroscope.java + _ "github.com/grafana/alloy/internal/component/pyroscope/scrape" // Import pyroscope.scrape + _ "github.com/grafana/alloy/internal/component/pyroscope/write" // Import pyroscope.write + _ "github.com/grafana/alloy/internal/component/remote/http" // Import remote.http + _ "github.com/grafana/alloy/internal/component/remote/kubernetes/configmap" // Import remote.kubernetes.configmap + _ "github.com/grafana/alloy/internal/component/remote/kubernetes/secret" // Import remote.kubernetes.secret + _ "github.com/grafana/alloy/internal/component/remote/s3" // Import remote.s3 + _ "github.com/grafana/alloy/internal/component/remote/vault" // Import remote.vault ) diff --git a/internal/component/all/all_test.go b/internal/component/all/all_test.go index 21030d559c..fe28c7739a 100644 --- a/internal/component/all/all_test.go +++ b/internal/component/all/all_test.go @@ -5,7 +5,7 @@ import ( "reflect" "testing" - "github.com/grafana/agent/internal/component" + "github.com/grafana/alloy/internal/component" "github.com/grafana/alloy/syntax" "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" diff --git a/internal/component/common/kubernetes/event.go b/internal/component/common/kubernetes/event.go index 6850500582..785c743ca5 100644 --- a/internal/component/common/kubernetes/event.go +++ b/internal/component/common/kubernetes/event.go @@ -2,7 +2,7 @@ package kubernetes import ( "github.com/go-kit/log" - "github.com/grafana/agent/internal/flow/logging/level" + "github.com/grafana/alloy/internal/flow/logging/level" "k8s.io/client-go/tools/cache" "k8s.io/client-go/util/workqueue" ) diff --git a/internal/component/common/kubernetes/kubernetes.go b/internal/component/common/kubernetes/kubernetes.go index 01d8c009d5..1a6ad2aff4 100644 --- a/internal/component/common/kubernetes/kubernetes.go +++ b/internal/component/common/kubernetes/kubernetes.go @@ -5,9 +5,9 @@ import ( "reflect" "github.com/go-kit/log" - commoncfg "github.com/grafana/agent/internal/component/common/config" - "github.com/grafana/agent/internal/flow/logging/level" - "github.com/grafana/agent/internal/useragent" + commoncfg "github.com/grafana/alloy/internal/component/common/config" + "github.com/grafana/alloy/internal/flow/logging/level" + "github.com/grafana/alloy/internal/useragent" promconfig "github.com/prometheus/common/config" "k8s.io/client-go/rest" 
"k8s.io/client-go/tools/clientcmd" diff --git a/internal/component/common/loki/client/batch.go b/internal/component/common/loki/client/batch.go index 7a15927cee..65bc3786a2 100644 --- a/internal/component/common/loki/client/batch.go +++ b/internal/component/common/loki/client/batch.go @@ -11,7 +11,7 @@ import ( "github.com/prometheus/common/model" "golang.org/x/exp/slices" - "github.com/grafana/agent/internal/component/common/loki" + "github.com/grafana/alloy/internal/component/common/loki" "github.com/grafana/loki/pkg/logproto" ) diff --git a/internal/component/common/loki/client/batch_test.go b/internal/component/common/loki/client/batch_test.go index 255c0d38dc..cec96dc156 100644 --- a/internal/component/common/loki/client/batch_test.go +++ b/internal/component/common/loki/client/batch_test.go @@ -9,7 +9,7 @@ import ( "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" - "github.com/grafana/agent/internal/component/common/loki" + "github.com/grafana/alloy/internal/component/common/loki" "github.com/grafana/loki/pkg/logproto" ) diff --git a/internal/component/common/loki/client/client.go b/internal/component/common/loki/client/client.go index 8f127d9a73..857f56041f 100644 --- a/internal/component/common/loki/client/client.go +++ b/internal/component/common/loki/client/client.go @@ -13,15 +13,15 @@ import ( "time" "github.com/go-kit/log" - "github.com/grafana/agent/internal/flow/logging/level" - "github.com/grafana/agent/internal/useragent" + "github.com/grafana/alloy/internal/flow/logging/level" + "github.com/grafana/alloy/internal/useragent" "github.com/grafana/dskit/backoff" "github.com/prometheus/client_golang/prometheus" "github.com/prometheus/common/config" "github.com/prometheus/common/model" - "github.com/grafana/agent/internal/component/common/loki" - "github.com/grafana/agent/internal/util" + "github.com/grafana/alloy/internal/component/common/loki" + "github.com/grafana/alloy/internal/util" lokiutil "github.com/grafana/loki/pkg/util" ) diff --git a/internal/component/common/loki/client/client_test.go b/internal/component/common/loki/client/client_test.go index 94f9e182e2..2ca5ee0442 100644 --- a/internal/component/common/loki/client/client_test.go +++ b/internal/component/common/loki/client/client_test.go @@ -21,7 +21,7 @@ import ( "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" - "github.com/grafana/agent/internal/component/common/loki" + "github.com/grafana/alloy/internal/component/common/loki" "github.com/grafana/loki/clients/pkg/promtail/utils" "github.com/grafana/loki/pkg/logproto" diff --git a/internal/component/common/loki/client/fake/client.go b/internal/component/common/loki/client/fake/client.go index e27f2c2c8a..c566504721 100644 --- a/internal/component/common/loki/client/fake/client.go +++ b/internal/component/common/loki/client/fake/client.go @@ -3,7 +3,7 @@ package fake import ( "sync" - "github.com/grafana/agent/internal/component/common/loki" + "github.com/grafana/alloy/internal/component/common/loki" ) // Client is a fake client used for testing. 
diff --git a/internal/component/common/loki/client/internal/marker_file_handler.go b/internal/component/common/loki/client/internal/marker_file_handler.go index d96f14225d..143dcbcf15 100644 --- a/internal/component/common/loki/client/internal/marker_file_handler.go +++ b/internal/component/common/loki/client/internal/marker_file_handler.go @@ -8,7 +8,7 @@ import ( "github.com/go-kit/log" "github.com/go-kit/log/level" - "github.com/grafana/agent/internal/component/common/loki/wal" + "github.com/grafana/alloy/internal/component/common/loki/wal" "github.com/natefinch/atomic" ) diff --git a/internal/component/common/loki/client/internal/marker_handler.go b/internal/component/common/loki/client/internal/marker_handler.go index 7b59e618f3..7bb78a9f6a 100644 --- a/internal/component/common/loki/client/internal/marker_handler.go +++ b/internal/component/common/loki/client/internal/marker_handler.go @@ -7,8 +7,8 @@ import ( "time" "github.com/go-kit/log" - "github.com/grafana/agent/internal/component/common/loki/wal" - "github.com/grafana/agent/internal/flow/logging/level" + "github.com/grafana/alloy/internal/component/common/loki/wal" + "github.com/grafana/alloy/internal/flow/logging/level" ) type MarkerHandler interface { diff --git a/internal/component/common/loki/client/internal/metrics.go b/internal/component/common/loki/client/internal/metrics.go index baf2700763..85d83af5fb 100644 --- a/internal/component/common/loki/client/internal/metrics.go +++ b/internal/component/common/loki/client/internal/metrics.go @@ -1,7 +1,7 @@ package internal import ( - "github.com/grafana/agent/internal/util" + "github.com/grafana/alloy/internal/util" "github.com/prometheus/client_golang/prometheus" ) diff --git a/internal/component/common/loki/client/logger.go b/internal/component/common/loki/client/logger.go index 7f9790a542..1a23c481a4 100644 --- a/internal/component/common/loki/client/logger.go +++ b/internal/component/common/loki/client/logger.go @@ -12,9 +12,9 @@ import ( "github.com/prometheus/client_golang/prometheus" "gopkg.in/yaml.v2" - "github.com/grafana/agent/internal/component/common/loki" - "github.com/grafana/agent/internal/component/common/loki/limit" - "github.com/grafana/agent/internal/component/common/loki/wal" + "github.com/grafana/alloy/internal/component/common/loki" + "github.com/grafana/alloy/internal/component/common/loki/limit" + "github.com/grafana/alloy/internal/component/common/loki/wal" ) var ( diff --git a/internal/component/common/loki/client/logger_test.go b/internal/component/common/loki/client/logger_test.go index fd8451095e..3f43e9040e 100644 --- a/internal/component/common/loki/client/logger_test.go +++ b/internal/component/common/loki/client/logger_test.go @@ -9,7 +9,7 @@ import ( "github.com/prometheus/common/model" "github.com/stretchr/testify/require" - "github.com/grafana/agent/internal/component/common/loki" + "github.com/grafana/alloy/internal/component/common/loki" "github.com/grafana/loki/pkg/logproto" util_log "github.com/grafana/loki/pkg/util/log" ) diff --git a/internal/component/common/loki/client/manager.go b/internal/component/common/loki/client/manager.go index b9fa13c7ce..b57b93e9cf 100644 --- a/internal/component/common/loki/client/manager.go +++ b/internal/component/common/loki/client/manager.go @@ -7,13 +7,13 @@ import ( "sync" "github.com/go-kit/log" - "github.com/grafana/agent/internal/component/common/loki/client/internal" - "github.com/grafana/agent/internal/flow/logging/level" + 
"github.com/grafana/alloy/internal/component/common/loki/client/internal" + "github.com/grafana/alloy/internal/flow/logging/level" "github.com/prometheus/client_golang/prometheus" - "github.com/grafana/agent/internal/component/common/loki" - "github.com/grafana/agent/internal/component/common/loki/limit" - "github.com/grafana/agent/internal/component/common/loki/wal" + "github.com/grafana/alloy/internal/component/common/loki" + "github.com/grafana/alloy/internal/component/common/loki/limit" + "github.com/grafana/alloy/internal/component/common/loki/wal" ) // WriterEventsNotifier implements a notifier that's received by the Manager, to which wal.Watcher can subscribe for diff --git a/internal/component/common/loki/client/manager_test.go b/internal/component/common/loki/client/manager_test.go index c499e957d3..b6496dc6ce 100644 --- a/internal/component/common/loki/client/manager_test.go +++ b/internal/component/common/loki/client/manager_test.go @@ -16,10 +16,10 @@ import ( "github.com/prometheus/common/model" "github.com/stretchr/testify/require" - "github.com/grafana/agent/internal/component/common/loki" - "github.com/grafana/agent/internal/component/common/loki/limit" - "github.com/grafana/agent/internal/component/common/loki/utils" - "github.com/grafana/agent/internal/component/common/loki/wal" + "github.com/grafana/alloy/internal/component/common/loki" + "github.com/grafana/alloy/internal/component/common/loki/limit" + "github.com/grafana/alloy/internal/component/common/loki/utils" + "github.com/grafana/alloy/internal/component/common/loki/wal" "github.com/grafana/loki/pkg/logproto" lokiflag "github.com/grafana/loki/pkg/util/flagext" diff --git a/internal/component/common/loki/client/metrics.go b/internal/component/common/loki/client/metrics.go index 3bd8a2324a..66f8a53015 100644 --- a/internal/component/common/loki/client/metrics.go +++ b/internal/component/common/loki/client/metrics.go @@ -1,7 +1,7 @@ package client import ( - "github.com/grafana/agent/internal/util" + "github.com/grafana/alloy/internal/util" "github.com/prometheus/client_golang/prometheus" ) diff --git a/internal/component/common/loki/client/queue_client.go b/internal/component/common/loki/client/queue_client.go index 51f14b91c6..0050406bb8 100644 --- a/internal/component/common/loki/client/queue_client.go +++ b/internal/component/common/loki/client/queue_client.go @@ -14,7 +14,7 @@ import ( "github.com/go-kit/log" "github.com/go-kit/log/level" - agentWal "github.com/grafana/agent/internal/component/common/loki/wal" + agentWal "github.com/grafana/alloy/internal/component/common/loki/wal" "github.com/grafana/dskit/backoff" "github.com/prometheus/common/config" "github.com/prometheus/common/model" diff --git a/internal/component/common/loki/client/queue_client_test.go b/internal/component/common/loki/client/queue_client_test.go index daf798a955..57228150e9 100644 --- a/internal/component/common/loki/client/queue_client_test.go +++ b/internal/component/common/loki/client/queue_client_test.go @@ -8,9 +8,9 @@ import ( "github.com/alecthomas/units" "github.com/go-kit/log" - "github.com/grafana/agent/internal/component/common/loki" - "github.com/grafana/agent/internal/component/common/loki/client/internal" - "github.com/grafana/agent/internal/component/common/loki/utils" + "github.com/grafana/alloy/internal/component/common/loki" + "github.com/grafana/alloy/internal/component/common/loki/client/internal" + "github.com/grafana/alloy/internal/component/common/loki/utils" "github.com/grafana/dskit/backoff" 
"github.com/grafana/dskit/flagext" "github.com/prometheus/client_golang/prometheus" diff --git a/internal/component/common/loki/positions/positions.go b/internal/component/common/loki/positions/positions.go index 0201d39e47..cefcd17386 100644 --- a/internal/component/common/loki/positions/positions.go +++ b/internal/component/common/loki/positions/positions.go @@ -15,7 +15,7 @@ import ( "time" "github.com/go-kit/log" - "github.com/grafana/agent/internal/flow/logging/level" + "github.com/grafana/alloy/internal/flow/logging/level" yaml "gopkg.in/yaml.v2" ) diff --git a/internal/component/common/loki/wal/internal/watcher_state.go b/internal/component/common/loki/wal/internal/watcher_state.go index e9ae3614c2..5de6aacc43 100644 --- a/internal/component/common/loki/wal/internal/watcher_state.go +++ b/internal/component/common/loki/wal/internal/watcher_state.go @@ -4,7 +4,7 @@ import ( "sync" "github.com/go-kit/log" - "github.com/grafana/agent/internal/flow/logging/level" + "github.com/grafana/alloy/internal/flow/logging/level" ) const ( diff --git a/internal/component/common/loki/wal/reader.go b/internal/component/common/loki/wal/reader.go index 41cdc19787..8599a3ea39 100644 --- a/internal/component/common/loki/wal/reader.go +++ b/internal/component/common/loki/wal/reader.go @@ -5,7 +5,7 @@ import ( "github.com/prometheus/common/model" - "github.com/grafana/agent/internal/component/common/loki" + "github.com/grafana/alloy/internal/component/common/loki" "github.com/grafana/loki/pkg/ingester/wal" "github.com/grafana/loki/pkg/util" walUtils "github.com/grafana/loki/pkg/util/wal" diff --git a/internal/component/common/loki/wal/wal.go b/internal/component/common/loki/wal/wal.go index 277c7d67df..eb23b4e631 100644 --- a/internal/component/common/loki/wal/wal.go +++ b/internal/component/common/loki/wal/wal.go @@ -5,7 +5,7 @@ import ( "os" "github.com/go-kit/log" - "github.com/grafana/agent/internal/flow/logging/level" + "github.com/grafana/alloy/internal/flow/logging/level" "github.com/prometheus/client_golang/prometheus" "github.com/prometheus/prometheus/tsdb/wlog" diff --git a/internal/component/common/loki/wal/watcher.go b/internal/component/common/loki/wal/watcher.go index a7818ac491..6de3cf2578 100644 --- a/internal/component/common/loki/wal/watcher.go +++ b/internal/component/common/loki/wal/watcher.go @@ -10,8 +10,8 @@ import ( "time" "github.com/go-kit/log" - "github.com/grafana/agent/internal/component/common/loki/wal/internal" - "github.com/grafana/agent/internal/flow/logging/level" + "github.com/grafana/alloy/internal/component/common/loki/wal/internal" + "github.com/grafana/alloy/internal/flow/logging/level" "github.com/prometheus/prometheus/tsdb/record" "github.com/prometheus/prometheus/tsdb/wlog" diff --git a/internal/component/common/loki/wal/watcher_metrics.go b/internal/component/common/loki/wal/watcher_metrics.go index 6c7d06b442..fac4ba80a3 100644 --- a/internal/component/common/loki/wal/watcher_metrics.go +++ b/internal/component/common/loki/wal/watcher_metrics.go @@ -1,7 +1,7 @@ package wal import ( - "github.com/grafana/agent/internal/util" + "github.com/grafana/alloy/internal/util" "github.com/prometheus/client_golang/prometheus" ) diff --git a/internal/component/common/loki/wal/watcher_test.go b/internal/component/common/loki/wal/watcher_test.go index aa025bf302..714495860d 100644 --- a/internal/component/common/loki/wal/watcher_test.go +++ b/internal/component/common/loki/wal/watcher_test.go @@ -8,15 +8,15 @@ import ( "time" "github.com/go-kit/log" - 
"github.com/grafana/agent/internal/flow/logging/level" + "github.com/grafana/alloy/internal/flow/logging/level" "github.com/prometheus/client_golang/prometheus" "github.com/prometheus/common/model" "github.com/prometheus/prometheus/tsdb/record" "github.com/stretchr/testify/require" "go.uber.org/atomic" - "github.com/grafana/agent/internal/component/common/loki" - "github.com/grafana/agent/internal/component/common/loki/utils" + "github.com/grafana/alloy/internal/component/common/loki" + "github.com/grafana/alloy/internal/component/common/loki/utils" "github.com/grafana/loki/pkg/ingester/wal" "github.com/grafana/loki/pkg/logproto" "github.com/grafana/loki/pkg/util" diff --git a/internal/component/common/loki/wal/writer.go b/internal/component/common/loki/wal/writer.go index a44b9e59dc..a3d29ad571 100644 --- a/internal/component/common/loki/wal/writer.go +++ b/internal/component/common/loki/wal/writer.go @@ -10,13 +10,13 @@ import ( "time" "github.com/go-kit/log" - "github.com/grafana/agent/internal/flow/logging/level" + "github.com/grafana/alloy/internal/flow/logging/level" "github.com/prometheus/client_golang/prometheus" "github.com/prometheus/prometheus/model/labels" "github.com/prometheus/prometheus/tsdb/chunks" "github.com/prometheus/prometheus/tsdb/record" - "github.com/grafana/agent/internal/component/common/loki" + "github.com/grafana/alloy/internal/component/common/loki" "github.com/grafana/loki/pkg/ingester/wal" "github.com/grafana/loki/pkg/logproto" "github.com/grafana/loki/pkg/util" diff --git a/internal/component/common/loki/wal/writer_test.go b/internal/component/common/loki/wal/writer_test.go index 019506343e..cbfb378d5f 100644 --- a/internal/component/common/loki/wal/writer_test.go +++ b/internal/component/common/loki/wal/writer_test.go @@ -8,12 +8,12 @@ import ( "time" "github.com/go-kit/log" - "github.com/grafana/agent/internal/flow/logging/level" + "github.com/grafana/alloy/internal/flow/logging/level" "github.com/prometheus/client_golang/prometheus" "github.com/prometheus/common/model" "github.com/stretchr/testify/require" - "github.com/grafana/agent/internal/component/common/loki" + "github.com/grafana/alloy/internal/component/common/loki" "github.com/grafana/loki/pkg/logproto" ) diff --git a/internal/component/common/net/server.go b/internal/component/common/net/server.go index d892b341d2..eec9fb3a6e 100644 --- a/internal/component/common/net/server.go +++ b/internal/component/common/net/server.go @@ -5,7 +5,7 @@ import ( "github.com/go-kit/log" "github.com/gorilla/mux" - "github.com/grafana/agent/internal/flow/logging/level" + "github.com/grafana/alloy/internal/flow/logging/level" dskit "github.com/grafana/dskit/server" "github.com/prometheus/client_golang/prometheus" "github.com/prometheus/common/model" diff --git a/internal/component/common/net/server_test.go b/internal/component/common/net/server_test.go index 657aee26b2..7ec848adbc 100644 --- a/internal/component/common/net/server_test.go +++ b/internal/component/common/net/server_test.go @@ -7,7 +7,7 @@ import ( "testing" "github.com/gorilla/mux" - "github.com/grafana/agent/internal/util" + "github.com/grafana/alloy/internal/util" "github.com/prometheus/client_golang/prometheus" "github.com/stretchr/testify/require" ) diff --git a/internal/component/component_health_test.go b/internal/component/component_health_test.go index 831fdc6c9e..d0bcbee08c 100644 --- a/internal/component/component_health_test.go +++ b/internal/component/component_health_test.go @@ -4,7 +4,7 @@ import ( "testing" "time" - 
"github.com/grafana/agent/internal/component" + "github.com/grafana/alloy/internal/component" "github.com/stretchr/testify/require" ) diff --git a/internal/component/discovery/aws/ec2.go b/internal/component/discovery/aws/ec2.go index 971dd25863..ba0d2016fe 100644 --- a/internal/component/discovery/aws/ec2.go +++ b/internal/component/discovery/aws/ec2.go @@ -7,10 +7,10 @@ import ( awsConfig "github.com/aws/aws-sdk-go-v2/config" "github.com/aws/aws-sdk-go-v2/feature/ec2/imds" - "github.com/grafana/agent/internal/component" - "github.com/grafana/agent/internal/component/common/config" - "github.com/grafana/agent/internal/component/discovery" - "github.com/grafana/agent/internal/featuregate" + "github.com/grafana/alloy/internal/component" + "github.com/grafana/alloy/internal/component/common/config" + "github.com/grafana/alloy/internal/component/discovery" + "github.com/grafana/alloy/internal/featuregate" "github.com/grafana/alloy/syntax/alloytypes" promcfg "github.com/prometheus/common/config" "github.com/prometheus/common/model" diff --git a/internal/component/discovery/aws/ec2_test.go b/internal/component/discovery/aws/ec2_test.go index d0239f43cd..845c1e0ceb 100644 --- a/internal/component/discovery/aws/ec2_test.go +++ b/internal/component/discovery/aws/ec2_test.go @@ -4,7 +4,7 @@ import ( "net/url" "testing" - "github.com/grafana/agent/internal/component/common/config" + "github.com/grafana/alloy/internal/component/common/config" "github.com/stretchr/testify/require" "gotest.tools/assert" ) diff --git a/internal/component/discovery/aws/lightsail.go b/internal/component/discovery/aws/lightsail.go index b64f5f9ba4..97d93b3537 100644 --- a/internal/component/discovery/aws/lightsail.go +++ b/internal/component/discovery/aws/lightsail.go @@ -7,10 +7,10 @@ import ( awsConfig "github.com/aws/aws-sdk-go-v2/config" "github.com/aws/aws-sdk-go-v2/feature/ec2/imds" - "github.com/grafana/agent/internal/component" - "github.com/grafana/agent/internal/component/common/config" - "github.com/grafana/agent/internal/component/discovery" - "github.com/grafana/agent/internal/featuregate" + "github.com/grafana/alloy/internal/component" + "github.com/grafana/alloy/internal/component/common/config" + "github.com/grafana/alloy/internal/component/discovery" + "github.com/grafana/alloy/internal/featuregate" "github.com/grafana/alloy/syntax/alloytypes" promcfg "github.com/prometheus/common/config" "github.com/prometheus/common/model" diff --git a/internal/component/discovery/azure/azure.go b/internal/component/discovery/azure/azure.go index beb7fb64e8..61c8ea6403 100644 --- a/internal/component/discovery/azure/azure.go +++ b/internal/component/discovery/azure/azure.go @@ -5,10 +5,10 @@ import ( "time" "github.com/Azure/go-autorest/autorest/azure" - "github.com/grafana/agent/internal/component" - "github.com/grafana/agent/internal/component/common/config" - "github.com/grafana/agent/internal/component/discovery" - "github.com/grafana/agent/internal/featuregate" + "github.com/grafana/alloy/internal/component" + "github.com/grafana/alloy/internal/component/common/config" + "github.com/grafana/alloy/internal/component/discovery" + "github.com/grafana/alloy/internal/featuregate" "github.com/grafana/alloy/syntax/alloytypes" common "github.com/prometheus/common/config" "github.com/prometheus/common/model" diff --git a/internal/component/discovery/azure/azure_test.go b/internal/component/discovery/azure/azure_test.go index f05f16c0fb..c1ef359388 100644 --- a/internal/component/discovery/azure/azure_test.go +++ 
b/internal/component/discovery/azure/azure_test.go @@ -5,7 +5,7 @@ import ( "testing" "time" - "github.com/grafana/agent/internal/component/common/config" + "github.com/grafana/alloy/internal/component/common/config" "github.com/grafana/alloy/syntax" "github.com/prometheus/common/model" "github.com/stretchr/testify/require" diff --git a/internal/component/discovery/consul/consul.go b/internal/component/discovery/consul/consul.go index f68eb614ab..a5ce3dbb68 100644 --- a/internal/component/discovery/consul/consul.go +++ b/internal/component/discovery/consul/consul.go @@ -4,10 +4,10 @@ import ( "fmt" "time" - "github.com/grafana/agent/internal/component" - "github.com/grafana/agent/internal/component/common/config" - "github.com/grafana/agent/internal/component/discovery" - "github.com/grafana/agent/internal/featuregate" + "github.com/grafana/alloy/internal/component" + "github.com/grafana/alloy/internal/component/common/config" + "github.com/grafana/alloy/internal/component/discovery" + "github.com/grafana/alloy/internal/featuregate" "github.com/grafana/alloy/syntax/alloytypes" config_util "github.com/prometheus/common/config" "github.com/prometheus/common/model" diff --git a/internal/component/discovery/consulagent/consulagent.go b/internal/component/discovery/consulagent/consulagent.go index d897072c9e..762ff6512f 100644 --- a/internal/component/discovery/consulagent/consulagent.go +++ b/internal/component/discovery/consulagent/consulagent.go @@ -4,10 +4,10 @@ import ( "fmt" "time" - "github.com/grafana/agent/internal/component" - "github.com/grafana/agent/internal/component/common/config" - "github.com/grafana/agent/internal/component/discovery" - "github.com/grafana/agent/internal/featuregate" + "github.com/grafana/alloy/internal/component" + "github.com/grafana/alloy/internal/component/common/config" + "github.com/grafana/alloy/internal/component/discovery" + "github.com/grafana/alloy/internal/featuregate" "github.com/grafana/alloy/syntax/alloytypes" promcfg "github.com/prometheus/common/config" "github.com/prometheus/common/model" diff --git a/internal/component/discovery/consulagent/promtail_consulagent.go b/internal/component/discovery/consulagent/promtail_consulagent.go index 2e5d92b336..7144394f30 100644 --- a/internal/component/discovery/consulagent/promtail_consulagent.go +++ b/internal/component/discovery/consulagent/promtail_consulagent.go @@ -22,7 +22,7 @@ import ( "time" "github.com/go-kit/log" - "github.com/grafana/agent/internal/flow/logging/level" + "github.com/grafana/alloy/internal/flow/logging/level" consul "github.com/hashicorp/consul/api" conntrack "github.com/mwitkow/go-conntrack" "github.com/prometheus/client_golang/prometheus" diff --git a/internal/component/discovery/digitalocean/digitalocean.go b/internal/component/discovery/digitalocean/digitalocean.go index bb7315eb15..1a305b201f 100644 --- a/internal/component/discovery/digitalocean/digitalocean.go +++ b/internal/component/discovery/digitalocean/digitalocean.go @@ -4,10 +4,10 @@ import ( "fmt" "time" - "github.com/grafana/agent/internal/component" - "github.com/grafana/agent/internal/component/common/config" - "github.com/grafana/agent/internal/component/discovery" - "github.com/grafana/agent/internal/featuregate" + "github.com/grafana/alloy/internal/component" + "github.com/grafana/alloy/internal/component/common/config" + "github.com/grafana/alloy/internal/component/discovery" + "github.com/grafana/alloy/internal/featuregate" "github.com/grafana/alloy/syntax/alloytypes" "github.com/prometheus/common/model" 
prom_discovery "github.com/prometheus/prometheus/discovery/digitalocean" diff --git a/internal/component/discovery/digitalocean/digitalocean_test.go b/internal/component/discovery/digitalocean/digitalocean_test.go index 18a628fc3e..629dec3910 100644 --- a/internal/component/discovery/digitalocean/digitalocean_test.go +++ b/internal/component/discovery/digitalocean/digitalocean_test.go @@ -5,7 +5,7 @@ import ( "testing" "time" - "github.com/grafana/agent/internal/component/common/config" + "github.com/grafana/alloy/internal/component/common/config" "github.com/grafana/alloy/syntax" prom_common_config "github.com/prometheus/common/config" "github.com/prometheus/common/model" diff --git a/internal/component/discovery/discovery.go b/internal/component/discovery/discovery.go index be260650d3..fc30bbcccc 100644 --- a/internal/component/discovery/discovery.go +++ b/internal/component/discovery/discovery.go @@ -7,8 +7,8 @@ import ( "sync" "time" - "github.com/grafana/agent/internal/component" - "github.com/grafana/agent/internal/service/cluster" + "github.com/grafana/alloy/internal/component" + "github.com/grafana/alloy/internal/service/cluster" "github.com/grafana/ckit/shard" "github.com/prometheus/common/model" "github.com/prometheus/prometheus/discovery" diff --git a/internal/component/discovery/dns/dns.go b/internal/component/discovery/dns/dns.go index 4f1309f37c..749a778c3d 100644 --- a/internal/component/discovery/dns/dns.go +++ b/internal/component/discovery/dns/dns.go @@ -6,9 +6,9 @@ import ( "strings" "time" - "github.com/grafana/agent/internal/component" - "github.com/grafana/agent/internal/component/discovery" - "github.com/grafana/agent/internal/featuregate" + "github.com/grafana/alloy/internal/component" + "github.com/grafana/alloy/internal/component/discovery" + "github.com/grafana/alloy/internal/featuregate" "github.com/prometheus/common/model" "github.com/prometheus/prometheus/discovery/dns" ) diff --git a/internal/component/discovery/docker/docker.go b/internal/component/discovery/docker/docker.go index 0712c20e63..54b2afc963 100644 --- a/internal/component/discovery/docker/docker.go +++ b/internal/component/discovery/docker/docker.go @@ -6,10 +6,10 @@ import ( "net/url" "time" - "github.com/grafana/agent/internal/component" - "github.com/grafana/agent/internal/component/common/config" - "github.com/grafana/agent/internal/component/discovery" - "github.com/grafana/agent/internal/featuregate" + "github.com/grafana/alloy/internal/component" + "github.com/grafana/alloy/internal/component/common/config" + "github.com/grafana/alloy/internal/component/discovery" + "github.com/grafana/alloy/internal/featuregate" "github.com/prometheus/common/model" "github.com/prometheus/prometheus/discovery/moby" ) diff --git a/internal/component/discovery/dockerswarm/dockerswarm.go b/internal/component/discovery/dockerswarm/dockerswarm.go index f7caafc665..4d5ac4fd60 100644 --- a/internal/component/discovery/dockerswarm/dockerswarm.go +++ b/internal/component/discovery/dockerswarm/dockerswarm.go @@ -5,10 +5,10 @@ import ( "net/url" "time" - "github.com/grafana/agent/internal/component" - "github.com/grafana/agent/internal/component/common/config" - "github.com/grafana/agent/internal/component/discovery" - "github.com/grafana/agent/internal/featuregate" + "github.com/grafana/alloy/internal/component" + "github.com/grafana/alloy/internal/component/common/config" + "github.com/grafana/alloy/internal/component/discovery" + "github.com/grafana/alloy/internal/featuregate" "github.com/prometheus/common/model" 
prom_discovery "github.com/prometheus/prometheus/discovery/moby" ) diff --git a/internal/component/discovery/dockerswarm/dockerswarm_test.go b/internal/component/discovery/dockerswarm/dockerswarm_test.go index d36d2198e7..c56b1fde59 100644 --- a/internal/component/discovery/dockerswarm/dockerswarm_test.go +++ b/internal/component/discovery/dockerswarm/dockerswarm_test.go @@ -4,7 +4,7 @@ import ( "testing" "time" - "github.com/grafana/agent/internal/component/common/config" + "github.com/grafana/alloy/internal/component/common/config" "github.com/grafana/alloy/syntax" "github.com/grafana/alloy/syntax/alloytypes" promConfig "github.com/prometheus/common/config" diff --git a/internal/component/discovery/eureka/eureka.go b/internal/component/discovery/eureka/eureka.go index 3221a681a9..ba4516b252 100644 --- a/internal/component/discovery/eureka/eureka.go +++ b/internal/component/discovery/eureka/eureka.go @@ -5,10 +5,10 @@ import ( "net/url" "time" - "github.com/grafana/agent/internal/component" - "github.com/grafana/agent/internal/component/common/config" - "github.com/grafana/agent/internal/component/discovery" - "github.com/grafana/agent/internal/featuregate" + "github.com/grafana/alloy/internal/component" + "github.com/grafana/alloy/internal/component/common/config" + "github.com/grafana/alloy/internal/component/discovery" + "github.com/grafana/alloy/internal/featuregate" "github.com/prometheus/common/model" prom_discovery "github.com/prometheus/prometheus/discovery/eureka" ) diff --git a/internal/component/discovery/eureka/eureka_test.go b/internal/component/discovery/eureka/eureka_test.go index 618150e39a..f50213a0af 100644 --- a/internal/component/discovery/eureka/eureka_test.go +++ b/internal/component/discovery/eureka/eureka_test.go @@ -4,7 +4,7 @@ import ( "testing" "time" - "github.com/grafana/agent/internal/component/common/config" + "github.com/grafana/alloy/internal/component/common/config" "github.com/grafana/alloy/syntax" "github.com/grafana/alloy/syntax/alloytypes" promcfg "github.com/prometheus/common/config" diff --git a/internal/component/discovery/file/file.go b/internal/component/discovery/file/file.go index 4611df0e46..a98163a593 100644 --- a/internal/component/discovery/file/file.go +++ b/internal/component/discovery/file/file.go @@ -3,9 +3,9 @@ package file import ( "time" - "github.com/grafana/agent/internal/component" - "github.com/grafana/agent/internal/component/discovery" - "github.com/grafana/agent/internal/featuregate" + "github.com/grafana/alloy/internal/component" + "github.com/grafana/alloy/internal/component/discovery" + "github.com/grafana/alloy/internal/featuregate" "github.com/prometheus/common/model" prom_discovery "github.com/prometheus/prometheus/discovery/file" ) diff --git a/internal/component/discovery/gce/gce.go b/internal/component/discovery/gce/gce.go index 5288f9b73d..60af462035 100644 --- a/internal/component/discovery/gce/gce.go +++ b/internal/component/discovery/gce/gce.go @@ -4,9 +4,9 @@ package gce import ( "time" - "github.com/grafana/agent/internal/component" - "github.com/grafana/agent/internal/component/discovery" - "github.com/grafana/agent/internal/featuregate" + "github.com/grafana/alloy/internal/component" + "github.com/grafana/alloy/internal/component/discovery" + "github.com/grafana/alloy/internal/featuregate" "github.com/prometheus/common/model" "github.com/prometheus/prometheus/discovery/gce" ) diff --git a/internal/component/discovery/hetzner/hetzner.go b/internal/component/discovery/hetzner/hetzner.go index 
974260e3da..631583da87 100644 --- a/internal/component/discovery/hetzner/hetzner.go +++ b/internal/component/discovery/hetzner/hetzner.go @@ -4,10 +4,10 @@ import ( "fmt" "time" - "github.com/grafana/agent/internal/component" - "github.com/grafana/agent/internal/component/common/config" - "github.com/grafana/agent/internal/component/discovery" - "github.com/grafana/agent/internal/featuregate" + "github.com/grafana/alloy/internal/component" + "github.com/grafana/alloy/internal/component/common/config" + "github.com/grafana/alloy/internal/component/discovery" + "github.com/grafana/alloy/internal/featuregate" "github.com/prometheus/common/model" prom_discovery "github.com/prometheus/prometheus/discovery/hetzner" ) diff --git a/internal/component/discovery/http/http.go b/internal/component/discovery/http/http.go index 098d3eed12..257c9f6af3 100644 --- a/internal/component/discovery/http/http.go +++ b/internal/component/discovery/http/http.go @@ -3,10 +3,10 @@ package http import ( "time" - "github.com/grafana/agent/internal/component" - "github.com/grafana/agent/internal/component/common/config" - "github.com/grafana/agent/internal/component/discovery" - "github.com/grafana/agent/internal/featuregate" + "github.com/grafana/alloy/internal/component" + "github.com/grafana/alloy/internal/component/common/config" + "github.com/grafana/alloy/internal/component/discovery" + "github.com/grafana/alloy/internal/featuregate" promcfg "github.com/prometheus/common/config" "github.com/prometheus/common/model" "github.com/prometheus/prometheus/discovery/http" diff --git a/internal/component/discovery/http/http_test.go b/internal/component/discovery/http/http_test.go index 6c81591b9b..fb99660a38 100644 --- a/internal/component/discovery/http/http_test.go +++ b/internal/component/discovery/http/http_test.go @@ -9,9 +9,9 @@ import ( "testing" "time" - "github.com/grafana/agent/internal/component" - "github.com/grafana/agent/internal/component/common/config" - "github.com/grafana/agent/internal/component/discovery" + "github.com/grafana/alloy/internal/component" + "github.com/grafana/alloy/internal/component/common/config" + "github.com/grafana/alloy/internal/component/discovery" "github.com/grafana/alloy/syntax" "github.com/prometheus/common/model" "github.com/stretchr/testify/require" diff --git a/internal/component/discovery/ionos/ionos.go b/internal/component/discovery/ionos/ionos.go index 117ae38e87..4d42caa04f 100644 --- a/internal/component/discovery/ionos/ionos.go +++ b/internal/component/discovery/ionos/ionos.go @@ -4,10 +4,10 @@ import ( "fmt" "time" - "github.com/grafana/agent/internal/component" - "github.com/grafana/agent/internal/component/common/config" - "github.com/grafana/agent/internal/component/discovery" - "github.com/grafana/agent/internal/featuregate" + "github.com/grafana/alloy/internal/component" + "github.com/grafana/alloy/internal/component/common/config" + "github.com/grafana/alloy/internal/component/discovery" + "github.com/grafana/alloy/internal/featuregate" "github.com/prometheus/common/model" prom_discovery "github.com/prometheus/prometheus/discovery/ionos" ) diff --git a/internal/component/discovery/ionos/ionos_test.go b/internal/component/discovery/ionos/ionos_test.go index 4129eb7112..14a9adf08f 100644 --- a/internal/component/discovery/ionos/ionos_test.go +++ b/internal/component/discovery/ionos/ionos_test.go @@ -4,7 +4,7 @@ import ( "testing" "time" - "github.com/grafana/agent/internal/component/common/config" + "github.com/grafana/alloy/internal/component/common/config" 
"github.com/grafana/alloy/syntax" "github.com/grafana/alloy/syntax/alloytypes" promConfig "github.com/prometheus/common/config" diff --git a/internal/component/discovery/kubelet/kubelet.go b/internal/component/discovery/kubelet/kubelet.go index 835c0d24d3..5777b25565 100644 --- a/internal/component/discovery/kubelet/kubelet.go +++ b/internal/component/discovery/kubelet/kubelet.go @@ -13,10 +13,10 @@ import ( "strings" "time" - "github.com/grafana/agent/internal/component" - "github.com/grafana/agent/internal/component/common/config" - "github.com/grafana/agent/internal/component/discovery" - "github.com/grafana/agent/internal/featuregate" + "github.com/grafana/alloy/internal/component" + "github.com/grafana/alloy/internal/component/common/config" + "github.com/grafana/alloy/internal/component/discovery" + "github.com/grafana/alloy/internal/featuregate" commonConfig "github.com/prometheus/common/config" "github.com/prometheus/common/model" "github.com/prometheus/prometheus/discovery/refresh" diff --git a/internal/component/discovery/kubelet/kubelet_test.go b/internal/component/discovery/kubelet/kubelet_test.go index cec0e2b05d..29098ac7dd 100644 --- a/internal/component/discovery/kubelet/kubelet_test.go +++ b/internal/component/discovery/kubelet/kubelet_test.go @@ -8,7 +8,7 @@ import ( v1 "k8s.io/api/core/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" - "github.com/grafana/agent/internal/component/common/config" + "github.com/grafana/alloy/internal/component/common/config" "github.com/grafana/alloy/syntax" "github.com/stretchr/testify/require" ) diff --git a/internal/component/discovery/kubernetes/kubernetes.go b/internal/component/discovery/kubernetes/kubernetes.go index 1e622aa72a..e3a5b0bf6c 100644 --- a/internal/component/discovery/kubernetes/kubernetes.go +++ b/internal/component/discovery/kubernetes/kubernetes.go @@ -2,10 +2,10 @@ package kubernetes import ( - "github.com/grafana/agent/internal/component" - "github.com/grafana/agent/internal/component/common/config" - "github.com/grafana/agent/internal/component/discovery" - "github.com/grafana/agent/internal/featuregate" + "github.com/grafana/alloy/internal/component" + "github.com/grafana/alloy/internal/component/common/config" + "github.com/grafana/alloy/internal/component/discovery" + "github.com/grafana/alloy/internal/featuregate" promk8s "github.com/prometheus/prometheus/discovery/kubernetes" ) diff --git a/internal/component/discovery/kuma/kuma.go b/internal/component/discovery/kuma/kuma.go index c91aa3b5f4..4fdb2e2971 100644 --- a/internal/component/discovery/kuma/kuma.go +++ b/internal/component/discovery/kuma/kuma.go @@ -4,10 +4,10 @@ import ( "fmt" "time" - "github.com/grafana/agent/internal/component" - "github.com/grafana/agent/internal/component/common/config" - "github.com/grafana/agent/internal/component/discovery" - "github.com/grafana/agent/internal/featuregate" + "github.com/grafana/alloy/internal/component" + "github.com/grafana/alloy/internal/component/common/config" + "github.com/grafana/alloy/internal/component/discovery" + "github.com/grafana/alloy/internal/featuregate" "github.com/prometheus/common/model" prom_discovery "github.com/prometheus/prometheus/discovery/xds" ) diff --git a/internal/component/discovery/kuma/kuma_test.go b/internal/component/discovery/kuma/kuma_test.go index 3a4942c71a..c82e97bd50 100644 --- a/internal/component/discovery/kuma/kuma_test.go +++ b/internal/component/discovery/kuma/kuma_test.go @@ -4,7 +4,7 @@ import ( "testing" "time" - 
"github.com/grafana/agent/internal/component/common/config" + "github.com/grafana/alloy/internal/component/common/config" "github.com/grafana/alloy/syntax" promConfig "github.com/prometheus/common/config" "github.com/prometheus/common/model" diff --git a/internal/component/discovery/linode/linode.go b/internal/component/discovery/linode/linode.go index 8031bde15b..8d5691ebbf 100644 --- a/internal/component/discovery/linode/linode.go +++ b/internal/component/discovery/linode/linode.go @@ -4,10 +4,10 @@ import ( "fmt" "time" - "github.com/grafana/agent/internal/component" - "github.com/grafana/agent/internal/component/common/config" - "github.com/grafana/agent/internal/component/discovery" - "github.com/grafana/agent/internal/featuregate" + "github.com/grafana/alloy/internal/component" + "github.com/grafana/alloy/internal/component/common/config" + "github.com/grafana/alloy/internal/component/discovery" + "github.com/grafana/alloy/internal/featuregate" "github.com/prometheus/common/model" prom_discovery "github.com/prometheus/prometheus/discovery/linode" ) diff --git a/internal/component/discovery/linode/linode_test.go b/internal/component/discovery/linode/linode_test.go index 9c7361e623..4512a6ff59 100644 --- a/internal/component/discovery/linode/linode_test.go +++ b/internal/component/discovery/linode/linode_test.go @@ -4,7 +4,7 @@ import ( "testing" "time" - "github.com/grafana/agent/internal/component/common/config" + "github.com/grafana/alloy/internal/component/common/config" "github.com/grafana/alloy/syntax" promconfig "github.com/prometheus/common/config" "github.com/prometheus/common/model" diff --git a/internal/component/discovery/marathon/marathon.go b/internal/component/discovery/marathon/marathon.go index e7630cf834..20081722a4 100644 --- a/internal/component/discovery/marathon/marathon.go +++ b/internal/component/discovery/marathon/marathon.go @@ -4,10 +4,10 @@ import ( "fmt" "time" - "github.com/grafana/agent/internal/component" - "github.com/grafana/agent/internal/component/common/config" - "github.com/grafana/agent/internal/component/discovery" - "github.com/grafana/agent/internal/featuregate" + "github.com/grafana/alloy/internal/component" + "github.com/grafana/alloy/internal/component/common/config" + "github.com/grafana/alloy/internal/component/discovery" + "github.com/grafana/alloy/internal/featuregate" "github.com/grafana/alloy/syntax/alloytypes" promcfg "github.com/prometheus/common/config" "github.com/prometheus/common/model" diff --git a/internal/component/discovery/marathon/marathon_test.go b/internal/component/discovery/marathon/marathon_test.go index bade375386..fcc094bdfb 100644 --- a/internal/component/discovery/marathon/marathon_test.go +++ b/internal/component/discovery/marathon/marathon_test.go @@ -4,7 +4,7 @@ import ( "testing" "time" - "github.com/grafana/agent/internal/component/common/config" + "github.com/grafana/alloy/internal/component/common/config" "github.com/grafana/alloy/syntax" "github.com/grafana/alloy/syntax/alloytypes" promConfig "github.com/prometheus/common/config" diff --git a/internal/component/discovery/nerve/nerve.go b/internal/component/discovery/nerve/nerve.go index d8521a80c9..85faad6cc8 100644 --- a/internal/component/discovery/nerve/nerve.go +++ b/internal/component/discovery/nerve/nerve.go @@ -4,9 +4,9 @@ import ( "fmt" "time" - "github.com/grafana/agent/internal/component" - "github.com/grafana/agent/internal/component/discovery" - "github.com/grafana/agent/internal/featuregate" + "github.com/grafana/alloy/internal/component" + 
"github.com/grafana/alloy/internal/component/discovery" + "github.com/grafana/alloy/internal/featuregate" "github.com/prometheus/common/model" prom_discovery "github.com/prometheus/prometheus/discovery/zookeeper" ) diff --git a/internal/component/discovery/nomad/nomad.go b/internal/component/discovery/nomad/nomad.go index b324add03e..22f06de73b 100644 --- a/internal/component/discovery/nomad/nomad.go +++ b/internal/component/discovery/nomad/nomad.go @@ -5,10 +5,10 @@ import ( "strings" "time" - "github.com/grafana/agent/internal/component" - "github.com/grafana/agent/internal/component/common/config" - "github.com/grafana/agent/internal/component/discovery" - "github.com/grafana/agent/internal/featuregate" + "github.com/grafana/alloy/internal/component" + "github.com/grafana/alloy/internal/component/common/config" + "github.com/grafana/alloy/internal/component/discovery" + "github.com/grafana/alloy/internal/featuregate" "github.com/prometheus/common/model" prom_discovery "github.com/prometheus/prometheus/discovery/nomad" ) diff --git a/internal/component/discovery/openstack/openstack.go b/internal/component/discovery/openstack/openstack.go index 892126ce8e..5ac5f038d1 100644 --- a/internal/component/discovery/openstack/openstack.go +++ b/internal/component/discovery/openstack/openstack.go @@ -4,10 +4,10 @@ import ( "fmt" "time" - "github.com/grafana/agent/internal/component" - "github.com/grafana/agent/internal/component/common/config" - "github.com/grafana/agent/internal/component/discovery" - "github.com/grafana/agent/internal/featuregate" + "github.com/grafana/alloy/internal/component" + "github.com/grafana/alloy/internal/component/common/config" + "github.com/grafana/alloy/internal/component/discovery" + "github.com/grafana/alloy/internal/featuregate" "github.com/grafana/alloy/syntax/alloytypes" config_util "github.com/prometheus/common/config" "github.com/prometheus/common/model" diff --git a/internal/component/discovery/openstack/openstack_test.go b/internal/component/discovery/openstack/openstack_test.go index 1fec5b50d7..ea180b4347 100644 --- a/internal/component/discovery/openstack/openstack_test.go +++ b/internal/component/discovery/openstack/openstack_test.go @@ -4,7 +4,7 @@ import ( "testing" "time" - "github.com/grafana/agent/internal/component/common/config" + "github.com/grafana/alloy/internal/component/common/config" "github.com/grafana/alloy/syntax" promcfg "github.com/prometheus/common/config" "github.com/prometheus/common/model" diff --git a/internal/component/discovery/ovhcloud/ovhcloud.go b/internal/component/discovery/ovhcloud/ovhcloud.go index 1117b35381..c6a51bf641 100644 --- a/internal/component/discovery/ovhcloud/ovhcloud.go +++ b/internal/component/discovery/ovhcloud/ovhcloud.go @@ -4,9 +4,9 @@ import ( "fmt" "time" - "github.com/grafana/agent/internal/component" - "github.com/grafana/agent/internal/component/discovery" - "github.com/grafana/agent/internal/featuregate" + "github.com/grafana/alloy/internal/component" + "github.com/grafana/alloy/internal/component/discovery" + "github.com/grafana/alloy/internal/featuregate" "github.com/grafana/alloy/syntax/alloytypes" "github.com/prometheus/common/config" "github.com/prometheus/common/model" diff --git a/internal/component/discovery/ovhcloud/ovhcloud_test.go b/internal/component/discovery/ovhcloud/ovhcloud_test.go index 064c5a5e01..b627bf89f3 100644 --- a/internal/component/discovery/ovhcloud/ovhcloud_test.go +++ b/internal/component/discovery/ovhcloud/ovhcloud_test.go @@ -4,7 +4,7 @@ import ( "testing" "time" - 
"github.com/grafana/agent/internal/component/discovery/ovhcloud" + "github.com/grafana/alloy/internal/component/discovery/ovhcloud" "github.com/grafana/alloy/syntax" "github.com/prometheus/common/model" prom_ovh "github.com/prometheus/prometheus/discovery/ovhcloud" diff --git a/internal/component/discovery/process/args.go b/internal/component/discovery/process/args.go index 45ed6649bb..b01c693da4 100644 --- a/internal/component/discovery/process/args.go +++ b/internal/component/discovery/process/args.go @@ -3,7 +3,7 @@ package process import ( "time" - "github.com/grafana/agent/internal/component/discovery" + "github.com/grafana/alloy/internal/component/discovery" ) type Arguments struct { diff --git a/internal/component/discovery/process/container.go b/internal/component/discovery/process/container.go index 89811eebd8..1ac7842818 100644 --- a/internal/component/discovery/process/container.go +++ b/internal/component/discovery/process/container.go @@ -8,7 +8,7 @@ import ( "regexp" "strings" - "github.com/grafana/agent/internal/component/discovery" + "github.com/grafana/alloy/internal/component/discovery" ) var ( diff --git a/internal/component/discovery/process/discover.go b/internal/component/discovery/process/discover.go index f17f40c989..009cac43be 100644 --- a/internal/component/discovery/process/discover.go +++ b/internal/component/discovery/process/discover.go @@ -12,7 +12,7 @@ import ( "github.com/go-kit/log" "github.com/go-kit/log/level" - "github.com/grafana/agent/internal/component/discovery" + "github.com/grafana/alloy/internal/component/discovery" gopsutil "github.com/shirou/gopsutil/v3/process" "golang.org/x/sys/unix" ) diff --git a/internal/component/discovery/process/join.go b/internal/component/discovery/process/join.go index 5c3b612de4..5c2728cc6e 100644 --- a/internal/component/discovery/process/join.go +++ b/internal/component/discovery/process/join.go @@ -2,7 +2,7 @@ package process -import "github.com/grafana/agent/internal/component/discovery" +import "github.com/grafana/alloy/internal/component/discovery" func join(processes, containers []discovery.Target) []discovery.Target { res := make([]discovery.Target, 0, len(processes)+len(containers)) diff --git a/internal/component/discovery/process/join_test.go b/internal/component/discovery/process/join_test.go index 5aa05d3a00..3f2e9dfa5a 100644 --- a/internal/component/discovery/process/join_test.go +++ b/internal/component/discovery/process/join_test.go @@ -6,7 +6,7 @@ import ( "fmt" "testing" - "github.com/grafana/agent/internal/component/discovery" + "github.com/grafana/alloy/internal/component/discovery" "github.com/stretchr/testify/assert" ) diff --git a/internal/component/discovery/process/process.go b/internal/component/discovery/process/process.go index a5b2ec6258..933e169d2c 100644 --- a/internal/component/discovery/process/process.go +++ b/internal/component/discovery/process/process.go @@ -7,9 +7,9 @@ import ( "time" "github.com/go-kit/log" - "github.com/grafana/agent/internal/component" - "github.com/grafana/agent/internal/component/discovery" - "github.com/grafana/agent/internal/featuregate" + "github.com/grafana/alloy/internal/component" + "github.com/grafana/alloy/internal/component/discovery" + "github.com/grafana/alloy/internal/featuregate" ) func init() { diff --git a/internal/component/discovery/process/process_stub.go b/internal/component/discovery/process/process_stub.go index 74c1f394ea..6266d77d99 100644 --- a/internal/component/discovery/process/process_stub.go +++ 
b/internal/component/discovery/process/process_stub.go @@ -5,10 +5,10 @@ package process import ( "context" - "github.com/grafana/agent/internal/component" - "github.com/grafana/agent/internal/component/discovery" - "github.com/grafana/agent/internal/featuregate" - "github.com/grafana/agent/internal/flow/logging/level" + "github.com/grafana/alloy/internal/component" + "github.com/grafana/alloy/internal/component/discovery" + "github.com/grafana/alloy/internal/featuregate" + "github.com/grafana/alloy/internal/flow/logging/level" ) func init() { diff --git a/internal/component/discovery/puppetdb/puppetdb.go b/internal/component/discovery/puppetdb/puppetdb.go index 1b1b29ea87..297060cea4 100644 --- a/internal/component/discovery/puppetdb/puppetdb.go +++ b/internal/component/discovery/puppetdb/puppetdb.go @@ -5,10 +5,10 @@ import ( "net/url" "time" - "github.com/grafana/agent/internal/component" - "github.com/grafana/agent/internal/component/common/config" - "github.com/grafana/agent/internal/component/discovery" - "github.com/grafana/agent/internal/featuregate" + "github.com/grafana/alloy/internal/component" + "github.com/grafana/alloy/internal/component/common/config" + "github.com/grafana/alloy/internal/component/discovery" + "github.com/grafana/alloy/internal/featuregate" "github.com/prometheus/common/model" prom_discovery "github.com/prometheus/prometheus/discovery/puppetdb" ) diff --git a/internal/component/discovery/relabel/relabel.go b/internal/component/discovery/relabel/relabel.go index dc75ba4418..c4f2c419db 100644 --- a/internal/component/discovery/relabel/relabel.go +++ b/internal/component/discovery/relabel/relabel.go @@ -4,10 +4,10 @@ import ( "context" "sync" - "github.com/grafana/agent/internal/component" - flow_relabel "github.com/grafana/agent/internal/component/common/relabel" - "github.com/grafana/agent/internal/component/discovery" - "github.com/grafana/agent/internal/featuregate" + "github.com/grafana/alloy/internal/component" + flow_relabel "github.com/grafana/alloy/internal/component/common/relabel" + "github.com/grafana/alloy/internal/component/discovery" + "github.com/grafana/alloy/internal/featuregate" "github.com/prometheus/prometheus/model/labels" "github.com/prometheus/prometheus/model/relabel" ) diff --git a/internal/component/discovery/relabel/relabel_test.go b/internal/component/discovery/relabel/relabel_test.go index eb6add2ca9..536ab7dd62 100644 --- a/internal/component/discovery/relabel/relabel_test.go +++ b/internal/component/discovery/relabel/relabel_test.go @@ -4,10 +4,10 @@ import ( "testing" "time" - flow_relabel "github.com/grafana/agent/internal/component/common/relabel" - "github.com/grafana/agent/internal/component/discovery" - "github.com/grafana/agent/internal/component/discovery/relabel" - "github.com/grafana/agent/internal/flow/componenttest" + flow_relabel "github.com/grafana/alloy/internal/component/common/relabel" + "github.com/grafana/alloy/internal/component/discovery" + "github.com/grafana/alloy/internal/component/discovery/relabel" + "github.com/grafana/alloy/internal/flow/componenttest" "github.com/grafana/alloy/syntax" "github.com/stretchr/testify/require" ) diff --git a/internal/component/discovery/scaleway/scaleway.go b/internal/component/discovery/scaleway/scaleway.go index 2b2a30ce7a..c760a4d965 100644 --- a/internal/component/discovery/scaleway/scaleway.go +++ b/internal/component/discovery/scaleway/scaleway.go @@ -6,10 +6,10 @@ import ( "reflect" "time" - "github.com/grafana/agent/internal/component" - 
"github.com/grafana/agent/internal/component/common/config" - "github.com/grafana/agent/internal/component/discovery" - "github.com/grafana/agent/internal/featuregate" + "github.com/grafana/alloy/internal/component" + "github.com/grafana/alloy/internal/component/common/config" + "github.com/grafana/alloy/internal/component/discovery" + "github.com/grafana/alloy/internal/featuregate" "github.com/grafana/alloy/syntax/alloytypes" prom_config "github.com/prometheus/common/config" "github.com/prometheus/common/model" diff --git a/internal/component/discovery/serverset/serverset.go b/internal/component/discovery/serverset/serverset.go index e706e3a8e7..58e7844f34 100644 --- a/internal/component/discovery/serverset/serverset.go +++ b/internal/component/discovery/serverset/serverset.go @@ -6,9 +6,9 @@ import ( "strings" "time" - "github.com/grafana/agent/internal/component" - "github.com/grafana/agent/internal/component/discovery" - "github.com/grafana/agent/internal/featuregate" + "github.com/grafana/alloy/internal/component" + "github.com/grafana/alloy/internal/component/discovery" + "github.com/grafana/alloy/internal/featuregate" "github.com/prometheus/common/model" prom_discovery "github.com/prometheus/prometheus/discovery/zookeeper" ) diff --git a/internal/component/discovery/triton/triton.go b/internal/component/discovery/triton/triton.go index c74e7c5283..7b7e990e6b 100644 --- a/internal/component/discovery/triton/triton.go +++ b/internal/component/discovery/triton/triton.go @@ -4,10 +4,10 @@ import ( "fmt" "time" - "github.com/grafana/agent/internal/component" - "github.com/grafana/agent/internal/component/common/config" - "github.com/grafana/agent/internal/component/discovery" - "github.com/grafana/agent/internal/featuregate" + "github.com/grafana/alloy/internal/component" + "github.com/grafana/alloy/internal/component/common/config" + "github.com/grafana/alloy/internal/component/discovery" + "github.com/grafana/alloy/internal/featuregate" "github.com/prometheus/common/model" prom_discovery "github.com/prometheus/prometheus/discovery/triton" ) diff --git a/internal/component/discovery/uyuni/uyuni.go b/internal/component/discovery/uyuni/uyuni.go index f9408d0a22..6af2cea094 100644 --- a/internal/component/discovery/uyuni/uyuni.go +++ b/internal/component/discovery/uyuni/uyuni.go @@ -5,10 +5,10 @@ import ( "net/url" "time" - "github.com/grafana/agent/internal/component" - "github.com/grafana/agent/internal/component/common/config" - "github.com/grafana/agent/internal/component/discovery" - "github.com/grafana/agent/internal/featuregate" + "github.com/grafana/alloy/internal/component" + "github.com/grafana/alloy/internal/component/common/config" + "github.com/grafana/alloy/internal/component/discovery" + "github.com/grafana/alloy/internal/featuregate" "github.com/grafana/alloy/syntax/alloytypes" promcfg "github.com/prometheus/common/config" "github.com/prometheus/common/model" diff --git a/internal/component/discovery/uyuni/uyuni_test.go b/internal/component/discovery/uyuni/uyuni_test.go index e5b1536430..ee2e06ce76 100644 --- a/internal/component/discovery/uyuni/uyuni_test.go +++ b/internal/component/discovery/uyuni/uyuni_test.go @@ -4,7 +4,7 @@ import ( "testing" "time" - "github.com/grafana/agent/internal/component/common/config" + "github.com/grafana/alloy/internal/component/common/config" "github.com/grafana/alloy/syntax" promcfg "github.com/prometheus/common/config" "github.com/prometheus/common/model" diff --git a/internal/component/faro/receiver/arguments.go 
b/internal/component/faro/receiver/arguments.go index 3effb1f964..7316de5678 100644 --- a/internal/component/faro/receiver/arguments.go +++ b/internal/component/faro/receiver/arguments.go @@ -4,8 +4,8 @@ import ( "time" "github.com/alecthomas/units" - "github.com/grafana/agent/internal/component/common/loki" - "github.com/grafana/agent/internal/component/otelcol" + "github.com/grafana/alloy/internal/component/common/loki" + "github.com/grafana/alloy/internal/component/otelcol" "github.com/grafana/alloy/syntax" "github.com/grafana/alloy/syntax/alloytypes" ) diff --git a/internal/component/faro/receiver/exporters.go b/internal/component/faro/receiver/exporters.go index 57514b417e..6609aa1aa9 100644 --- a/internal/component/faro/receiver/exporters.go +++ b/internal/component/faro/receiver/exporters.go @@ -8,10 +8,10 @@ import ( "github.com/go-kit/log" "github.com/go-logfmt/logfmt" - "github.com/grafana/agent/internal/component/common/loki" - "github.com/grafana/agent/internal/component/faro/receiver/internal/payload" - "github.com/grafana/agent/internal/component/otelcol" - "github.com/grafana/agent/internal/flow/logging/level" + "github.com/grafana/alloy/internal/component/common/loki" + "github.com/grafana/alloy/internal/component/faro/receiver/internal/payload" + "github.com/grafana/alloy/internal/component/otelcol" + "github.com/grafana/alloy/internal/flow/logging/level" "github.com/grafana/loki/pkg/logproto" "github.com/prometheus/client_golang/prometheus" "github.com/prometheus/common/model" diff --git a/internal/component/faro/receiver/exporters_test.go b/internal/component/faro/receiver/exporters_test.go index a6ec19ecf1..5217ad4ddc 100644 --- a/internal/component/faro/receiver/exporters_test.go +++ b/internal/component/faro/receiver/exporters_test.go @@ -5,7 +5,7 @@ import ( "strings" "testing" - "github.com/grafana/agent/internal/component/faro/receiver/internal/payload" + "github.com/grafana/alloy/internal/component/faro/receiver/internal/payload" "github.com/prometheus/client_golang/prometheus" promtestutil "github.com/prometheus/client_golang/prometheus/testutil" "github.com/stretchr/testify/require" diff --git a/internal/component/faro/receiver/handler.go b/internal/component/faro/receiver/handler.go index e6207e511a..68efa9c633 100644 --- a/internal/component/faro/receiver/handler.go +++ b/internal/component/faro/receiver/handler.go @@ -8,8 +8,8 @@ import ( "time" "github.com/go-kit/log" - "github.com/grafana/agent/internal/component/faro/receiver/internal/payload" - "github.com/grafana/agent/internal/flow/logging/level" + "github.com/grafana/alloy/internal/component/faro/receiver/internal/payload" + "github.com/grafana/alloy/internal/flow/logging/level" "github.com/prometheus/client_golang/prometheus" "github.com/rs/cors" "golang.org/x/time/rate" diff --git a/internal/component/faro/receiver/handler_test.go b/internal/component/faro/receiver/handler_test.go index 79f93fe744..228b8c87cd 100644 --- a/internal/component/faro/receiver/handler_test.go +++ b/internal/component/faro/receiver/handler_test.go @@ -9,8 +9,8 @@ import ( "testing" "github.com/alecthomas/units" - "github.com/grafana/agent/internal/component/faro/receiver/internal/payload" - "github.com/grafana/agent/internal/util" + "github.com/grafana/alloy/internal/component/faro/receiver/internal/payload" + "github.com/grafana/alloy/internal/util" "github.com/prometheus/client_golang/prometheus" "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" diff --git 
a/internal/component/faro/receiver/receiver.go b/internal/component/faro/receiver/receiver.go index 1a77503498..2e7531950b 100644 --- a/internal/component/faro/receiver/receiver.go +++ b/internal/component/faro/receiver/receiver.go @@ -8,9 +8,9 @@ import ( "github.com/go-kit/log" "github.com/go-sourcemap/sourcemap" - "github.com/grafana/agent/internal/component" - "github.com/grafana/agent/internal/featuregate" - "github.com/grafana/agent/internal/flow/logging/level" + "github.com/grafana/alloy/internal/component" + "github.com/grafana/alloy/internal/featuregate" + "github.com/grafana/alloy/internal/flow/logging/level" ) func init() { diff --git a/internal/component/faro/receiver/receiver_test.go b/internal/component/faro/receiver/receiver_test.go index 4e6e78e04e..920faad024 100644 --- a/internal/component/faro/receiver/receiver_test.go +++ b/internal/component/faro/receiver/receiver_test.go @@ -8,10 +8,10 @@ import ( "testing" "time" - "github.com/grafana/agent/internal/component/common/loki" - "github.com/grafana/agent/internal/component/otelcol" - "github.com/grafana/agent/internal/flow/componenttest" - "github.com/grafana/agent/internal/util" + "github.com/grafana/alloy/internal/component/common/loki" + "github.com/grafana/alloy/internal/component/otelcol" + "github.com/grafana/alloy/internal/flow/componenttest" + "github.com/grafana/alloy/internal/util" "github.com/grafana/loki/pkg/logproto" "github.com/phayes/freeport" "github.com/prometheus/common/model" diff --git a/internal/component/faro/receiver/server.go b/internal/component/faro/receiver/server.go index 1b96fa1b4e..8207594700 100644 --- a/internal/component/faro/receiver/server.go +++ b/internal/component/faro/receiver/server.go @@ -8,7 +8,7 @@ import ( "github.com/go-kit/log" "github.com/gorilla/mux" - "github.com/grafana/agent/internal/flow/logging/level" + "github.com/grafana/alloy/internal/flow/logging/level" "github.com/grafana/dskit/instrument" "github.com/grafana/dskit/middleware" "github.com/prometheus/client_golang/prometheus" diff --git a/internal/component/faro/receiver/sourcemaps.go b/internal/component/faro/receiver/sourcemaps.go index 5dc1e7643f..4de618b983 100644 --- a/internal/component/faro/receiver/sourcemaps.go +++ b/internal/component/faro/receiver/sourcemaps.go @@ -16,9 +16,9 @@ import ( "github.com/go-kit/log" "github.com/go-sourcemap/sourcemap" - "github.com/grafana/agent/internal/component/faro/receiver/internal/payload" - "github.com/grafana/agent/internal/flow/logging/level" - "github.com/grafana/agent/internal/util/wildcard" + "github.com/grafana/alloy/internal/component/faro/receiver/internal/payload" + "github.com/grafana/alloy/internal/flow/logging/level" + "github.com/grafana/alloy/internal/util/wildcard" "github.com/prometheus/client_golang/prometheus" "github.com/vincent-petithory/dataurl" ) diff --git a/internal/component/faro/receiver/sourcemaps_test.go b/internal/component/faro/receiver/sourcemaps_test.go index 6ad1f7fcdd..87a7dbf4cc 100644 --- a/internal/component/faro/receiver/sourcemaps_test.go +++ b/internal/component/faro/receiver/sourcemaps_test.go @@ -10,8 +10,8 @@ import ( "path/filepath" "testing" - "github.com/grafana/agent/internal/component/faro/receiver/internal/payload" - "github.com/grafana/agent/internal/util" + "github.com/grafana/alloy/internal/component/faro/receiver/internal/payload" + "github.com/grafana/alloy/internal/util" "github.com/prometheus/client_golang/prometheus" "github.com/stretchr/testify/require" ) diff --git a/internal/component/local/file/file.go 
b/internal/component/local/file/file.go index eb7899a8d8..941d1c5140 100644 --- a/internal/component/local/file/file.go +++ b/internal/component/local/file/file.go @@ -10,10 +10,10 @@ import ( "github.com/prometheus/client_golang/prometheus" - "github.com/grafana/agent/internal/component" - "github.com/grafana/agent/internal/featuregate" - filedetector "github.com/grafana/agent/internal/filedetector" - "github.com/grafana/agent/internal/flow/logging/level" + "github.com/grafana/alloy/internal/component" + "github.com/grafana/alloy/internal/featuregate" + filedetector "github.com/grafana/alloy/internal/filedetector" + "github.com/grafana/alloy/internal/flow/logging/level" "github.com/grafana/alloy/syntax/alloytypes" ) diff --git a/internal/component/local/file/file_test.go b/internal/component/local/file/file_test.go index 8f6e304d41..3f119a46d5 100644 --- a/internal/component/local/file/file_test.go +++ b/internal/component/local/file/file_test.go @@ -8,9 +8,9 @@ import ( "testing" "time" - "github.com/grafana/agent/internal/component/local/file" - filedetector "github.com/grafana/agent/internal/filedetector" - "github.com/grafana/agent/internal/flow/componenttest" + "github.com/grafana/alloy/internal/component/local/file" + filedetector "github.com/grafana/alloy/internal/filedetector" + "github.com/grafana/alloy/internal/flow/componenttest" "github.com/grafana/alloy/syntax/alloytypes" "github.com/stretchr/testify/require" ) diff --git a/internal/component/local/file_match/file.go b/internal/component/local/file_match/file.go index 00f3ffad7d..c2421610fa 100644 --- a/internal/component/local/file_match/file.go +++ b/internal/component/local/file_match/file.go @@ -5,10 +5,10 @@ import ( "sync" "time" - "github.com/grafana/agent/internal/component" - "github.com/grafana/agent/internal/component/discovery" - "github.com/grafana/agent/internal/featuregate" - "github.com/grafana/agent/internal/flow/logging/level" + "github.com/grafana/alloy/internal/component" + "github.com/grafana/alloy/internal/component/discovery" + "github.com/grafana/alloy/internal/featuregate" + "github.com/grafana/alloy/internal/flow/logging/level" ) func init() { diff --git a/internal/component/local/file_match/file_test.go b/internal/component/local/file_match/file_test.go index 43795b6f6c..8a19074c32 100644 --- a/internal/component/local/file_match/file_test.go +++ b/internal/component/local/file_match/file_test.go @@ -10,12 +10,12 @@ import ( "testing" "time" - "github.com/grafana/agent/internal/component/discovery" + "github.com/grafana/alloy/internal/component/discovery" "context" - "github.com/grafana/agent/internal/component" - "github.com/grafana/agent/internal/util" + "github.com/grafana/alloy/internal/component" + "github.com/grafana/alloy/internal/util" "github.com/prometheus/client_golang/prometheus" "github.com/stretchr/testify/require" ) diff --git a/internal/component/local/file_match/watch.go b/internal/component/local/file_match/watch.go index 21a6778eb9..e66544478c 100644 --- a/internal/component/local/file_match/watch.go +++ b/internal/component/local/file_match/watch.go @@ -7,8 +7,8 @@ import ( "github.com/go-kit/log" "github.com/bmatcuk/doublestar" - "github.com/grafana/agent/internal/component/discovery" - "github.com/grafana/agent/internal/flow/logging/level" + "github.com/grafana/alloy/internal/component/discovery" + "github.com/grafana/alloy/internal/flow/logging/level" ) // watch handles a single discovery.target for file watching. 
diff --git a/internal/component/loki/echo/echo.go b/internal/component/loki/echo/echo.go index 9f0ca8d6a1..1596893c2c 100644 --- a/internal/component/loki/echo/echo.go +++ b/internal/component/loki/echo/echo.go @@ -4,10 +4,10 @@ import ( "context" "sync" - "github.com/grafana/agent/internal/component" - "github.com/grafana/agent/internal/component/common/loki" - "github.com/grafana/agent/internal/featuregate" - "github.com/grafana/agent/internal/flow/logging/level" + "github.com/grafana/alloy/internal/component" + "github.com/grafana/alloy/internal/component/common/loki" + "github.com/grafana/alloy/internal/featuregate" + "github.com/grafana/alloy/internal/flow/logging/level" ) func init() { diff --git a/internal/component/loki/process/process.go b/internal/component/loki/process/process.go index 4183c4b275..5fe4305841 100644 --- a/internal/component/loki/process/process.go +++ b/internal/component/loki/process/process.go @@ -8,10 +8,10 @@ import ( "reflect" "sync" - "github.com/grafana/agent/internal/component" - "github.com/grafana/agent/internal/component/common/loki" - "github.com/grafana/agent/internal/component/loki/process/stages" - "github.com/grafana/agent/internal/featuregate" + "github.com/grafana/alloy/internal/component" + "github.com/grafana/alloy/internal/component/common/loki" + "github.com/grafana/alloy/internal/component/loki/process/stages" + "github.com/grafana/alloy/internal/featuregate" ) // TODO(thampiotr): We should reconsider which parts of this component should be exported and which should diff --git a/internal/component/loki/process/process_test.go b/internal/component/loki/process/process_test.go index cb3e4b925f..57db4bcfde 100644 --- a/internal/component/loki/process/process_test.go +++ b/internal/component/loki/process/process_test.go @@ -8,13 +8,13 @@ import ( "testing" "time" - "github.com/grafana/agent/internal/component" - "github.com/grafana/agent/internal/component/common/loki" - "github.com/grafana/agent/internal/component/discovery" - "github.com/grafana/agent/internal/component/loki/process/stages" - lsf "github.com/grafana/agent/internal/component/loki/source/file" - "github.com/grafana/agent/internal/flow/componenttest" - "github.com/grafana/agent/internal/util" + "github.com/grafana/alloy/internal/component" + "github.com/grafana/alloy/internal/component/common/loki" + "github.com/grafana/alloy/internal/component/discovery" + "github.com/grafana/alloy/internal/component/loki/process/stages" + lsf "github.com/grafana/alloy/internal/component/loki/source/file" + "github.com/grafana/alloy/internal/flow/componenttest" + "github.com/grafana/alloy/internal/util" "github.com/grafana/alloy/syntax" "github.com/grafana/loki/pkg/logproto" "github.com/prometheus/client_golang/prometheus" diff --git a/internal/component/loki/process/stages/drop.go b/internal/component/loki/process/stages/drop.go index e1c3b44421..1cdc6a2f70 100644 --- a/internal/component/loki/process/stages/drop.go +++ b/internal/component/loki/process/stages/drop.go @@ -10,7 +10,7 @@ import ( "github.com/alecthomas/units" "github.com/go-kit/log" - "github.com/grafana/agent/internal/flow/logging/level" + "github.com/grafana/alloy/internal/flow/logging/level" "github.com/prometheus/client_golang/prometheus" ) diff --git a/internal/component/loki/process/stages/drop_test.go b/internal/component/loki/process/stages/drop_test.go index 820d597d0a..55e5c2a9f9 100644 --- a/internal/component/loki/process/stages/drop_test.go +++ b/internal/component/loki/process/stages/drop_test.go @@ -7,7 +7,7 @@ 
import ( "time" "github.com/alecthomas/units" - "github.com/grafana/agent/internal/util" + "github.com/grafana/alloy/internal/util" dskit "github.com/grafana/dskit/server" "github.com/prometheus/client_golang/prometheus" "github.com/prometheus/common/model" diff --git a/internal/component/loki/process/stages/eventlogmessage.go b/internal/component/loki/process/stages/eventlogmessage.go index 58cab36a5d..884238bbe8 100644 --- a/internal/component/loki/process/stages/eventlogmessage.go +++ b/internal/component/loki/process/stages/eventlogmessage.go @@ -5,7 +5,7 @@ import ( "strings" "github.com/go-kit/log" - "github.com/grafana/agent/internal/flow/logging/level" + "github.com/grafana/alloy/internal/flow/logging/level" "github.com/prometheus/common/model" ) diff --git a/internal/component/loki/process/stages/extensions.go b/internal/component/loki/process/stages/extensions.go index 7b0cccd110..a2734542a3 100644 --- a/internal/component/loki/process/stages/extensions.go +++ b/internal/component/loki/process/stages/extensions.go @@ -5,7 +5,7 @@ import ( "strings" "github.com/go-kit/log" - "github.com/grafana/agent/internal/flow/logging/level" + "github.com/grafana/alloy/internal/flow/logging/level" "github.com/grafana/alloy/syntax" "github.com/prometheus/client_golang/prometheus" "github.com/prometheus/common/model" diff --git a/internal/component/loki/process/stages/geoip.go b/internal/component/loki/process/stages/geoip.go index 8822b9f20f..c60ce79e40 100644 --- a/internal/component/loki/process/stages/geoip.go +++ b/internal/component/loki/process/stages/geoip.go @@ -7,7 +7,7 @@ import ( "reflect" "github.com/go-kit/log" - "github.com/grafana/agent/internal/flow/logging/level" + "github.com/grafana/alloy/internal/flow/logging/level" "github.com/jmespath/go-jmespath" "github.com/oschwald/geoip2-golang" "github.com/oschwald/maxminddb-golang" diff --git a/internal/component/loki/process/stages/json.go b/internal/component/loki/process/stages/json.go index 4ee263cbc4..c80535a1f4 100644 --- a/internal/component/loki/process/stages/json.go +++ b/internal/component/loki/process/stages/json.go @@ -6,7 +6,7 @@ import ( "reflect" "github.com/go-kit/log" - "github.com/grafana/agent/internal/flow/logging/level" + "github.com/grafana/alloy/internal/flow/logging/level" "github.com/jmespath/go-jmespath" json "github.com/json-iterator/go" ) diff --git a/internal/component/loki/process/stages/json_test.go b/internal/component/loki/process/stages/json_test.go index 558cea0b42..6e695cef68 100644 --- a/internal/component/loki/process/stages/json_test.go +++ b/internal/component/loki/process/stages/json_test.go @@ -10,7 +10,7 @@ import ( "github.com/prometheus/client_golang/prometheus" "github.com/stretchr/testify/assert" - "github.com/grafana/agent/internal/util" + "github.com/grafana/alloy/internal/util" "github.com/grafana/alloy/syntax" ) diff --git a/internal/component/loki/process/stages/labels.go b/internal/component/loki/process/stages/labels.go index 5bec9583ff..978faa506a 100644 --- a/internal/component/loki/process/stages/labels.go +++ b/internal/component/loki/process/stages/labels.go @@ -7,7 +7,7 @@ import ( "time" "github.com/go-kit/log" - "github.com/grafana/agent/internal/flow/logging/level" + "github.com/grafana/alloy/internal/flow/logging/level" "github.com/prometheus/common/model" ) diff --git a/internal/component/loki/process/stages/limit.go b/internal/component/loki/process/stages/limit.go index 51b0c3e825..3179ec099b 100644 --- a/internal/component/loki/process/stages/limit.go +++ 
b/internal/component/loki/process/stages/limit.go @@ -6,7 +6,7 @@ import ( "fmt" "github.com/go-kit/log" - "github.com/grafana/agent/internal/flow/logging/level" + "github.com/grafana/alloy/internal/flow/logging/level" "github.com/prometheus/client_golang/prometheus" "github.com/prometheus/common/model" "golang.org/x/time/rate" diff --git a/internal/component/loki/process/stages/logfmt.go b/internal/component/loki/process/stages/logfmt.go index 0cf006b265..aee22bfc7a 100644 --- a/internal/component/loki/process/stages/logfmt.go +++ b/internal/component/loki/process/stages/logfmt.go @@ -9,7 +9,7 @@ import ( "github.com/go-kit/log" "github.com/go-logfmt/logfmt" - "github.com/grafana/agent/internal/flow/logging/level" + "github.com/grafana/alloy/internal/flow/logging/level" "github.com/prometheus/common/model" ) diff --git a/internal/component/loki/process/stages/logfmt_test.go b/internal/component/loki/process/stages/logfmt_test.go index 961cc8ab44..ed620e73a9 100644 --- a/internal/component/loki/process/stages/logfmt_test.go +++ b/internal/component/loki/process/stages/logfmt_test.go @@ -7,7 +7,7 @@ import ( "github.com/prometheus/client_golang/prometheus" "github.com/stretchr/testify/assert" - "github.com/grafana/agent/internal/util" + "github.com/grafana/alloy/internal/util" util_log "github.com/grafana/loki/pkg/util/log" ) diff --git a/internal/component/loki/process/stages/match_test.go b/internal/component/loki/process/stages/match_test.go index c35c85a160..5e0f52600b 100644 --- a/internal/component/loki/process/stages/match_test.go +++ b/internal/component/loki/process/stages/match_test.go @@ -5,7 +5,7 @@ import ( "testing" "time" - "github.com/grafana/agent/internal/util" + "github.com/grafana/alloy/internal/util" "github.com/prometheus/client_golang/prometheus" "github.com/stretchr/testify/assert" ) diff --git a/internal/component/loki/process/stages/metric.go b/internal/component/loki/process/stages/metric.go index 8d0ca48785..92f63da512 100644 --- a/internal/component/loki/process/stages/metric.go +++ b/internal/component/loki/process/stages/metric.go @@ -9,8 +9,8 @@ import ( "time" "github.com/go-kit/log" - "github.com/grafana/agent/internal/component/loki/process/metric" - "github.com/grafana/agent/internal/flow/logging/level" + "github.com/grafana/alloy/internal/component/loki/process/metric" + "github.com/grafana/alloy/internal/flow/logging/level" "github.com/prometheus/client_golang/prometheus" "github.com/prometheus/common/model" ) diff --git a/internal/component/loki/process/stages/metric_test.go b/internal/component/loki/process/stages/metric_test.go index 559c5fc913..23f06f1ce0 100644 --- a/internal/component/loki/process/stages/metric_test.go +++ b/internal/component/loki/process/stages/metric_test.go @@ -7,7 +7,7 @@ import ( "time" "github.com/go-kit/log" - "github.com/grafana/agent/internal/component/loki/process/metric" + "github.com/grafana/alloy/internal/component/loki/process/metric" util_log "github.com/grafana/loki/pkg/util/log" "github.com/prometheus/client_golang/prometheus" "github.com/prometheus/client_golang/prometheus/testutil" diff --git a/internal/component/loki/process/stages/multiline.go b/internal/component/loki/process/stages/multiline.go index 0e4a7d234b..2b7b0af971 100644 --- a/internal/component/loki/process/stages/multiline.go +++ b/internal/component/loki/process/stages/multiline.go @@ -9,8 +9,8 @@ import ( "time" "github.com/go-kit/log" - "github.com/grafana/agent/internal/component/common/loki" - 
"github.com/grafana/agent/internal/flow/logging/level" + "github.com/grafana/alloy/internal/component/common/loki" + "github.com/grafana/alloy/internal/flow/logging/level" "github.com/grafana/loki/pkg/logproto" "github.com/prometheus/common/model" ) diff --git a/internal/component/loki/process/stages/multiline_test.go b/internal/component/loki/process/stages/multiline_test.go index 2f236e3a35..d85bf3479c 100644 --- a/internal/component/loki/process/stages/multiline_test.go +++ b/internal/component/loki/process/stages/multiline_test.go @@ -6,8 +6,8 @@ import ( "testing" "time" - "github.com/grafana/agent/internal/component/common/loki" - "github.com/grafana/agent/internal/util" + "github.com/grafana/alloy/internal/component/common/loki" + "github.com/grafana/alloy/internal/util" "github.com/grafana/loki/pkg/logproto" "github.com/prometheus/common/model" "github.com/stretchr/testify/require" diff --git a/internal/component/loki/process/stages/output.go b/internal/component/loki/process/stages/output.go index b82d3cd3fb..0a44f02f3d 100644 --- a/internal/component/loki/process/stages/output.go +++ b/internal/component/loki/process/stages/output.go @@ -6,7 +6,7 @@ import ( "time" "github.com/go-kit/log" - "github.com/grafana/agent/internal/flow/logging/level" + "github.com/grafana/alloy/internal/flow/logging/level" "github.com/prometheus/common/model" ) diff --git a/internal/component/loki/process/stages/output_test.go b/internal/component/loki/process/stages/output_test.go index b2e6fbb021..d5ba6076db 100644 --- a/internal/component/loki/process/stages/output_test.go +++ b/internal/component/loki/process/stages/output_test.go @@ -7,7 +7,7 @@ import ( "time" "github.com/go-kit/log" - "github.com/grafana/agent/internal/util" + "github.com/grafana/alloy/internal/util" "github.com/prometheus/client_golang/prometheus" "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" diff --git a/internal/component/loki/process/stages/pack.go b/internal/component/loki/process/stages/pack.go index 2aece44982..11d223a7cc 100644 --- a/internal/component/loki/process/stages/pack.go +++ b/internal/component/loki/process/stages/pack.go @@ -9,7 +9,7 @@ import ( "time" "github.com/go-kit/log" - "github.com/grafana/agent/internal/flow/logging/level" + "github.com/grafana/alloy/internal/flow/logging/level" "github.com/grafana/loki/pkg/logqlmodel" json "github.com/json-iterator/go" "github.com/prometheus/client_golang/prometheus" diff --git a/internal/component/loki/process/stages/pack_test.go b/internal/component/loki/process/stages/pack_test.go index 1bf443f8bd..43f6a172c5 100644 --- a/internal/component/loki/process/stages/pack_test.go +++ b/internal/component/loki/process/stages/pack_test.go @@ -4,8 +4,8 @@ import ( "testing" "time" - "github.com/grafana/agent/internal/component/common/loki" - "github.com/grafana/agent/internal/util" + "github.com/grafana/alloy/internal/component/common/loki" + "github.com/grafana/alloy/internal/util" "github.com/grafana/loki/pkg/logproto" "github.com/grafana/loki/pkg/logqlmodel" json "github.com/json-iterator/go" diff --git a/internal/component/loki/process/stages/pipeline.go b/internal/component/loki/process/stages/pipeline.go index 65885c9140..ab5b5ab1d5 100644 --- a/internal/component/loki/process/stages/pipeline.go +++ b/internal/component/loki/process/stages/pipeline.go @@ -6,7 +6,7 @@ import ( "sync" "github.com/go-kit/log" - "github.com/grafana/agent/internal/component/common/loki" + "github.com/grafana/alloy/internal/component/common/loki" 
"github.com/prometheus/client_golang/prometheus" "golang.org/x/time/rate" ) diff --git a/internal/component/loki/process/stages/pipeline_test.go b/internal/component/loki/process/stages/pipeline_test.go index 0fcf8917ff..8f50203d48 100644 --- a/internal/component/loki/process/stages/pipeline_test.go +++ b/internal/component/loki/process/stages/pipeline_test.go @@ -6,11 +6,11 @@ import ( "testing" "time" - "github.com/grafana/agent/internal/component/common/loki/client/fake" + "github.com/grafana/alloy/internal/component/common/loki/client/fake" "github.com/go-kit/log" - "github.com/grafana/agent/internal/component/common/loki" - "github.com/grafana/agent/internal/flow/logging/level" + "github.com/grafana/alloy/internal/component/common/loki" + "github.com/grafana/alloy/internal/flow/logging/level" "github.com/grafana/alloy/syntax" "github.com/grafana/loki/pkg/logproto" util_log "github.com/grafana/loki/pkg/util/log" diff --git a/internal/component/loki/process/stages/regex.go b/internal/component/loki/process/stages/regex.go index ef3f28abf6..cd1b754931 100644 --- a/internal/component/loki/process/stages/regex.go +++ b/internal/component/loki/process/stages/regex.go @@ -8,7 +8,7 @@ import ( "time" "github.com/go-kit/log" - "github.com/grafana/agent/internal/flow/logging/level" + "github.com/grafana/alloy/internal/flow/logging/level" "github.com/mitchellh/mapstructure" "github.com/prometheus/common/model" ) diff --git a/internal/component/loki/process/stages/regex_test.go b/internal/component/loki/process/stages/regex_test.go index f9928aa62a..d17ce2e7a9 100644 --- a/internal/component/loki/process/stages/regex_test.go +++ b/internal/component/loki/process/stages/regex_test.go @@ -8,7 +8,7 @@ import ( "time" "github.com/go-kit/log" - "github.com/grafana/agent/internal/util" + "github.com/grafana/alloy/internal/util" "github.com/prometheus/client_golang/prometheus" "github.com/prometheus/common/model" "github.com/stretchr/testify/assert" diff --git a/internal/component/loki/process/stages/replace.go b/internal/component/loki/process/stages/replace.go index 266b5a5d9e..35e63a7943 100644 --- a/internal/component/loki/process/stages/replace.go +++ b/internal/component/loki/process/stages/replace.go @@ -10,7 +10,7 @@ import ( "time" "github.com/go-kit/log" - "github.com/grafana/agent/internal/flow/logging/level" + "github.com/grafana/alloy/internal/flow/logging/level" "github.com/prometheus/common/model" ) diff --git a/internal/component/loki/process/stages/replace_test.go b/internal/component/loki/process/stages/replace_test.go index b03ccee7a4..3f940fbd70 100644 --- a/internal/component/loki/process/stages/replace_test.go +++ b/internal/component/loki/process/stages/replace_test.go @@ -6,7 +6,7 @@ import ( "testing" "time" - "github.com/grafana/agent/internal/util" + "github.com/grafana/alloy/internal/util" "github.com/prometheus/client_golang/prometheus" "github.com/stretchr/testify/assert" ) diff --git a/internal/component/loki/process/stages/stage.go b/internal/component/loki/process/stages/stage.go index a5657d570e..aad532e72a 100644 --- a/internal/component/loki/process/stages/stage.go +++ b/internal/component/loki/process/stages/stage.go @@ -7,7 +7,7 @@ import ( "time" "github.com/go-kit/log" - "github.com/grafana/agent/internal/component/common/loki" + "github.com/grafana/alloy/internal/component/common/loki" "github.com/prometheus/client_golang/prometheus" "github.com/prometheus/common/model" "gopkg.in/yaml.v2" diff --git a/internal/component/loki/process/stages/static_labels.go 
b/internal/component/loki/process/stages/static_labels.go index df8c5d244d..113d561111 100644 --- a/internal/component/loki/process/stages/static_labels.go +++ b/internal/component/loki/process/stages/static_labels.go @@ -7,7 +7,7 @@ import ( "time" "github.com/go-kit/log" - "github.com/grafana/agent/internal/flow/logging/level" + "github.com/grafana/alloy/internal/flow/logging/level" "github.com/prometheus/common/model" ) diff --git a/internal/component/loki/process/stages/template.go b/internal/component/loki/process/stages/template.go index cf8c7ab50d..342b2254c0 100644 --- a/internal/component/loki/process/stages/template.go +++ b/internal/component/loki/process/stages/template.go @@ -13,7 +13,7 @@ import ( "github.com/Masterminds/sprig/v3" "github.com/go-kit/log" - "github.com/grafana/agent/internal/flow/logging/level" + "github.com/grafana/alloy/internal/flow/logging/level" "github.com/prometheus/common/model" "golang.org/x/crypto/sha3" diff --git a/internal/component/loki/process/stages/tenant.go b/internal/component/loki/process/stages/tenant.go index a95cd2f1bc..4e93ab2ebb 100644 --- a/internal/component/loki/process/stages/tenant.go +++ b/internal/component/loki/process/stages/tenant.go @@ -6,7 +6,7 @@ import ( "time" "github.com/go-kit/log" - "github.com/grafana/agent/internal/flow/logging/level" + "github.com/grafana/alloy/internal/flow/logging/level" "github.com/prometheus/common/model" ) diff --git a/internal/component/loki/process/stages/timestamp.go b/internal/component/loki/process/stages/timestamp.go index e7f8d12e98..7474cf089a 100644 --- a/internal/component/loki/process/stages/timestamp.go +++ b/internal/component/loki/process/stages/timestamp.go @@ -7,7 +7,7 @@ import ( "time" "github.com/go-kit/log" - "github.com/grafana/agent/internal/flow/logging/level" + "github.com/grafana/alloy/internal/flow/logging/level" lru "github.com/hashicorp/golang-lru" "github.com/prometheus/common/model" diff --git a/internal/component/loki/process/stages/timestamp_test.go b/internal/component/loki/process/stages/timestamp_test.go index da95753cdf..7e7de1887d 100644 --- a/internal/component/loki/process/stages/timestamp_test.go +++ b/internal/component/loki/process/stages/timestamp_test.go @@ -8,7 +8,7 @@ import ( "time" "github.com/go-kit/log" - "github.com/grafana/agent/internal/util" + "github.com/grafana/alloy/internal/util" "github.com/prometheus/client_golang/prometheus" "github.com/prometheus/common/model" "github.com/stretchr/testify/assert" diff --git a/internal/component/loki/process/stages/util_test.go b/internal/component/loki/process/stages/util_test.go index a2d6313b6a..3b086ef6a6 100644 --- a/internal/component/loki/process/stages/util_test.go +++ b/internal/component/loki/process/stages/util_test.go @@ -8,7 +8,7 @@ import ( "github.com/prometheus/common/model" "github.com/stretchr/testify/assert" - "github.com/grafana/agent/internal/component/common/loki" + "github.com/grafana/alloy/internal/component/common/loki" "github.com/grafana/loki/pkg/logproto" ) diff --git a/internal/component/loki/relabel/relabel.go b/internal/component/loki/relabel/relabel.go index 1da52e9d77..d094ddffe6 100644 --- a/internal/component/loki/relabel/relabel.go +++ b/internal/component/loki/relabel/relabel.go @@ -5,11 +5,11 @@ import ( "reflect" "sync" - "github.com/grafana/agent/internal/component" - "github.com/grafana/agent/internal/component/common/loki" - flow_relabel "github.com/grafana/agent/internal/component/common/relabel" - "github.com/grafana/agent/internal/featuregate" - 
"github.com/grafana/agent/internal/flow/logging/level" + "github.com/grafana/alloy/internal/component" + "github.com/grafana/alloy/internal/component/common/loki" + flow_relabel "github.com/grafana/alloy/internal/component/common/relabel" + "github.com/grafana/alloy/internal/featuregate" + "github.com/grafana/alloy/internal/flow/logging/level" lru "github.com/hashicorp/golang-lru" "github.com/prometheus/common/model" "github.com/prometheus/prometheus/model/labels" diff --git a/internal/component/loki/relabel/relabel_test.go b/internal/component/loki/relabel/relabel_test.go index bb123fc2a6..51f7837650 100644 --- a/internal/component/loki/relabel/relabel_test.go +++ b/internal/component/loki/relabel/relabel_test.go @@ -7,13 +7,13 @@ import ( "testing" "time" - "github.com/grafana/agent/internal/component" - "github.com/grafana/agent/internal/component/common/loki" - flow_relabel "github.com/grafana/agent/internal/component/common/relabel" - "github.com/grafana/agent/internal/component/discovery" - lsf "github.com/grafana/agent/internal/component/loki/source/file" - "github.com/grafana/agent/internal/flow/componenttest" - "github.com/grafana/agent/internal/util" + "github.com/grafana/alloy/internal/component" + "github.com/grafana/alloy/internal/component/common/loki" + flow_relabel "github.com/grafana/alloy/internal/component/common/relabel" + "github.com/grafana/alloy/internal/component/discovery" + lsf "github.com/grafana/alloy/internal/component/loki/source/file" + "github.com/grafana/alloy/internal/flow/componenttest" + "github.com/grafana/alloy/internal/util" "github.com/grafana/alloy/syntax" "github.com/grafana/loki/pkg/logproto" "github.com/prometheus/client_golang/prometheus" diff --git a/internal/component/loki/rules/kubernetes/events.go b/internal/component/loki/rules/kubernetes/events.go index cde73f79cd..78d184339d 100644 --- a/internal/component/loki/rules/kubernetes/events.go +++ b/internal/component/loki/rules/kubernetes/events.go @@ -6,8 +6,8 @@ import ( "regexp" "time" - "github.com/grafana/agent/internal/component/common/kubernetes" - "github.com/grafana/agent/internal/flow/logging/level" + "github.com/grafana/alloy/internal/component/common/kubernetes" + "github.com/grafana/alloy/internal/flow/logging/level" "github.com/hashicorp/go-multierror" promv1 "github.com/prometheus-operator/prometheus-operator/pkg/apis/monitoring/v1" "github.com/prometheus/prometheus/model/rulefmt" diff --git a/internal/component/loki/rules/kubernetes/events_test.go b/internal/component/loki/rules/kubernetes/events_test.go index 8c0b5f928e..62f1b4f444 100644 --- a/internal/component/loki/rules/kubernetes/events_test.go +++ b/internal/component/loki/rules/kubernetes/events_test.go @@ -8,8 +8,8 @@ import ( "time" "github.com/go-kit/log" - "github.com/grafana/agent/internal/component/common/kubernetes" - lokiClient "github.com/grafana/agent/internal/loki/client" + "github.com/grafana/alloy/internal/component/common/kubernetes" + lokiClient "github.com/grafana/alloy/internal/loki/client" v1 "github.com/prometheus-operator/prometheus-operator/pkg/apis/monitoring/v1" promListers "github.com/prometheus-operator/prometheus-operator/pkg/client/listers/monitoring/v1" "github.com/prometheus/prometheus/model/rulefmt" diff --git a/internal/component/loki/rules/kubernetes/health.go b/internal/component/loki/rules/kubernetes/health.go index 101bfd1d0a..0b29d899f2 100644 --- a/internal/component/loki/rules/kubernetes/health.go +++ b/internal/component/loki/rules/kubernetes/health.go @@ -3,7 +3,7 @@ package rules 
import ( "time" - "github.com/grafana/agent/internal/component" + "github.com/grafana/alloy/internal/component" ) func (c *Component) reportUnhealthy(err error) { diff --git a/internal/component/loki/rules/kubernetes/rules.go b/internal/component/loki/rules/kubernetes/rules.go index 081b1f283c..c38da8d005 100644 --- a/internal/component/loki/rules/kubernetes/rules.go +++ b/internal/component/loki/rules/kubernetes/rules.go @@ -7,11 +7,11 @@ import ( "time" "github.com/go-kit/log" - "github.com/grafana/agent/internal/component" - commonK8s "github.com/grafana/agent/internal/component/common/kubernetes" - "github.com/grafana/agent/internal/featuregate" - "github.com/grafana/agent/internal/flow/logging/level" - lokiClient "github.com/grafana/agent/internal/loki/client" + "github.com/grafana/alloy/internal/component" + commonK8s "github.com/grafana/alloy/internal/component/common/kubernetes" + "github.com/grafana/alloy/internal/featuregate" + "github.com/grafana/alloy/internal/flow/logging/level" + lokiClient "github.com/grafana/alloy/internal/loki/client" "github.com/grafana/dskit/backoff" "github.com/grafana/dskit/instrument" promListers "github.com/prometheus-operator/prometheus-operator/pkg/client/listers/monitoring/v1" diff --git a/internal/component/loki/rules/kubernetes/types.go b/internal/component/loki/rules/kubernetes/types.go index 1cb84f96ea..89cdeb90cc 100644 --- a/internal/component/loki/rules/kubernetes/types.go +++ b/internal/component/loki/rules/kubernetes/types.go @@ -4,8 +4,8 @@ import ( "fmt" "time" - "github.com/grafana/agent/internal/component/common/config" - "github.com/grafana/agent/internal/component/common/kubernetes" + "github.com/grafana/alloy/internal/component/common/config" + "github.com/grafana/alloy/internal/component/common/kubernetes" ) type Arguments struct { diff --git a/internal/component/loki/source/api/api.go b/internal/component/loki/source/api/api.go index cf3002ab65..244bc4dfaf 100644 --- a/internal/component/loki/source/api/api.go +++ b/internal/component/loki/source/api/api.go @@ -6,13 +6,13 @@ import ( "reflect" "sync" - "github.com/grafana/agent/internal/component" - "github.com/grafana/agent/internal/component/common/loki" - fnet "github.com/grafana/agent/internal/component/common/net" - "github.com/grafana/agent/internal/component/common/relabel" - "github.com/grafana/agent/internal/component/loki/source/api/internal/lokipush" - "github.com/grafana/agent/internal/featuregate" - "github.com/grafana/agent/internal/util" + "github.com/grafana/alloy/internal/component" + "github.com/grafana/alloy/internal/component/common/loki" + fnet "github.com/grafana/alloy/internal/component/common/net" + "github.com/grafana/alloy/internal/component/common/relabel" + "github.com/grafana/alloy/internal/component/loki/source/api/internal/lokipush" + "github.com/grafana/alloy/internal/featuregate" + "github.com/grafana/alloy/internal/util" "github.com/prometheus/client_golang/prometheus" "github.com/prometheus/common/model" ) diff --git a/internal/component/loki/source/api/api_test.go b/internal/component/loki/source/api/api_test.go index df5a9785b6..bc4263d045 100644 --- a/internal/component/loki/source/api/api_test.go +++ b/internal/component/loki/source/api/api_test.go @@ -9,13 +9,13 @@ import ( "github.com/phayes/freeport" - "github.com/grafana/agent/internal/component" - "github.com/grafana/agent/internal/component/common/loki" - "github.com/grafana/agent/internal/component/common/loki/client" - 
"github.com/grafana/agent/internal/component/common/loki/client/fake" - "github.com/grafana/agent/internal/component/common/net" - "github.com/grafana/agent/internal/component/common/relabel" - "github.com/grafana/agent/internal/util" + "github.com/grafana/alloy/internal/component" + "github.com/grafana/alloy/internal/component/common/loki" + "github.com/grafana/alloy/internal/component/common/loki/client" + "github.com/grafana/alloy/internal/component/common/loki/client/fake" + "github.com/grafana/alloy/internal/component/common/net" + "github.com/grafana/alloy/internal/component/common/relabel" + "github.com/grafana/alloy/internal/util" "github.com/grafana/dskit/flagext" "github.com/grafana/loki/pkg/logproto" "github.com/grafana/regexp" diff --git a/internal/component/loki/source/api/internal/lokipush/push_api_server.go b/internal/component/loki/source/api/internal/lokipush/push_api_server.go index 1842522dfa..33f06fc1ef 100644 --- a/internal/component/loki/source/api/internal/lokipush/push_api_server.go +++ b/internal/component/loki/source/api/internal/lokipush/push_api_server.go @@ -11,10 +11,10 @@ import ( "github.com/go-kit/log" "github.com/gorilla/mux" - "github.com/grafana/agent/internal/component/common/loki" - fnet "github.com/grafana/agent/internal/component/common/net" - frelabel "github.com/grafana/agent/internal/component/common/relabel" - "github.com/grafana/agent/internal/flow/logging/level" + "github.com/grafana/alloy/internal/component/common/loki" + fnet "github.com/grafana/alloy/internal/component/common/net" + frelabel "github.com/grafana/alloy/internal/component/common/relabel" + "github.com/grafana/alloy/internal/flow/logging/level" "github.com/grafana/dskit/tenant" "github.com/grafana/loki/pkg/loghttp/push" "github.com/grafana/loki/pkg/logproto" diff --git a/internal/component/loki/source/api/internal/lokipush/push_api_server_test.go b/internal/component/loki/source/api/internal/lokipush/push_api_server_test.go index 5c11ffffee..e58184ff7a 100644 --- a/internal/component/loki/source/api/internal/lokipush/push_api_server_test.go +++ b/internal/component/loki/source/api/internal/lokipush/push_api_server_test.go @@ -16,11 +16,11 @@ import ( "time" "github.com/go-kit/log" - "github.com/grafana/agent/internal/component/common/loki" - "github.com/grafana/agent/internal/component/common/loki/client" - "github.com/grafana/agent/internal/component/common/loki/client/fake" - fnet "github.com/grafana/agent/internal/component/common/net" - frelabel "github.com/grafana/agent/internal/component/common/relabel" + "github.com/grafana/alloy/internal/component/common/loki" + "github.com/grafana/alloy/internal/component/common/loki/client" + "github.com/grafana/alloy/internal/component/common/loki/client/fake" + fnet "github.com/grafana/alloy/internal/component/common/net" + frelabel "github.com/grafana/alloy/internal/component/common/relabel" "github.com/grafana/alloy/syntax" "github.com/grafana/dskit/flagext" "github.com/grafana/loki/pkg/logproto" diff --git a/internal/component/loki/source/aws_firehose/component.go b/internal/component/loki/source/aws_firehose/component.go index 7088056e2e..274b00b100 100644 --- a/internal/component/loki/source/aws_firehose/component.go +++ b/internal/component/loki/source/aws_firehose/component.go @@ -8,17 +8,17 @@ import ( "github.com/go-kit/log" "github.com/gorilla/mux" - "github.com/grafana/agent/internal/featuregate" + "github.com/grafana/alloy/internal/featuregate" "github.com/grafana/alloy/syntax/alloytypes" 
"github.com/prometheus/client_golang/prometheus" "github.com/prometheus/prometheus/model/relabel" - "github.com/grafana/agent/internal/component" - "github.com/grafana/agent/internal/component/common/loki" - fnet "github.com/grafana/agent/internal/component/common/net" - flow_relabel "github.com/grafana/agent/internal/component/common/relabel" - "github.com/grafana/agent/internal/component/loki/source/aws_firehose/internal" - "github.com/grafana/agent/internal/util" + "github.com/grafana/alloy/internal/component" + "github.com/grafana/alloy/internal/component/common/loki" + fnet "github.com/grafana/alloy/internal/component/common/net" + flow_relabel "github.com/grafana/alloy/internal/component/common/relabel" + "github.com/grafana/alloy/internal/component/loki/source/aws_firehose/internal" + "github.com/grafana/alloy/internal/util" ) func init() { diff --git a/internal/component/loki/source/aws_firehose/component_test.go b/internal/component/loki/source/aws_firehose/component_test.go index 164f4d5b45..05fa1ad0e8 100644 --- a/internal/component/loki/source/aws_firehose/component_test.go +++ b/internal/component/loki/source/aws_firehose/component_test.go @@ -15,11 +15,11 @@ import ( "github.com/prometheus/common/model" "github.com/stretchr/testify/require" - "github.com/grafana/agent/internal/component" - "github.com/grafana/agent/internal/component/common/loki" - fnet "github.com/grafana/agent/internal/component/common/net" - flow_relabel "github.com/grafana/agent/internal/component/common/relabel" - "github.com/grafana/agent/internal/util" + "github.com/grafana/alloy/internal/component" + "github.com/grafana/alloy/internal/component/common/loki" + fnet "github.com/grafana/alloy/internal/component/common/net" + flow_relabel "github.com/grafana/alloy/internal/component/common/relabel" + "github.com/grafana/alloy/internal/util" ) const singleRecordRequest = `{"requestId":"a1af4300-6c09-4916-ba8f-12f336176246","timestamp":1684422829730,"records":[{"data":"eyJDSEFOR0UiOi0wLjIzLCJQUklDRSI6NC44LCJUSUNLRVJfU1lNQk9MIjoiTkdDIiwiU0VDVE9SIjoiSEVBTFRIQ0FSRSJ9"}]}` diff --git a/internal/component/loki/source/aws_firehose/internal/handler.go b/internal/component/loki/source/aws_firehose/internal/handler.go index 38c98631c9..1a755065b0 100644 --- a/internal/component/loki/source/aws_firehose/internal/handler.go +++ b/internal/component/loki/source/aws_firehose/internal/handler.go @@ -15,14 +15,14 @@ import ( "time" "github.com/go-kit/log" - "github.com/grafana/agent/internal/flow/logging/level" + "github.com/grafana/alloy/internal/flow/logging/level" "github.com/grafana/loki/pkg/logproto" "github.com/prometheus/common/model" "github.com/prometheus/prometheus/model/labels" "github.com/prometheus/prometheus/model/relabel" - "github.com/grafana/agent/internal/component/common/loki" - lokiClient "github.com/grafana/agent/internal/component/common/loki/client" + "github.com/grafana/alloy/internal/component/common/loki" + lokiClient "github.com/grafana/alloy/internal/component/common/loki/client" ) const ( diff --git a/internal/component/loki/source/aws_firehose/internal/handler_test.go b/internal/component/loki/source/aws_firehose/internal/handler_test.go index 54848c9660..0f5a79a87f 100644 --- a/internal/component/loki/source/aws_firehose/internal/handler_test.go +++ b/internal/component/loki/source/aws_firehose/internal/handler_test.go @@ -22,7 +22,7 @@ import ( "github.com/prometheus/prometheus/model/relabel" "github.com/stretchr/testify/require" - "github.com/grafana/agent/internal/component/common/loki" + 
"github.com/grafana/alloy/internal/component/common/loki" ) const ( diff --git a/internal/component/loki/source/azure_event_hubs/azure_event_hubs.go b/internal/component/loki/source/azure_event_hubs/azure_event_hubs.go index fe1c9b451e..b96486aa55 100644 --- a/internal/component/loki/source/azure_event_hubs/azure_event_hubs.go +++ b/internal/component/loki/source/azure_event_hubs/azure_event_hubs.go @@ -7,13 +7,13 @@ import ( "sync" "github.com/IBM/sarama" - "github.com/grafana/agent/internal/component" - "github.com/grafana/agent/internal/component/common/loki" - flow_relabel "github.com/grafana/agent/internal/component/common/relabel" - "github.com/grafana/agent/internal/component/loki/source/azure_event_hubs/internal/parser" - kt "github.com/grafana/agent/internal/component/loki/source/internal/kafkatarget" - "github.com/grafana/agent/internal/featuregate" - "github.com/grafana/agent/internal/flow/logging/level" + "github.com/grafana/alloy/internal/component" + "github.com/grafana/alloy/internal/component/common/loki" + flow_relabel "github.com/grafana/alloy/internal/component/common/relabel" + "github.com/grafana/alloy/internal/component/loki/source/azure_event_hubs/internal/parser" + kt "github.com/grafana/alloy/internal/component/loki/source/internal/kafkatarget" + "github.com/grafana/alloy/internal/featuregate" + "github.com/grafana/alloy/internal/flow/logging/level" "github.com/grafana/dskit/flagext" "github.com/prometheus/common/model" diff --git a/internal/component/loki/source/azure_event_hubs/internal/parser/parser.go b/internal/component/loki/source/azure_event_hubs/internal/parser/parser.go index 227fad601a..214255a8a3 100644 --- a/internal/component/loki/source/azure_event_hubs/internal/parser/parser.go +++ b/internal/component/loki/source/azure_event_hubs/internal/parser/parser.go @@ -12,7 +12,7 @@ import ( "time" "github.com/IBM/sarama" - "github.com/grafana/agent/internal/component/common/loki" + "github.com/grafana/alloy/internal/component/common/loki" "github.com/grafana/loki/pkg/logproto" "github.com/prometheus/common/model" "github.com/prometheus/prometheus/model/labels" diff --git a/internal/component/loki/source/cloudflare/cloudflare.go b/internal/component/loki/source/cloudflare/cloudflare.go index 2fcdc62ff0..3f8e514799 100644 --- a/internal/component/loki/source/cloudflare/cloudflare.go +++ b/internal/component/loki/source/cloudflare/cloudflare.go @@ -13,12 +13,12 @@ import ( "sync" "time" - "github.com/grafana/agent/internal/component" - "github.com/grafana/agent/internal/component/common/loki" - "github.com/grafana/agent/internal/component/common/loki/positions" - cft "github.com/grafana/agent/internal/component/loki/source/cloudflare/internal/cloudflaretarget" - "github.com/grafana/agent/internal/featuregate" - "github.com/grafana/agent/internal/flow/logging/level" + "github.com/grafana/alloy/internal/component" + "github.com/grafana/alloy/internal/component/common/loki" + "github.com/grafana/alloy/internal/component/common/loki/positions" + cft "github.com/grafana/alloy/internal/component/loki/source/cloudflare/internal/cloudflaretarget" + "github.com/grafana/alloy/internal/featuregate" + "github.com/grafana/alloy/internal/flow/logging/level" "github.com/grafana/alloy/syntax/alloytypes" "github.com/prometheus/common/model" ) diff --git a/internal/component/loki/source/cloudflare/internal/cloudflaretarget/target.go b/internal/component/loki/source/cloudflare/internal/cloudflaretarget/target.go index dfe0d9a181..58a7aaf24f 100644 --- 
a/internal/component/loki/source/cloudflare/internal/cloudflaretarget/target.go +++ b/internal/component/loki/source/cloudflare/internal/cloudflaretarget/target.go @@ -13,9 +13,9 @@ import ( "time" "github.com/go-kit/log" - "github.com/grafana/agent/internal/component/common/loki" - "github.com/grafana/agent/internal/component/common/loki/positions" - "github.com/grafana/agent/internal/flow/logging/level" + "github.com/grafana/alloy/internal/component/common/loki" + "github.com/grafana/alloy/internal/component/common/loki/positions" + "github.com/grafana/alloy/internal/flow/logging/level" "github.com/grafana/cloudflare-go" "github.com/grafana/dskit/backoff" "github.com/grafana/dskit/concurrency" diff --git a/internal/component/loki/source/cloudflare/internal/cloudflaretarget/target_test.go b/internal/component/loki/source/cloudflare/internal/cloudflaretarget/target_test.go index 5a0a54f2c3..58f7f56444 100644 --- a/internal/component/loki/source/cloudflare/internal/cloudflaretarget/target_test.go +++ b/internal/component/loki/source/cloudflare/internal/cloudflaretarget/target_test.go @@ -13,10 +13,10 @@ import ( "testing" "time" - "github.com/grafana/agent/internal/component/common/loki/client/fake" + "github.com/grafana/alloy/internal/component/common/loki/client/fake" "github.com/go-kit/log" - "github.com/grafana/agent/internal/component/common/loki/positions" + "github.com/grafana/alloy/internal/component/common/loki/positions" "github.com/prometheus/client_golang/prometheus" "github.com/prometheus/common/model" "github.com/stretchr/testify/assert" diff --git a/internal/component/loki/source/docker/docker.go b/internal/component/loki/source/docker/docker.go index 809f8e1219..453e1667bb 100644 --- a/internal/component/loki/source/docker/docker.go +++ b/internal/component/loki/source/docker/docker.go @@ -15,16 +15,16 @@ import ( "github.com/docker/docker/client" "github.com/go-kit/log" - "github.com/grafana/agent/internal/component" - types "github.com/grafana/agent/internal/component/common/config" - "github.com/grafana/agent/internal/component/common/loki" - "github.com/grafana/agent/internal/component/common/loki/positions" - flow_relabel "github.com/grafana/agent/internal/component/common/relabel" - "github.com/grafana/agent/internal/component/discovery" - dt "github.com/grafana/agent/internal/component/loki/source/docker/internal/dockertarget" - "github.com/grafana/agent/internal/featuregate" - "github.com/grafana/agent/internal/flow/logging/level" - "github.com/grafana/agent/internal/useragent" + "github.com/grafana/alloy/internal/component" + types "github.com/grafana/alloy/internal/component/common/config" + "github.com/grafana/alloy/internal/component/common/loki" + "github.com/grafana/alloy/internal/component/common/loki/positions" + flow_relabel "github.com/grafana/alloy/internal/component/common/relabel" + "github.com/grafana/alloy/internal/component/discovery" + dt "github.com/grafana/alloy/internal/component/loki/source/docker/internal/dockertarget" + "github.com/grafana/alloy/internal/featuregate" + "github.com/grafana/alloy/internal/flow/logging/level" + "github.com/grafana/alloy/internal/useragent" "github.com/prometheus/common/config" "github.com/prometheus/common/model" "github.com/prometheus/prometheus/model/relabel" diff --git a/internal/component/loki/source/docker/docker_test.go b/internal/component/loki/source/docker/docker_test.go index f83db37cfa..9e529764cf 100644 --- a/internal/component/loki/source/docker/docker_test.go +++ 
b/internal/component/loki/source/docker/docker_test.go @@ -7,9 +7,9 @@ import ( "testing" "time" - "github.com/grafana/agent/internal/component" - "github.com/grafana/agent/internal/flow/componenttest" - "github.com/grafana/agent/internal/util" + "github.com/grafana/alloy/internal/component" + "github.com/grafana/alloy/internal/flow/componenttest" + "github.com/grafana/alloy/internal/util" "github.com/grafana/alloy/syntax" "github.com/prometheus/client_golang/prometheus" "github.com/stretchr/testify/require" diff --git a/internal/component/loki/source/docker/internal/dockertarget/target.go b/internal/component/loki/source/docker/internal/dockertarget/target.go index 5e118fab98..7960484113 100644 --- a/internal/component/loki/source/docker/internal/dockertarget/target.go +++ b/internal/component/loki/source/docker/internal/dockertarget/target.go @@ -18,9 +18,9 @@ import ( "github.com/docker/docker/client" "github.com/docker/docker/pkg/stdcopy" "github.com/go-kit/log" - "github.com/grafana/agent/internal/component/common/loki" - "github.com/grafana/agent/internal/component/common/loki/positions" - "github.com/grafana/agent/internal/flow/logging/level" + "github.com/grafana/alloy/internal/component/common/loki" + "github.com/grafana/alloy/internal/component/common/loki/positions" + "github.com/grafana/alloy/internal/flow/logging/level" "github.com/grafana/loki/pkg/logproto" "github.com/prometheus/common/model" "github.com/prometheus/prometheus/model/labels" diff --git a/internal/component/loki/source/docker/internal/dockertarget/target_test.go b/internal/component/loki/source/docker/internal/dockertarget/target_test.go index e32ecb1010..1997b90d93 100644 --- a/internal/component/loki/source/docker/internal/dockertarget/target_test.go +++ b/internal/component/loki/source/docker/internal/dockertarget/target_test.go @@ -13,13 +13,13 @@ import ( "testing" "time" - "github.com/grafana/agent/internal/component/common/loki/client/fake" + "github.com/grafana/alloy/internal/component/common/loki/client/fake" "github.com/docker/docker/api/types" "github.com/docker/docker/api/types/container" "github.com/docker/docker/client" "github.com/go-kit/log" - "github.com/grafana/agent/internal/component/common/loki/positions" + "github.com/grafana/alloy/internal/component/common/loki/positions" "github.com/prometheus/client_golang/prometheus" "github.com/prometheus/common/model" "github.com/prometheus/prometheus/model/relabel" diff --git a/internal/component/loki/source/docker/runner.go b/internal/component/loki/source/docker/runner.go index 61fb9d6407..360df33a18 100644 --- a/internal/component/loki/source/docker/runner.go +++ b/internal/component/loki/source/docker/runner.go @@ -9,11 +9,11 @@ import ( "github.com/docker/docker/api/types/container" "github.com/docker/docker/client" "github.com/go-kit/log" - "github.com/grafana/agent/internal/component/common/loki" - "github.com/grafana/agent/internal/component/common/loki/positions" - dt "github.com/grafana/agent/internal/component/loki/source/docker/internal/dockertarget" - "github.com/grafana/agent/internal/flow/logging/level" - "github.com/grafana/agent/internal/runner" + "github.com/grafana/alloy/internal/component/common/loki" + "github.com/grafana/alloy/internal/component/common/loki/positions" + dt "github.com/grafana/alloy/internal/component/loki/source/docker/internal/dockertarget" + "github.com/grafana/alloy/internal/flow/logging/level" + "github.com/grafana/alloy/internal/runner" ) // A manager manages a set of running tailers. 
diff --git a/internal/component/loki/source/file/decompresser.go b/internal/component/loki/source/file/decompresser.go index 85696f3676..6336a388c3 100644 --- a/internal/component/loki/source/file/decompresser.go +++ b/internal/component/loki/source/file/decompresser.go @@ -19,9 +19,9 @@ import ( "unsafe" "github.com/go-kit/log" - "github.com/grafana/agent/internal/component/common/loki" - "github.com/grafana/agent/internal/component/common/loki/positions" - "github.com/grafana/agent/internal/flow/logging/level" + "github.com/grafana/alloy/internal/component/common/loki" + "github.com/grafana/alloy/internal/component/common/loki/positions" + "github.com/grafana/alloy/internal/flow/logging/level" "github.com/grafana/loki/pkg/logproto" "github.com/prometheus/common/model" "go.uber.org/atomic" diff --git a/internal/component/loki/source/file/decompresser_test.go b/internal/component/loki/source/file/decompresser_test.go index d3ecc7d872..204fa55a0e 100644 --- a/internal/component/loki/source/file/decompresser_test.go +++ b/internal/component/loki/source/file/decompresser_test.go @@ -9,10 +9,10 @@ import ( "testing" "time" - "github.com/grafana/agent/internal/component/common/loki/client/fake" + "github.com/grafana/alloy/internal/component/common/loki/client/fake" "github.com/go-kit/log" - "github.com/grafana/agent/internal/component/common/loki" + "github.com/grafana/alloy/internal/component/common/loki" "github.com/prometheus/client_golang/prometheus" "github.com/stretchr/testify/require" "go.uber.org/atomic" diff --git a/internal/component/loki/source/file/file.go b/internal/component/loki/source/file/file.go index 3c418d9981..7bdb04048c 100644 --- a/internal/component/loki/source/file/file.go +++ b/internal/component/loki/source/file/file.go @@ -9,12 +9,12 @@ import ( "sync" "time" - "github.com/grafana/agent/internal/component" - "github.com/grafana/agent/internal/component/common/loki" - "github.com/grafana/agent/internal/component/common/loki/positions" - "github.com/grafana/agent/internal/component/discovery" - "github.com/grafana/agent/internal/featuregate" - "github.com/grafana/agent/internal/flow/logging/level" + "github.com/grafana/alloy/internal/component" + "github.com/grafana/alloy/internal/component/common/loki" + "github.com/grafana/alloy/internal/component/common/loki/positions" + "github.com/grafana/alloy/internal/component/discovery" + "github.com/grafana/alloy/internal/featuregate" + "github.com/grafana/alloy/internal/flow/logging/level" "github.com/grafana/tail/watch" "github.com/prometheus/common/model" ) diff --git a/internal/component/loki/source/file/file_test.go b/internal/component/loki/source/file/file_test.go index d7ca89b933..c1aa90912f 100644 --- a/internal/component/loki/source/file/file_test.go +++ b/internal/component/loki/source/file/file_test.go @@ -11,11 +11,11 @@ import ( "testing" "time" - "github.com/grafana/agent/internal/component" - "github.com/grafana/agent/internal/component/common/loki" - "github.com/grafana/agent/internal/component/discovery" - "github.com/grafana/agent/internal/flow/componenttest" - "github.com/grafana/agent/internal/util" + "github.com/grafana/alloy/internal/component" + "github.com/grafana/alloy/internal/component/common/loki" + "github.com/grafana/alloy/internal/component/discovery" + "github.com/grafana/alloy/internal/flow/componenttest" + "github.com/grafana/alloy/internal/util" "github.com/prometheus/client_golang/prometheus" "github.com/prometheus/common/model" "github.com/stretchr/testify/require" diff --git 
a/internal/component/loki/source/file/tailer.go b/internal/component/loki/source/file/tailer.go index 94de433ae6..a3d8af4f66 100644 --- a/internal/component/loki/source/file/tailer.go +++ b/internal/component/loki/source/file/tailer.go @@ -12,9 +12,9 @@ import ( "time" "github.com/go-kit/log" - "github.com/grafana/agent/internal/component/common/loki" - "github.com/grafana/agent/internal/component/common/loki/positions" - "github.com/grafana/agent/internal/flow/logging/level" + "github.com/grafana/alloy/internal/component/common/loki" + "github.com/grafana/alloy/internal/component/common/loki/positions" + "github.com/grafana/alloy/internal/flow/logging/level" "github.com/grafana/loki/pkg/logproto" "github.com/grafana/loki/pkg/util" "github.com/grafana/tail" diff --git a/internal/component/loki/source/gcplog/gcplog.go b/internal/component/loki/source/gcplog/gcplog.go index f7ebd670ad..3f25c5c8c0 100644 --- a/internal/component/loki/source/gcplog/gcplog.go +++ b/internal/component/loki/source/gcplog/gcplog.go @@ -6,17 +6,17 @@ import ( "strings" "sync" - "github.com/grafana/agent/internal/featuregate" - "github.com/grafana/agent/internal/flow/logging/level" + "github.com/grafana/alloy/internal/featuregate" + "github.com/grafana/alloy/internal/flow/logging/level" "github.com/prometheus/client_golang/prometheus" "github.com/prometheus/prometheus/model/relabel" - "github.com/grafana/agent/internal/component" - "github.com/grafana/agent/internal/component/common/loki" - flow_relabel "github.com/grafana/agent/internal/component/common/relabel" - "github.com/grafana/agent/internal/component/loki/source/gcplog/gcptypes" - gt "github.com/grafana/agent/internal/component/loki/source/gcplog/internal/gcplogtarget" - "github.com/grafana/agent/internal/util" + "github.com/grafana/alloy/internal/component" + "github.com/grafana/alloy/internal/component/common/loki" + flow_relabel "github.com/grafana/alloy/internal/component/common/relabel" + "github.com/grafana/alloy/internal/component/loki/source/gcplog/gcptypes" + gt "github.com/grafana/alloy/internal/component/loki/source/gcplog/internal/gcplogtarget" + "github.com/grafana/alloy/internal/util" ) func init() { diff --git a/internal/component/loki/source/gcplog/gcplog_test.go b/internal/component/loki/source/gcplog/gcplog_test.go index 1d54519369..12fa8c5f1a 100644 --- a/internal/component/loki/source/gcplog/gcplog_test.go +++ b/internal/component/loki/source/gcplog/gcplog_test.go @@ -14,12 +14,12 @@ import ( "github.com/prometheus/common/model" "github.com/stretchr/testify/require" - "github.com/grafana/agent/internal/component" - "github.com/grafana/agent/internal/component/common/loki" - fnet "github.com/grafana/agent/internal/component/common/net" - flow_relabel "github.com/grafana/agent/internal/component/common/relabel" - "github.com/grafana/agent/internal/component/loki/source/gcplog/gcptypes" - "github.com/grafana/agent/internal/util" + "github.com/grafana/alloy/internal/component" + "github.com/grafana/alloy/internal/component/common/loki" + fnet "github.com/grafana/alloy/internal/component/common/net" + flow_relabel "github.com/grafana/alloy/internal/component/common/relabel" + "github.com/grafana/alloy/internal/component/loki/source/gcplog/gcptypes" + "github.com/grafana/alloy/internal/util" ) // TODO (@tpaschalis) We can't test this easily as there's no way to inject diff --git a/internal/component/loki/source/gcplog/gcptypes/gcptypes.go b/internal/component/loki/source/gcplog/gcptypes/gcptypes.go index dad7d8e0a9..20ecf42f14 100644 --- 
a/internal/component/loki/source/gcplog/gcptypes/gcptypes.go +++ b/internal/component/loki/source/gcplog/gcptypes/gcptypes.go @@ -4,7 +4,7 @@ import ( "fmt" "time" - fnet "github.com/grafana/agent/internal/component/common/net" + fnet "github.com/grafana/alloy/internal/component/common/net" ) // PullConfig configures a GCPLog target with the 'pull' strategy. diff --git a/internal/component/loki/source/gcplog/internal/gcplogtarget/formatter.go b/internal/component/loki/source/gcplog/internal/gcplogtarget/formatter.go index 9085737c15..a1e036e44d 100644 --- a/internal/component/loki/source/gcplog/internal/gcplogtarget/formatter.go +++ b/internal/component/loki/source/gcplog/internal/gcplogtarget/formatter.go @@ -16,7 +16,7 @@ import ( "github.com/prometheus/prometheus/model/labels" "github.com/prometheus/prometheus/model/relabel" - "github.com/grafana/agent/internal/component/common/loki" + "github.com/grafana/alloy/internal/component/common/loki" ) // GCPLogEntry that will be written to the pubsub topic according to the following spec. diff --git a/internal/component/loki/source/gcplog/internal/gcplogtarget/pull_target.go b/internal/component/loki/source/gcplog/internal/gcplogtarget/pull_target.go index 67fbf13675..1a9536bb39 100644 --- a/internal/component/loki/source/gcplog/internal/gcplogtarget/pull_target.go +++ b/internal/component/loki/source/gcplog/internal/gcplogtarget/pull_target.go @@ -13,14 +13,14 @@ import ( "cloud.google.com/go/pubsub" "github.com/go-kit/log" - "github.com/grafana/agent/internal/flow/logging/level" + "github.com/grafana/alloy/internal/flow/logging/level" "github.com/grafana/dskit/backoff" "github.com/prometheus/common/model" "github.com/prometheus/prometheus/model/relabel" "google.golang.org/api/option" - "github.com/grafana/agent/internal/component/common/loki" - "github.com/grafana/agent/internal/component/loki/source/gcplog/gcptypes" + "github.com/grafana/alloy/internal/component/common/loki" + "github.com/grafana/alloy/internal/component/loki/source/gcplog/gcptypes" ) // PullTarget represents a target that scrapes logs from a GCP project id and diff --git a/internal/component/loki/source/gcplog/internal/gcplogtarget/pull_target_test.go b/internal/component/loki/source/gcplog/internal/gcplogtarget/pull_target_test.go index 669fc43031..9245a842ae 100644 --- a/internal/component/loki/source/gcplog/internal/gcplogtarget/pull_target_test.go +++ b/internal/component/loki/source/gcplog/internal/gcplogtarget/pull_target_test.go @@ -14,8 +14,8 @@ import ( "github.com/stretchr/testify/require" "gotest.tools/assert" - "github.com/grafana/agent/internal/component/common/loki/client/fake" - "github.com/grafana/agent/internal/component/loki/source/gcplog/gcptypes" + "github.com/grafana/alloy/internal/component/common/loki/client/fake" + "github.com/grafana/alloy/internal/component/loki/source/gcplog/gcptypes" ) func TestPullTarget_RunStop(t *testing.T) { diff --git a/internal/component/loki/source/gcplog/internal/gcplogtarget/push_target.go b/internal/component/loki/source/gcplog/internal/gcplogtarget/push_target.go index c4091693ee..c0a013a035 100644 --- a/internal/component/loki/source/gcplog/internal/gcplogtarget/push_target.go +++ b/internal/component/loki/source/gcplog/internal/gcplogtarget/push_target.go @@ -14,14 +14,14 @@ import ( "github.com/go-kit/log" "github.com/gorilla/mux" - "github.com/grafana/agent/internal/flow/logging/level" + "github.com/grafana/alloy/internal/flow/logging/level" "github.com/prometheus/client_golang/prometheus" 
"github.com/prometheus/common/model" "github.com/prometheus/prometheus/model/relabel" - "github.com/grafana/agent/internal/component/common/loki" - fnet "github.com/grafana/agent/internal/component/common/net" - "github.com/grafana/agent/internal/component/loki/source/gcplog/gcptypes" + "github.com/grafana/alloy/internal/component/common/loki" + fnet "github.com/grafana/alloy/internal/component/common/net" + "github.com/grafana/alloy/internal/component/loki/source/gcplog/gcptypes" ) // PushTarget defines a server for receiving messages from a GCP PubSub push diff --git a/internal/component/loki/source/gcplog/internal/gcplogtarget/push_target_test.go b/internal/component/loki/source/gcplog/internal/gcplogtarget/push_target_test.go index 68886b33c2..2b7c1462c4 100644 --- a/internal/component/loki/source/gcplog/internal/gcplogtarget/push_target_test.go +++ b/internal/component/loki/source/gcplog/internal/gcplogtarget/push_target_test.go @@ -16,10 +16,10 @@ import ( "github.com/prometheus/prometheus/model/relabel" "github.com/stretchr/testify/require" - "github.com/grafana/agent/internal/component/common/loki" - "github.com/grafana/agent/internal/component/common/loki/client/fake" - fnet "github.com/grafana/agent/internal/component/common/net" - "github.com/grafana/agent/internal/component/loki/source/gcplog/gcptypes" + "github.com/grafana/alloy/internal/component/common/loki" + "github.com/grafana/alloy/internal/component/common/loki/client/fake" + fnet "github.com/grafana/alloy/internal/component/common/net" + "github.com/grafana/alloy/internal/component/loki/source/gcplog/gcptypes" ) const localhost = "127.0.0.1" diff --git a/internal/component/loki/source/gcplog/internal/gcplogtarget/push_translation.go b/internal/component/loki/source/gcplog/internal/gcplogtarget/push_translation.go index 624c0678be..d7b6a22f60 100644 --- a/internal/component/loki/source/gcplog/internal/gcplogtarget/push_translation.go +++ b/internal/component/loki/source/gcplog/internal/gcplogtarget/push_translation.go @@ -15,7 +15,7 @@ import ( "github.com/prometheus/prometheus/model/labels" "github.com/prometheus/prometheus/model/relabel" - "github.com/grafana/agent/internal/component/common/loki" + "github.com/grafana/alloy/internal/component/common/loki" ) // ReservedLabelTenantID reserved to override the tenant ID while processing diff --git a/internal/component/loki/source/gelf/gelf.go b/internal/component/loki/source/gelf/gelf.go index ea889322d3..0e1343654e 100644 --- a/internal/component/loki/source/gelf/gelf.go +++ b/internal/component/loki/source/gelf/gelf.go @@ -4,11 +4,11 @@ import ( "context" "sync" - "github.com/grafana/agent/internal/component" - "github.com/grafana/agent/internal/component/common/loki" - flow_relabel "github.com/grafana/agent/internal/component/common/relabel" - "github.com/grafana/agent/internal/component/loki/source/gelf/internal/target" - "github.com/grafana/agent/internal/featuregate" + "github.com/grafana/alloy/internal/component" + "github.com/grafana/alloy/internal/component/common/loki" + flow_relabel "github.com/grafana/alloy/internal/component/common/relabel" + "github.com/grafana/alloy/internal/component/loki/source/gelf/internal/target" + "github.com/grafana/alloy/internal/featuregate" "github.com/grafana/loki/clients/pkg/promtail/scrapeconfig" "github.com/prometheus/common/model" "github.com/prometheus/prometheus/model/relabel" diff --git a/internal/component/loki/source/gelf/gelf_test.go b/internal/component/loki/source/gelf/gelf_test.go index 0e1104717c..b948528d1c 100644 
--- a/internal/component/loki/source/gelf/gelf_test.go +++ b/internal/component/loki/source/gelf/gelf_test.go @@ -8,9 +8,9 @@ import ( "testing" "time" - "github.com/grafana/agent/internal/component" - "github.com/grafana/agent/internal/component/common/loki" - "github.com/grafana/agent/internal/util" + "github.com/grafana/alloy/internal/component" + "github.com/grafana/alloy/internal/component/common/loki" + "github.com/grafana/alloy/internal/util" "github.com/phayes/freeport" "github.com/prometheus/client_golang/prometheus" "github.com/stretchr/testify/require" diff --git a/internal/component/loki/source/gelf/internal/target/gelftarget.go b/internal/component/loki/source/gelf/internal/target/gelftarget.go index 5868f59ba2..a1dce76315 100644 --- a/internal/component/loki/source/gelf/internal/target/gelftarget.go +++ b/internal/component/loki/source/gelf/internal/target/gelftarget.go @@ -11,10 +11,10 @@ import ( "sync" "time" - "github.com/grafana/agent/internal/component/common/loki" + "github.com/grafana/alloy/internal/component/common/loki" "github.com/go-kit/log" - "github.com/grafana/agent/internal/flow/logging/level" + "github.com/grafana/alloy/internal/flow/logging/level" "github.com/grafana/go-gelf/v2/gelf" "github.com/prometheus/common/model" "github.com/prometheus/prometheus/model/labels" diff --git a/internal/component/loki/source/heroku/heroku.go b/internal/component/loki/source/heroku/heroku.go index d551b1cf80..ca8015fe06 100644 --- a/internal/component/loki/source/heroku/heroku.go +++ b/internal/component/loki/source/heroku/heroku.go @@ -5,14 +5,14 @@ import ( "reflect" "sync" - "github.com/grafana/agent/internal/component" - "github.com/grafana/agent/internal/component/common/loki" - fnet "github.com/grafana/agent/internal/component/common/net" - flow_relabel "github.com/grafana/agent/internal/component/common/relabel" - ht "github.com/grafana/agent/internal/component/loki/source/heroku/internal/herokutarget" - "github.com/grafana/agent/internal/featuregate" - "github.com/grafana/agent/internal/flow/logging/level" - "github.com/grafana/agent/internal/util" + "github.com/grafana/alloy/internal/component" + "github.com/grafana/alloy/internal/component/common/loki" + fnet "github.com/grafana/alloy/internal/component/common/net" + flow_relabel "github.com/grafana/alloy/internal/component/common/relabel" + ht "github.com/grafana/alloy/internal/component/loki/source/heroku/internal/herokutarget" + "github.com/grafana/alloy/internal/featuregate" + "github.com/grafana/alloy/internal/flow/logging/level" + "github.com/grafana/alloy/internal/util" "github.com/prometheus/client_golang/prometheus" "github.com/prometheus/common/model" "github.com/prometheus/prometheus/model/relabel" diff --git a/internal/component/loki/source/heroku/heroku_test.go b/internal/component/loki/source/heroku/heroku_test.go index 8a6692aa09..a562e10ab1 100644 --- a/internal/component/loki/source/heroku/heroku_test.go +++ b/internal/component/loki/source/heroku/heroku_test.go @@ -8,12 +8,12 @@ import ( "testing" "time" - "github.com/grafana/agent/internal/component" - "github.com/grafana/agent/internal/component/common/loki" - fnet "github.com/grafana/agent/internal/component/common/net" - flow_relabel "github.com/grafana/agent/internal/component/common/relabel" - "github.com/grafana/agent/internal/component/loki/source/heroku/internal/herokutarget" - "github.com/grafana/agent/internal/util" + "github.com/grafana/alloy/internal/component" + "github.com/grafana/alloy/internal/component/common/loki" + fnet 
"github.com/grafana/alloy/internal/component/common/net" + flow_relabel "github.com/grafana/alloy/internal/component/common/relabel" + "github.com/grafana/alloy/internal/component/loki/source/heroku/internal/herokutarget" + "github.com/grafana/alloy/internal/util" "github.com/grafana/regexp" "github.com/phayes/freeport" "github.com/prometheus/client_golang/prometheus" diff --git a/internal/component/loki/source/heroku/internal/herokutarget/herokutarget.go b/internal/component/loki/source/heroku/internal/herokutarget/herokutarget.go index 903285e16a..93725f938f 100644 --- a/internal/component/loki/source/heroku/internal/herokutarget/herokutarget.go +++ b/internal/component/loki/source/heroku/internal/herokutarget/herokutarget.go @@ -12,9 +12,9 @@ import ( "github.com/go-kit/log" "github.com/gorilla/mux" - "github.com/grafana/agent/internal/component/common/loki" - fnet "github.com/grafana/agent/internal/component/common/net" - "github.com/grafana/agent/internal/flow/logging/level" + "github.com/grafana/alloy/internal/component/common/loki" + fnet "github.com/grafana/alloy/internal/component/common/net" + "github.com/grafana/alloy/internal/flow/logging/level" "github.com/grafana/loki/pkg/logproto" herokuEncoding "github.com/heroku/x/logplex/encoding" "github.com/prometheus/client_golang/prometheus" diff --git a/internal/component/loki/source/heroku/internal/herokutarget/target_test.go b/internal/component/loki/source/heroku/internal/herokutarget/target_test.go index 4da6704e16..615564e1cc 100644 --- a/internal/component/loki/source/heroku/internal/herokutarget/target_test.go +++ b/internal/component/loki/source/heroku/internal/herokutarget/target_test.go @@ -14,7 +14,7 @@ import ( "testing" "time" - "github.com/grafana/agent/internal/component/common/loki/client/fake" + "github.com/grafana/alloy/internal/component/common/loki/client/fake" "github.com/go-kit/log" "github.com/google/uuid" @@ -23,7 +23,7 @@ import ( "github.com/prometheus/prometheus/model/relabel" "github.com/stretchr/testify/require" - fnet "github.com/grafana/agent/internal/component/common/net" + fnet "github.com/grafana/alloy/internal/component/common/net" ) const localhost = "127.0.0.1" diff --git a/internal/component/loki/source/internal/kafkatarget/config.go b/internal/component/loki/source/internal/kafkatarget/config.go index 1001bae451..01f4dc28cf 100644 --- a/internal/component/loki/source/internal/kafkatarget/config.go +++ b/internal/component/loki/source/internal/kafkatarget/config.go @@ -2,7 +2,7 @@ package kafkatarget import ( "github.com/IBM/sarama" - "github.com/grafana/agent/internal/component/common/loki" + "github.com/grafana/alloy/internal/component/common/loki" "github.com/grafana/dskit/flagext" promconfig "github.com/prometheus/common/config" "github.com/prometheus/common/model" diff --git a/internal/component/loki/source/internal/kafkatarget/consumer.go b/internal/component/loki/source/internal/kafkatarget/consumer.go index f22c8e2b7c..74fe76bb2a 100644 --- a/internal/component/loki/source/internal/kafkatarget/consumer.go +++ b/internal/component/loki/source/internal/kafkatarget/consumer.go @@ -12,7 +12,7 @@ import ( "github.com/IBM/sarama" "github.com/go-kit/log" - "github.com/grafana/agent/internal/flow/logging/level" + "github.com/grafana/alloy/internal/flow/logging/level" "github.com/grafana/dskit/backoff" "github.com/grafana/loki/clients/pkg/promtail/targets/target" diff --git a/internal/component/loki/source/internal/kafkatarget/kafkatarget.go 
b/internal/component/loki/source/internal/kafkatarget/kafkatarget.go index b0c90c0a6b..3bf5662db8 100644 --- a/internal/component/loki/source/internal/kafkatarget/kafkatarget.go +++ b/internal/component/loki/source/internal/kafkatarget/kafkatarget.go @@ -10,8 +10,8 @@ import ( "github.com/IBM/sarama" "github.com/go-kit/log" - "github.com/grafana/agent/internal/component/common/loki" - "github.com/grafana/agent/internal/flow/logging/level" + "github.com/grafana/alloy/internal/component/common/loki" + "github.com/grafana/alloy/internal/flow/logging/level" "github.com/grafana/loki/clients/pkg/promtail/targets/target" "github.com/prometheus/common/model" "github.com/prometheus/prometheus/model/labels" diff --git a/internal/component/loki/source/internal/kafkatarget/kafkatarget_test.go b/internal/component/loki/source/internal/kafkatarget/kafkatarget_test.go index 6d90d94ab0..672aba066c 100644 --- a/internal/component/loki/source/internal/kafkatarget/kafkatarget_test.go +++ b/internal/component/loki/source/internal/kafkatarget/kafkatarget_test.go @@ -11,7 +11,7 @@ import ( "testing" "time" - "github.com/grafana/agent/internal/component/common/loki/client/fake" + "github.com/grafana/alloy/internal/component/common/loki/client/fake" "github.com/IBM/sarama" "github.com/prometheus/common/model" diff --git a/internal/component/loki/source/internal/kafkatarget/parser.go b/internal/component/loki/source/internal/kafkatarget/parser.go index f4b7baed83..e2c3e315b1 100644 --- a/internal/component/loki/source/internal/kafkatarget/parser.go +++ b/internal/component/loki/source/internal/kafkatarget/parser.go @@ -2,7 +2,7 @@ package kafkatarget import ( "github.com/IBM/sarama" - "github.com/grafana/agent/internal/component/common/loki" + "github.com/grafana/alloy/internal/component/common/loki" "github.com/grafana/loki/pkg/logproto" "github.com/prometheus/common/model" "github.com/prometheus/prometheus/model/relabel" diff --git a/internal/component/loki/source/internal/kafkatarget/target_syncer.go b/internal/component/loki/source/internal/kafkatarget/target_syncer.go index dd58766f9f..9514d39138 100644 --- a/internal/component/loki/source/internal/kafkatarget/target_syncer.go +++ b/internal/component/loki/source/internal/kafkatarget/target_syncer.go @@ -13,14 +13,14 @@ import ( "github.com/IBM/sarama" "github.com/go-kit/log" - "github.com/grafana/agent/internal/flow/logging/level" + "github.com/grafana/alloy/internal/flow/logging/level" promconfig "github.com/prometheus/common/config" "github.com/prometheus/common/model" "github.com/prometheus/prometheus/model/labels" "github.com/grafana/loki/clients/pkg/promtail/targets/target" - "github.com/grafana/agent/internal/component/common/loki" + "github.com/grafana/alloy/internal/component/common/loki" ) var TopicPollInterval = 30 * time.Second diff --git a/internal/component/loki/source/internal/kafkatarget/target_syncer_test.go b/internal/component/loki/source/internal/kafkatarget/target_syncer_test.go index 4c11f46d8c..b5c6ea02f9 100644 --- a/internal/component/loki/source/internal/kafkatarget/target_syncer_test.go +++ b/internal/component/loki/source/internal/kafkatarget/target_syncer_test.go @@ -7,7 +7,7 @@ import ( "testing" "time" - "github.com/grafana/agent/internal/component/common/loki/client/fake" + "github.com/grafana/alloy/internal/component/common/loki/client/fake" "github.com/grafana/dskit/flagext" "github.com/prometheus/common/config" diff --git a/internal/component/loki/source/journal/internal/target/journaltarget.go 
b/internal/component/loki/source/journal/internal/target/journaltarget.go index 88377ba426..b7dbeea4dc 100644 --- a/internal/component/loki/source/journal/internal/target/journaltarget.go +++ b/internal/component/loki/source/journal/internal/target/journaltarget.go @@ -13,12 +13,12 @@ import ( "syscall" "time" - "github.com/grafana/agent/internal/component/common/loki" - "github.com/grafana/agent/internal/component/common/loki/positions" + "github.com/grafana/alloy/internal/component/common/loki" + "github.com/grafana/alloy/internal/component/common/loki/positions" "github.com/coreos/go-systemd/sdjournal" "github.com/go-kit/log" - "github.com/grafana/agent/internal/flow/logging/level" + "github.com/grafana/alloy/internal/flow/logging/level" jsoniter "github.com/json-iterator/go" "github.com/pkg/errors" "github.com/prometheus/common/model" diff --git a/internal/component/loki/source/journal/internal/target/journaltarget_test.go b/internal/component/loki/source/journal/internal/target/journaltarget_test.go index e8bd2653e3..d11d26217c 100644 --- a/internal/component/loki/source/journal/internal/target/journaltarget_test.go +++ b/internal/component/loki/source/journal/internal/target/journaltarget_test.go @@ -14,11 +14,11 @@ import ( "testing" "time" - "github.com/grafana/agent/internal/component/common/loki/client/fake" + "github.com/grafana/alloy/internal/component/common/loki/client/fake" "github.com/coreos/go-systemd/sdjournal" "github.com/go-kit/log" - "github.com/grafana/agent/internal/component/common/loki/positions" + "github.com/grafana/alloy/internal/component/common/loki/positions" "github.com/prometheus/client_golang/prometheus" "github.com/prometheus/client_golang/prometheus/testutil" "github.com/prometheus/prometheus/model/relabel" diff --git a/internal/component/loki/source/journal/journal.go b/internal/component/loki/source/journal/journal.go index 7bdb5b3127..971f6bfe4e 100644 --- a/internal/component/loki/source/journal/journal.go +++ b/internal/component/loki/source/journal/journal.go @@ -9,15 +9,15 @@ import ( "sync" "time" - "github.com/grafana/agent/internal/component/common/loki" - "github.com/grafana/agent/internal/component/common/loki/positions" - flow_relabel "github.com/grafana/agent/internal/component/common/relabel" - "github.com/grafana/agent/internal/component/loki/source/journal/internal/target" - "github.com/grafana/agent/internal/featuregate" + "github.com/grafana/alloy/internal/component/common/loki" + "github.com/grafana/alloy/internal/component/common/loki/positions" + flow_relabel "github.com/grafana/alloy/internal/component/common/relabel" + "github.com/grafana/alloy/internal/component/loki/source/journal/internal/target" + "github.com/grafana/alloy/internal/featuregate" "github.com/grafana/loki/clients/pkg/promtail/scrapeconfig" "github.com/prometheus/common/model" - "github.com/grafana/agent/internal/component" + "github.com/grafana/alloy/internal/component" ) func init() { diff --git a/internal/component/loki/source/journal/journal_stub.go b/internal/component/loki/source/journal/journal_stub.go index a18ec9f420..10cc145181 100644 --- a/internal/component/loki/source/journal/journal_stub.go +++ b/internal/component/loki/source/journal/journal_stub.go @@ -5,9 +5,9 @@ package journal import ( "context" - "github.com/grafana/agent/internal/component" - "github.com/grafana/agent/internal/featuregate" - "github.com/grafana/agent/internal/flow/logging/level" + "github.com/grafana/alloy/internal/component" + "github.com/grafana/alloy/internal/featuregate" + 
"github.com/grafana/alloy/internal/flow/logging/level" ) func init() { diff --git a/internal/component/loki/source/journal/journal_test.go b/internal/component/loki/source/journal/journal_test.go index 525d982998..d4da349bdb 100644 --- a/internal/component/loki/source/journal/journal_test.go +++ b/internal/component/loki/source/journal/journal_test.go @@ -9,9 +9,9 @@ import ( "time" "github.com/coreos/go-systemd/v22/journal" - "github.com/grafana/agent/internal/component" - "github.com/grafana/agent/internal/component/common/loki" - "github.com/grafana/agent/internal/util" + "github.com/grafana/alloy/internal/component" + "github.com/grafana/alloy/internal/component/common/loki" + "github.com/grafana/alloy/internal/util" "github.com/prometheus/client_golang/prometheus" "github.com/stretchr/testify/require" ) diff --git a/internal/component/loki/source/journal/types.go b/internal/component/loki/source/journal/types.go index 648f7e7cfd..d4b1bd673b 100644 --- a/internal/component/loki/source/journal/types.go +++ b/internal/component/loki/source/journal/types.go @@ -3,8 +3,8 @@ package journal import ( "time" - "github.com/grafana/agent/internal/component/common/loki" - flow_relabel "github.com/grafana/agent/internal/component/common/relabel" + "github.com/grafana/alloy/internal/component/common/loki" + flow_relabel "github.com/grafana/alloy/internal/component/common/relabel" ) // Arguments are the arguments for the component. diff --git a/internal/component/loki/source/kafka/kafka.go b/internal/component/loki/source/kafka/kafka.go index a22c3141a5..d44c8369df 100644 --- a/internal/component/loki/source/kafka/kafka.go +++ b/internal/component/loki/source/kafka/kafka.go @@ -5,13 +5,13 @@ import ( "sync" "github.com/IBM/sarama" - "github.com/grafana/agent/internal/component" - "github.com/grafana/agent/internal/component/common/config" - "github.com/grafana/agent/internal/component/common/loki" - flow_relabel "github.com/grafana/agent/internal/component/common/relabel" - kt "github.com/grafana/agent/internal/component/loki/source/internal/kafkatarget" - "github.com/grafana/agent/internal/featuregate" - "github.com/grafana/agent/internal/flow/logging/level" + "github.com/grafana/alloy/internal/component" + "github.com/grafana/alloy/internal/component/common/config" + "github.com/grafana/alloy/internal/component/common/loki" + flow_relabel "github.com/grafana/alloy/internal/component/common/relabel" + kt "github.com/grafana/alloy/internal/component/loki/source/internal/kafkatarget" + "github.com/grafana/alloy/internal/featuregate" + "github.com/grafana/alloy/internal/flow/logging/level" "github.com/grafana/alloy/syntax/alloytypes" "github.com/grafana/dskit/flagext" "github.com/prometheus/common/model" diff --git a/internal/component/loki/source/kubernetes/kubernetes.go b/internal/component/loki/source/kubernetes/kubernetes.go index 012e9082a8..b32942417c 100644 --- a/internal/component/loki/source/kubernetes/kubernetes.go +++ b/internal/component/loki/source/kubernetes/kubernetes.go @@ -11,15 +11,15 @@ import ( "time" "github.com/go-kit/log" - "github.com/grafana/agent/internal/component" - commonk8s "github.com/grafana/agent/internal/component/common/kubernetes" - "github.com/grafana/agent/internal/component/common/loki" - "github.com/grafana/agent/internal/component/common/loki/positions" - "github.com/grafana/agent/internal/component/discovery" - "github.com/grafana/agent/internal/component/loki/source/kubernetes/kubetail" - "github.com/grafana/agent/internal/featuregate" - 
"github.com/grafana/agent/internal/flow/logging/level" - "github.com/grafana/agent/internal/service/cluster" + "github.com/grafana/alloy/internal/component" + commonk8s "github.com/grafana/alloy/internal/component/common/kubernetes" + "github.com/grafana/alloy/internal/component/common/loki" + "github.com/grafana/alloy/internal/component/common/loki/positions" + "github.com/grafana/alloy/internal/component/discovery" + "github.com/grafana/alloy/internal/component/loki/source/kubernetes/kubetail" + "github.com/grafana/alloy/internal/featuregate" + "github.com/grafana/alloy/internal/flow/logging/level" + "github.com/grafana/alloy/internal/service/cluster" "k8s.io/client-go/kubernetes" ) diff --git a/internal/component/loki/source/kubernetes/kubetail/kubetail.go b/internal/component/loki/source/kubernetes/kubetail/kubetail.go index 10c5d64ee0..c40d44978b 100644 --- a/internal/component/loki/source/kubernetes/kubetail/kubetail.go +++ b/internal/component/loki/source/kubernetes/kubetail/kubetail.go @@ -6,10 +6,10 @@ import ( "sync" "github.com/go-kit/log" - "github.com/grafana/agent/internal/component/common/loki" - "github.com/grafana/agent/internal/component/common/loki/positions" - "github.com/grafana/agent/internal/flow/logging/level" - "github.com/grafana/agent/internal/runner" + "github.com/grafana/alloy/internal/component/common/loki" + "github.com/grafana/alloy/internal/component/common/loki/positions" + "github.com/grafana/alloy/internal/flow/logging/level" + "github.com/grafana/alloy/internal/runner" "k8s.io/client-go/kubernetes" ) diff --git a/internal/component/loki/source/kubernetes/kubetail/tailer.go b/internal/component/loki/source/kubernetes/kubetail/tailer.go index 4e30ab511a..477879c7c4 100644 --- a/internal/component/loki/source/kubernetes/kubetail/tailer.go +++ b/internal/component/loki/source/kubernetes/kubetail/tailer.go @@ -10,9 +10,9 @@ import ( "time" "github.com/go-kit/log" - "github.com/grafana/agent/internal/component/common/loki" - "github.com/grafana/agent/internal/flow/logging/level" - "github.com/grafana/agent/internal/runner" + "github.com/grafana/alloy/internal/component/common/loki" + "github.com/grafana/alloy/internal/flow/logging/level" + "github.com/grafana/alloy/internal/runner" "github.com/grafana/dskit/backoff" "github.com/grafana/loki/pkg/logproto" "github.com/prometheus/common/model" diff --git a/internal/component/loki/source/kubernetes_events/event_controller.go b/internal/component/loki/source/kubernetes_events/event_controller.go index 1a1bd4fdf4..686fa0ad8e 100644 --- a/internal/component/loki/source/kubernetes_events/event_controller.go +++ b/internal/component/loki/source/kubernetes_events/event_controller.go @@ -10,10 +10,10 @@ import ( "github.com/cespare/xxhash/v2" "github.com/go-kit/log" - "github.com/grafana/agent/internal/component/common/loki" - "github.com/grafana/agent/internal/component/common/loki/positions" - "github.com/grafana/agent/internal/flow/logging/level" - "github.com/grafana/agent/internal/runner" + "github.com/grafana/alloy/internal/component/common/loki" + "github.com/grafana/alloy/internal/component/common/loki/positions" + "github.com/grafana/alloy/internal/flow/logging/level" + "github.com/grafana/alloy/internal/runner" "github.com/grafana/loki/pkg/logproto" "github.com/prometheus/common/model" corev1 "k8s.io/api/core/v1" diff --git a/internal/component/loki/source/kubernetes_events/kubernetes_events.go b/internal/component/loki/source/kubernetes_events/kubernetes_events.go index 0f860d789b..40e4878289 100644 --- 
a/internal/component/loki/source/kubernetes_events/kubernetes_events.go +++ b/internal/component/loki/source/kubernetes_events/kubernetes_events.go @@ -12,13 +12,13 @@ import ( "time" "github.com/go-kit/log" - "github.com/grafana/agent/internal/component" - "github.com/grafana/agent/internal/component/common/kubernetes" - "github.com/grafana/agent/internal/component/common/loki" - "github.com/grafana/agent/internal/component/common/loki/positions" - "github.com/grafana/agent/internal/featuregate" - "github.com/grafana/agent/internal/flow/logging/level" - "github.com/grafana/agent/internal/runner" + "github.com/grafana/alloy/internal/component" + "github.com/grafana/alloy/internal/component/common/kubernetes" + "github.com/grafana/alloy/internal/component/common/loki" + "github.com/grafana/alloy/internal/component/common/loki/positions" + "github.com/grafana/alloy/internal/featuregate" + "github.com/grafana/alloy/internal/flow/logging/level" + "github.com/grafana/alloy/internal/runner" "github.com/oklog/run" "k8s.io/client-go/rest" ) diff --git a/internal/component/loki/source/podlogs/controller.go b/internal/component/loki/source/podlogs/controller.go index bf71b40c8f..0abdeea595 100644 --- a/internal/component/loki/source/podlogs/controller.go +++ b/internal/component/loki/source/podlogs/controller.go @@ -8,8 +8,8 @@ import ( "time" "github.com/go-kit/log" - monitoringv1alpha2 "github.com/grafana/agent/internal/component/loki/source/podlogs/internal/apis/monitoring/v1alpha2" - "github.com/grafana/agent/internal/flow/logging/level" + monitoringv1alpha2 "github.com/grafana/alloy/internal/component/loki/source/podlogs/internal/apis/monitoring/v1alpha2" + "github.com/grafana/alloy/internal/flow/logging/level" corev1 "k8s.io/api/core/v1" "k8s.io/apimachinery/pkg/runtime" "k8s.io/client-go/rest" diff --git a/internal/component/loki/source/podlogs/podlogs.go b/internal/component/loki/source/podlogs/podlogs.go index 94323cc16a..4ec3bffba0 100644 --- a/internal/component/loki/source/podlogs/podlogs.go +++ b/internal/component/loki/source/podlogs/podlogs.go @@ -10,16 +10,16 @@ import ( "time" "github.com/go-kit/log" - "github.com/grafana/agent/internal/component" - "github.com/grafana/agent/internal/component/common/config" - commonk8s "github.com/grafana/agent/internal/component/common/kubernetes" - "github.com/grafana/agent/internal/component/common/loki" - "github.com/grafana/agent/internal/component/common/loki/positions" - "github.com/grafana/agent/internal/component/loki/source/kubernetes" - "github.com/grafana/agent/internal/component/loki/source/kubernetes/kubetail" - "github.com/grafana/agent/internal/featuregate" - "github.com/grafana/agent/internal/flow/logging/level" - "github.com/grafana/agent/internal/service/cluster" + "github.com/grafana/alloy/internal/component" + "github.com/grafana/alloy/internal/component/common/config" + commonk8s "github.com/grafana/alloy/internal/component/common/kubernetes" + "github.com/grafana/alloy/internal/component/common/loki" + "github.com/grafana/alloy/internal/component/common/loki/positions" + "github.com/grafana/alloy/internal/component/loki/source/kubernetes" + "github.com/grafana/alloy/internal/component/loki/source/kubernetes/kubetail" + "github.com/grafana/alloy/internal/featuregate" + "github.com/grafana/alloy/internal/flow/logging/level" + "github.com/grafana/alloy/internal/service/cluster" "github.com/oklog/run" kubeclient "k8s.io/client-go/kubernetes" "k8s.io/client-go/rest" diff --git a/internal/component/loki/source/podlogs/reconciler.go 
b/internal/component/loki/source/podlogs/reconciler.go index 7f79807b6f..068dcdce06 100644 --- a/internal/component/loki/source/podlogs/reconciler.go +++ b/internal/component/loki/source/podlogs/reconciler.go @@ -9,10 +9,10 @@ import ( "time" "github.com/go-kit/log" - "github.com/grafana/agent/internal/component/loki/source/kubernetes/kubetail" - monitoringv1alpha2 "github.com/grafana/agent/internal/component/loki/source/podlogs/internal/apis/monitoring/v1alpha2" - "github.com/grafana/agent/internal/flow/logging/level" - "github.com/grafana/agent/internal/service/cluster" + "github.com/grafana/alloy/internal/component/loki/source/kubernetes/kubetail" + monitoringv1alpha2 "github.com/grafana/alloy/internal/component/loki/source/podlogs/internal/apis/monitoring/v1alpha2" + "github.com/grafana/alloy/internal/flow/logging/level" + "github.com/grafana/alloy/internal/service/cluster" "github.com/grafana/ckit/shard" "github.com/prometheus/common/model" promlabels "github.com/prometheus/prometheus/model/labels" diff --git a/internal/component/loki/source/syslog/internal/syslogtarget/syslogtarget.go b/internal/component/loki/source/syslog/internal/syslogtarget/syslogtarget.go index 087b59796a..d48512b31d 100644 --- a/internal/component/loki/source/syslog/internal/syslogtarget/syslogtarget.go +++ b/internal/component/loki/source/syslog/internal/syslogtarget/syslogtarget.go @@ -12,14 +12,14 @@ import ( "time" "github.com/go-kit/log" - "github.com/grafana/agent/internal/flow/logging/level" + "github.com/grafana/alloy/internal/flow/logging/level" "github.com/influxdata/go-syslog/v3" "github.com/influxdata/go-syslog/v3/rfc5424" "github.com/prometheus/common/model" "github.com/prometheus/prometheus/model/labels" "github.com/prometheus/prometheus/model/relabel" - "github.com/grafana/agent/internal/component/common/loki" + "github.com/grafana/alloy/internal/component/common/loki" "github.com/grafana/loki/clients/pkg/promtail/scrapeconfig" "github.com/grafana/loki/clients/pkg/promtail/targets/target" diff --git a/internal/component/loki/source/syslog/internal/syslogtarget/syslogtarget_test.go b/internal/component/loki/source/syslog/internal/syslogtarget/syslogtarget_test.go index 5754fb9693..40e0ccde4e 100644 --- a/internal/component/loki/source/syslog/internal/syslogtarget/syslogtarget_test.go +++ b/internal/component/loki/source/syslog/internal/syslogtarget/syslogtarget_test.go @@ -15,7 +15,7 @@ import ( "time" "unicode/utf8" - "github.com/grafana/agent/internal/component/common/loki/client/fake" + "github.com/grafana/alloy/internal/component/common/loki/client/fake" "github.com/go-kit/log" "github.com/grafana/loki/clients/pkg/promtail/scrapeconfig" diff --git a/internal/component/loki/source/syslog/internal/syslogtarget/transport.go b/internal/component/loki/source/syslog/internal/syslogtarget/transport.go index d7360ecef4..6539b4594b 100644 --- a/internal/component/loki/source/syslog/internal/syslogtarget/transport.go +++ b/internal/component/loki/source/syslog/internal/syslogtarget/transport.go @@ -21,7 +21,7 @@ import ( "github.com/mwitkow/go-conntrack" "github.com/go-kit/log" - "github.com/grafana/agent/internal/flow/logging/level" + "github.com/grafana/alloy/internal/flow/logging/level" "github.com/influxdata/go-syslog/v3" "github.com/prometheus/common/config" "github.com/prometheus/prometheus/model/labels" diff --git a/internal/component/loki/source/syslog/syslog.go b/internal/component/loki/source/syslog/syslog.go index 296a75453c..03a0150291 100644 --- 
a/internal/component/loki/source/syslog/syslog.go
+++ b/internal/component/loki/source/syslog/syslog.go
@@ -5,12 +5,12 @@ import (
 	"reflect"
 	"sync"
 
-	"github.com/grafana/agent/internal/component"
-	"github.com/grafana/agent/internal/component/common/loki"
-	flow_relabel "github.com/grafana/agent/internal/component/common/relabel"
-	st "github.com/grafana/agent/internal/component/loki/source/syslog/internal/syslogtarget"
-	"github.com/grafana/agent/internal/featuregate"
-	"github.com/grafana/agent/internal/flow/logging/level"
+	"github.com/grafana/alloy/internal/component"
+	"github.com/grafana/alloy/internal/component/common/loki"
+	flow_relabel "github.com/grafana/alloy/internal/component/common/relabel"
+	st "github.com/grafana/alloy/internal/component/loki/source/syslog/internal/syslogtarget"
+	"github.com/grafana/alloy/internal/featuregate"
+	"github.com/grafana/alloy/internal/flow/logging/level"
 	"github.com/prometheus/prometheus/model/relabel"
 )
diff --git a/internal/component/loki/source/syslog/syslog_test.go b/internal/component/loki/source/syslog/syslog_test.go
index 481cd5a5c2..a6dc560035 100644
--- a/internal/component/loki/source/syslog/syslog_test.go
+++ b/internal/component/loki/source/syslog/syslog_test.go
@@ -8,10 +8,10 @@ import (
 	"testing"
 	"time"
 
-	"github.com/grafana/agent/internal/component"
-	"github.com/grafana/agent/internal/component/common/loki"
-	flow_relabel "github.com/grafana/agent/internal/component/common/relabel"
-	"github.com/grafana/agent/internal/util"
+	"github.com/grafana/alloy/internal/component"
+	"github.com/grafana/alloy/internal/component/common/loki"
+	flow_relabel "github.com/grafana/alloy/internal/component/common/relabel"
+	"github.com/grafana/alloy/internal/util"
 	"github.com/grafana/regexp"
 	"github.com/phayes/freeport"
 	"github.com/prometheus/client_golang/prometheus"
diff --git a/internal/component/loki/source/syslog/types.go b/internal/component/loki/source/syslog/types.go
index afc617ac32..18d4300df7 100644
--- a/internal/component/loki/source/syslog/types.go
+++ b/internal/component/loki/source/syslog/types.go
@@ -4,8 +4,8 @@ import (
 	"fmt"
 	"time"
 
-	"github.com/grafana/agent/internal/component/common/config"
-	st "github.com/grafana/agent/internal/component/loki/source/syslog/internal/syslogtarget"
+	"github.com/grafana/alloy/internal/component/common/config"
+	st "github.com/grafana/alloy/internal/component/loki/source/syslog/internal/syslogtarget"
 	"github.com/grafana/loki/clients/pkg/promtail/scrapeconfig"
 	"github.com/prometheus/common/model"
 )
diff --git a/internal/component/loki/source/windowsevent/arguments.go b/internal/component/loki/source/windowsevent/arguments.go
index eff4af2889..ae08fc1a00 100644
--- a/internal/component/loki/source/windowsevent/arguments.go
+++ b/internal/component/loki/source/windowsevent/arguments.go
@@ -7,7 +7,7 @@ package windowsevent
 import (
 	"time"
 
-	"github.com/grafana/agent/internal/component/common/loki"
+	"github.com/grafana/alloy/internal/component/common/loki"
 )
 
 // Arguments holds values which are used to configure the loki.source.windowsevent
diff --git a/internal/component/loki/source/windowsevent/component_stub.go b/internal/component/loki/source/windowsevent/component_stub.go
index bc7eba8ccb..6081c08e78 100644
--- a/internal/component/loki/source/windowsevent/component_stub.go
+++ b/internal/component/loki/source/windowsevent/component_stub.go
@@ -5,9 +5,9 @@ import (
 	"context"
 
-	"github.com/grafana/agent/internal/component"
-	"github.com/grafana/agent/internal/featuregate"
-	"github.com/grafana/agent/internal/flow/logging/level"
+	"github.com/grafana/alloy/internal/component"
+	"github.com/grafana/alloy/internal/featuregate"
+	"github.com/grafana/alloy/internal/flow/logging/level"
 )
 
 func init() {
diff --git a/internal/component/loki/source/windowsevent/component_test.go b/internal/component/loki/source/windowsevent/component_test.go
index 49488e0313..45a4f15d50 100644
--- a/internal/component/loki/source/windowsevent/component_test.go
+++ b/internal/component/loki/source/windowsevent/component_test.go
@@ -8,9 +8,9 @@ import (
 	"testing"
 	"time"
 
-	"github.com/grafana/agent/internal/component"
-	"github.com/grafana/agent/internal/component/common/loki"
-	"github.com/grafana/agent/internal/util"
+	"github.com/grafana/alloy/internal/component"
+	"github.com/grafana/alloy/internal/component/common/loki"
+	"github.com/grafana/alloy/internal/util"
 	"github.com/prometheus/client_golang/prometheus"
 	"github.com/prometheus/common/model"
 	"github.com/stretchr/testify/require"
diff --git a/internal/component/loki/source/windowsevent/component_windows.go b/internal/component/loki/source/windowsevent/component_windows.go
index 58a4a62234..19d3f9b0e6 100644
--- a/internal/component/loki/source/windowsevent/component_windows.go
+++ b/internal/component/loki/source/windowsevent/component_windows.go
@@ -6,10 +6,10 @@ import (
 	"path"
 	"sync"
 
-	"github.com/grafana/agent/internal/component"
-	"github.com/grafana/agent/internal/component/common/loki"
-	"github.com/grafana/agent/internal/component/common/loki/utils"
-	"github.com/grafana/agent/internal/featuregate"
+	"github.com/grafana/alloy/internal/component"
+	"github.com/grafana/alloy/internal/component/common/loki"
+	"github.com/grafana/alloy/internal/component/common/loki/utils"
+	"github.com/grafana/alloy/internal/featuregate"
 	"github.com/grafana/loki/clients/pkg/promtail/api"
 	"github.com/grafana/loki/clients/pkg/promtail/scrapeconfig"
 )
diff --git a/internal/component/loki/write/types.go b/internal/component/loki/write/types.go
index 9c73c88fde..943201893a 100644
--- a/internal/component/loki/write/types.go
+++ b/internal/component/loki/write/types.go
@@ -5,11 +5,11 @@ import (
 	"net/url"
 	"time"
 
-	"github.com/grafana/agent/internal/component/common/loki/client"
-	"github.com/grafana/agent/internal/component/common/loki/utils"
+	"github.com/grafana/alloy/internal/component/common/loki/client"
+	"github.com/grafana/alloy/internal/component/common/loki/utils"
 
 	"github.com/alecthomas/units"
-	types "github.com/grafana/agent/internal/component/common/config"
+	types "github.com/grafana/alloy/internal/component/common/config"
 	"github.com/grafana/dskit/backoff"
 	"github.com/grafana/dskit/flagext"
 	lokiflagext "github.com/grafana/loki/pkg/util/flagext"
diff --git a/internal/component/loki/write/write.go b/internal/component/loki/write/write.go
index 59b1d8560f..909214393c 100644
--- a/internal/component/loki/write/write.go
+++ b/internal/component/loki/write/write.go
@@ -7,13 +7,13 @@ import (
 	"sync"
 	"time"
 
-	"github.com/grafana/agent/internal/agentseed"
-	"github.com/grafana/agent/internal/component"
-	"github.com/grafana/agent/internal/component/common/loki"
-	"github.com/grafana/agent/internal/component/common/loki/client"
-	"github.com/grafana/agent/internal/component/common/loki/limit"
-	"github.com/grafana/agent/internal/component/common/loki/wal"
-	"github.com/grafana/agent/internal/featuregate"
+	"github.com/grafana/alloy/internal/agentseed"
+	"github.com/grafana/alloy/internal/component"
+	"github.com/grafana/alloy/internal/component/common/loki"
+	"github.com/grafana/alloy/internal/component/common/loki/client"
+	"github.com/grafana/alloy/internal/component/common/loki/limit"
+	"github.com/grafana/alloy/internal/component/common/loki/wal"
+	"github.com/grafana/alloy/internal/featuregate"
 )
 
 func init() {
diff --git a/internal/component/loki/write/write_test.go b/internal/component/loki/write/write_test.go
index 8a4a366e71..63bdfb64b8 100644
--- a/internal/component/loki/write/write_test.go
+++ b/internal/component/loki/write/write_test.go
@@ -10,12 +10,12 @@ import (
 	"testing"
 	"time"
 
-	"github.com/grafana/agent/internal/component/common/loki"
-	"github.com/grafana/agent/internal/component/common/loki/wal"
-	"github.com/grafana/agent/internal/component/discovery"
-	lsf "github.com/grafana/agent/internal/component/loki/source/file"
-	"github.com/grafana/agent/internal/flow/componenttest"
-	"github.com/grafana/agent/internal/util"
+	"github.com/grafana/alloy/internal/component/common/loki"
+	"github.com/grafana/alloy/internal/component/common/loki/wal"
+	"github.com/grafana/alloy/internal/component/discovery"
+	lsf "github.com/grafana/alloy/internal/component/loki/source/file"
+	"github.com/grafana/alloy/internal/flow/componenttest"
+	"github.com/grafana/alloy/internal/util"
 	"github.com/grafana/alloy/syntax"
 	"github.com/prometheus/common/model"
 	"github.com/stretchr/testify/require"
diff --git a/internal/component/metadata/metadata.go b/internal/component/metadata/metadata.go
index b566bd9139..768d9d95c0 100644
--- a/internal/component/metadata/metadata.go
+++ b/internal/component/metadata/metadata.go
@@ -4,12 +4,12 @@ import (
 	"fmt"
 	"reflect"
 
-	"github.com/grafana/agent/internal/component"
-	_ "github.com/grafana/agent/internal/component/all"
-	"github.com/grafana/agent/internal/component/common/loki"
-	"github.com/grafana/agent/internal/component/discovery"
-	"github.com/grafana/agent/internal/component/otelcol"
-	"github.com/grafana/agent/internal/component/pyroscope"
+	"github.com/grafana/alloy/internal/component"
+	_ "github.com/grafana/alloy/internal/component/all"
+	"github.com/grafana/alloy/internal/component/common/loki"
+	"github.com/grafana/alloy/internal/component/discovery"
+	"github.com/grafana/alloy/internal/component/otelcol"
+	"github.com/grafana/alloy/internal/component/pyroscope"
 	"github.com/prometheus/prometheus/storage"
 )
diff --git a/internal/component/mimir/rules/kubernetes/events.go b/internal/component/mimir/rules/kubernetes/events.go
index 7752077d97..94f2155e56 100644
--- a/internal/component/mimir/rules/kubernetes/events.go
+++ b/internal/component/mimir/rules/kubernetes/events.go
@@ -6,8 +6,8 @@ import (
 	"regexp"
 	"time"
 
-	"github.com/grafana/agent/internal/component/common/kubernetes"
-	"github.com/grafana/agent/internal/flow/logging/level"
+	"github.com/grafana/alloy/internal/component/common/kubernetes"
+	"github.com/grafana/alloy/internal/flow/logging/level"
 	"github.com/hashicorp/go-multierror"
 	promv1 "github.com/prometheus-operator/prometheus-operator/pkg/apis/monitoring/v1"
 	"github.com/prometheus/prometheus/model/rulefmt"
diff --git a/internal/component/mimir/rules/kubernetes/events_test.go b/internal/component/mimir/rules/kubernetes/events_test.go
index e177e41bd1..8b16ef78d1 100644
--- a/internal/component/mimir/rules/kubernetes/events_test.go
+++ b/internal/component/mimir/rules/kubernetes/events_test.go
@@ -8,8 +8,8 @@ import (
 	"time"
 
 	"github.com/go-kit/log"
-	"github.com/grafana/agent/internal/component/common/kubernetes"
-	mimirClient "github.com/grafana/agent/internal/mimir/client"
+	"github.com/grafana/alloy/internal/component/common/kubernetes"
+	mimirClient "github.com/grafana/alloy/internal/mimir/client"
 	v1 "github.com/prometheus-operator/prometheus-operator/pkg/apis/monitoring/v1"
 	promListers "github.com/prometheus-operator/prometheus-operator/pkg/client/listers/monitoring/v1"
 	"github.com/prometheus/prometheus/model/rulefmt"
diff --git a/internal/component/mimir/rules/kubernetes/health.go b/internal/component/mimir/rules/kubernetes/health.go
index 101bfd1d0a..0b29d899f2 100644
--- a/internal/component/mimir/rules/kubernetes/health.go
+++ b/internal/component/mimir/rules/kubernetes/health.go
@@ -3,7 +3,7 @@ package rules
 import (
 	"time"
 
-	"github.com/grafana/agent/internal/component"
+	"github.com/grafana/alloy/internal/component"
 )
 
 func (c *Component) reportUnhealthy(err error) {
diff --git a/internal/component/mimir/rules/kubernetes/rules.go b/internal/component/mimir/rules/kubernetes/rules.go
index e6e6d03d8f..1f3106cc9a 100644
--- a/internal/component/mimir/rules/kubernetes/rules.go
+++ b/internal/component/mimir/rules/kubernetes/rules.go
@@ -7,11 +7,11 @@ import (
 	"time"
 
 	"github.com/go-kit/log"
-	"github.com/grafana/agent/internal/component"
-	commonK8s "github.com/grafana/agent/internal/component/common/kubernetes"
-	"github.com/grafana/agent/internal/featuregate"
-	"github.com/grafana/agent/internal/flow/logging/level"
-	mimirClient "github.com/grafana/agent/internal/mimir/client"
+	"github.com/grafana/alloy/internal/component"
+	commonK8s "github.com/grafana/alloy/internal/component/common/kubernetes"
+	"github.com/grafana/alloy/internal/featuregate"
+	"github.com/grafana/alloy/internal/flow/logging/level"
+	mimirClient "github.com/grafana/alloy/internal/mimir/client"
 	"github.com/grafana/dskit/backoff"
 	"github.com/grafana/dskit/instrument"
 	promListers "github.com/prometheus-operator/prometheus-operator/pkg/client/listers/monitoring/v1"
diff --git a/internal/component/mimir/rules/kubernetes/types.go b/internal/component/mimir/rules/kubernetes/types.go
index 701751a1db..ccdbe2b8c1 100644
--- a/internal/component/mimir/rules/kubernetes/types.go
+++ b/internal/component/mimir/rules/kubernetes/types.go
@@ -4,8 +4,8 @@ import (
 	"fmt"
 	"time"
 
-	"github.com/grafana/agent/internal/component/common/config"
-	"github.com/grafana/agent/internal/component/common/kubernetes"
+	"github.com/grafana/alloy/internal/component/common/config"
+	"github.com/grafana/alloy/internal/component/common/kubernetes"
 )
 
 type Arguments struct {
diff --git a/internal/component/otelcol/auth/auth.go b/internal/component/otelcol/auth/auth.go
index f1211c2e6f..6220f8fe54 100644
--- a/internal/component/otelcol/auth/auth.go
+++ b/internal/component/otelcol/auth/auth.go
@@ -9,11 +9,11 @@ import (
 	"context"
 	"os"
 
-	"github.com/grafana/agent/internal/build"
-	"github.com/grafana/agent/internal/component"
-	"github.com/grafana/agent/internal/component/otelcol/internal/lazycollector"
-	"github.com/grafana/agent/internal/component/otelcol/internal/scheduler"
-	"github.com/grafana/agent/internal/util/zapadapter"
+	"github.com/grafana/alloy/internal/build"
+	"github.com/grafana/alloy/internal/component"
+	"github.com/grafana/alloy/internal/component/otelcol/internal/lazycollector"
+	"github.com/grafana/alloy/internal/component/otelcol/internal/scheduler"
+	"github.com/grafana/alloy/internal/util/zapadapter"
 	"github.com/grafana/alloy/syntax"
 	"github.com/prometheus/client_golang/prometheus"
 	otelcomponent "go.opentelemetry.io/collector/component"
diff --git a/internal/component/otelcol/auth/auth_test.go b/internal/component/otelcol/auth/auth_test.go
index 9bc8af90fe..70bd555564 100644
--- a/internal/component/otelcol/auth/auth_test.go
+++ b/internal/component/otelcol/auth/auth_test.go
@@ -5,11 +5,11 @@ import (
 	"testing"
 	"time"
 
-	"github.com/grafana/agent/internal/component"
-	"github.com/grafana/agent/internal/component/otelcol"
-	"github.com/grafana/agent/internal/component/otelcol/auth"
-	"github.com/grafana/agent/internal/flow/componenttest"
-	"github.com/grafana/agent/internal/util"
+	"github.com/grafana/alloy/internal/component"
+	"github.com/grafana/alloy/internal/component/otelcol"
+	"github.com/grafana/alloy/internal/component/otelcol/auth"
+	"github.com/grafana/alloy/internal/flow/componenttest"
+	"github.com/grafana/alloy/internal/util"
 	"github.com/stretchr/testify/require"
 	otelcomponent "go.opentelemetry.io/collector/component"
 	otelextension "go.opentelemetry.io/collector/extension"
diff --git a/internal/component/otelcol/auth/basic/basic.go b/internal/component/otelcol/auth/basic/basic.go
index 8432724832..25c6b55ac5 100644
--- a/internal/component/otelcol/auth/basic/basic.go
+++ b/internal/component/otelcol/auth/basic/basic.go
@@ -2,9 +2,9 @@ package basic
 
 import (
-	"github.com/grafana/agent/internal/component"
-	"github.com/grafana/agent/internal/component/otelcol/auth"
-	"github.com/grafana/agent/internal/featuregate"
+	"github.com/grafana/alloy/internal/component"
+	"github.com/grafana/alloy/internal/component/otelcol/auth"
+	"github.com/grafana/alloy/internal/featuregate"
 	"github.com/grafana/alloy/syntax/alloytypes"
 	"github.com/open-telemetry/opentelemetry-collector-contrib/extension/basicauthextension"
 	otelcomponent "go.opentelemetry.io/collector/component"
diff --git a/internal/component/otelcol/auth/basic/basic_test.go b/internal/component/otelcol/auth/basic/basic_test.go
index aaad0abaa6..ee2804aa83 100644
--- a/internal/component/otelcol/auth/basic/basic_test.go
+++ b/internal/component/otelcol/auth/basic/basic_test.go
@@ -7,10 +7,10 @@ import (
 	"testing"
 	"time"
 
-	"github.com/grafana/agent/internal/component/otelcol/auth"
-	"github.com/grafana/agent/internal/component/otelcol/auth/basic"
-	"github.com/grafana/agent/internal/flow/componenttest"
-	"github.com/grafana/agent/internal/util"
+	"github.com/grafana/alloy/internal/component/otelcol/auth"
+	"github.com/grafana/alloy/internal/component/otelcol/auth/basic"
+	"github.com/grafana/alloy/internal/flow/componenttest"
+	"github.com/grafana/alloy/internal/util"
 	"github.com/grafana/alloy/syntax"
 	"github.com/stretchr/testify/assert"
 	"github.com/stretchr/testify/require"
diff --git a/internal/component/otelcol/auth/bearer/bearer.go b/internal/component/otelcol/auth/bearer/bearer.go
index 43813c48b5..41ad2f779a 100644
--- a/internal/component/otelcol/auth/bearer/bearer.go
+++ b/internal/component/otelcol/auth/bearer/bearer.go
@@ -2,9 +2,9 @@ package bearer
 
 import (
-	"github.com/grafana/agent/internal/component"
-	"github.com/grafana/agent/internal/component/otelcol/auth"
-	"github.com/grafana/agent/internal/featuregate"
+	"github.com/grafana/alloy/internal/component"
+	"github.com/grafana/alloy/internal/component/otelcol/auth"
+	"github.com/grafana/alloy/internal/featuregate"
 	"github.com/grafana/alloy/syntax/alloytypes"
 	"github.com/open-telemetry/opentelemetry-collector-contrib/extension/bearertokenauthextension"
 	otelcomponent "go.opentelemetry.io/collector/component"
diff --git a/internal/component/otelcol/auth/bearer/bearer_test.go b/internal/component/otelcol/auth/bearer/bearer_test.go
index 7168980b63..bdbec3674d 100644
--- a/internal/component/otelcol/auth/bearer/bearer_test.go
+++ b/internal/component/otelcol/auth/bearer/bearer_test.go
@@ -7,10 +7,10 @@ import (
 	"testing"
 	"time"
 
-	"github.com/grafana/agent/internal/component/otelcol/auth"
-	"github.com/grafana/agent/internal/component/otelcol/auth/bearer"
-	"github.com/grafana/agent/internal/flow/componenttest"
-	"github.com/grafana/agent/internal/util"
+	"github.com/grafana/alloy/internal/component/otelcol/auth"
+	"github.com/grafana/alloy/internal/component/otelcol/auth/bearer"
+	"github.com/grafana/alloy/internal/flow/componenttest"
+	"github.com/grafana/alloy/internal/util"
 	"github.com/grafana/alloy/syntax"
 	"github.com/stretchr/testify/assert"
 	"github.com/stretchr/testify/require"
diff --git a/internal/component/otelcol/auth/headers/headers.go b/internal/component/otelcol/auth/headers/headers.go
index f79f8506ca..21aeec00fd 100644
--- a/internal/component/otelcol/auth/headers/headers.go
+++ b/internal/component/otelcol/auth/headers/headers.go
@@ -6,9 +6,9 @@ import (
 	"fmt"
 	"strings"
 
-	"github.com/grafana/agent/internal/component"
-	"github.com/grafana/agent/internal/component/otelcol/auth"
-	"github.com/grafana/agent/internal/featuregate"
+	"github.com/grafana/alloy/internal/component"
+	"github.com/grafana/alloy/internal/component/otelcol/auth"
+	"github.com/grafana/alloy/internal/featuregate"
 	"github.com/grafana/alloy/syntax"
 	"github.com/grafana/alloy/syntax/alloytypes"
 	"github.com/open-telemetry/opentelemetry-collector-contrib/extension/headerssetterextension"
diff --git a/internal/component/otelcol/auth/headers/headers_test.go b/internal/component/otelcol/auth/headers/headers_test.go
index 0c9a586491..68a5a29db6 100644
--- a/internal/component/otelcol/auth/headers/headers_test.go
+++ b/internal/component/otelcol/auth/headers/headers_test.go
@@ -7,10 +7,10 @@ import (
 	"testing"
 	"time"
 
-	"github.com/grafana/agent/internal/component/otelcol/auth"
-	"github.com/grafana/agent/internal/component/otelcol/auth/headers"
-	"github.com/grafana/agent/internal/flow/componenttest"
-	"github.com/grafana/agent/internal/util"
+	"github.com/grafana/alloy/internal/component/otelcol/auth"
+	"github.com/grafana/alloy/internal/component/otelcol/auth/headers"
+	"github.com/grafana/alloy/internal/flow/componenttest"
+	"github.com/grafana/alloy/internal/util"
 	"github.com/grafana/alloy/syntax"
 	"github.com/open-telemetry/opentelemetry-collector-contrib/extension/headerssetterextension"
 	"github.com/stretchr/testify/assert"
diff --git a/internal/component/otelcol/auth/oauth2/oauth2.go b/internal/component/otelcol/auth/oauth2/oauth2.go
index 438096c4a4..6f1ae38663 100644
--- a/internal/component/otelcol/auth/oauth2/oauth2.go
+++ b/internal/component/otelcol/auth/oauth2/oauth2.go
@@ -4,10 +4,10 @@ import (
 	"net/url"
 	"time"
 
-	"github.com/grafana/agent/internal/component"
-	"github.com/grafana/agent/internal/component/otelcol"
-	"github.com/grafana/agent/internal/component/otelcol/auth"
-	"github.com/grafana/agent/internal/featuregate"
+	"github.com/grafana/alloy/internal/component"
+	"github.com/grafana/alloy/internal/component/otelcol"
+	"github.com/grafana/alloy/internal/component/otelcol/auth"
+	"github.com/grafana/alloy/internal/featuregate"
 	"github.com/grafana/alloy/syntax/alloytypes"
 	"github.com/open-telemetry/opentelemetry-collector-contrib/extension/oauth2clientauthextension"
 	otelcomponent "go.opentelemetry.io/collector/component"
diff --git a/internal/component/otelcol/auth/oauth2/oauth2_test.go b/internal/component/otelcol/auth/oauth2/oauth2_test.go
index 7de2719d73..8a0367ddef 100644
--- a/internal/component/otelcol/auth/oauth2/oauth2_test.go
+++ b/internal/component/otelcol/auth/oauth2/oauth2_test.go
@@ -8,10 +8,10 @@ import (
 	"testing"
 	"time"
 
-	"github.com/grafana/agent/internal/component/otelcol/auth"
-	"github.com/grafana/agent/internal/component/otelcol/auth/oauth2"
-	"github.com/grafana/agent/internal/flow/componenttest"
-	"github.com/grafana/agent/internal/util"
+	"github.com/grafana/alloy/internal/component/otelcol/auth"
+	"github.com/grafana/alloy/internal/component/otelcol/auth/oauth2"
+	"github.com/grafana/alloy/internal/flow/componenttest"
+	"github.com/grafana/alloy/internal/util"
 	"github.com/grafana/alloy/syntax"
 	"github.com/stretchr/testify/require"
 	extauth "go.opentelemetry.io/collector/extension/auth"
diff --git a/internal/component/otelcol/auth/sigv4/sigv4.go b/internal/component/otelcol/auth/sigv4/sigv4.go
index e40f08352a..636a4accc9 100644
--- a/internal/component/otelcol/auth/sigv4/sigv4.go
+++ b/internal/component/otelcol/auth/sigv4/sigv4.go
@@ -1,9 +1,9 @@
 package sigv4
 
 import (
-	"github.com/grafana/agent/internal/component"
-	"github.com/grafana/agent/internal/component/otelcol/auth"
-	"github.com/grafana/agent/internal/featuregate"
+	"github.com/grafana/alloy/internal/component"
+	"github.com/grafana/alloy/internal/component/otelcol/auth"
+	"github.com/grafana/alloy/internal/featuregate"
 	"github.com/open-telemetry/opentelemetry-collector-contrib/extension/sigv4authextension"
 	otelcomponent "go.opentelemetry.io/collector/component"
 	otelextension "go.opentelemetry.io/collector/extension"
diff --git a/internal/component/otelcol/auth/sigv4/sigv4_test.go b/internal/component/otelcol/auth/sigv4/sigv4_test.go
index a7db4e7ffe..05bf871867 100644
--- a/internal/component/otelcol/auth/sigv4/sigv4_test.go
+++ b/internal/component/otelcol/auth/sigv4/sigv4_test.go
@@ -9,10 +9,10 @@ import (
 	"testing"
 	"time"
 
-	"github.com/grafana/agent/internal/component/otelcol/auth"
-	"github.com/grafana/agent/internal/component/otelcol/auth/sigv4"
-	"github.com/grafana/agent/internal/flow/componenttest"
-	"github.com/grafana/agent/internal/util"
+	"github.com/grafana/alloy/internal/component/otelcol/auth"
+	"github.com/grafana/alloy/internal/component/otelcol/auth/sigv4"
+	"github.com/grafana/alloy/internal/flow/componenttest"
+	"github.com/grafana/alloy/internal/util"
 	"github.com/grafana/alloy/syntax"
 	"github.com/stretchr/testify/assert"
 	"github.com/stretchr/testify/require"
diff --git a/internal/component/otelcol/config_attraction_test.go b/internal/component/otelcol/config_attraction_test.go
index 54d740b8b7..5879bde815 100644
--- a/internal/component/otelcol/config_attraction_test.go
+++ b/internal/component/otelcol/config_attraction_test.go
@@ -3,7 +3,7 @@ package otelcol_test
 import (
 	"testing"
 
-	"github.com/grafana/agent/internal/component/otelcol"
+	"github.com/grafana/alloy/internal/component/otelcol"
 	"github.com/stretchr/testify/require"
 )
diff --git a/internal/component/otelcol/config_filter_test.go b/internal/component/otelcol/config_filter_test.go
index b55e02227b..2b57cde73a 100644
--- a/internal/component/otelcol/config_filter_test.go
+++ b/internal/component/otelcol/config_filter_test.go
@@ -5,7 +5,7 @@ import (
 	"k8s.io/utils/ptr"
 
-	"github.com/grafana/agent/internal/component/otelcol"
+	"github.com/grafana/alloy/internal/component/otelcol"
 	"github.com/grafana/alloy/syntax"
 	"github.com/stretchr/testify/require"
diff --git a/internal/component/otelcol/config_grpc.go b/internal/component/otelcol/config_grpc.go
index 8d45332554..b2866aaa9e 100644
--- a/internal/component/otelcol/config_grpc.go
+++ b/internal/component/otelcol/config_grpc.go
@@ -4,7 +4,7 @@ import (
 	"time"
 
 	"github.com/alecthomas/units"
-	"github.com/grafana/agent/internal/component/otelcol/auth"
+	"github.com/grafana/alloy/internal/component/otelcol/auth"
 	otelcomponent "go.opentelemetry.io/collector/component"
 	otelconfigauth "go.opentelemetry.io/collector/config/configauth"
 	otelconfiggrpc "go.opentelemetry.io/collector/config/configgrpc"
diff --git a/internal/component/otelcol/config_http.go b/internal/component/otelcol/config_http.go
index 934a2a488c..f8a5554d4a 100644
--- a/internal/component/otelcol/config_http.go
+++ b/internal/component/otelcol/config_http.go
@@ -4,7 +4,7 @@ import (
 	"time"
 
 	"github.com/alecthomas/units"
-	"github.com/grafana/agent/internal/component/otelcol/auth"
+	"github.com/grafana/alloy/internal/component/otelcol/auth"
 	otelcomponent "go.opentelemetry.io/collector/component"
 	otelconfigauth "go.opentelemetry.io/collector/config/configauth"
 	otelconfighttp "go.opentelemetry.io/collector/config/confighttp"
diff --git a/internal/component/otelcol/connector/connector.go b/internal/component/otelcol/connector/connector.go
index 546465641a..63a1a38979 100644
--- a/internal/component/otelcol/connector/connector.go
+++ b/internal/component/otelcol/connector/connector.go
@@ -7,14 +7,14 @@ import (
 	"errors"
 	"os"
 
-	"github.com/grafana/agent/internal/build"
-	"github.com/grafana/agent/internal/component"
-	"github.com/grafana/agent/internal/component/otelcol"
-	"github.com/grafana/agent/internal/component/otelcol/internal/fanoutconsumer"
-	"github.com/grafana/agent/internal/component/otelcol/internal/lazycollector"
-	"github.com/grafana/agent/internal/component/otelcol/internal/lazyconsumer"
-	"github.com/grafana/agent/internal/component/otelcol/internal/scheduler"
-	"github.com/grafana/agent/internal/util/zapadapter"
+	"github.com/grafana/alloy/internal/build"
+	"github.com/grafana/alloy/internal/component"
+	"github.com/grafana/alloy/internal/component/otelcol"
+	"github.com/grafana/alloy/internal/component/otelcol/internal/fanoutconsumer"
+	"github.com/grafana/alloy/internal/component/otelcol/internal/lazycollector"
+	"github.com/grafana/alloy/internal/component/otelcol/internal/lazyconsumer"
+	"github.com/grafana/alloy/internal/component/otelcol/internal/scheduler"
+	"github.com/grafana/alloy/internal/util/zapadapter"
 	"github.com/prometheus/client_golang/prometheus"
 	otelcomponent "go.opentelemetry.io/collector/component"
 	otelconnector "go.opentelemetry.io/collector/connector"
diff --git a/internal/component/otelcol/connector/host_info/host_info.go b/internal/component/otelcol/connector/host_info/host_info.go
index c756b80bfd..de15f89bab 100644
--- a/internal/component/otelcol/connector/host_info/host_info.go
+++ b/internal/component/otelcol/connector/host_info/host_info.go
@@ -5,10 +5,10 @@ import (
 	"fmt"
 	"time"
 
-	"github.com/grafana/agent/internal/component"
-	"github.com/grafana/agent/internal/component/otelcol"
-	"github.com/grafana/agent/internal/component/otelcol/connector"
-	"github.com/grafana/agent/internal/featuregate"
+	"github.com/grafana/alloy/internal/component"
+	"github.com/grafana/alloy/internal/component/otelcol"
+	"github.com/grafana/alloy/internal/component/otelcol/connector"
+	"github.com/grafana/alloy/internal/featuregate"
 	"github.com/grafana/alloy/syntax"
 	otelcomponent "go.opentelemetry.io/collector/component"
 	otelextension "go.opentelemetry.io/collector/extension"
diff --git a/internal/component/otelcol/connector/servicegraph/servicegraph.go b/internal/component/otelcol/connector/servicegraph/servicegraph.go
index 1d0298f45a..e48b6e7aab 100644
--- a/internal/component/otelcol/connector/servicegraph/servicegraph.go
+++ b/internal/component/otelcol/connector/servicegraph/servicegraph.go
@@ -4,10 +4,10 @@ import (
 	"fmt"
 	"time"
 
-	"github.com/grafana/agent/internal/component"
-	"github.com/grafana/agent/internal/component/otelcol"
-	"github.com/grafana/agent/internal/component/otelcol/connector"
-	"github.com/grafana/agent/internal/featuregate"
+	"github.com/grafana/alloy/internal/component"
+	"github.com/grafana/alloy/internal/component/otelcol"
+	"github.com/grafana/alloy/internal/component/otelcol/connector"
+	"github.com/grafana/alloy/internal/featuregate"
 	"github.com/grafana/alloy/syntax"
 	"github.com/open-telemetry/opentelemetry-collector-contrib/connector/servicegraphconnector"
 	"github.com/open-telemetry/opentelemetry-collector-contrib/processor/servicegraphprocessor"
diff --git a/internal/component/otelcol/connector/servicegraph/servicegraph_test.go b/internal/component/otelcol/connector/servicegraph/servicegraph_test.go
index 390c07bd2f..5b9b129809 100644
--- a/internal/component/otelcol/connector/servicegraph/servicegraph_test.go
+++ b/internal/component/otelcol/connector/servicegraph/servicegraph_test.go
@@ -4,7 +4,7 @@ import (
 	"testing"
 	"time"
 
-	"github.com/grafana/agent/internal/component/otelcol/connector/servicegraph"
+	"github.com/grafana/alloy/internal/component/otelcol/connector/servicegraph"
 	"github.com/grafana/alloy/syntax"
 	"github.com/open-telemetry/opentelemetry-collector-contrib/processor/servicegraphprocessor"
 	"github.com/stretchr/testify/require"
diff --git a/internal/component/otelcol/connector/spanlogs/spanlogs.go b/internal/component/otelcol/connector/spanlogs/spanlogs.go
index bc749c02d8..10249d1bea 100644
--- a/internal/component/otelcol/connector/spanlogs/spanlogs.go
+++ b/internal/component/otelcol/connector/spanlogs/spanlogs.go
@@ -5,12 +5,12 @@ import (
 	"context"
 	"fmt"
 
-	"github.com/grafana/agent/internal/component"
-	"github.com/grafana/agent/internal/component/otelcol"
-	"github.com/grafana/agent/internal/component/otelcol/internal/fanoutconsumer"
-	"github.com/grafana/agent/internal/component/otelcol/internal/lazyconsumer"
-	"github.com/grafana/agent/internal/featuregate"
-	"github.com/grafana/agent/internal/flow/logging/level"
+	"github.com/grafana/alloy/internal/component"
+	"github.com/grafana/alloy/internal/component/otelcol"
+	"github.com/grafana/alloy/internal/component/otelcol/internal/fanoutconsumer"
+	"github.com/grafana/alloy/internal/component/otelcol/internal/lazyconsumer"
+	"github.com/grafana/alloy/internal/featuregate"
+	"github.com/grafana/alloy/internal/flow/logging/level"
 	"github.com/grafana/alloy/syntax"
 )
diff --git a/internal/component/otelcol/connector/spanlogs/spanlogs_test.go b/internal/component/otelcol/connector/spanlogs/spanlogs_test.go
index 093362aeeb..91fa3d5ee4 100644
--- a/internal/component/otelcol/connector/spanlogs/spanlogs_test.go
+++ b/internal/component/otelcol/connector/spanlogs/spanlogs_test.go
@@ -4,11 +4,11 @@ import (
 	"context"
 	"testing"
 
-	"github.com/grafana/agent/internal/component/otelcol"
-	"github.com/grafana/agent/internal/component/otelcol/connector/spanlogs"
-	"github.com/grafana/agent/internal/component/otelcol/processor/processortest"
-	"github.com/grafana/agent/internal/flow/componenttest"
-	"github.com/grafana/agent/internal/util"
+	"github.com/grafana/alloy/internal/component/otelcol"
+	"github.com/grafana/alloy/internal/component/otelcol/connector/spanlogs"
+	"github.com/grafana/alloy/internal/component/otelcol/processor/processortest"
+	"github.com/grafana/alloy/internal/flow/componenttest"
+	"github.com/grafana/alloy/internal/util"
 	"github.com/grafana/alloy/syntax"
 	"github.com/stretchr/testify/require"
 )
diff --git a/internal/component/otelcol/connector/spanmetrics/spanmetrics.go b/internal/component/otelcol/connector/spanmetrics/spanmetrics.go
index 4de3a55f12..9ad227fcd3 100644
--- a/internal/component/otelcol/connector/spanmetrics/spanmetrics.go
+++ b/internal/component/otelcol/connector/spanmetrics/spanmetrics.go
@@ -5,10 +5,10 @@ import (
 	"fmt"
 	"time"
 
-	"github.com/grafana/agent/internal/component"
-	"github.com/grafana/agent/internal/component/otelcol"
-	"github.com/grafana/agent/internal/component/otelcol/connector"
-	"github.com/grafana/agent/internal/featuregate"
+	"github.com/grafana/alloy/internal/component"
+	"github.com/grafana/alloy/internal/component/otelcol"
+	"github.com/grafana/alloy/internal/component/otelcol/connector"
+	"github.com/grafana/alloy/internal/featuregate"
 	"github.com/grafana/alloy/syntax"
 	"github.com/open-telemetry/opentelemetry-collector-contrib/connector/spanmetricsconnector"
 	otelcomponent "go.opentelemetry.io/collector/component"
diff --git a/internal/component/otelcol/connector/spanmetrics/spanmetrics_test.go b/internal/component/otelcol/connector/spanmetrics/spanmetrics_test.go
index 5a99a51de0..453bd91652 100644
--- a/internal/component/otelcol/connector/spanmetrics/spanmetrics_test.go
+++ b/internal/component/otelcol/connector/spanmetrics/spanmetrics_test.go
@@ -5,10 +5,10 @@ import (
 	"testing"
 	"time"
 
-	"github.com/grafana/agent/internal/component/otelcol/connector/spanmetrics"
-	"github.com/grafana/agent/internal/component/otelcol/processor/processortest"
-	"github.com/grafana/agent/internal/flow/componenttest"
-	"github.com/grafana/agent/internal/util"
+	"github.com/grafana/alloy/internal/component/otelcol/connector/spanmetrics"
+	"github.com/grafana/alloy/internal/component/otelcol/processor/processortest"
+	"github.com/grafana/alloy/internal/flow/componenttest"
+	"github.com/grafana/alloy/internal/util"
 	"github.com/grafana/alloy/syntax"
 	"github.com/open-telemetry/opentelemetry-collector-contrib/connector/spanmetricsconnector"
 	"github.com/stretchr/testify/require"
diff --git a/internal/component/otelcol/exporter/exporter.go b/internal/component/otelcol/exporter/exporter.go
index 2524070aff..3cbdfa3fa3 100644
--- a/internal/component/otelcol/exporter/exporter.go
+++ b/internal/component/otelcol/exporter/exporter.go
@@ -7,14 +7,14 @@ import (
 	"errors"
 	"os"
 
-	"github.com/grafana/agent/internal/build"
-	"github.com/grafana/agent/internal/component"
-	"github.com/grafana/agent/internal/component/otelcol"
-	"github.com/grafana/agent/internal/component/otelcol/internal/lazycollector"
-	"github.com/grafana/agent/internal/component/otelcol/internal/lazyconsumer"
-	"github.com/grafana/agent/internal/component/otelcol/internal/scheduler"
-	"github.com/grafana/agent/internal/component/otelcol/internal/views"
-	"github.com/grafana/agent/internal/util/zapadapter"
+	"github.com/grafana/alloy/internal/build"
+	"github.com/grafana/alloy/internal/component"
+	"github.com/grafana/alloy/internal/component/otelcol"
+	"github.com/grafana/alloy/internal/component/otelcol/internal/lazycollector"
+	"github.com/grafana/alloy/internal/component/otelcol/internal/lazyconsumer"
+	"github.com/grafana/alloy/internal/component/otelcol/internal/scheduler"
+	"github.com/grafana/alloy/internal/component/otelcol/internal/views"
+	"github.com/grafana/alloy/internal/util/zapadapter"
 	"github.com/prometheus/client_golang/prometheus"
 	otelcomponent "go.opentelemetry.io/collector/component"
 	otelexporter "go.opentelemetry.io/collector/exporter"
diff --git a/internal/component/otelcol/exporter/exporter_test.go b/internal/component/otelcol/exporter/exporter_test.go
index dd61355ec6..5498858044 100644
--- a/internal/component/otelcol/exporter/exporter_test.go
+++ b/internal/component/otelcol/exporter/exporter_test.go
@@ -6,11 +6,11 @@ import (
 	"testing"
 	"time"
 
-	"github.com/grafana/agent/internal/component"
-	"github.com/grafana/agent/internal/component/otelcol"
-	"github.com/grafana/agent/internal/component/otelcol/exporter"
-	"github.com/grafana/agent/internal/flow/componenttest"
-	"github.com/grafana/agent/internal/util"
+	"github.com/grafana/alloy/internal/component"
+	"github.com/grafana/alloy/internal/component/otelcol"
+	"github.com/grafana/alloy/internal/component/otelcol/exporter"
+	"github.com/grafana/alloy/internal/flow/componenttest"
+	"github.com/grafana/alloy/internal/util"
 	"github.com/stretchr/testify/require"
 	otelcomponent "go.opentelemetry.io/collector/component"
 	otelconsumer "go.opentelemetry.io/collector/consumer"
diff --git a/internal/component/otelcol/exporter/loadbalancing/loadbalancing.go b/internal/component/otelcol/exporter/loadbalancing/loadbalancing.go
index 70bad94765..2757441959 100644
--- a/internal/component/otelcol/exporter/loadbalancing/loadbalancing.go
+++ b/internal/component/otelcol/exporter/loadbalancing/loadbalancing.go
@@ -6,11 +6,11 @@ import (
 	"time"
 
 	"github.com/alecthomas/units"
-	"github.com/grafana/agent/internal/component"
-	"github.com/grafana/agent/internal/component/otelcol"
-	"github.com/grafana/agent/internal/component/otelcol/auth"
-	"github.com/grafana/agent/internal/component/otelcol/exporter"
-	"github.com/grafana/agent/internal/featuregate"
+	"github.com/grafana/alloy/internal/component"
+	"github.com/grafana/alloy/internal/component/otelcol"
+	"github.com/grafana/alloy/internal/component/otelcol/auth"
+	"github.com/grafana/alloy/internal/component/otelcol/exporter"
+	"github.com/grafana/alloy/internal/featuregate"
 	"github.com/grafana/alloy/syntax"
 	"github.com/open-telemetry/opentelemetry-collector-contrib/exporter/loadbalancingexporter"
 	otelcomponent "go.opentelemetry.io/collector/component"
diff --git a/internal/component/otelcol/exporter/loadbalancing/loadbalancing_test.go b/internal/component/otelcol/exporter/loadbalancing/loadbalancing_test.go
index 9b04ba1b23..9300f9c5f9 100644
--- a/internal/component/otelcol/exporter/loadbalancing/loadbalancing_test.go
+++ b/internal/component/otelcol/exporter/loadbalancing/loadbalancing_test.go
@@ -4,8 +4,8 @@ import (
 	"testing"
 	"time"
 
-	"github.com/grafana/agent/internal/component/otelcol"
-	"github.com/grafana/agent/internal/component/otelcol/exporter/loadbalancing"
+	"github.com/grafana/alloy/internal/component/otelcol"
+	"github.com/grafana/alloy/internal/component/otelcol/exporter/loadbalancing"
 	"github.com/grafana/alloy/syntax"
 	"github.com/open-telemetry/opentelemetry-collector-contrib/exporter/loadbalancingexporter"
 	"github.com/stretchr/testify/require"
diff --git a/internal/component/otelcol/exporter/logging/logging.go b/internal/component/otelcol/exporter/logging/logging.go
index 3e09afa09d..45cff77cde 100644
--- a/internal/component/otelcol/exporter/logging/logging.go
+++ b/internal/component/otelcol/exporter/logging/logging.go
@@ -2,10 +2,10 @@ package logging
 
 import (
-	"github.com/grafana/agent/internal/component"
-	"github.com/grafana/agent/internal/component/otelcol"
-	"github.com/grafana/agent/internal/component/otelcol/exporter"
-	"github.com/grafana/agent/internal/featuregate"
+	"github.com/grafana/alloy/internal/component"
+	"github.com/grafana/alloy/internal/component/otelcol"
+	"github.com/grafana/alloy/internal/component/otelcol/exporter"
+	"github.com/grafana/alloy/internal/featuregate"
 	otelcomponent "go.opentelemetry.io/collector/component"
 	"go.opentelemetry.io/collector/config/configtelemetry"
 	loggingexporter "go.opentelemetry.io/collector/exporter/loggingexporter"
diff --git a/internal/component/otelcol/exporter/loki/internal/convert/convert.go b/internal/component/otelcol/exporter/loki/internal/convert/convert.go
index 63446b8f3e..69ffff6edb 100644
--- a/internal/component/otelcol/exporter/loki/internal/convert/convert.go
+++ b/internal/component/otelcol/exporter/loki/internal/convert/convert.go
@@ -13,8 +13,8 @@ import (
 	"sync"
 
 	"github.com/go-kit/log"
-	"github.com/grafana/agent/internal/component/common/loki"
-	"github.com/grafana/agent/internal/flow/logging/level"
+	"github.com/grafana/alloy/internal/component/common/loki"
+	"github.com/grafana/alloy/internal/flow/logging/level"
 	loki_translator "github.com/open-telemetry/opentelemetry-collector-contrib/pkg/translator/loki"
 	"github.com/prometheus/client_golang/prometheus"
 	"go.opentelemetry.io/collector/consumer"
diff --git a/internal/component/otelcol/exporter/loki/internal/convert/convert_test.go b/internal/component/otelcol/exporter/loki/internal/convert/convert_test.go
index 016b8929fe..b7e8fbec10 100644
--- a/internal/component/otelcol/exporter/loki/internal/convert/convert_test.go
+++ b/internal/component/otelcol/exporter/loki/internal/convert/convert_test.go
@@ -6,10 +6,10 @@ import (
 	"testing"
 	"time"
 
-	"github.com/grafana/agent/internal/component/common/loki"
-	"github.com/grafana/agent/internal/component/otelcol/exporter/loki/internal/convert"
-	"github.com/grafana/agent/internal/component/otelcol/processor/processortest"
-	"github.com/grafana/agent/internal/util"
+	"github.com/grafana/alloy/internal/component/common/loki"
+	"github.com/grafana/alloy/internal/component/otelcol/exporter/loki/internal/convert"
+	"github.com/grafana/alloy/internal/component/otelcol/processor/processortest"
+	"github.com/grafana/alloy/internal/util"
 	"github.com/grafana/loki/pkg/push"
 	"github.com/prometheus/client_golang/prometheus"
 	"github.com/prometheus/common/model"
diff --git a/internal/component/otelcol/exporter/loki/loki.go b/internal/component/otelcol/exporter/loki/loki.go
index 5809d7edfc..983b3f0b1a 100644
--- a/internal/component/otelcol/exporter/loki/loki.go
+++ b/internal/component/otelcol/exporter/loki/loki.go
@@ -5,12 +5,12 @@ import (
 	"context"
 
 	"github.com/go-kit/log"
-	"github.com/grafana/agent/internal/component"
-	"github.com/grafana/agent/internal/component/common/loki"
-	"github.com/grafana/agent/internal/component/otelcol"
-	"github.com/grafana/agent/internal/component/otelcol/exporter/loki/internal/convert"
-	"github.com/grafana/agent/internal/component/otelcol/internal/lazyconsumer"
-	"github.com/grafana/agent/internal/featuregate"
+	"github.com/grafana/alloy/internal/component"
+	"github.com/grafana/alloy/internal/component/common/loki"
+	"github.com/grafana/alloy/internal/component/otelcol"
+	"github.com/grafana/alloy/internal/component/otelcol/exporter/loki/internal/convert"
+	"github.com/grafana/alloy/internal/component/otelcol/internal/lazyconsumer"
+	"github.com/grafana/alloy/internal/featuregate"
 )
 
 func init() {
diff --git a/internal/component/otelcol/exporter/otlp/otlp.go b/internal/component/otelcol/exporter/otlp/otlp.go
index cd46c29901..56cd2835fd 100644
--- a/internal/component/otelcol/exporter/otlp/otlp.go
+++ b/internal/component/otelcol/exporter/otlp/otlp.go
@@ -4,10 +4,10 @@ import (
 	"time"
 
-	"github.com/grafana/agent/internal/component"
-	"github.com/grafana/agent/internal/component/otelcol"
-	"github.com/grafana/agent/internal/component/otelcol/exporter"
-	"github.com/grafana/agent/internal/featuregate"
+	"github.com/grafana/alloy/internal/component"
+	"github.com/grafana/alloy/internal/component/otelcol"
+	"github.com/grafana/alloy/internal/component/otelcol/exporter"
+	"github.com/grafana/alloy/internal/featuregate"
 	otelcomponent "go.opentelemetry.io/collector/component"
 	otelpexporterhelper "go.opentelemetry.io/collector/exporter/exporterhelper"
 	"go.opentelemetry.io/collector/exporter/otlpexporter"
diff --git a/internal/component/otelcol/exporter/otlp/otlp_test.go b/internal/component/otelcol/exporter/otlp/otlp_test.go
index 88fa01e7b1..23ed07ad29 100644
--- a/internal/component/otelcol/exporter/otlp/otlp_test.go
+++ b/internal/component/otelcol/exporter/otlp/otlp_test.go
@@ -7,11 +7,11 @@ import (
 	"testing"
 	"time"
 
-	"github.com/grafana/agent/internal/component/otelcol"
-	"github.com/grafana/agent/internal/component/otelcol/exporter/otlp"
-	"github.com/grafana/agent/internal/flow/componenttest"
-	"github.com/grafana/agent/internal/flow/logging/level"
-	"github.com/grafana/agent/internal/util"
+	"github.com/grafana/alloy/internal/component/otelcol"
+	"github.com/grafana/alloy/internal/component/otelcol/exporter/otlp"
+	"github.com/grafana/alloy/internal/flow/componenttest"
+	"github.com/grafana/alloy/internal/flow/logging/level"
+	"github.com/grafana/alloy/internal/util"
 	"github.com/grafana/alloy/syntax"
 	"github.com/grafana/dskit/backoff"
 	"github.com/stretchr/testify/require"
diff --git a/internal/component/otelcol/exporter/otlphttp/otlphttp.go b/internal/component/otelcol/exporter/otlphttp/otlphttp.go
index 6457d379ca..bb9124dbae 100644
--- a/internal/component/otelcol/exporter/otlphttp/otlphttp.go
+++ b/internal/component/otelcol/exporter/otlphttp/otlphttp.go
@@ -5,10 +5,10 @@ import (
 	"errors"
 	"time"
 
-	"github.com/grafana/agent/internal/component"
-	"github.com/grafana/agent/internal/component/otelcol"
-	"github.com/grafana/agent/internal/component/otelcol/exporter"
-	"github.com/grafana/agent/internal/featuregate"
+	"github.com/grafana/alloy/internal/component"
+	"github.com/grafana/alloy/internal/component/otelcol"
+	"github.com/grafana/alloy/internal/component/otelcol/exporter"
+	"github.com/grafana/alloy/internal/featuregate"
 	otelcomponent "go.opentelemetry.io/collector/component"
 	"go.opentelemetry.io/collector/exporter/otlphttpexporter"
 	otelextension "go.opentelemetry.io/collector/extension"
diff --git a/internal/component/otelcol/exporter/otlphttp/otlphttp_test.go b/internal/component/otelcol/exporter/otlphttp/otlphttp_test.go
index 1e49d8e63e..2277e0631f 100644
--- a/internal/component/otelcol/exporter/otlphttp/otlphttp_test.go
+++ b/internal/component/otelcol/exporter/otlphttp/otlphttp_test.go
@@ -8,11 +8,11 @@ import (
 	"testing"
 	"time"
 
-	"github.com/grafana/agent/internal/component/otelcol"
-	"github.com/grafana/agent/internal/component/otelcol/exporter/otlphttp"
-	"github.com/grafana/agent/internal/flow/componenttest"
-	"github.com/grafana/agent/internal/flow/logging/level"
-	"github.com/grafana/agent/internal/util"
+	"github.com/grafana/alloy/internal/component/otelcol"
+	"github.com/grafana/alloy/internal/component/otelcol/exporter/otlphttp"
+	"github.com/grafana/alloy/internal/flow/componenttest"
+	"github.com/grafana/alloy/internal/flow/logging/level"
+	"github.com/grafana/alloy/internal/util"
 	"github.com/grafana/alloy/syntax"
 	"github.com/grafana/dskit/backoff"
 	"github.com/stretchr/testify/require"
diff --git a/internal/component/otelcol/exporter/prometheus/internal/convert/convert.go b/internal/component/otelcol/exporter/prometheus/internal/convert/convert.go
index a8f4d4a91c..dc3a218f38 100644
--- a/internal/component/otelcol/exporter/prometheus/internal/convert/convert.go
+++ b/internal/component/otelcol/exporter/prometheus/internal/convert/convert.go
@@ -18,7 +18,7 @@ import (
 	"time"
 
 	"github.com/go-kit/log"
-	"github.com/grafana/agent/internal/flow/logging/level"
+	"github.com/grafana/alloy/internal/flow/logging/level"
 	"github.com/open-telemetry/opentelemetry-collector-contrib/pkg/translator/prometheus"
 	"github.com/prometheus/common/model"
 	"github.com/prometheus/prometheus/model/exemplar"
diff --git a/internal/component/otelcol/exporter/prometheus/internal/convert/convert_test.go b/internal/component/otelcol/exporter/prometheus/internal/convert/convert_test.go
index 37d001b9d5..08718deab9 100644
--- a/internal/component/otelcol/exporter/prometheus/internal/convert/convert_test.go
+++ b/internal/component/otelcol/exporter/prometheus/internal/convert/convert_test.go
@@ -5,9 +5,9 @@ import (
 	"encoding/json"
 	"testing"
 
-	"github.com/grafana/agent/internal/component/otelcol/exporter/prometheus/internal/convert"
-	"github.com/grafana/agent/internal/util"
-	"github.com/grafana/agent/internal/util/testappender"
+	"github.com/grafana/alloy/internal/component/otelcol/exporter/prometheus/internal/convert"
+	"github.com/grafana/alloy/internal/util"
+	"github.com/grafana/alloy/internal/util/testappender"
 	"github.com/prometheus/prometheus/storage"
 	"github.com/stretchr/testify/require"
 	"go.opentelemetry.io/collector/pdata/pmetric"
diff --git a/internal/component/otelcol/exporter/prometheus/prometheus.go b/internal/component/otelcol/exporter/prometheus/prometheus.go
index f85f566bb0..0150b03fad 100644
--- a/internal/component/otelcol/exporter/prometheus/prometheus.go
+++ b/internal/component/otelcol/exporter/prometheus/prometheus.go
@@ -8,13 +8,13 @@ import (
 	"time"
 
 	"github.com/go-kit/log"
-	"github.com/grafana/agent/internal/component"
-	"github.com/grafana/agent/internal/component/otelcol"
-	"github.com/grafana/agent/internal/component/otelcol/exporter/prometheus/internal/convert"
-	"github.com/grafana/agent/internal/component/otelcol/internal/lazyconsumer"
-	"github.com/grafana/agent/internal/component/prometheus"
-	"github.com/grafana/agent/internal/featuregate"
-	"github.com/grafana/agent/internal/service/labelstore"
+	"github.com/grafana/alloy/internal/component"
+	"github.com/grafana/alloy/internal/component/otelcol"
+	"github.com/grafana/alloy/internal/component/otelcol/exporter/prometheus/internal/convert"
+	"github.com/grafana/alloy/internal/component/otelcol/internal/lazyconsumer"
+	"github.com/grafana/alloy/internal/component/prometheus"
+	"github.com/grafana/alloy/internal/featuregate"
+	"github.com/grafana/alloy/internal/service/labelstore"
 	"github.com/prometheus/prometheus/storage"
 )
diff --git a/internal/component/otelcol/exporter/prometheus/prometheus_test.go b/internal/component/otelcol/exporter/prometheus/prometheus_test.go
index f02b18f23b..3945c3ab96 100644
--- a/internal/component/otelcol/exporter/prometheus/prometheus_test.go
+++ b/internal/component/otelcol/exporter/prometheus/prometheus_test.go
@@ -4,7 +4,7 @@ import (
 	"testing"
 	"time"
 
-	"github.com/grafana/agent/internal/component/otelcol/exporter/prometheus"
+	"github.com/grafana/alloy/internal/component/otelcol/exporter/prometheus"
 	"github.com/grafana/alloy/syntax"
 	"github.com/prometheus/prometheus/storage"
 	"github.com/stretchr/testify/require"
diff --git a/internal/component/otelcol/extension/extension.go b/internal/component/otelcol/extension/extension.go
index 8bdafad806..a832e7cc32 100644
--- a/internal/component/otelcol/extension/extension.go
+++ b/internal/component/otelcol/extension/extension.go
@@ -9,11 +9,11 @@ import (
 	"context"
 	"os"
 
-	"github.com/grafana/agent/internal/build"
-	"github.com/grafana/agent/internal/component"
-	"github.com/grafana/agent/internal/component/otelcol/internal/lazycollector"
-	"github.com/grafana/agent/internal/component/otelcol/internal/scheduler"
-	"github.com/grafana/agent/internal/util/zapadapter"
+	"github.com/grafana/alloy/internal/build"
+	"github.com/grafana/alloy/internal/component"
+	"github.com/grafana/alloy/internal/component/otelcol/internal/lazycollector"
+	"github.com/grafana/alloy/internal/component/otelcol/internal/scheduler"
+	"github.com/grafana/alloy/internal/util/zapadapter"
 	"github.com/prometheus/client_golang/prometheus"
 	otelcomponent "go.opentelemetry.io/collector/component"
 	otelextension "go.opentelemetry.io/collector/extension"
diff --git a/internal/component/otelcol/extension/extension_test.go b/internal/component/otelcol/extension/extension_test.go
index fb83244343..ed5d6875c1 100644
--- a/internal/component/otelcol/extension/extension_test.go
+++ b/internal/component/otelcol/extension/extension_test.go
@@ -5,11 +5,11 @@ import (
 	"testing"
 	"time"
 
-	"github.com/grafana/agent/internal/component"
-	"github.com/grafana/agent/internal/component/otelcol"
-	"github.com/grafana/agent/internal/component/otelcol/extension"
-	"github.com/grafana/agent/internal/flow/componenttest"
-	"github.com/grafana/agent/internal/util"
+	"github.com/grafana/alloy/internal/component"
+	"github.com/grafana/alloy/internal/component/otelcol"
+	"github.com/grafana/alloy/internal/component/otelcol/extension"
+	"github.com/grafana/alloy/internal/flow/componenttest"
+	"github.com/grafana/alloy/internal/util"
 	"github.com/stretchr/testify/require"
 	otelcomponent "go.opentelemetry.io/collector/component"
 	otelextension "go.opentelemetry.io/collector/extension"
diff --git a/internal/component/otelcol/extension/jaeger_remote_sampling/internal/jaegerremotesampling/extension.go b/internal/component/otelcol/extension/jaeger_remote_sampling/internal/jaegerremotesampling/extension.go
index 84b8ca8a9f..34aa2fc41b 100644
--- a/internal/component/otelcol/extension/jaeger_remote_sampling/internal/jaegerremotesampling/extension.go
+++ b/internal/component/otelcol/extension/jaeger_remote_sampling/internal/jaegerremotesampling/extension.go
@@ -24,8 +24,8 @@ import (
 	otelextension "go.opentelemetry.io/collector/extension"
 	"go.uber.org/zap"
 
-	"github.com/grafana/agent/internal/component/otelcol/extension/jaeger_remote_sampling/internal/jaegerremotesampling/internal"
-	"github.com/grafana/agent/internal/component/otelcol/extension/jaeger_remote_sampling/internal/strategy_store"
+	"github.com/grafana/alloy/internal/component/otelcol/extension/jaeger_remote_sampling/internal/jaegerremotesampling/internal"
+	"github.com/grafana/alloy/internal/component/otelcol/extension/jaeger_remote_sampling/internal/strategy_store"
 )
 
 var _ otelextension.Extension = (*jrsExtension)(nil)
diff --git a/internal/component/otelcol/extension/jaeger_remote_sampling/jaeger_remote_sampling.go b/internal/component/otelcol/extension/jaeger_remote_sampling/jaeger_remote_sampling.go
index 815e734a82..babc653419 100644
--- a/internal/component/otelcol/extension/jaeger_remote_sampling/jaeger_remote_sampling.go
+++ b/internal/component/otelcol/extension/jaeger_remote_sampling/jaeger_remote_sampling.go
@@ -4,11 +4,11 @@ import (
 	"fmt"
 	"time"
 
-	"github.com/grafana/agent/internal/component"
-	"github.com/grafana/agent/internal/component/otelcol"
-	"github.com/grafana/agent/internal/component/otelcol/extension"
-	"github.com/grafana/agent/internal/component/otelcol/extension/jaeger_remote_sampling/internal/jaegerremotesampling"
-	"github.com/grafana/agent/internal/featuregate"
+	"github.com/grafana/alloy/internal/component"
+	"github.com/grafana/alloy/internal/component/otelcol"
+	"github.com/grafana/alloy/internal/component/otelcol/extension"
+	"github.com/grafana/alloy/internal/component/otelcol/extension/jaeger_remote_sampling/internal/jaegerremotesampling"
+	"github.com/grafana/alloy/internal/featuregate"
 	otelcomponent "go.opentelemetry.io/collector/component"
 	otelextension "go.opentelemetry.io/collector/extension"
 )
diff --git a/internal/component/otelcol/extension/jaeger_remote_sampling/jaeger_remote_sampling_test.go b/internal/component/otelcol/extension/jaeger_remote_sampling/jaeger_remote_sampling_test.go
index 159ba0ccf6..2446466af4 100644
--- a/internal/component/otelcol/extension/jaeger_remote_sampling/jaeger_remote_sampling_test.go
+++ b/internal/component/otelcol/extension/jaeger_remote_sampling/jaeger_remote_sampling_test.go
@@ -10,10 +10,10 @@ import (
 	"testing"
 	"time"
 
-	"github.com/grafana/agent/internal/component/otelcol"
-	"github.com/grafana/agent/internal/component/otelcol/extension/jaeger_remote_sampling"
-	"github.com/grafana/agent/internal/flow/componenttest"
-	"github.com/grafana/agent/internal/util"
+	"github.com/grafana/alloy/internal/component/otelcol"
+	"github.com/grafana/alloy/internal/component/otelcol/extension/jaeger_remote_sampling"
+	"github.com/grafana/alloy/internal/flow/componenttest"
+	"github.com/grafana/alloy/internal/util"
 	"github.com/grafana/alloy/syntax"
 	"github.com/phayes/freeport"
 	"github.com/stretchr/testify/require"
diff --git a/internal/component/otelcol/internal/fakeconsumer/fake.go b/internal/component/otelcol/internal/fakeconsumer/fake.go
index e90f508e65..c91a91e1ff 100644
--- a/internal/component/otelcol/internal/fakeconsumer/fake.go
+++ b/internal/component/otelcol/internal/fakeconsumer/fake.go
@@ -3,7 +3,7 @@ package fakeconsumer
 import (
 	"context"
 
-	"github.com/grafana/agent/internal/component/otelcol"
+	"github.com/grafana/alloy/internal/component/otelcol"
 	otelconsumer "go.opentelemetry.io/collector/consumer"
 	"go.opentelemetry.io/collector/pdata/plog"
 	"go.opentelemetry.io/collector/pdata/pmetric"
diff --git a/internal/component/otelcol/internal/fanoutconsumer/logs.go b/internal/component/otelcol/internal/fanoutconsumer/logs.go
index 1b6dc18945..6fc527bf01 100644
--- a/internal/component/otelcol/internal/fanoutconsumer/logs.go
+++ b/internal/component/otelcol/internal/fanoutconsumer/logs.go
@@ -9,7 +9,7 @@ package fanoutconsumer
 import (
 	"context"
 
-	"github.com/grafana/agent/internal/component/otelcol"
+	"github.com/grafana/alloy/internal/component/otelcol"
 	otelconsumer "go.opentelemetry.io/collector/consumer"
 	"go.opentelemetry.io/collector/pdata/plog"
 	"go.uber.org/multierr"
diff --git a/internal/component/otelcol/internal/fanoutconsumer/metrics.go b/internal/component/otelcol/internal/fanoutconsumer/metrics.go
index 4ace8e35a4..3669131b01 100644
--- a/internal/component/otelcol/internal/fanoutconsumer/metrics.go
+++ b/internal/component/otelcol/internal/fanoutconsumer/metrics.go
@@ -9,7 +9,7 @@ package fanoutconsumer
 import (
 	"context"
 
-	"github.com/grafana/agent/internal/component/otelcol"
+	"github.com/grafana/alloy/internal/component/otelcol"
 	otelconsumer "go.opentelemetry.io/collector/consumer"
 	"go.opentelemetry.io/collector/pdata/pmetric"
 	"go.uber.org/multierr"
diff --git a/internal/component/otelcol/internal/fanoutconsumer/traces.go b/internal/component/otelcol/internal/fanoutconsumer/traces.go
index 51403b6a04..785e286d5e 100644
--- a/internal/component/otelcol/internal/fanoutconsumer/traces.go
+++ b/internal/component/otelcol/internal/fanoutconsumer/traces.go
@@ -9,7 +9,7 @@ package fanoutconsumer
 import (
 	"context"
 
-	"github.com/grafana/agent/internal/component/otelcol"
+	"github.com/grafana/alloy/internal/component/otelcol"
 	otelconsumer "go.opentelemetry.io/collector/consumer"
 	"go.opentelemetry.io/collector/pdata/ptrace"
 	"go.uber.org/multierr"
diff --git a/internal/component/otelcol/internal/scheduler/host.go b/internal/component/otelcol/internal/scheduler/host.go
index 34bf664c63..aec74c35e9 100644
--- a/internal/component/otelcol/internal/scheduler/host.go
+++ b/internal/component/otelcol/internal/scheduler/host.go
@@ -2,7 +2,7 @@ package scheduler
 
 import (
 	"github.com/go-kit/log"
-	"github.com/grafana/agent/internal/flow/logging/level"
+	"github.com/grafana/alloy/internal/flow/logging/level"
 	otelcomponent "go.opentelemetry.io/collector/component"
 	otelextension "go.opentelemetry.io/collector/extension"
diff --git a/internal/component/otelcol/internal/scheduler/scheduler.go b/internal/component/otelcol/internal/scheduler/scheduler.go
index 5e553dd35a..5e345ff58b 100644
--- a/internal/component/otelcol/internal/scheduler/scheduler.go
+++ b/internal/component/otelcol/internal/scheduler/scheduler.go
@@ -9,8 +9,8 @@ import (
 	"time"
 
 	"github.com/go-kit/log"
-	"github.com/grafana/agent/internal/component"
-	"github.com/grafana/agent/internal/flow/logging/level"
+	"github.com/grafana/alloy/internal/component"
+	"github.com/grafana/alloy/internal/flow/logging/level"
 	otelcomponent "go.opentelemetry.io/collector/component"
 	"go.uber.org/multierr"
 )
diff --git a/internal/component/otelcol/internal/scheduler/scheduler_test.go b/internal/component/otelcol/internal/scheduler/scheduler_test.go
index 3d3e0de56e..28f20edccd 100644
--- a/internal/component/otelcol/internal/scheduler/scheduler_test.go
+++ b/internal/component/otelcol/internal/scheduler/scheduler_test.go
@@ -5,9 +5,9 @@ import (
 	"testing"
 	"time"
 
-	"github.com/grafana/agent/internal/component/otelcol/internal/scheduler"
-	"github.com/grafana/agent/internal/flow/componenttest"
-	"github.com/grafana/agent/internal/util"
+	"github.com/grafana/alloy/internal/component/otelcol/internal/scheduler"
+	"github.com/grafana/alloy/internal/flow/componenttest"
+	"github.com/grafana/alloy/internal/util"
 	"github.com/stretchr/testify/require"
 	otelcomponent "go.opentelemetry.io/collector/component"
 )
diff --git a/internal/component/otelcol/processor/attributes/attributes.go b/internal/component/otelcol/processor/attributes/attributes.go
index 0f0cef5771..6fa270e5a6 100644
--- a/internal/component/otelcol/processor/attributes/attributes.go
+++ b/internal/component/otelcol/processor/attributes/attributes.go
@@ -4,10 +4,10 @@ package attributes
 import (
 	"fmt"
 
-	"github.com/grafana/agent/internal/component"
-	"github.com/grafana/agent/internal/component/otelcol"
-	"github.com/grafana/agent/internal/component/otelcol/processor"
-	"github.com/grafana/agent/internal/featuregate"
+	"github.com/grafana/alloy/internal/component"
+	"github.com/grafana/alloy/internal/component/otelcol"
+	"github.com/grafana/alloy/internal/component/otelcol/processor"
+	"github.com/grafana/alloy/internal/featuregate"
 	"github.com/mitchellh/mapstructure"
 	"github.com/open-telemetry/opentelemetry-collector-contrib/processor/attributesprocessor"
 	otelcomponent "go.opentelemetry.io/collector/component"
diff --git a/internal/component/otelcol/processor/attributes/attributes_test.go b/internal/component/otelcol/processor/attributes/attributes_test.go
index 3a488c45f1..4c29fd5ec4 100644
--- a/internal/component/otelcol/processor/attributes/attributes_test.go
+++ b/internal/component/otelcol/processor/attributes/attributes_test.go
@@ -6,11 +6,11 @@ import (
 	"net"
 	"testing"
 
-	"github.com/grafana/agent/internal/component/otelcol"
-	"github.com/grafana/agent/internal/component/otelcol/processor/attributes"
-	"github.com/grafana/agent/internal/component/otelcol/processor/processortest"
-	"github.com/grafana/agent/internal/flow/componenttest"
-	"github.com/grafana/agent/internal/util"
+	"github.com/grafana/alloy/internal/component/otelcol"
+	"github.com/grafana/alloy/internal/component/otelcol/processor/attributes"
+	"github.com/grafana/alloy/internal/component/otelcol/processor/processortest"
+	"github.com/grafana/alloy/internal/flow/componenttest"
+	"github.com/grafana/alloy/internal/util"
 	"github.com/grafana/alloy/syntax"
 	"github.com/mitchellh/mapstructure"
 	"github.com/open-telemetry/opentelemetry-collector-contrib/processor/attributesprocessor"
diff --git a/internal/component/otelcol/processor/batch/batch.go b/internal/component/otelcol/processor/batch/batch.go
index 3f659fb6bd..db433d4c62 100644
--- a/internal/component/otelcol/processor/batch/batch.go
+++ b/internal/component/otelcol/processor/batch/batch.go
@@ -5,10 +5,10 @@ import (
 	"fmt"
 	"time"
 
-	"github.com/grafana/agent/internal/component"
-	"github.com/grafana/agent/internal/component/otelcol"
-	"github.com/grafana/agent/internal/component/otelcol/processor"
-	"github.com/grafana/agent/internal/featuregate"
+	"github.com/grafana/alloy/internal/component"
+	"github.com/grafana/alloy/internal/component/otelcol"
+	"github.com/grafana/alloy/internal/component/otelcol/processor"
+	"github.com/grafana/alloy/internal/featuregate"
 	otelcomponent "go.opentelemetry.io/collector/component"
 	otelextension "go.opentelemetry.io/collector/extension"
 	"go.opentelemetry.io/collector/processor/batchprocessor"
diff --git a/internal/component/otelcol/processor/batch/batch_test.go b/internal/component/otelcol/processor/batch/batch_test.go
index 10b7add2d8..2fd6b3ffd6 100644
--- a/internal/component/otelcol/processor/batch/batch_test.go
+++ b/internal/component/otelcol/processor/batch/batch_test.go
@@ -5,12 +5,12 @@ import (
 	"testing"
 	"time"
 
-	"github.com/grafana/agent/internal/component/otelcol"
-	"github.com/grafana/agent/internal/component/otelcol/internal/fakeconsumer"
-	"github.com/grafana/agent/internal/component/otelcol/processor/batch"
-	"github.com/grafana/agent/internal/flow/componenttest"
-	"github.com/grafana/agent/internal/flow/logging/level"
-	"github.com/grafana/agent/internal/util"
+	"github.com/grafana/alloy/internal/component/otelcol"
+	"github.com/grafana/alloy/internal/component/otelcol/internal/fakeconsumer"
+	"github.com/grafana/alloy/internal/component/otelcol/processor/batch"
+	"github.com/grafana/alloy/internal/flow/componenttest"
+	"github.com/grafana/alloy/internal/flow/logging/level"
+	"github.com/grafana/alloy/internal/util"
 	"github.com/grafana/alloy/syntax"
 	"github.com/grafana/dskit/backoff"
 	"github.com/stretchr/testify/require"
diff --git a/internal/component/otelcol/processor/discovery/discovery.go b/internal/component/otelcol/processor/discovery/discovery.go
index 24de52597a..6001f488e6 100644
--- a/internal/component/otelcol/processor/discovery/discovery.go
+++ b/internal/component/otelcol/processor/discovery/discovery.go
@@ -6,14 +6,14 @@ import (
 	"fmt"
 
 	"github.com/go-kit/log"
-	"github.com/grafana/agent/internal/component"
-	"github.com/grafana/agent/internal/component/discovery"
-	"github.com/grafana/agent/internal/component/otelcol"
-	"github.com/grafana/agent/internal/component/otelcol/internal/fanoutconsumer"
-	"github.com/grafana/agent/internal/component/otelcol/internal/lazyconsumer"
-	"github.com/grafana/agent/internal/featuregate"
-	"github.com/grafana/agent/internal/flow/logging/level"
-	promsdconsumer "github.com/grafana/agent/internal/static/traces/promsdprocessor/consumer"
+	"github.com/grafana/alloy/internal/component"
+	"github.com/grafana/alloy/internal/component/discovery"
+	"github.com/grafana/alloy/internal/component/otelcol"
+	"github.com/grafana/alloy/internal/component/otelcol/internal/fanoutconsumer"
+	"github.com/grafana/alloy/internal/component/otelcol/internal/lazyconsumer"
+	"github.com/grafana/alloy/internal/featuregate"
+	"github.com/grafana/alloy/internal/flow/logging/level"
+	promsdconsumer "github.com/grafana/alloy/internal/static/traces/promsdprocessor/consumer"
 	"github.com/grafana/alloy/syntax"
 )
diff --git a/internal/component/otelcol/processor/discovery/discovery_test.go b/internal/component/otelcol/processor/discovery/discovery_test.go
index ab0aa96658..1b5d935372 100644
--- a/internal/component/otelcol/processor/discovery/discovery_test.go
+++ b/internal/component/otelcol/processor/discovery/discovery_test.go
@@ -5,11 +5,11 @@ import (
 	"fmt"
 	"testing"
 
-	"github.com/grafana/agent/internal/component/otelcol/processor/discovery"
-	"github.com/grafana/agent/internal/component/otelcol/processor/processortest"
-	"github.com/grafana/agent/internal/flow/componenttest"
-	promsdconsumer "github.com/grafana/agent/internal/static/traces/promsdprocessor/consumer"
-	"github.com/grafana/agent/internal/util"
+	"github.com/grafana/alloy/internal/component/otelcol/processor/discovery"
+	"github.com/grafana/alloy/internal/component/otelcol/processor/processortest"
+	"github.com/grafana/alloy/internal/flow/componenttest"
+	promsdconsumer "github.com/grafana/alloy/internal/static/traces/promsdprocessor/consumer"
+	"github.com/grafana/alloy/internal/util"
 	"github.com/grafana/alloy/syntax"
 	"github.com/stretchr/testify/require"
 	semconv "go.opentelemetry.io/collector/semconv/v1.5.0"
diff --git a/internal/component/otelcol/processor/filter/filter.go b/internal/component/otelcol/processor/filter/filter.go
index 64c01d15b6..19dcdd3d37 100644
--- a/internal/component/otelcol/processor/filter/filter.go
+++ b/internal/component/otelcol/processor/filter/filter.go
@@ -1,10 +1,10 @@
 package filter
 
 import (
-	"github.com/grafana/agent/internal/component"
-	"github.com/grafana/agent/internal/component/otelcol"
-	"github.com/grafana/agent/internal/component/otelcol/processor"
-	"github.com/grafana/agent/internal/featuregate"
+	"github.com/grafana/alloy/internal/component"
+	"github.com/grafana/alloy/internal/component/otelcol"
+	"github.com/grafana/alloy/internal/component/otelcol/processor"
+	"github.com/grafana/alloy/internal/featuregate"
 	"github.com/mitchellh/mapstructure"
 	"github.com/open-telemetry/opentelemetry-collector-contrib/pkg/ottl"
 	"github.com/open-telemetry/opentelemetry-collector-contrib/processor/filterprocessor"
diff --git a/internal/component/otelcol/processor/filter/filter_test.go b/internal/component/otelcol/processor/filter/filter_test.go
index 9589e646cb..eff73e6aef 100644
--- a/internal/component/otelcol/processor/filter/filter_test.go
+++ b/internal/component/otelcol/processor/filter/filter_test.go
@@ -3,7 +3,7 @@ package filter_test
 import (
 	"testing"
 
-	"github.com/grafana/agent/internal/component/otelcol/processor/filter"
+	"github.com/grafana/alloy/internal/component/otelcol/processor/filter"
 	"github.com/grafana/alloy/syntax"
 	"github.com/mitchellh/mapstructure"
 	"github.com/open-telemetry/opentelemetry-collector-contrib/processor/filterprocessor"
diff --git a/internal/component/otelcol/processor/k8sattributes/k8sattributes.go b/internal/component/otelcol/processor/k8sattributes/k8sattributes.go
index c5805c525d..08616ff6ea 100644
--- a/internal/component/otelcol/processor/k8sattributes/k8sattributes.go
+++ b/internal/component/otelcol/processor/k8sattributes/k8sattributes.go
@@ -2,10 +2,10 @@ package k8sattributes
 
 import (
-	"github.com/grafana/agent/internal/component"
-	"github.com/grafana/agent/internal/component/otelcol"
-	"github.com/grafana/agent/internal/component/otelcol/processor"
-	"github.com/grafana/agent/internal/featuregate"
+	"github.com/grafana/alloy/internal/component"
+	"github.com/grafana/alloy/internal/component/otelcol"
+	"github.com/grafana/alloy/internal/component/otelcol/processor"
+	"github.com/grafana/alloy/internal/featuregate"
 	"github.com/mitchellh/mapstructure"
 	"github.com/open-telemetry/opentelemetry-collector-contrib/processor/k8sattributesprocessor"
 	otelcomponent "go.opentelemetry.io/collector/component"
diff --git a/internal/component/otelcol/processor/k8sattributes/k8sattributes_test.go b/internal/component/otelcol/processor/k8sattributes/k8sattributes_test.go
index a10d18abf7..bc9e5b7966 100644
--- a/internal/component/otelcol/processor/k8sattributes/k8sattributes_test.go
+++ b/internal/component/otelcol/processor/k8sattributes/k8sattributes_test.go
@@ -3,7 +3,7 @@ package k8sattributes_test
 import (
 	"testing"
 
-	"github.com/grafana/agent/internal/component/otelcol/processor/k8sattributes"
+	"github.com/grafana/alloy/internal/component/otelcol/processor/k8sattributes"
 	"github.com/grafana/alloy/syntax"
 	"github.com/open-telemetry/opentelemetry-collector-contrib/processor/k8sattributesprocessor"
 	"github.com/stretchr/testify/require"
diff --git a/internal/component/otelcol/processor/memorylimiter/memorylimiter.go b/internal/component/otelcol/processor/memorylimiter/memorylimiter.go
index 745409749c..3ec6ef0b8d 100644
--- a/internal/component/otelcol/processor/memorylimiter/memorylimiter.go
+++ b/internal/component/otelcol/processor/memorylimiter/memorylimiter.go
@@ -6,10 +6,10 @@ import (
 	"time"
 
 	"github.com/alecthomas/units"
-	"github.com/grafana/agent/internal/component"
-	"github.com/grafana/agent/internal/component/otelcol"
-	"github.com/grafana/agent/internal/component/otelcol/processor"
-	"github.com/grafana/agent/internal/featuregate"
+	"github.com/grafana/alloy/internal/component"
+	"github.com/grafana/alloy/internal/component/otelcol"
+	"github.com/grafana/alloy/internal/component/otelcol/processor"
+	"github.com/grafana/alloy/internal/featuregate"
 	otelcomponent "go.opentelemetry.io/collector/component"
 	otelextension "go.opentelemetry.io/collector/extension"
 	"go.opentelemetry.io/collector/processor/memorylimiterprocessor"
diff --git a/internal/component/otelcol/processor/memorylimiter/memorylimiter_test.go b/internal/component/otelcol/processor/memorylimiter/memorylimiter_test.go
index d95ec2c483..96e676d7b2 100644
--- a/internal/component/otelcol/processor/memorylimiter/memorylimiter_test.go
+++ b/internal/component/otelcol/processor/memorylimiter/memorylimiter_test.go
@@ -5,12 +5,12 @@ import (
 	"testing"
 	"time"
 
-	"github.com/grafana/agent/internal/component/otelcol"
-	"github.com/grafana/agent/internal/component/otelcol/internal/fakeconsumer"
-	"github.com/grafana/agent/internal/component/otelcol/processor/memorylimiter"
-	"github.com/grafana/agent/internal/flow/componenttest"
-	"github.com/grafana/agent/internal/flow/logging/level"
-	"github.com/grafana/agent/internal/util"
+	"github.com/grafana/alloy/internal/component/otelcol"
+	"github.com/grafana/alloy/internal/component/otelcol/internal/fakeconsumer"
+	"github.com/grafana/alloy/internal/component/otelcol/processor/memorylimiter"
+	"github.com/grafana/alloy/internal/flow/componenttest"
+	"github.com/grafana/alloy/internal/flow/logging/level"
+	"github.com/grafana/alloy/internal/util"
 	"github.com/grafana/alloy/syntax"
 	"github.com/grafana/dskit/backoff"
 	"github.com/stretchr/testify/require"
diff --git a/internal/component/otelcol/processor/probabilistic_sampler/probabilistic_sampler.go b/internal/component/otelcol/processor/probabilistic_sampler/probabilistic_sampler.go
index e23d183818..b7fb40324d 100644
--- a/internal/component/otelcol/processor/probabilistic_sampler/probabilistic_sampler.go
+++ b/internal/component/otelcol/processor/probabilistic_sampler/probabilistic_sampler.go
@@ -2,10 +2,10 @@ package probabilistic_sampler
 
 import (
-	"github.com/grafana/agent/internal/component"
-	"github.com/grafana/agent/internal/component/otelcol"
-	"github.com/grafana/agent/internal/component/otelcol/processor"
-	"github.com/grafana/agent/internal/featuregate"
+	"github.com/grafana/alloy/internal/component"
+	"github.com/grafana/alloy/internal/component/otelcol"
+	"github.com/grafana/alloy/internal/component/otelcol/processor"
+	"github.com/grafana/alloy/internal/featuregate"
 	"github.com/grafana/alloy/syntax"
 	"github.com/open-telemetry/opentelemetry-collector-contrib/processor/probabilisticsamplerprocessor"
 	otelcomponent "go.opentelemetry.io/collector/component"
diff --git a/internal/component/otelcol/processor/probabilistic_sampler/probabilistic_sampler_test.go b/internal/component/otelcol/processor/probabilistic_sampler/probabilistic_sampler_test.go
index bf0192a1d4..46f907de02 100644
--- a/internal/component/otelcol/processor/probabilistic_sampler/probabilistic_sampler_test.go
+++ b/internal/component/otelcol/processor/probabilistic_sampler/probabilistic_sampler_test.go
@@ -4,10 +4,10 @@ import (
 	"context"
 	"testing"
 
-	probabilisticsampler
"github.com/grafana/agent/internal/component/otelcol/processor/probabilistic_sampler" - "github.com/grafana/agent/internal/component/otelcol/processor/processortest" - "github.com/grafana/agent/internal/flow/componenttest" - "github.com/grafana/agent/internal/util" + probabilisticsampler "github.com/grafana/alloy/internal/component/otelcol/processor/probabilistic_sampler" + "github.com/grafana/alloy/internal/component/otelcol/processor/processortest" + "github.com/grafana/alloy/internal/flow/componenttest" + "github.com/grafana/alloy/internal/util" "github.com/grafana/alloy/syntax" "github.com/open-telemetry/opentelemetry-collector-contrib/processor/probabilisticsamplerprocessor" "github.com/stretchr/testify/require" diff --git a/internal/component/otelcol/processor/processor.go b/internal/component/otelcol/processor/processor.go index 43d626ba5e..449b09dca8 100644 --- a/internal/component/otelcol/processor/processor.go +++ b/internal/component/otelcol/processor/processor.go @@ -7,14 +7,14 @@ import ( "errors" "os" - "github.com/grafana/agent/internal/build" - "github.com/grafana/agent/internal/component" - "github.com/grafana/agent/internal/component/otelcol" - "github.com/grafana/agent/internal/component/otelcol/internal/fanoutconsumer" - "github.com/grafana/agent/internal/component/otelcol/internal/lazycollector" - "github.com/grafana/agent/internal/component/otelcol/internal/lazyconsumer" - "github.com/grafana/agent/internal/component/otelcol/internal/scheduler" - "github.com/grafana/agent/internal/util/zapadapter" + "github.com/grafana/alloy/internal/build" + "github.com/grafana/alloy/internal/component" + "github.com/grafana/alloy/internal/component/otelcol" + "github.com/grafana/alloy/internal/component/otelcol/internal/fanoutconsumer" + "github.com/grafana/alloy/internal/component/otelcol/internal/lazycollector" + "github.com/grafana/alloy/internal/component/otelcol/internal/lazyconsumer" + "github.com/grafana/alloy/internal/component/otelcol/internal/scheduler" + "github.com/grafana/alloy/internal/util/zapadapter" "github.com/prometheus/client_golang/prometheus" otelcomponent "go.opentelemetry.io/collector/component" otelextension "go.opentelemetry.io/collector/extension" diff --git a/internal/component/otelcol/processor/processor_test.go b/internal/component/otelcol/processor/processor_test.go index 3beb97bc88..9c3d8bf649 100644 --- a/internal/component/otelcol/processor/processor_test.go +++ b/internal/component/otelcol/processor/processor_test.go @@ -6,12 +6,12 @@ import ( "testing" "time" - "github.com/grafana/agent/internal/component" - "github.com/grafana/agent/internal/component/otelcol" - "github.com/grafana/agent/internal/component/otelcol/internal/fakeconsumer" - "github.com/grafana/agent/internal/component/otelcol/processor" - "github.com/grafana/agent/internal/flow/componenttest" - "github.com/grafana/agent/internal/util" + "github.com/grafana/alloy/internal/component" + "github.com/grafana/alloy/internal/component/otelcol" + "github.com/grafana/alloy/internal/component/otelcol/internal/fakeconsumer" + "github.com/grafana/alloy/internal/component/otelcol/processor" + "github.com/grafana/alloy/internal/flow/componenttest" + "github.com/grafana/alloy/internal/util" "github.com/stretchr/testify/require" otelcomponent "go.opentelemetry.io/collector/component" otelconsumer "go.opentelemetry.io/collector/consumer" diff --git a/internal/component/otelcol/processor/processortest/processortest.go b/internal/component/otelcol/processor/processortest/processortest.go index 
40b8eba4b1..2eda5e2909 100644 --- a/internal/component/otelcol/processor/processortest/processortest.go +++ b/internal/component/otelcol/processor/processortest/processortest.go @@ -6,11 +6,11 @@ import ( "time" "github.com/go-kit/log" - "github.com/grafana/agent/internal/component" - "github.com/grafana/agent/internal/component/otelcol" - "github.com/grafana/agent/internal/component/otelcol/internal/fakeconsumer" - "github.com/grafana/agent/internal/flow/componenttest" - "github.com/grafana/agent/internal/flow/logging/level" + "github.com/grafana/alloy/internal/component" + "github.com/grafana/alloy/internal/component/otelcol" + "github.com/grafana/alloy/internal/component/otelcol/internal/fakeconsumer" + "github.com/grafana/alloy/internal/flow/componenttest" + "github.com/grafana/alloy/internal/flow/logging/level" "github.com/grafana/dskit/backoff" "github.com/stretchr/testify/require" "go.opentelemetry.io/collector/pdata/plog" diff --git a/internal/component/otelcol/processor/resourcedetection/internal/aws/ec2/config.go b/internal/component/otelcol/processor/resourcedetection/internal/aws/ec2/config.go index ac8a772da9..9aa4fe0365 100644 --- a/internal/component/otelcol/processor/resourcedetection/internal/aws/ec2/config.go +++ b/internal/component/otelcol/processor/resourcedetection/internal/aws/ec2/config.go @@ -1,7 +1,7 @@ package ec2 import ( - rac "github.com/grafana/agent/internal/component/otelcol/processor/resourcedetection/internal/resource_attribute_config" + rac "github.com/grafana/alloy/internal/component/otelcol/processor/resourcedetection/internal/resource_attribute_config" "github.com/grafana/alloy/syntax" ) diff --git a/internal/component/otelcol/processor/resourcedetection/internal/aws/ecs/config.go b/internal/component/otelcol/processor/resourcedetection/internal/aws/ecs/config.go index e9c8a43506..d5d23fb8c1 100644 --- a/internal/component/otelcol/processor/resourcedetection/internal/aws/ecs/config.go +++ b/internal/component/otelcol/processor/resourcedetection/internal/aws/ecs/config.go @@ -1,7 +1,7 @@ package ecs import ( - rac "github.com/grafana/agent/internal/component/otelcol/processor/resourcedetection/internal/resource_attribute_config" + rac "github.com/grafana/alloy/internal/component/otelcol/processor/resourcedetection/internal/resource_attribute_config" "github.com/grafana/alloy/syntax" ) diff --git a/internal/component/otelcol/processor/resourcedetection/internal/aws/eks/config.go b/internal/component/otelcol/processor/resourcedetection/internal/aws/eks/config.go index c44b42a639..f339419220 100644 --- a/internal/component/otelcol/processor/resourcedetection/internal/aws/eks/config.go +++ b/internal/component/otelcol/processor/resourcedetection/internal/aws/eks/config.go @@ -1,7 +1,7 @@ package eks import ( - rac "github.com/grafana/agent/internal/component/otelcol/processor/resourcedetection/internal/resource_attribute_config" + rac "github.com/grafana/alloy/internal/component/otelcol/processor/resourcedetection/internal/resource_attribute_config" "github.com/grafana/alloy/syntax" ) diff --git a/internal/component/otelcol/processor/resourcedetection/internal/aws/elasticbeanstalk/config.go b/internal/component/otelcol/processor/resourcedetection/internal/aws/elasticbeanstalk/config.go index 3500bc8b33..b16c3c3bc3 100644 --- a/internal/component/otelcol/processor/resourcedetection/internal/aws/elasticbeanstalk/config.go +++ b/internal/component/otelcol/processor/resourcedetection/internal/aws/elasticbeanstalk/config.go @@ -1,7 +1,7 @@ package 
elasticbeanstalk import ( - rac "github.com/grafana/agent/internal/component/otelcol/processor/resourcedetection/internal/resource_attribute_config" + rac "github.com/grafana/alloy/internal/component/otelcol/processor/resourcedetection/internal/resource_attribute_config" "github.com/grafana/alloy/syntax" ) diff --git a/internal/component/otelcol/processor/resourcedetection/internal/aws/lambda/config.go b/internal/component/otelcol/processor/resourcedetection/internal/aws/lambda/config.go index 6f185d9b2d..21afa01aef 100644 --- a/internal/component/otelcol/processor/resourcedetection/internal/aws/lambda/config.go +++ b/internal/component/otelcol/processor/resourcedetection/internal/aws/lambda/config.go @@ -1,7 +1,7 @@ package lambda import ( - rac "github.com/grafana/agent/internal/component/otelcol/processor/resourcedetection/internal/resource_attribute_config" + rac "github.com/grafana/alloy/internal/component/otelcol/processor/resourcedetection/internal/resource_attribute_config" "github.com/grafana/alloy/syntax" ) diff --git a/internal/component/otelcol/processor/resourcedetection/internal/azure/aks/config.go b/internal/component/otelcol/processor/resourcedetection/internal/azure/aks/config.go index 656875bd27..2fa5991335 100644 --- a/internal/component/otelcol/processor/resourcedetection/internal/azure/aks/config.go +++ b/internal/component/otelcol/processor/resourcedetection/internal/azure/aks/config.go @@ -1,7 +1,7 @@ package aks import ( - rac "github.com/grafana/agent/internal/component/otelcol/processor/resourcedetection/internal/resource_attribute_config" + rac "github.com/grafana/alloy/internal/component/otelcol/processor/resourcedetection/internal/resource_attribute_config" "github.com/grafana/alloy/syntax" ) diff --git a/internal/component/otelcol/processor/resourcedetection/internal/azure/config.go b/internal/component/otelcol/processor/resourcedetection/internal/azure/config.go index 871ea577ac..9d878295f2 100644 --- a/internal/component/otelcol/processor/resourcedetection/internal/azure/config.go +++ b/internal/component/otelcol/processor/resourcedetection/internal/azure/config.go @@ -1,7 +1,7 @@ package azure import ( - rac "github.com/grafana/agent/internal/component/otelcol/processor/resourcedetection/internal/resource_attribute_config" + rac "github.com/grafana/alloy/internal/component/otelcol/processor/resourcedetection/internal/resource_attribute_config" "github.com/grafana/alloy/syntax" ) diff --git a/internal/component/otelcol/processor/resourcedetection/internal/consul/config.go b/internal/component/otelcol/processor/resourcedetection/internal/consul/config.go index b9c6b20886..c0e4b0d00a 100644 --- a/internal/component/otelcol/processor/resourcedetection/internal/consul/config.go +++ b/internal/component/otelcol/processor/resourcedetection/internal/consul/config.go @@ -1,7 +1,7 @@ package consul import ( - rac "github.com/grafana/agent/internal/component/otelcol/processor/resourcedetection/internal/resource_attribute_config" + rac "github.com/grafana/alloy/internal/component/otelcol/processor/resourcedetection/internal/resource_attribute_config" "github.com/grafana/alloy/syntax" "github.com/grafana/alloy/syntax/alloytypes" "go.opentelemetry.io/collector/config/configopaque" diff --git a/internal/component/otelcol/processor/resourcedetection/internal/docker/config.go b/internal/component/otelcol/processor/resourcedetection/internal/docker/config.go index badceabdbe..7edfcc1607 100644 --- 
a/internal/component/otelcol/processor/resourcedetection/internal/docker/config.go +++ b/internal/component/otelcol/processor/resourcedetection/internal/docker/config.go @@ -1,7 +1,7 @@ package docker import ( - rac "github.com/grafana/agent/internal/component/otelcol/processor/resourcedetection/internal/resource_attribute_config" + rac "github.com/grafana/alloy/internal/component/otelcol/processor/resourcedetection/internal/resource_attribute_config" "github.com/grafana/alloy/syntax" ) diff --git a/internal/component/otelcol/processor/resourcedetection/internal/gcp/config.go b/internal/component/otelcol/processor/resourcedetection/internal/gcp/config.go index da95652600..897c5ba794 100644 --- a/internal/component/otelcol/processor/resourcedetection/internal/gcp/config.go +++ b/internal/component/otelcol/processor/resourcedetection/internal/gcp/config.go @@ -1,7 +1,7 @@ package gcp import ( - rac "github.com/grafana/agent/internal/component/otelcol/processor/resourcedetection/internal/resource_attribute_config" + rac "github.com/grafana/alloy/internal/component/otelcol/processor/resourcedetection/internal/resource_attribute_config" "github.com/grafana/alloy/syntax" ) diff --git a/internal/component/otelcol/processor/resourcedetection/internal/heroku/config.go b/internal/component/otelcol/processor/resourcedetection/internal/heroku/config.go index 3420a4df07..4b96f95441 100644 --- a/internal/component/otelcol/processor/resourcedetection/internal/heroku/config.go +++ b/internal/component/otelcol/processor/resourcedetection/internal/heroku/config.go @@ -1,7 +1,7 @@ package heroku import ( - rac "github.com/grafana/agent/internal/component/otelcol/processor/resourcedetection/internal/resource_attribute_config" + rac "github.com/grafana/alloy/internal/component/otelcol/processor/resourcedetection/internal/resource_attribute_config" "github.com/grafana/alloy/syntax" ) diff --git a/internal/component/otelcol/processor/resourcedetection/internal/k8snode/config.go b/internal/component/otelcol/processor/resourcedetection/internal/k8snode/config.go index 7e4c365862..071126b3e1 100644 --- a/internal/component/otelcol/processor/resourcedetection/internal/k8snode/config.go +++ b/internal/component/otelcol/processor/resourcedetection/internal/k8snode/config.go @@ -1,8 +1,8 @@ package k8snode import ( - "github.com/grafana/agent/internal/component/otelcol" - rac "github.com/grafana/agent/internal/component/otelcol/processor/resourcedetection/internal/resource_attribute_config" + "github.com/grafana/alloy/internal/component/otelcol" + rac "github.com/grafana/alloy/internal/component/otelcol/processor/resourcedetection/internal/resource_attribute_config" "github.com/grafana/alloy/syntax" ) diff --git a/internal/component/otelcol/processor/resourcedetection/internal/openshift/config.go b/internal/component/otelcol/processor/resourcedetection/internal/openshift/config.go index 3e6e38af5a..07c0130caa 100644 --- a/internal/component/otelcol/processor/resourcedetection/internal/openshift/config.go +++ b/internal/component/otelcol/processor/resourcedetection/internal/openshift/config.go @@ -1,8 +1,8 @@ package openshift import ( - "github.com/grafana/agent/internal/component/otelcol" - rac "github.com/grafana/agent/internal/component/otelcol/processor/resourcedetection/internal/resource_attribute_config" + "github.com/grafana/alloy/internal/component/otelcol" + rac "github.com/grafana/alloy/internal/component/otelcol/processor/resourcedetection/internal/resource_attribute_config" "github.com/grafana/alloy/syntax" 
 )
diff --git a/internal/component/otelcol/processor/resourcedetection/internal/system/config.go b/internal/component/otelcol/processor/resourcedetection/internal/system/config.go
index 8c90539a33..9141db2c90 100644
--- a/internal/component/otelcol/processor/resourcedetection/internal/system/config.go
+++ b/internal/component/otelcol/processor/resourcedetection/internal/system/config.go
@@ -3,7 +3,7 @@ package system
 import (
 	"fmt"
 
-	rac "github.com/grafana/agent/internal/component/otelcol/processor/resourcedetection/internal/resource_attribute_config"
+	rac "github.com/grafana/alloy/internal/component/otelcol/processor/resourcedetection/internal/resource_attribute_config"
 	"github.com/grafana/alloy/syntax"
 )
diff --git a/internal/component/otelcol/processor/resourcedetection/resourcedetection.go b/internal/component/otelcol/processor/resourcedetection/resourcedetection.go
index 7e5d617163..6fa2252ac2 100644
--- a/internal/component/otelcol/processor/resourcedetection/resourcedetection.go
+++ b/internal/component/otelcol/processor/resourcedetection/resourcedetection.go
@@ -4,24 +4,24 @@ import (
 	"fmt"
 	"time"
 
-	"github.com/grafana/agent/internal/component"
-	"github.com/grafana/agent/internal/component/otelcol"
-	"github.com/grafana/agent/internal/component/otelcol/processor"
-	"github.com/grafana/agent/internal/component/otelcol/processor/resourcedetection/internal/aws/ec2"
-	"github.com/grafana/agent/internal/component/otelcol/processor/resourcedetection/internal/aws/ecs"
-	"github.com/grafana/agent/internal/component/otelcol/processor/resourcedetection/internal/aws/eks"
-	"github.com/grafana/agent/internal/component/otelcol/processor/resourcedetection/internal/aws/elasticbeanstalk"
-	"github.com/grafana/agent/internal/component/otelcol/processor/resourcedetection/internal/aws/lambda"
-	"github.com/grafana/agent/internal/component/otelcol/processor/resourcedetection/internal/azure"
-	"github.com/grafana/agent/internal/component/otelcol/processor/resourcedetection/internal/azure/aks"
-	"github.com/grafana/agent/internal/component/otelcol/processor/resourcedetection/internal/consul"
-	"github.com/grafana/agent/internal/component/otelcol/processor/resourcedetection/internal/docker"
-	"github.com/grafana/agent/internal/component/otelcol/processor/resourcedetection/internal/gcp"
-	"github.com/grafana/agent/internal/component/otelcol/processor/resourcedetection/internal/heroku"
-	"github.com/grafana/agent/internal/component/otelcol/processor/resourcedetection/internal/k8snode"
-	"github.com/grafana/agent/internal/component/otelcol/processor/resourcedetection/internal/openshift"
-	"github.com/grafana/agent/internal/component/otelcol/processor/resourcedetection/internal/system"
-	"github.com/grafana/agent/internal/featuregate"
+	"github.com/grafana/alloy/internal/component"
+	"github.com/grafana/alloy/internal/component/otelcol"
+	"github.com/grafana/alloy/internal/component/otelcol/processor"
+	"github.com/grafana/alloy/internal/component/otelcol/processor/resourcedetection/internal/aws/ec2"
+	"github.com/grafana/alloy/internal/component/otelcol/processor/resourcedetection/internal/aws/ecs"
+	"github.com/grafana/alloy/internal/component/otelcol/processor/resourcedetection/internal/aws/eks"
+	"github.com/grafana/alloy/internal/component/otelcol/processor/resourcedetection/internal/aws/elasticbeanstalk"
+	"github.com/grafana/alloy/internal/component/otelcol/processor/resourcedetection/internal/aws/lambda"
+	"github.com/grafana/alloy/internal/component/otelcol/processor/resourcedetection/internal/azure"
+	"github.com/grafana/alloy/internal/component/otelcol/processor/resourcedetection/internal/azure/aks"
+	"github.com/grafana/alloy/internal/component/otelcol/processor/resourcedetection/internal/consul"
+	"github.com/grafana/alloy/internal/component/otelcol/processor/resourcedetection/internal/docker"
+	"github.com/grafana/alloy/internal/component/otelcol/processor/resourcedetection/internal/gcp"
+	"github.com/grafana/alloy/internal/component/otelcol/processor/resourcedetection/internal/heroku"
+	"github.com/grafana/alloy/internal/component/otelcol/processor/resourcedetection/internal/k8snode"
+	"github.com/grafana/alloy/internal/component/otelcol/processor/resourcedetection/internal/openshift"
+	"github.com/grafana/alloy/internal/component/otelcol/processor/resourcedetection/internal/system"
+	"github.com/grafana/alloy/internal/featuregate"
 	"github.com/grafana/alloy/syntax"
 	"github.com/mitchellh/mapstructure"
 	"github.com/open-telemetry/opentelemetry-collector-contrib/processor/resourcedetectionprocessor"
diff --git a/internal/component/otelcol/processor/resourcedetection/resourcedetection_test.go b/internal/component/otelcol/processor/resourcedetection/resourcedetection_test.go
index 8a70f5d96d..ae2374405e 100644
--- a/internal/component/otelcol/processor/resourcedetection/resourcedetection_test.go
+++ b/internal/component/otelcol/processor/resourcedetection/resourcedetection_test.go
@@ -4,21 +4,21 @@ import (
 	"testing"
 	"time"
 
-	"github.com/grafana/agent/internal/component/otelcol/processor/resourcedetection"
-	"github.com/grafana/agent/internal/component/otelcol/processor/resourcedetection/internal/aws/ec2"
-	"github.com/grafana/agent/internal/component/otelcol/processor/resourcedetection/internal/aws/ecs"
-	"github.com/grafana/agent/internal/component/otelcol/processor/resourcedetection/internal/aws/eks"
-	"github.com/grafana/agent/internal/component/otelcol/processor/resourcedetection/internal/aws/elasticbeanstalk"
-	"github.com/grafana/agent/internal/component/otelcol/processor/resourcedetection/internal/aws/lambda"
-	"github.com/grafana/agent/internal/component/otelcol/processor/resourcedetection/internal/azure"
-	"github.com/grafana/agent/internal/component/otelcol/processor/resourcedetection/internal/azure/aks"
-	"github.com/grafana/agent/internal/component/otelcol/processor/resourcedetection/internal/consul"
-	"github.com/grafana/agent/internal/component/otelcol/processor/resourcedetection/internal/docker"
-	"github.com/grafana/agent/internal/component/otelcol/processor/resourcedetection/internal/gcp"
-	"github.com/grafana/agent/internal/component/otelcol/processor/resourcedetection/internal/heroku"
-	kubernetes_node "github.com/grafana/agent/internal/component/otelcol/processor/resourcedetection/internal/k8snode"
-	"github.com/grafana/agent/internal/component/otelcol/processor/resourcedetection/internal/openshift"
-	"github.com/grafana/agent/internal/component/otelcol/processor/resourcedetection/internal/system"
+	"github.com/grafana/alloy/internal/component/otelcol/processor/resourcedetection"
+	"github.com/grafana/alloy/internal/component/otelcol/processor/resourcedetection/internal/aws/ec2"
+	"github.com/grafana/alloy/internal/component/otelcol/processor/resourcedetection/internal/aws/ecs"
+	"github.com/grafana/alloy/internal/component/otelcol/processor/resourcedetection/internal/aws/eks"
+	"github.com/grafana/alloy/internal/component/otelcol/processor/resourcedetection/internal/aws/elasticbeanstalk"
+	"github.com/grafana/alloy/internal/component/otelcol/processor/resourcedetection/internal/aws/lambda"
+	"github.com/grafana/alloy/internal/component/otelcol/processor/resourcedetection/internal/azure"
+	"github.com/grafana/alloy/internal/component/otelcol/processor/resourcedetection/internal/azure/aks"
+	"github.com/grafana/alloy/internal/component/otelcol/processor/resourcedetection/internal/consul"
+	"github.com/grafana/alloy/internal/component/otelcol/processor/resourcedetection/internal/docker"
+	"github.com/grafana/alloy/internal/component/otelcol/processor/resourcedetection/internal/gcp"
+	"github.com/grafana/alloy/internal/component/otelcol/processor/resourcedetection/internal/heroku"
+	kubernetes_node "github.com/grafana/alloy/internal/component/otelcol/processor/resourcedetection/internal/k8snode"
+	"github.com/grafana/alloy/internal/component/otelcol/processor/resourcedetection/internal/openshift"
+	"github.com/grafana/alloy/internal/component/otelcol/processor/resourcedetection/internal/system"
 	"github.com/grafana/alloy/syntax"
 	"github.com/mitchellh/mapstructure"
 	"github.com/open-telemetry/opentelemetry-collector-contrib/processor/resourcedetectionprocessor"
diff --git a/internal/component/otelcol/processor/span/span.go b/internal/component/otelcol/processor/span/span.go
index 57e4884709..66e317f863 100644
--- a/internal/component/otelcol/processor/span/span.go
+++ b/internal/component/otelcol/processor/span/span.go
@@ -4,10 +4,10 @@ package span
 import (
 	"fmt"
 
-	"github.com/grafana/agent/internal/component"
-	"github.com/grafana/agent/internal/component/otelcol"
-	"github.com/grafana/agent/internal/component/otelcol/processor"
-	"github.com/grafana/agent/internal/featuregate"
+	"github.com/grafana/alloy/internal/component"
+	"github.com/grafana/alloy/internal/component/otelcol"
+	"github.com/grafana/alloy/internal/component/otelcol/processor"
+	"github.com/grafana/alloy/internal/featuregate"
 	"github.com/mitchellh/mapstructure"
 	"github.com/open-telemetry/opentelemetry-collector-contrib/processor/spanprocessor"
 	otelcomponent "go.opentelemetry.io/collector/component"
diff --git a/internal/component/otelcol/processor/span/span_test.go b/internal/component/otelcol/processor/span/span_test.go
index 6a02063fba..11c1d25edf 100644
--- a/internal/component/otelcol/processor/span/span_test.go
+++ b/internal/component/otelcol/processor/span/span_test.go
@@ -4,10 +4,10 @@ import (
 	"context"
 	"testing"
 
-	"github.com/grafana/agent/internal/component/otelcol/processor/processortest"
-	"github.com/grafana/agent/internal/component/otelcol/processor/span"
-	"github.com/grafana/agent/internal/flow/componenttest"
-	"github.com/grafana/agent/internal/util"
+	"github.com/grafana/alloy/internal/component/otelcol/processor/processortest"
+	"github.com/grafana/alloy/internal/component/otelcol/processor/span"
+	"github.com/grafana/alloy/internal/flow/componenttest"
+	"github.com/grafana/alloy/internal/util"
 	"github.com/grafana/alloy/syntax"
 	"github.com/mitchellh/mapstructure"
 	"github.com/open-telemetry/opentelemetry-collector-contrib/processor/spanprocessor"
diff --git a/internal/component/otelcol/processor/tail_sampling/tail_sampling.go b/internal/component/otelcol/processor/tail_sampling/tail_sampling.go
index 9eb6c627dc..f2bf705a09 100644
--- a/internal/component/otelcol/processor/tail_sampling/tail_sampling.go
+++ b/internal/component/otelcol/processor/tail_sampling/tail_sampling.go
@@ -5,10 +5,10 @@ import (
 	"fmt"
 	"time"
 
-	"github.com/grafana/agent/internal/component"
-	"github.com/grafana/agent/internal/component/otelcol"
-	"github.com/grafana/agent/internal/component/otelcol/processor"
-	"github.com/grafana/agent/internal/featuregate"
+	"github.com/grafana/alloy/internal/component"
+	"github.com/grafana/alloy/internal/component/otelcol"
+	"github.com/grafana/alloy/internal/component/otelcol/processor"
+	"github.com/grafana/alloy/internal/featuregate"
 	tsp "github.com/open-telemetry/opentelemetry-collector-contrib/processor/tailsamplingprocessor"
 	otelcomponent "go.opentelemetry.io/collector/component"
 	otelextension "go.opentelemetry.io/collector/extension"
diff --git a/internal/component/otelcol/processor/tail_sampling/tail_sampling_test.go b/internal/component/otelcol/processor/tail_sampling/tail_sampling_test.go
index 4ae4e53801..668f7adb7c 100644
--- a/internal/component/otelcol/processor/tail_sampling/tail_sampling_test.go
+++ b/internal/component/otelcol/processor/tail_sampling/tail_sampling_test.go
@@ -7,11 +7,11 @@ import (
 	"testing"
 	"time"
 
-	"github.com/grafana/agent/internal/component/otelcol"
-	"github.com/grafana/agent/internal/component/otelcol/internal/fakeconsumer"
-	"github.com/grafana/agent/internal/flow/componenttest"
-	"github.com/grafana/agent/internal/flow/logging/level"
-	"github.com/grafana/agent/internal/util"
+	"github.com/grafana/alloy/internal/component/otelcol"
+	"github.com/grafana/alloy/internal/component/otelcol/internal/fakeconsumer"
+	"github.com/grafana/alloy/internal/flow/componenttest"
+	"github.com/grafana/alloy/internal/flow/logging/level"
+	"github.com/grafana/alloy/internal/util"
 	"github.com/grafana/alloy/syntax"
 	"github.com/grafana/dskit/backoff"
 	"github.com/stretchr/testify/require"
diff --git a/internal/component/otelcol/processor/transform/transform.go b/internal/component/otelcol/processor/transform/transform.go
index 3dd62edb7f..75f7168007 100644
--- a/internal/component/otelcol/processor/transform/transform.go
+++ b/internal/component/otelcol/processor/transform/transform.go
@@ -5,10 +5,10 @@ import (
 	"fmt"
 	"strings"
 
-	"github.com/grafana/agent/internal/component"
-	"github.com/grafana/agent/internal/component/otelcol"
-	"github.com/grafana/agent/internal/component/otelcol/processor"
-	"github.com/grafana/agent/internal/featuregate"
+	"github.com/grafana/alloy/internal/component"
+	"github.com/grafana/alloy/internal/component/otelcol"
+	"github.com/grafana/alloy/internal/component/otelcol/processor"
+	"github.com/grafana/alloy/internal/featuregate"
 	"github.com/mitchellh/mapstructure"
 	"github.com/open-telemetry/opentelemetry-collector-contrib/pkg/ottl"
 	"github.com/open-telemetry/opentelemetry-collector-contrib/processor/transformprocessor"
diff --git a/internal/component/otelcol/processor/transform/transform_test.go b/internal/component/otelcol/processor/transform/transform_test.go
index 291e3492f4..79bd6935e3 100644
--- a/internal/component/otelcol/processor/transform/transform_test.go
+++ b/internal/component/otelcol/processor/transform/transform_test.go
@@ -3,7 +3,7 @@ package transform_test
 import (
 	"testing"
 
-	"github.com/grafana/agent/internal/component/otelcol/processor/transform"
+	"github.com/grafana/alloy/internal/component/otelcol/processor/transform"
 	"github.com/grafana/alloy/syntax"
 	"github.com/mitchellh/mapstructure"
 	"github.com/open-telemetry/opentelemetry-collector-contrib/processor/transformprocessor"
diff --git a/internal/component/otelcol/receiver/jaeger/jaeger.go b/internal/component/otelcol/receiver/jaeger/jaeger.go
index 81d11952b8..5a63af4c74 100644
--- a/internal/component/otelcol/receiver/jaeger/jaeger.go
+++ b/internal/component/otelcol/receiver/jaeger/jaeger.go
@@ -5,10 +5,10 @@ import (
 	"fmt"
 
 	"github.com/alecthomas/units"
-	"github.com/grafana/agent/internal/component"
-	"github.com/grafana/agent/internal/component/otelcol"
-	"github.com/grafana/agent/internal/component/otelcol/receiver"
-	"github.com/grafana/agent/internal/featuregate"
+	"github.com/grafana/alloy/internal/component"
+	"github.com/grafana/alloy/internal/component/otelcol"
+	"github.com/grafana/alloy/internal/component/otelcol/receiver"
+	"github.com/grafana/alloy/internal/featuregate"
 	"github.com/open-telemetry/opentelemetry-collector-contrib/receiver/jaegerreceiver"
 	otelcomponent "go.opentelemetry.io/collector/component"
 	otelconfiggrpc "go.opentelemetry.io/collector/config/configgrpc"
diff --git a/internal/component/otelcol/receiver/jaeger/jaeger_test.go b/internal/component/otelcol/receiver/jaeger/jaeger_test.go
index d8296a210d..85d8164f28 100644
--- a/internal/component/otelcol/receiver/jaeger/jaeger_test.go
+++ b/internal/component/otelcol/receiver/jaeger/jaeger_test.go
@@ -5,10 +5,10 @@ import (
 	"testing"
 	"time"
 
-	"github.com/grafana/agent/internal/component/otelcol"
-	"github.com/grafana/agent/internal/component/otelcol/receiver/jaeger"
-	"github.com/grafana/agent/internal/flow/componenttest"
-	"github.com/grafana/agent/internal/util"
+	"github.com/grafana/alloy/internal/component/otelcol"
+	"github.com/grafana/alloy/internal/component/otelcol/receiver/jaeger"
+	"github.com/grafana/alloy/internal/flow/componenttest"
+	"github.com/grafana/alloy/internal/util"
 	"github.com/grafana/alloy/syntax"
 	"github.com/phayes/freeport"
 	"github.com/stretchr/testify/require"
diff --git a/internal/component/otelcol/receiver/kafka/kafka.go b/internal/component/otelcol/receiver/kafka/kafka.go
index aa1a3e4e52..5ba5ad61af 100644
--- a/internal/component/otelcol/receiver/kafka/kafka.go
+++ b/internal/component/otelcol/receiver/kafka/kafka.go
@@ -4,10 +4,10 @@ package kafka
 import (
 	"time"
 
-	"github.com/grafana/agent/internal/component"
-	"github.com/grafana/agent/internal/component/otelcol"
-	"github.com/grafana/agent/internal/component/otelcol/receiver"
-	"github.com/grafana/agent/internal/featuregate"
+	"github.com/grafana/alloy/internal/component"
+	"github.com/grafana/alloy/internal/component/otelcol"
+	"github.com/grafana/alloy/internal/component/otelcol/receiver"
+	"github.com/grafana/alloy/internal/featuregate"
 	"github.com/grafana/alloy/syntax/alloytypes"
 	"github.com/mitchellh/mapstructure"
 	"github.com/open-telemetry/opentelemetry-collector-contrib/exporter/kafkaexporter"
diff --git a/internal/component/otelcol/receiver/kafka/kafka_test.go b/internal/component/otelcol/receiver/kafka/kafka_test.go
index 4238a19c91..b6c10c5d31 100644
--- a/internal/component/otelcol/receiver/kafka/kafka_test.go
+++ b/internal/component/otelcol/receiver/kafka/kafka_test.go
@@ -4,8 +4,8 @@ import (
 	"testing"
 	"time"
 
-	"github.com/grafana/agent/internal/component/otelcol"
-	"github.com/grafana/agent/internal/component/otelcol/receiver/kafka"
+	"github.com/grafana/alloy/internal/component/otelcol"
+	"github.com/grafana/alloy/internal/component/otelcol/receiver/kafka"
 	"github.com/grafana/alloy/syntax"
 	"github.com/mitchellh/mapstructure"
 	"github.com/open-telemetry/opentelemetry-collector-contrib/exporter/kafkaexporter"
diff --git a/internal/component/otelcol/receiver/loki/loki.go b/internal/component/otelcol/receiver/loki/loki.go
index 7ff2e594c1..5f2042d1cb 100644
--- a/internal/component/otelcol/receiver/loki/loki.go
+++ b/internal/component/otelcol/receiver/loki/loki.go
@@ -8,12 +8,12 @@ import (
 	"sync"
 
 	"github.com/go-kit/log"
-	"github.com/grafana/agent/internal/component"
-	"github.com/grafana/agent/internal/component/common/loki"
-	"github.com/grafana/agent/internal/component/otelcol"
-	"github.com/grafana/agent/internal/component/otelcol/internal/fanoutconsumer"
-	"github.com/grafana/agent/internal/featuregate"
-	"github.com/grafana/agent/internal/flow/logging/level"
+	"github.com/grafana/alloy/internal/component"
+	"github.com/grafana/alloy/internal/component/common/loki"
+	"github.com/grafana/alloy/internal/component/otelcol"
+	"github.com/grafana/alloy/internal/component/otelcol/internal/fanoutconsumer"
+	"github.com/grafana/alloy/internal/featuregate"
+	"github.com/grafana/alloy/internal/flow/logging/level"
 	loki_translator "github.com/open-telemetry/opentelemetry-collector-contrib/pkg/translator/loki"
 	"go.opentelemetry.io/collector/consumer"
 	"go.opentelemetry.io/collector/pdata/plog"
diff --git a/internal/component/otelcol/receiver/loki/loki_test.go b/internal/component/otelcol/receiver/loki/loki_test.go
index 23b766f59e..54969000c1 100644
--- a/internal/component/otelcol/receiver/loki/loki_test.go
+++ b/internal/component/otelcol/receiver/loki/loki_test.go
@@ -5,11 +5,11 @@ import (
 	"testing"
 	"time"
 
-	lokiapi "github.com/grafana/agent/internal/component/common/loki"
-	"github.com/grafana/agent/internal/component/otelcol"
-	"github.com/grafana/agent/internal/component/otelcol/internal/fakeconsumer"
-	"github.com/grafana/agent/internal/flow/componenttest"
-	"github.com/grafana/agent/internal/util"
+	lokiapi "github.com/grafana/alloy/internal/component/common/loki"
+	"github.com/grafana/alloy/internal/component/otelcol"
+	"github.com/grafana/alloy/internal/component/otelcol/internal/fakeconsumer"
+	"github.com/grafana/alloy/internal/flow/componenttest"
+	"github.com/grafana/alloy/internal/util"
 	"github.com/grafana/alloy/syntax"
 	"github.com/grafana/loki/pkg/logproto"
 	"github.com/prometheus/common/model"
diff --git a/internal/component/otelcol/receiver/opencensus/opencensus.go b/internal/component/otelcol/receiver/opencensus/opencensus.go
index 64ed1f25ba..1500a560fb 100644
--- a/internal/component/otelcol/receiver/opencensus/opencensus.go
+++ b/internal/component/otelcol/receiver/opencensus/opencensus.go
@@ -3,10 +3,10 @@ package opencensus
 import (
 	"github.com/alecthomas/units"
-	"github.com/grafana/agent/internal/component"
-	"github.com/grafana/agent/internal/component/otelcol"
-	"github.com/grafana/agent/internal/component/otelcol/receiver"
-	"github.com/grafana/agent/internal/featuregate"
+	"github.com/grafana/alloy/internal/component"
+	"github.com/grafana/alloy/internal/component/otelcol"
+	"github.com/grafana/alloy/internal/component/otelcol/receiver"
+	"github.com/grafana/alloy/internal/featuregate"
 	"github.com/open-telemetry/opentelemetry-collector-contrib/receiver/opencensusreceiver"
 	otelcomponent "go.opentelemetry.io/collector/component"
 	"go.opentelemetry.io/collector/extension"
diff --git a/internal/component/otelcol/receiver/opencensus/opencensus_test.go b/internal/component/otelcol/receiver/opencensus/opencensus_test.go
index 07e3eae23f..b49e472bc1 100644
--- a/internal/component/otelcol/receiver/opencensus/opencensus_test.go
+++ b/internal/component/otelcol/receiver/opencensus/opencensus_test.go
@@ -5,10 +5,10 @@ import (
 	"testing"
 	"time"
 
-	"github.com/grafana/agent/internal/component/otelcol"
-	"github.com/grafana/agent/internal/component/otelcol/receiver/opencensus"
-	"github.com/grafana/agent/internal/flow/componenttest"
-	"github.com/grafana/agent/internal/util"
+	"github.com/grafana/alloy/internal/component/otelcol"
+	"github.com/grafana/alloy/internal/component/otelcol/receiver/opencensus"
+	"github.com/grafana/alloy/internal/flow/componenttest"
+	"github.com/grafana/alloy/internal/util"
 	"github.com/grafana/alloy/syntax"
 	"github.com/open-telemetry/opentelemetry-collector-contrib/receiver/opencensusreceiver"
 	"github.com/phayes/freeport"
diff --git a/internal/component/otelcol/receiver/otlp/otlp.go b/internal/component/otelcol/receiver/otlp/otlp.go
index 55863fadee..36c0638d6f 100644
--- a/internal/component/otelcol/receiver/otlp/otlp.go
+++ b/internal/component/otelcol/receiver/otlp/otlp.go
@@ -6,10 +6,10 @@ import (
 	net_url "net/url"
 
 	"github.com/alecthomas/units"
-	"github.com/grafana/agent/internal/component"
-	"github.com/grafana/agent/internal/component/otelcol"
-	"github.com/grafana/agent/internal/component/otelcol/receiver"
-	"github.com/grafana/agent/internal/featuregate"
+	"github.com/grafana/alloy/internal/component"
+	"github.com/grafana/alloy/internal/component/otelcol"
+	"github.com/grafana/alloy/internal/component/otelcol/receiver"
+	"github.com/grafana/alloy/internal/featuregate"
 	otelcomponent "go.opentelemetry.io/collector/component"
 	otelextension "go.opentelemetry.io/collector/extension"
 	"go.opentelemetry.io/collector/receiver/otlpreceiver"
diff --git a/internal/component/otelcol/receiver/otlp/otlp_test.go b/internal/component/otelcol/receiver/otlp/otlp_test.go
index 315753b77a..64e2ea601f 100644
--- a/internal/component/otelcol/receiver/otlp/otlp_test.go
+++ b/internal/component/otelcol/receiver/otlp/otlp_test.go
@@ -8,12 +8,12 @@ import (
 	"testing"
 	"time"
 
-	"github.com/grafana/agent/internal/component/otelcol"
-	"github.com/grafana/agent/internal/component/otelcol/internal/fakeconsumer"
-	"github.com/grafana/agent/internal/component/otelcol/receiver/otlp"
-	"github.com/grafana/agent/internal/flow/componenttest"
-	"github.com/grafana/agent/internal/flow/logging/level"
-	"github.com/grafana/agent/internal/util"
+	"github.com/grafana/alloy/internal/component/otelcol"
+	"github.com/grafana/alloy/internal/component/otelcol/internal/fakeconsumer"
+	"github.com/grafana/alloy/internal/component/otelcol/receiver/otlp"
+	"github.com/grafana/alloy/internal/flow/componenttest"
+	"github.com/grafana/alloy/internal/flow/logging/level"
+	"github.com/grafana/alloy/internal/util"
 	"github.com/grafana/alloy/syntax"
 	"github.com/grafana/dskit/backoff"
 	"github.com/phayes/freeport"
diff --git a/internal/component/otelcol/receiver/prometheus/prometheus.go b/internal/component/otelcol/receiver/prometheus/prometheus.go
index c6aa4ebf59..c3540188e3 100644
--- a/internal/component/otelcol/receiver/prometheus/prometheus.go
+++ b/internal/component/otelcol/receiver/prometheus/prometheus.go
@@ -9,13 +9,13 @@ import (
 	"time"
 
 	"github.com/go-kit/log"
-	"github.com/grafana/agent/internal/build"
-	"github.com/grafana/agent/internal/component"
-	"github.com/grafana/agent/internal/component/otelcol"
-	"github.com/grafana/agent/internal/component/otelcol/internal/fanoutconsumer"
-	"github.com/grafana/agent/internal/component/otelcol/receiver/prometheus/internal"
-	"github.com/grafana/agent/internal/featuregate"
-	"github.com/grafana/agent/internal/util/zapadapter"
+	"github.com/grafana/alloy/internal/build"
+	"github.com/grafana/alloy/internal/component"
+	"github.com/grafana/alloy/internal/component/otelcol"
+	"github.com/grafana/alloy/internal/component/otelcol/internal/fanoutconsumer"
+	"github.com/grafana/alloy/internal/component/otelcol/receiver/prometheus/internal"
+	"github.com/grafana/alloy/internal/featuregate"
+	"github.com/grafana/alloy/internal/util/zapadapter"
 	"github.com/prometheus/prometheus/model/labels"
 	"github.com/prometheus/prometheus/storage"
 	otelcomponent "go.opentelemetry.io/collector/component"
diff --git a/internal/component/otelcol/receiver/prometheus/prometheus_test.go b/internal/component/otelcol/receiver/prometheus/prometheus_test.go
index 7d1836ad29..fd2c32e3dc 100644
--- a/internal/component/otelcol/receiver/prometheus/prometheus_test.go
+++ b/internal/component/otelcol/receiver/prometheus/prometheus_test.go
@@ -5,12 +5,12 @@ import (
 	"testing"
 	"time"
 
-	"github.com/grafana/agent/internal/component/otelcol"
-	"github.com/grafana/agent/internal/component/otelcol/internal/fakeconsumer"
-	"github.com/grafana/agent/internal/component/otelcol/receiver/prometheus"
-	flowprometheus "github.com/grafana/agent/internal/component/prometheus"
-	"github.com/grafana/agent/internal/flow/componenttest"
-	"github.com/grafana/agent/internal/util"
+	"github.com/grafana/alloy/internal/component/otelcol"
+	"github.com/grafana/alloy/internal/component/otelcol/internal/fakeconsumer"
+	"github.com/grafana/alloy/internal/component/otelcol/receiver/prometheus"
+	flowprometheus "github.com/grafana/alloy/internal/component/prometheus"
+	"github.com/grafana/alloy/internal/flow/componenttest"
+	"github.com/grafana/alloy/internal/util"
 	"github.com/grafana/alloy/syntax"
 	"github.com/prometheus/common/model"
 	"github.com/prometheus/prometheus/model/exemplar"
diff --git a/internal/component/otelcol/receiver/receiver.go b/internal/component/otelcol/receiver/receiver.go
index 55cfebc604..2591b5f4e4 100644
--- a/internal/component/otelcol/receiver/receiver.go
+++ b/internal/component/otelcol/receiver/receiver.go
@@ -7,14 +7,14 @@ import (
 	"errors"
 	"os"
 
-	"github.com/grafana/agent/internal/build"
-	"github.com/grafana/agent/internal/component"
-	"github.com/grafana/agent/internal/component/otelcol"
-	"github.com/grafana/agent/internal/component/otelcol/internal/fanoutconsumer"
-	"github.com/grafana/agent/internal/component/otelcol/internal/lazycollector"
-	"github.com/grafana/agent/internal/component/otelcol/internal/scheduler"
-	"github.com/grafana/agent/internal/component/otelcol/internal/views"
-	"github.com/grafana/agent/internal/util/zapadapter"
+	"github.com/grafana/alloy/internal/build"
+	"github.com/grafana/alloy/internal/component"
+	"github.com/grafana/alloy/internal/component/otelcol"
+	"github.com/grafana/alloy/internal/component/otelcol/internal/fanoutconsumer"
+	"github.com/grafana/alloy/internal/component/otelcol/internal/lazycollector"
+	"github.com/grafana/alloy/internal/component/otelcol/internal/scheduler"
+	"github.com/grafana/alloy/internal/component/otelcol/internal/views"
+	"github.com/grafana/alloy/internal/util/zapadapter"
 	"github.com/prometheus/client_golang/prometheus"
 	otelcomponent "go.opentelemetry.io/collector/component"
 	"go.opentelemetry.io/collector/extension"
diff --git a/internal/component/otelcol/receiver/receiver_test.go b/internal/component/otelcol/receiver/receiver_test.go
index 989b40fa9f..5c8ff582ad 100644
--- a/internal/component/otelcol/receiver/receiver_test.go
+++ b/internal/component/otelcol/receiver/receiver_test.go
@@ -5,12 +5,12 @@ import (
 	"testing"
 	"time"
- "github.com/grafana/agent/internal/component" - "github.com/grafana/agent/internal/component/otelcol" - "github.com/grafana/agent/internal/component/otelcol/internal/fakeconsumer" - "github.com/grafana/agent/internal/component/otelcol/receiver" - "github.com/grafana/agent/internal/flow/componenttest" - "github.com/grafana/agent/internal/util" + "github.com/grafana/alloy/internal/component" + "github.com/grafana/alloy/internal/component/otelcol" + "github.com/grafana/alloy/internal/component/otelcol/internal/fakeconsumer" + "github.com/grafana/alloy/internal/component/otelcol/receiver" + "github.com/grafana/alloy/internal/flow/componenttest" + "github.com/grafana/alloy/internal/util" "github.com/stretchr/testify/require" otelcomponent "go.opentelemetry.io/collector/component" otelconsumer "go.opentelemetry.io/collector/consumer" diff --git a/internal/component/otelcol/receiver/vcenter/vcenter.go b/internal/component/otelcol/receiver/vcenter/vcenter.go index e1bad99588..2004e9fdd5 100644 --- a/internal/component/otelcol/receiver/vcenter/vcenter.go +++ b/internal/component/otelcol/receiver/vcenter/vcenter.go @@ -5,10 +5,10 @@ import ( "fmt" "net/url" - "github.com/grafana/agent/internal/component" - "github.com/grafana/agent/internal/component/otelcol" - "github.com/grafana/agent/internal/component/otelcol/receiver" - "github.com/grafana/agent/internal/featuregate" + "github.com/grafana/alloy/internal/component" + "github.com/grafana/alloy/internal/component/otelcol" + "github.com/grafana/alloy/internal/component/otelcol/receiver" + "github.com/grafana/alloy/internal/featuregate" "github.com/grafana/alloy/syntax/alloytypes" "github.com/mitchellh/mapstructure" "github.com/open-telemetry/opentelemetry-collector-contrib/receiver/vcenterreceiver" diff --git a/internal/component/otelcol/receiver/vcenter/vcenter_test.go b/internal/component/otelcol/receiver/vcenter/vcenter_test.go index 5e2cd07128..a42811a7f6 100644 --- a/internal/component/otelcol/receiver/vcenter/vcenter_test.go +++ b/internal/component/otelcol/receiver/vcenter/vcenter_test.go @@ -4,8 +4,8 @@ import ( "testing" "time" - "github.com/grafana/agent/internal/component/otelcol" - "github.com/grafana/agent/internal/component/otelcol/receiver/vcenter" + "github.com/grafana/alloy/internal/component/otelcol" + "github.com/grafana/alloy/internal/component/otelcol/receiver/vcenter" "github.com/grafana/alloy/syntax" "github.com/open-telemetry/opentelemetry-collector-contrib/receiver/vcenterreceiver" "github.com/stretchr/testify/require" diff --git a/internal/component/otelcol/receiver/zipkin/zipkin.go b/internal/component/otelcol/receiver/zipkin/zipkin.go index f63cc06183..ac6a85dd85 100644 --- a/internal/component/otelcol/receiver/zipkin/zipkin.go +++ b/internal/component/otelcol/receiver/zipkin/zipkin.go @@ -2,10 +2,10 @@ package zipkin import ( - "github.com/grafana/agent/internal/component" - "github.com/grafana/agent/internal/component/otelcol" - "github.com/grafana/agent/internal/component/otelcol/receiver" - "github.com/grafana/agent/internal/featuregate" + "github.com/grafana/alloy/internal/component" + "github.com/grafana/alloy/internal/component/otelcol" + "github.com/grafana/alloy/internal/component/otelcol/receiver" + "github.com/grafana/alloy/internal/featuregate" "github.com/open-telemetry/opentelemetry-collector-contrib/receiver/zipkinreceiver" otelcomponent "go.opentelemetry.io/collector/component" otelextension "go.opentelemetry.io/collector/extension" diff --git a/internal/component/otelcol/receiver/zipkin/zipkin_test.go 
b/internal/component/otelcol/receiver/zipkin/zipkin_test.go index 29839c46b7..a83237dff3 100644 --- a/internal/component/otelcol/receiver/zipkin/zipkin_test.go +++ b/internal/component/otelcol/receiver/zipkin/zipkin_test.go @@ -5,10 +5,10 @@ import ( "testing" "time" - "github.com/grafana/agent/internal/component/otelcol" - "github.com/grafana/agent/internal/component/otelcol/receiver/zipkin" - "github.com/grafana/agent/internal/flow/componenttest" - "github.com/grafana/agent/internal/util" + "github.com/grafana/alloy/internal/component/otelcol" + "github.com/grafana/alloy/internal/component/otelcol/receiver/zipkin" + "github.com/grafana/alloy/internal/flow/componenttest" + "github.com/grafana/alloy/internal/util" "github.com/grafana/alloy/syntax" "github.com/open-telemetry/opentelemetry-collector-contrib/receiver/zipkinreceiver" "github.com/phayes/freeport" diff --git a/internal/component/prometheus/exporter/apache/apache.go b/internal/component/prometheus/exporter/apache/apache.go index 5cc988cf1c..0d2b95c5fd 100644 --- a/internal/component/prometheus/exporter/apache/apache.go +++ b/internal/component/prometheus/exporter/apache/apache.go @@ -1,11 +1,11 @@ package apache import ( - "github.com/grafana/agent/internal/component" - "github.com/grafana/agent/internal/component/prometheus/exporter" - "github.com/grafana/agent/internal/featuregate" - "github.com/grafana/agent/internal/static/integrations" - "github.com/grafana/agent/internal/static/integrations/apache_http" + "github.com/grafana/alloy/internal/component" + "github.com/grafana/alloy/internal/component/prometheus/exporter" + "github.com/grafana/alloy/internal/featuregate" + "github.com/grafana/alloy/internal/static/integrations" + "github.com/grafana/alloy/internal/static/integrations/apache_http" ) func init() { diff --git a/internal/component/prometheus/exporter/azure/azure.go b/internal/component/prometheus/exporter/azure/azure.go index ed410e7029..e51fd7f744 100644 --- a/internal/component/prometheus/exporter/azure/azure.go +++ b/internal/component/prometheus/exporter/azure/azure.go @@ -1,11 +1,11 @@ package azure import ( - "github.com/grafana/agent/internal/component" - "github.com/grafana/agent/internal/component/prometheus/exporter" - "github.com/grafana/agent/internal/featuregate" - "github.com/grafana/agent/internal/static/integrations" - "github.com/grafana/agent/internal/static/integrations/azure_exporter" + "github.com/grafana/alloy/internal/component" + "github.com/grafana/alloy/internal/component/prometheus/exporter" + "github.com/grafana/alloy/internal/featuregate" + "github.com/grafana/alloy/internal/static/integrations" + "github.com/grafana/alloy/internal/static/integrations/azure_exporter" ) func init() { diff --git a/internal/component/prometheus/exporter/blackbox/blackbox.go b/internal/component/prometheus/exporter/blackbox/blackbox.go index e32a630660..def1b0560e 100644 --- a/internal/component/prometheus/exporter/blackbox/blackbox.go +++ b/internal/component/prometheus/exporter/blackbox/blackbox.go @@ -8,13 +8,13 @@ import ( blackbox_config "github.com/prometheus/blackbox_exporter/config" "gopkg.in/yaml.v2" - "github.com/grafana/agent/internal/component" - "github.com/grafana/agent/internal/component/discovery" - "github.com/grafana/agent/internal/component/prometheus/exporter" - "github.com/grafana/agent/internal/featuregate" - "github.com/grafana/agent/internal/static/integrations" - "github.com/grafana/agent/internal/static/integrations/blackbox_exporter" - "github.com/grafana/agent/internal/util" + 
"github.com/grafana/alloy/internal/component" + "github.com/grafana/alloy/internal/component/discovery" + "github.com/grafana/alloy/internal/component/prometheus/exporter" + "github.com/grafana/alloy/internal/featuregate" + "github.com/grafana/alloy/internal/static/integrations" + "github.com/grafana/alloy/internal/static/integrations/blackbox_exporter" + "github.com/grafana/alloy/internal/util" "github.com/grafana/alloy/syntax/alloytypes" ) diff --git a/internal/component/prometheus/exporter/blackbox/blackbox_test.go b/internal/component/prometheus/exporter/blackbox/blackbox_test.go index 39ecf1bb79..ba0f63e123 100644 --- a/internal/component/prometheus/exporter/blackbox/blackbox_test.go +++ b/internal/component/prometheus/exporter/blackbox/blackbox_test.go @@ -4,8 +4,8 @@ import ( "testing" "time" - "github.com/grafana/agent/internal/component" - "github.com/grafana/agent/internal/component/discovery" + "github.com/grafana/alloy/internal/component" + "github.com/grafana/alloy/internal/component/discovery" "github.com/grafana/alloy/syntax" blackbox_config "github.com/prometheus/blackbox_exporter/config" "github.com/prometheus/common/model" diff --git a/internal/component/prometheus/exporter/cadvisor/cadvisor.go b/internal/component/prometheus/exporter/cadvisor/cadvisor.go index 223203dffa..0e3584a7d6 100644 --- a/internal/component/prometheus/exporter/cadvisor/cadvisor.go +++ b/internal/component/prometheus/exporter/cadvisor/cadvisor.go @@ -3,11 +3,11 @@ package cadvisor import ( "time" - "github.com/grafana/agent/internal/component" - "github.com/grafana/agent/internal/component/prometheus/exporter" - "github.com/grafana/agent/internal/featuregate" - "github.com/grafana/agent/internal/static/integrations" - "github.com/grafana/agent/internal/static/integrations/cadvisor" + "github.com/grafana/alloy/internal/component" + "github.com/grafana/alloy/internal/component/prometheus/exporter" + "github.com/grafana/alloy/internal/featuregate" + "github.com/grafana/alloy/internal/static/integrations" + "github.com/grafana/alloy/internal/static/integrations/cadvisor" ) func init() { diff --git a/internal/component/prometheus/exporter/cadvisor/cadvisor_test.go b/internal/component/prometheus/exporter/cadvisor/cadvisor_test.go index fd9c4edaf2..2e9b44d39a 100644 --- a/internal/component/prometheus/exporter/cadvisor/cadvisor_test.go +++ b/internal/component/prometheus/exporter/cadvisor/cadvisor_test.go @@ -4,7 +4,7 @@ import ( "testing" "time" - "github.com/grafana/agent/internal/static/integrations/cadvisor" + "github.com/grafana/alloy/internal/static/integrations/cadvisor" "github.com/grafana/alloy/syntax" "github.com/stretchr/testify/require" ) diff --git a/internal/component/prometheus/exporter/cloudwatch/cloudwatch.go b/internal/component/prometheus/exporter/cloudwatch/cloudwatch.go index a7ec9a96af..b87a7b2b8b 100644 --- a/internal/component/prometheus/exporter/cloudwatch/cloudwatch.go +++ b/internal/component/prometheus/exporter/cloudwatch/cloudwatch.go @@ -3,11 +3,11 @@ package cloudwatch import ( "fmt" - "github.com/grafana/agent/internal/component" - "github.com/grafana/agent/internal/component/prometheus/exporter" - "github.com/grafana/agent/internal/featuregate" - "github.com/grafana/agent/internal/static/integrations" - "github.com/grafana/agent/internal/static/integrations/cloudwatch_exporter" + "github.com/grafana/alloy/internal/component" + "github.com/grafana/alloy/internal/component/prometheus/exporter" + "github.com/grafana/alloy/internal/featuregate" + 
"github.com/grafana/alloy/internal/static/integrations" + "github.com/grafana/alloy/internal/static/integrations/cloudwatch_exporter" ) func init() { diff --git a/internal/component/prometheus/exporter/cloudwatch/config.go b/internal/component/prometheus/exporter/cloudwatch/config.go index c87de8b372..75680c83ac 100644 --- a/internal/component/prometheus/exporter/cloudwatch/config.go +++ b/internal/component/prometheus/exporter/cloudwatch/config.go @@ -5,7 +5,7 @@ import ( "encoding/hex" "time" - "github.com/grafana/agent/internal/static/integrations/cloudwatch_exporter" + "github.com/grafana/alloy/internal/static/integrations/cloudwatch_exporter" "github.com/grafana/alloy/syntax" yaceConf "github.com/nerdswords/yet-another-cloudwatch-exporter/pkg/config" yaceModel "github.com/nerdswords/yet-another-cloudwatch-exporter/pkg/model" diff --git a/internal/component/prometheus/exporter/consul/consul.go b/internal/component/prometheus/exporter/consul/consul.go index 9647dc32b2..4cc528ff8f 100644 --- a/internal/component/prometheus/exporter/consul/consul.go +++ b/internal/component/prometheus/exporter/consul/consul.go @@ -3,11 +3,11 @@ package consul import ( "time" - "github.com/grafana/agent/internal/component" - "github.com/grafana/agent/internal/component/prometheus/exporter" - "github.com/grafana/agent/internal/featuregate" - "github.com/grafana/agent/internal/static/integrations" - "github.com/grafana/agent/internal/static/integrations/consul_exporter" + "github.com/grafana/alloy/internal/component" + "github.com/grafana/alloy/internal/component/prometheus/exporter" + "github.com/grafana/alloy/internal/featuregate" + "github.com/grafana/alloy/internal/static/integrations" + "github.com/grafana/alloy/internal/static/integrations/consul_exporter" ) func init() { diff --git a/internal/component/prometheus/exporter/dnsmasq/dnsmasq.go b/internal/component/prometheus/exporter/dnsmasq/dnsmasq.go index bd3abc2077..adacaf997b 100644 --- a/internal/component/prometheus/exporter/dnsmasq/dnsmasq.go +++ b/internal/component/prometheus/exporter/dnsmasq/dnsmasq.go @@ -1,11 +1,11 @@ package dnsmasq import ( - "github.com/grafana/agent/internal/component" - "github.com/grafana/agent/internal/component/prometheus/exporter" - "github.com/grafana/agent/internal/featuregate" - "github.com/grafana/agent/internal/static/integrations" - "github.com/grafana/agent/internal/static/integrations/dnsmasq_exporter" + "github.com/grafana/alloy/internal/component" + "github.com/grafana/alloy/internal/component/prometheus/exporter" + "github.com/grafana/alloy/internal/featuregate" + "github.com/grafana/alloy/internal/static/integrations" + "github.com/grafana/alloy/internal/static/integrations/dnsmasq_exporter" ) func init() { diff --git a/internal/component/prometheus/exporter/dnsmasq/dnsmasq_test.go b/internal/component/prometheus/exporter/dnsmasq/dnsmasq_test.go index 07c863f334..21b54aa817 100644 --- a/internal/component/prometheus/exporter/dnsmasq/dnsmasq_test.go +++ b/internal/component/prometheus/exporter/dnsmasq/dnsmasq_test.go @@ -3,7 +3,7 @@ package dnsmasq import ( "testing" - "github.com/grafana/agent/internal/static/integrations/dnsmasq_exporter" + "github.com/grafana/alloy/internal/static/integrations/dnsmasq_exporter" "github.com/grafana/alloy/syntax" "github.com/stretchr/testify/assert" ) diff --git a/internal/component/prometheus/exporter/elasticsearch/elasticsearch.go b/internal/component/prometheus/exporter/elasticsearch/elasticsearch.go index 6cd3b98c3c..84996d7db1 100644 --- 
+++ b/internal/component/prometheus/exporter/elasticsearch/elasticsearch.go
@@ -3,12 +3,12 @@ package elasticsearch
 import (
 	"time"

-	"github.com/grafana/agent/internal/component"
-	commonCfg "github.com/grafana/agent/internal/component/common/config"
-	"github.com/grafana/agent/internal/component/prometheus/exporter"
-	"github.com/grafana/agent/internal/featuregate"
-	"github.com/grafana/agent/internal/static/integrations"
-	"github.com/grafana/agent/internal/static/integrations/elasticsearch_exporter"
+	"github.com/grafana/alloy/internal/component"
+	commonCfg "github.com/grafana/alloy/internal/component/common/config"
+	"github.com/grafana/alloy/internal/component/prometheus/exporter"
+	"github.com/grafana/alloy/internal/featuregate"
+	"github.com/grafana/alloy/internal/static/integrations"
+	"github.com/grafana/alloy/internal/static/integrations/elasticsearch_exporter"
 )

 func init() {
diff --git a/internal/component/prometheus/exporter/elasticsearch/elasticsearch_test.go b/internal/component/prometheus/exporter/elasticsearch/elasticsearch_test.go
index 4983258419..9f4deedfb9 100644
--- a/internal/component/prometheus/exporter/elasticsearch/elasticsearch_test.go
+++ b/internal/component/prometheus/exporter/elasticsearch/elasticsearch_test.go
@@ -4,8 +4,8 @@ import (
 	"testing"
 	"time"

-	commonCfg "github.com/grafana/agent/internal/component/common/config"
-	"github.com/grafana/agent/internal/static/integrations/elasticsearch_exporter"
+	commonCfg "github.com/grafana/alloy/internal/component/common/config"
+	"github.com/grafana/alloy/internal/static/integrations/elasticsearch_exporter"
 	"github.com/grafana/alloy/syntax"
 	"github.com/grafana/alloy/syntax/alloytypes"
 	promCfg "github.com/prometheus/common/config"
diff --git a/internal/component/prometheus/exporter/exporter.go b/internal/component/prometheus/exporter/exporter.go
index 40572f21a0..67f21fd89f 100644
--- a/internal/component/prometheus/exporter/exporter.go
+++ b/internal/component/prometheus/exporter/exporter.go
@@ -9,11 +9,11 @@ import (
 	"strings"
 	"sync"

-	"github.com/grafana/agent/internal/component"
-	"github.com/grafana/agent/internal/component/discovery"
-	"github.com/grafana/agent/internal/flow/logging/level"
-	http_service "github.com/grafana/agent/internal/service/http"
-	"github.com/grafana/agent/internal/static/integrations"
+	"github.com/grafana/alloy/internal/component"
+	"github.com/grafana/alloy/internal/component/discovery"
+	"github.com/grafana/alloy/internal/flow/logging/level"
+	http_service "github.com/grafana/alloy/internal/service/http"
+	"github.com/grafana/alloy/internal/static/integrations"
 	"github.com/prometheus/common/model"
 )

diff --git a/internal/component/prometheus/exporter/gcp/gcp.go b/internal/component/prometheus/exporter/gcp/gcp.go
index b3fb702f7a..cceed35743 100644
--- a/internal/component/prometheus/exporter/gcp/gcp.go
+++ b/internal/component/prometheus/exporter/gcp/gcp.go
@@ -3,11 +3,11 @@ package gcp
 import (
 	"time"

-	"github.com/grafana/agent/internal/component"
-	"github.com/grafana/agent/internal/component/prometheus/exporter"
-	"github.com/grafana/agent/internal/featuregate"
-	"github.com/grafana/agent/internal/static/integrations"
-	"github.com/grafana/agent/internal/static/integrations/gcp_exporter"
+	"github.com/grafana/alloy/internal/component"
+	"github.com/grafana/alloy/internal/component/prometheus/exporter"
+	"github.com/grafana/alloy/internal/featuregate"
+	"github.com/grafana/alloy/internal/static/integrations"
"github.com/grafana/alloy/internal/static/integrations" + "github.com/grafana/alloy/internal/static/integrations/gcp_exporter" ) func init() { diff --git a/internal/component/prometheus/exporter/github/github.go b/internal/component/prometheus/exporter/github/github.go index 1b321f0ce5..25d442d4e3 100644 --- a/internal/component/prometheus/exporter/github/github.go +++ b/internal/component/prometheus/exporter/github/github.go @@ -1,11 +1,11 @@ package github import ( - "github.com/grafana/agent/internal/component" - "github.com/grafana/agent/internal/component/prometheus/exporter" - "github.com/grafana/agent/internal/featuregate" - "github.com/grafana/agent/internal/static/integrations" - "github.com/grafana/agent/internal/static/integrations/github_exporter" + "github.com/grafana/alloy/internal/component" + "github.com/grafana/alloy/internal/component/prometheus/exporter" + "github.com/grafana/alloy/internal/featuregate" + "github.com/grafana/alloy/internal/static/integrations" + "github.com/grafana/alloy/internal/static/integrations/github_exporter" "github.com/grafana/alloy/syntax/alloytypes" config_util "github.com/prometheus/common/config" ) diff --git a/internal/component/prometheus/exporter/kafka/kafka.go b/internal/component/prometheus/exporter/kafka/kafka.go index 95738f53d8..e4f32aea2e 100644 --- a/internal/component/prometheus/exporter/kafka/kafka.go +++ b/internal/component/prometheus/exporter/kafka/kafka.go @@ -4,12 +4,12 @@ import ( "fmt" "github.com/IBM/sarama" - "github.com/grafana/agent/internal/component" - "github.com/grafana/agent/internal/component/discovery" - "github.com/grafana/agent/internal/component/prometheus/exporter" - "github.com/grafana/agent/internal/featuregate" - "github.com/grafana/agent/internal/static/integrations" - "github.com/grafana/agent/internal/static/integrations/kafka_exporter" + "github.com/grafana/alloy/internal/component" + "github.com/grafana/alloy/internal/component/discovery" + "github.com/grafana/alloy/internal/component/prometheus/exporter" + "github.com/grafana/alloy/internal/featuregate" + "github.com/grafana/alloy/internal/static/integrations" + "github.com/grafana/alloy/internal/static/integrations/kafka_exporter" "github.com/grafana/alloy/syntax/alloytypes" "github.com/prometheus/common/config" ) diff --git a/internal/component/prometheus/exporter/kafka/kafka_test.go b/internal/component/prometheus/exporter/kafka/kafka_test.go index 287d2a601f..a3d34fc311 100644 --- a/internal/component/prometheus/exporter/kafka/kafka_test.go +++ b/internal/component/prometheus/exporter/kafka/kafka_test.go @@ -3,8 +3,8 @@ package kafka import ( "testing" - "github.com/grafana/agent/internal/component/discovery" - "github.com/grafana/agent/internal/static/integrations/kafka_exporter" + "github.com/grafana/alloy/internal/component/discovery" + "github.com/grafana/alloy/internal/static/integrations/kafka_exporter" "github.com/grafana/alloy/syntax" "github.com/stretchr/testify/require" ) diff --git a/internal/component/prometheus/exporter/memcached/memcached.go b/internal/component/prometheus/exporter/memcached/memcached.go index 2a1a6d3588..fcecc69b77 100644 --- a/internal/component/prometheus/exporter/memcached/memcached.go +++ b/internal/component/prometheus/exporter/memcached/memcached.go @@ -3,12 +3,12 @@ package memcached import ( "time" - "github.com/grafana/agent/internal/component" - "github.com/grafana/agent/internal/component/common/config" - "github.com/grafana/agent/internal/component/prometheus/exporter" - 
"github.com/grafana/agent/internal/featuregate" - "github.com/grafana/agent/internal/static/integrations" - "github.com/grafana/agent/internal/static/integrations/memcached_exporter" + "github.com/grafana/alloy/internal/component" + "github.com/grafana/alloy/internal/component/common/config" + "github.com/grafana/alloy/internal/component/prometheus/exporter" + "github.com/grafana/alloy/internal/featuregate" + "github.com/grafana/alloy/internal/static/integrations" + "github.com/grafana/alloy/internal/static/integrations/memcached_exporter" ) func init() { diff --git a/internal/component/prometheus/exporter/memcached/memcached_test.go b/internal/component/prometheus/exporter/memcached/memcached_test.go index 167600a9d1..75659b6675 100644 --- a/internal/component/prometheus/exporter/memcached/memcached_test.go +++ b/internal/component/prometheus/exporter/memcached/memcached_test.go @@ -4,8 +4,8 @@ import ( "testing" "time" - "github.com/grafana/agent/internal/component/common/config" - "github.com/grafana/agent/internal/static/integrations/memcached_exporter" + "github.com/grafana/alloy/internal/component/common/config" + "github.com/grafana/alloy/internal/static/integrations/memcached_exporter" "github.com/grafana/alloy/syntax" "github.com/stretchr/testify/assert" ) diff --git a/internal/component/prometheus/exporter/mongodb/mongodb.go b/internal/component/prometheus/exporter/mongodb/mongodb.go index c59c3eeaab..674562fed2 100644 --- a/internal/component/prometheus/exporter/mongodb/mongodb.go +++ b/internal/component/prometheus/exporter/mongodb/mongodb.go @@ -1,11 +1,11 @@ package mongodb import ( - "github.com/grafana/agent/internal/component" - "github.com/grafana/agent/internal/component/prometheus/exporter" - "github.com/grafana/agent/internal/featuregate" - "github.com/grafana/agent/internal/static/integrations" - "github.com/grafana/agent/internal/static/integrations/mongodb_exporter" + "github.com/grafana/alloy/internal/component" + "github.com/grafana/alloy/internal/component/prometheus/exporter" + "github.com/grafana/alloy/internal/featuregate" + "github.com/grafana/alloy/internal/static/integrations" + "github.com/grafana/alloy/internal/static/integrations/mongodb_exporter" "github.com/grafana/alloy/syntax/alloytypes" config_util "github.com/prometheus/common/config" ) diff --git a/internal/component/prometheus/exporter/mongodb/mongodb_test.go b/internal/component/prometheus/exporter/mongodb/mongodb_test.go index 94f3c0152f..4b1937567b 100644 --- a/internal/component/prometheus/exporter/mongodb/mongodb_test.go +++ b/internal/component/prometheus/exporter/mongodb/mongodb_test.go @@ -3,7 +3,7 @@ package mongodb import ( "testing" - "github.com/grafana/agent/internal/static/integrations/mongodb_exporter" + "github.com/grafana/alloy/internal/static/integrations/mongodb_exporter" "github.com/grafana/alloy/syntax" "github.com/stretchr/testify/require" ) diff --git a/internal/component/prometheus/exporter/mssql/mssql.go b/internal/component/prometheus/exporter/mssql/mssql.go index e50f064919..dd0efb3bd9 100644 --- a/internal/component/prometheus/exporter/mssql/mssql.go +++ b/internal/component/prometheus/exporter/mssql/mssql.go @@ -6,12 +6,12 @@ import ( "time" "github.com/burningalchemist/sql_exporter/config" - "github.com/grafana/agent/internal/component" - "github.com/grafana/agent/internal/component/prometheus/exporter" - "github.com/grafana/agent/internal/featuregate" - "github.com/grafana/agent/internal/static/integrations" - 
"github.com/grafana/agent/internal/static/integrations/mssql" - "github.com/grafana/agent/internal/util" + "github.com/grafana/alloy/internal/component" + "github.com/grafana/alloy/internal/component/prometheus/exporter" + "github.com/grafana/alloy/internal/featuregate" + "github.com/grafana/alloy/internal/static/integrations" + "github.com/grafana/alloy/internal/static/integrations/mssql" + "github.com/grafana/alloy/internal/util" "github.com/grafana/alloy/syntax/alloytypes" config_util "github.com/prometheus/common/config" "gopkg.in/yaml.v2" diff --git a/internal/component/prometheus/exporter/mssql/mssql_test.go b/internal/component/prometheus/exporter/mssql/mssql_test.go index 12f3fcc7e7..9b7762f8a7 100644 --- a/internal/component/prometheus/exporter/mssql/mssql_test.go +++ b/internal/component/prometheus/exporter/mssql/mssql_test.go @@ -5,7 +5,7 @@ import ( "time" "github.com/burningalchemist/sql_exporter/config" - "github.com/grafana/agent/internal/static/integrations/mssql" + "github.com/grafana/alloy/internal/static/integrations/mssql" "github.com/grafana/alloy/syntax" "github.com/grafana/alloy/syntax/alloytypes" config_util "github.com/prometheus/common/config" diff --git a/internal/component/prometheus/exporter/mysql/mysql.go b/internal/component/prometheus/exporter/mysql/mysql.go index 3fb75f2c52..38887556b4 100644 --- a/internal/component/prometheus/exporter/mysql/mysql.go +++ b/internal/component/prometheus/exporter/mysql/mysql.go @@ -2,11 +2,11 @@ package mysql import ( "github.com/go-sql-driver/mysql" - "github.com/grafana/agent/internal/component" - "github.com/grafana/agent/internal/component/prometheus/exporter" - "github.com/grafana/agent/internal/featuregate" - "github.com/grafana/agent/internal/static/integrations" - "github.com/grafana/agent/internal/static/integrations/mysqld_exporter" + "github.com/grafana/alloy/internal/component" + "github.com/grafana/alloy/internal/component/prometheus/exporter" + "github.com/grafana/alloy/internal/featuregate" + "github.com/grafana/alloy/internal/static/integrations" + "github.com/grafana/alloy/internal/static/integrations/mysqld_exporter" "github.com/grafana/alloy/syntax/alloytypes" config_util "github.com/prometheus/common/config" ) diff --git a/internal/component/prometheus/exporter/mysql/mysql_test.go b/internal/component/prometheus/exporter/mysql/mysql_test.go index a42c212e5b..4018d30511 100644 --- a/internal/component/prometheus/exporter/mysql/mysql_test.go +++ b/internal/component/prometheus/exporter/mysql/mysql_test.go @@ -3,7 +3,7 @@ package mysql import ( "testing" - "github.com/grafana/agent/internal/static/integrations/mysqld_exporter" + "github.com/grafana/alloy/internal/static/integrations/mysqld_exporter" "github.com/grafana/alloy/syntax" "github.com/grafana/alloy/syntax/alloytypes" "github.com/stretchr/testify/require" diff --git a/internal/component/prometheus/exporter/oracledb/oracledb.go b/internal/component/prometheus/exporter/oracledb/oracledb.go index dd71616d68..b5425ca83a 100644 --- a/internal/component/prometheus/exporter/oracledb/oracledb.go +++ b/internal/component/prometheus/exporter/oracledb/oracledb.go @@ -5,11 +5,11 @@ import ( "fmt" "net/url" - "github.com/grafana/agent/internal/component" - "github.com/grafana/agent/internal/component/prometheus/exporter" - "github.com/grafana/agent/internal/featuregate" - "github.com/grafana/agent/internal/static/integrations" - "github.com/grafana/agent/internal/static/integrations/oracledb_exporter" + "github.com/grafana/alloy/internal/component" + 
"github.com/grafana/alloy/internal/component/prometheus/exporter" + "github.com/grafana/alloy/internal/featuregate" + "github.com/grafana/alloy/internal/static/integrations" + "github.com/grafana/alloy/internal/static/integrations/oracledb_exporter" "github.com/grafana/alloy/syntax/alloytypes" config_util "github.com/prometheus/common/config" ) diff --git a/internal/component/prometheus/exporter/oracledb/oracledb_test.go b/internal/component/prometheus/exporter/oracledb/oracledb_test.go index 6ae79e1143..56794e46f8 100644 --- a/internal/component/prometheus/exporter/oracledb/oracledb_test.go +++ b/internal/component/prometheus/exporter/oracledb/oracledb_test.go @@ -4,7 +4,7 @@ import ( "errors" "testing" - "github.com/grafana/agent/internal/static/integrations/oracledb_exporter" + "github.com/grafana/alloy/internal/static/integrations/oracledb_exporter" "github.com/grafana/alloy/syntax" "github.com/grafana/alloy/syntax/alloytypes" config_util "github.com/prometheus/common/config" diff --git a/internal/component/prometheus/exporter/postgres/postgres.go b/internal/component/prometheus/exporter/postgres/postgres.go index ec227308a3..79ee26406e 100644 --- a/internal/component/prometheus/exporter/postgres/postgres.go +++ b/internal/component/prometheus/exporter/postgres/postgres.go @@ -4,11 +4,11 @@ import ( "fmt" "strings" - "github.com/grafana/agent/internal/component" - "github.com/grafana/agent/internal/component/prometheus/exporter" - "github.com/grafana/agent/internal/featuregate" - "github.com/grafana/agent/internal/static/integrations" - "github.com/grafana/agent/internal/static/integrations/postgres_exporter" + "github.com/grafana/alloy/internal/component" + "github.com/grafana/alloy/internal/component/prometheus/exporter" + "github.com/grafana/alloy/internal/featuregate" + "github.com/grafana/alloy/internal/static/integrations" + "github.com/grafana/alloy/internal/static/integrations/postgres_exporter" "github.com/grafana/alloy/syntax/alloytypes" "github.com/lib/pq" config_util "github.com/prometheus/common/config" diff --git a/internal/component/prometheus/exporter/postgres/postgres_test.go b/internal/component/prometheus/exporter/postgres/postgres_test.go index 3306425e5d..c88f31c4da 100644 --- a/internal/component/prometheus/exporter/postgres/postgres_test.go +++ b/internal/component/prometheus/exporter/postgres/postgres_test.go @@ -3,7 +3,7 @@ package postgres import ( "testing" - "github.com/grafana/agent/internal/static/integrations/postgres_exporter" + "github.com/grafana/alloy/internal/static/integrations/postgres_exporter" "github.com/grafana/alloy/syntax" "github.com/grafana/alloy/syntax/alloytypes" rivertypes "github.com/grafana/alloy/syntax/alloytypes" diff --git a/internal/component/prometheus/exporter/process/process.go b/internal/component/prometheus/exporter/process/process.go index fbf27a256a..ffdfb90a14 100644 --- a/internal/component/prometheus/exporter/process/process.go +++ b/internal/component/prometheus/exporter/process/process.go @@ -1,11 +1,11 @@ package process import ( - "github.com/grafana/agent/internal/component" - "github.com/grafana/agent/internal/component/prometheus/exporter" - "github.com/grafana/agent/internal/featuregate" - "github.com/grafana/agent/internal/static/integrations" - "github.com/grafana/agent/internal/static/integrations/process_exporter" + "github.com/grafana/alloy/internal/component" + "github.com/grafana/alloy/internal/component/prometheus/exporter" + "github.com/grafana/alloy/internal/featuregate" + 
"github.com/grafana/alloy/internal/static/integrations" + "github.com/grafana/alloy/internal/static/integrations/process_exporter" exporter_config "github.com/ncabatoff/process-exporter/config" ) diff --git a/internal/component/prometheus/exporter/redis/redis.go b/internal/component/prometheus/exporter/redis/redis.go index 61bd9977c4..93d5b5326b 100644 --- a/internal/component/prometheus/exporter/redis/redis.go +++ b/internal/component/prometheus/exporter/redis/redis.go @@ -5,11 +5,11 @@ import ( "strings" "time" - "github.com/grafana/agent/internal/component" - "github.com/grafana/agent/internal/component/prometheus/exporter" - "github.com/grafana/agent/internal/featuregate" - "github.com/grafana/agent/internal/static/integrations" - "github.com/grafana/agent/internal/static/integrations/redis_exporter" + "github.com/grafana/alloy/internal/component" + "github.com/grafana/alloy/internal/component/prometheus/exporter" + "github.com/grafana/alloy/internal/featuregate" + "github.com/grafana/alloy/internal/static/integrations" + "github.com/grafana/alloy/internal/static/integrations/redis_exporter" "github.com/grafana/alloy/syntax/alloytypes" config_util "github.com/prometheus/common/config" ) diff --git a/internal/component/prometheus/exporter/redis/redis_test.go b/internal/component/prometheus/exporter/redis/redis_test.go index 5c4f926c1d..777daecf3b 100644 --- a/internal/component/prometheus/exporter/redis/redis_test.go +++ b/internal/component/prometheus/exporter/redis/redis_test.go @@ -4,7 +4,7 @@ import ( "testing" "time" - "github.com/grafana/agent/internal/static/integrations/redis_exporter" + "github.com/grafana/alloy/internal/static/integrations/redis_exporter" "github.com/grafana/alloy/syntax" "github.com/stretchr/testify/require" ) diff --git a/internal/component/prometheus/exporter/self/self.go b/internal/component/prometheus/exporter/self/self.go index 32735c19e5..90f046c52d 100644 --- a/internal/component/prometheus/exporter/self/self.go +++ b/internal/component/prometheus/exporter/self/self.go @@ -1,11 +1,11 @@ package self import ( - "github.com/grafana/agent/internal/component" - "github.com/grafana/agent/internal/component/prometheus/exporter" - "github.com/grafana/agent/internal/featuregate" - "github.com/grafana/agent/internal/static/integrations" - "github.com/grafana/agent/internal/static/integrations/agent" + "github.com/grafana/alloy/internal/component" + "github.com/grafana/alloy/internal/component/prometheus/exporter" + "github.com/grafana/alloy/internal/featuregate" + "github.com/grafana/alloy/internal/static/integrations" + "github.com/grafana/alloy/internal/static/integrations/agent" ) func init() { diff --git a/internal/component/prometheus/exporter/snmp/snmp.go b/internal/component/prometheus/exporter/snmp/snmp.go index 5ad5df6bfa..2d2994f7b6 100644 --- a/internal/component/prometheus/exporter/snmp/snmp.go +++ b/internal/component/prometheus/exporter/snmp/snmp.go @@ -5,12 +5,12 @@ import ( "fmt" "time" - "github.com/grafana/agent/internal/component" - "github.com/grafana/agent/internal/component/discovery" - "github.com/grafana/agent/internal/component/prometheus/exporter" - "github.com/grafana/agent/internal/featuregate" - "github.com/grafana/agent/internal/static/integrations" - "github.com/grafana/agent/internal/static/integrations/snmp_exporter" + "github.com/grafana/alloy/internal/component" + "github.com/grafana/alloy/internal/component/discovery" + "github.com/grafana/alloy/internal/component/prometheus/exporter" + 
"github.com/grafana/alloy/internal/featuregate" + "github.com/grafana/alloy/internal/static/integrations" + "github.com/grafana/alloy/internal/static/integrations/snmp_exporter" "github.com/grafana/alloy/syntax/alloytypes" snmp_config "github.com/prometheus/snmp_exporter/config" "gopkg.in/yaml.v2" diff --git a/internal/component/prometheus/exporter/snmp/snmp_test.go b/internal/component/prometheus/exporter/snmp/snmp_test.go index 296f2792c1..26a5df6370 100644 --- a/internal/component/prometheus/exporter/snmp/snmp_test.go +++ b/internal/component/prometheus/exporter/snmp/snmp_test.go @@ -4,8 +4,8 @@ import ( "testing" "time" - "github.com/grafana/agent/internal/component" - "github.com/grafana/agent/internal/component/discovery" + "github.com/grafana/alloy/internal/component" + "github.com/grafana/alloy/internal/component/discovery" "github.com/grafana/alloy/syntax" "github.com/prometheus/common/model" diff --git a/internal/component/prometheus/exporter/snowflake/snowflake.go b/internal/component/prometheus/exporter/snowflake/snowflake.go index 72bd01e4b7..b4d7e63732 100644 --- a/internal/component/prometheus/exporter/snowflake/snowflake.go +++ b/internal/component/prometheus/exporter/snowflake/snowflake.go @@ -1,11 +1,11 @@ package snowflake import ( - "github.com/grafana/agent/internal/component" - "github.com/grafana/agent/internal/component/prometheus/exporter" - "github.com/grafana/agent/internal/featuregate" - "github.com/grafana/agent/internal/static/integrations" - "github.com/grafana/agent/internal/static/integrations/snowflake_exporter" + "github.com/grafana/alloy/internal/component" + "github.com/grafana/alloy/internal/component/prometheus/exporter" + "github.com/grafana/alloy/internal/featuregate" + "github.com/grafana/alloy/internal/static/integrations" + "github.com/grafana/alloy/internal/static/integrations/snowflake_exporter" "github.com/grafana/alloy/syntax/alloytypes" config_util "github.com/prometheus/common/config" ) diff --git a/internal/component/prometheus/exporter/snowflake/snowflake_test.go b/internal/component/prometheus/exporter/snowflake/snowflake_test.go index 1a70433c56..47f8a2ae43 100644 --- a/internal/component/prometheus/exporter/snowflake/snowflake_test.go +++ b/internal/component/prometheus/exporter/snowflake/snowflake_test.go @@ -3,7 +3,7 @@ package snowflake import ( "testing" - "github.com/grafana/agent/internal/static/integrations/snowflake_exporter" + "github.com/grafana/alloy/internal/static/integrations/snowflake_exporter" "github.com/grafana/alloy/syntax" "github.com/grafana/alloy/syntax/alloytypes" config_util "github.com/prometheus/common/config" diff --git a/internal/component/prometheus/exporter/squid/squid.go b/internal/component/prometheus/exporter/squid/squid.go index e08e404cbf..1beb43ec26 100644 --- a/internal/component/prometheus/exporter/squid/squid.go +++ b/internal/component/prometheus/exporter/squid/squid.go @@ -3,11 +3,11 @@ package squid import ( "net" - "github.com/grafana/agent/internal/component" - "github.com/grafana/agent/internal/component/prometheus/exporter" - "github.com/grafana/agent/internal/featuregate" - "github.com/grafana/agent/internal/static/integrations" - "github.com/grafana/agent/internal/static/integrations/squid_exporter" + "github.com/grafana/alloy/internal/component" + "github.com/grafana/alloy/internal/component/prometheus/exporter" + "github.com/grafana/alloy/internal/featuregate" + "github.com/grafana/alloy/internal/static/integrations" + 
"github.com/grafana/alloy/internal/static/integrations/squid_exporter" "github.com/grafana/alloy/syntax/alloytypes" "github.com/prometheus/common/config" ) diff --git a/internal/component/prometheus/exporter/squid/squid_test.go b/internal/component/prometheus/exporter/squid/squid_test.go index 522ca90bac..ff5bb03850 100644 --- a/internal/component/prometheus/exporter/squid/squid_test.go +++ b/internal/component/prometheus/exporter/squid/squid_test.go @@ -4,7 +4,7 @@ import ( "errors" "testing" - "github.com/grafana/agent/internal/static/integrations/squid_exporter" + "github.com/grafana/alloy/internal/static/integrations/squid_exporter" "github.com/grafana/alloy/syntax" "github.com/grafana/alloy/syntax/alloytypes" "github.com/prometheus/common/config" diff --git a/internal/component/prometheus/exporter/statsd/config.go b/internal/component/prometheus/exporter/statsd/config.go index 18525450a9..31a97fdf74 100644 --- a/internal/component/prometheus/exporter/statsd/config.go +++ b/internal/component/prometheus/exporter/statsd/config.go @@ -5,7 +5,7 @@ import ( "os" "time" - "github.com/grafana/agent/internal/static/integrations/statsd_exporter" + "github.com/grafana/alloy/internal/static/integrations/statsd_exporter" "gopkg.in/yaml.v3" ) diff --git a/internal/component/prometheus/exporter/statsd/statsd.go b/internal/component/prometheus/exporter/statsd/statsd.go index fe09c168ec..3323971f69 100644 --- a/internal/component/prometheus/exporter/statsd/statsd.go +++ b/internal/component/prometheus/exporter/statsd/statsd.go @@ -1,10 +1,10 @@ package statsd import ( - "github.com/grafana/agent/internal/component" - "github.com/grafana/agent/internal/component/prometheus/exporter" - "github.com/grafana/agent/internal/featuregate" - "github.com/grafana/agent/internal/static/integrations" + "github.com/grafana/alloy/internal/component" + "github.com/grafana/alloy/internal/component/prometheus/exporter" + "github.com/grafana/alloy/internal/featuregate" + "github.com/grafana/alloy/internal/static/integrations" ) func init() { diff --git a/internal/component/prometheus/exporter/unix/config.go b/internal/component/prometheus/exporter/unix/config.go index f5303566e0..2a89c5594b 100644 --- a/internal/component/prometheus/exporter/unix/config.go +++ b/internal/component/prometheus/exporter/unix/config.go @@ -3,7 +3,7 @@ package unix import ( "time" - node_integration "github.com/grafana/agent/internal/static/integrations/node_exporter" + node_integration "github.com/grafana/alloy/internal/static/integrations/node_exporter" "github.com/grafana/dskit/flagext" ) diff --git a/internal/component/prometheus/exporter/unix/unix.go b/internal/component/prometheus/exporter/unix/unix.go index 8bf988b08a..9f5f3666c9 100644 --- a/internal/component/prometheus/exporter/unix/unix.go +++ b/internal/component/prometheus/exporter/unix/unix.go @@ -1,10 +1,10 @@ package unix import ( - "github.com/grafana/agent/internal/component" - "github.com/grafana/agent/internal/component/prometheus/exporter" - "github.com/grafana/agent/internal/featuregate" - "github.com/grafana/agent/internal/static/integrations" + "github.com/grafana/alloy/internal/component" + "github.com/grafana/alloy/internal/component/prometheus/exporter" + "github.com/grafana/alloy/internal/featuregate" + "github.com/grafana/alloy/internal/static/integrations" ) func init() { diff --git a/internal/component/prometheus/exporter/windows/config.go b/internal/component/prometheus/exporter/windows/config.go index e4bff8c46c..cc114196bb 100644 --- 
+++ b/internal/component/prometheus/exporter/windows/config.go
@@ -3,7 +3,7 @@ package windows
 import (
 	"strings"

-	windows_integration "github.com/grafana/agent/internal/static/integrations/windows_exporter"
+	windows_integration "github.com/grafana/alloy/internal/static/integrations/windows_exporter"
 )

 // Arguments is used for controlling for this exporter.
diff --git a/internal/component/prometheus/exporter/windows/config_windows.go b/internal/component/prometheus/exporter/windows/config_windows.go
index d1e138b9b5..de4d29eff1 100644
--- a/internal/component/prometheus/exporter/windows/config_windows.go
+++ b/internal/component/prometheus/exporter/windows/config_windows.go
@@ -3,7 +3,7 @@ package windows
 import (
 	"strings"

-	windows_integration "github.com/grafana/agent/internal/static/integrations/windows_exporter"
+	windows_integration "github.com/grafana/alloy/internal/static/integrations/windows_exporter"
 	col "github.com/prometheus-community/windows_exporter/pkg/collector"
 )

diff --git a/internal/component/prometheus/exporter/windows/windows.go b/internal/component/prometheus/exporter/windows/windows.go
index 0b7f151b75..6387208493 100644
--- a/internal/component/prometheus/exporter/windows/windows.go
+++ b/internal/component/prometheus/exporter/windows/windows.go
@@ -1,10 +1,10 @@
 package windows

 import (
-	"github.com/grafana/agent/internal/component"
-	"github.com/grafana/agent/internal/component/prometheus/exporter"
-	"github.com/grafana/agent/internal/featuregate"
-	"github.com/grafana/agent/internal/static/integrations"
+	"github.com/grafana/alloy/internal/component"
+	"github.com/grafana/alloy/internal/component/prometheus/exporter"
+	"github.com/grafana/alloy/internal/featuregate"
+	"github.com/grafana/alloy/internal/static/integrations"
 )

 func init() {
diff --git a/internal/component/prometheus/fanout.go b/internal/component/prometheus/fanout.go
index e25e37141f..98886cb787 100644
--- a/internal/component/prometheus/fanout.go
+++ b/internal/component/prometheus/fanout.go
@@ -5,7 +5,7 @@ import (
 	"sync"
 	"time"

-	"github.com/grafana/agent/internal/service/labelstore"
+	"github.com/grafana/alloy/internal/service/labelstore"
 	"github.com/hashicorp/go-multierror"
 	"github.com/prometheus/client_golang/prometheus"
 	"github.com/prometheus/prometheus/model/exemplar"
diff --git a/internal/component/prometheus/fanout_test.go b/internal/component/prometheus/fanout_test.go
index 8b89201604..a06d163970 100644
--- a/internal/component/prometheus/fanout_test.go
+++ b/internal/component/prometheus/fanout_test.go
@@ -3,7 +3,7 @@ package prometheus
 import (
 	"testing"

-	"github.com/grafana/agent/internal/service/labelstore"
+	"github.com/grafana/alloy/internal/service/labelstore"
 	"github.com/prometheus/client_golang/prometheus"

 	"github.com/prometheus/prometheus/storage"
diff --git a/internal/component/prometheus/interceptor.go b/internal/component/prometheus/interceptor.go
index d8e515ec5d..3ada4a2120 100644
--- a/internal/component/prometheus/interceptor.go
+++ b/internal/component/prometheus/interceptor.go
@@ -3,7 +3,7 @@ package prometheus
 import (
 	"context"

-	"github.com/grafana/agent/internal/service/labelstore"
+	"github.com/grafana/alloy/internal/service/labelstore"
 	"github.com/prometheus/prometheus/model/exemplar"
 	"github.com/prometheus/prometheus/model/histogram"
 	"github.com/prometheus/prometheus/model/labels"
diff --git a/internal/component/prometheus/operator/common/component.go b/internal/component/prometheus/operator/common/component.go
index 7df6d5aa5c..006f014259 100644
--- a/internal/component/prometheus/operator/common/component.go
+++ b/internal/component/prometheus/operator/common/component.go
@@ -8,11 +8,11 @@ import (
 	"sync"
 	"time"

-	"github.com/grafana/agent/internal/component"
-	"github.com/grafana/agent/internal/component/prometheus/operator"
-	"github.com/grafana/agent/internal/flow/logging/level"
-	"github.com/grafana/agent/internal/service/cluster"
-	"github.com/grafana/agent/internal/service/labelstore"
+	"github.com/grafana/alloy/internal/component"
+	"github.com/grafana/alloy/internal/component/prometheus/operator"
+	"github.com/grafana/alloy/internal/flow/logging/level"
+	"github.com/grafana/alloy/internal/service/cluster"
+	"github.com/grafana/alloy/internal/service/labelstore"
 	"gopkg.in/yaml.v3"
 )

diff --git a/internal/component/prometheus/operator/common/crdmanager.go b/internal/component/prometheus/operator/common/crdmanager.go
index d5f4b9bbb9..51414fc75b 100644
--- a/internal/component/prometheus/operator/common/crdmanager.go
+++ b/internal/component/prometheus/operator/common/crdmanager.go
@@ -10,12 +10,12 @@ import (
 	"time"

 	"github.com/go-kit/log"
-	"github.com/grafana/agent/internal/component"
-	"github.com/grafana/agent/internal/component/prometheus"
-	"github.com/grafana/agent/internal/flow/logging/level"
-	"github.com/grafana/agent/internal/service/cluster"
-	"github.com/grafana/agent/internal/service/http"
-	"github.com/grafana/agent/internal/service/labelstore"
+	"github.com/grafana/alloy/internal/component"
+	"github.com/grafana/alloy/internal/component/prometheus"
+	"github.com/grafana/alloy/internal/flow/logging/level"
+	"github.com/grafana/alloy/internal/service/cluster"
+	"github.com/grafana/alloy/internal/service/http"
+	"github.com/grafana/alloy/internal/service/labelstore"
 	"github.com/grafana/ckit/shard"
 	"github.com/prometheus/common/model"
 	"github.com/prometheus/prometheus/config"
@@ -28,9 +28,9 @@ import (
 	"sigs.k8s.io/controller-runtime/pkg/cache"
 	"sigs.k8s.io/controller-runtime/pkg/client"

-	"github.com/grafana/agent/internal/component/prometheus/operator"
-	"github.com/grafana/agent/internal/component/prometheus/operator/configgen"
-	compscrape "github.com/grafana/agent/internal/component/prometheus/scrape"
+	"github.com/grafana/alloy/internal/component/prometheus/operator"
+	"github.com/grafana/alloy/internal/component/prometheus/operator/configgen"
+	compscrape "github.com/grafana/alloy/internal/component/prometheus/scrape"
 	promopv1 "github.com/prometheus-operator/prometheus-operator/pkg/apis/monitoring/v1"
 	"k8s.io/apimachinery/pkg/labels"
 	"k8s.io/apimachinery/pkg/runtime"
diff --git a/internal/component/prometheus/operator/common/crdmanager_test.go b/internal/component/prometheus/operator/common/crdmanager_test.go
index abd6aeffb3..e1fb880bbf 100644
--- a/internal/component/prometheus/operator/common/crdmanager_test.go
+++ b/internal/component/prometheus/operator/common/crdmanager_test.go
@@ -6,10 +6,10 @@ import (
 	"golang.org/x/exp/maps"

 	"github.com/go-kit/log"
-	"github.com/grafana/agent/internal/component"
-	"github.com/grafana/agent/internal/component/prometheus/operator"
-	"github.com/grafana/agent/internal/service/cluster"
-	"github.com/grafana/agent/internal/service/labelstore"
+	"github.com/grafana/alloy/internal/component"
+	"github.com/grafana/alloy/internal/component/prometheus/operator"
+	"github.com/grafana/alloy/internal/service/cluster"
+	"github.com/grafana/alloy/internal/service/labelstore"
"github.com/prometheus/client_golang/prometheus" "github.com/prometheus/prometheus/config" "github.com/prometheus/prometheus/discovery" diff --git a/internal/component/prometheus/operator/configgen/config_gen.go b/internal/component/prometheus/operator/configgen/config_gen.go index 5c215b3b8f..53f7bf5ed6 100644 --- a/internal/component/prometheus/operator/configgen/config_gen.go +++ b/internal/component/prometheus/operator/configgen/config_gen.go @@ -5,9 +5,9 @@ package configgen import ( "regexp" - k8sConfig "github.com/grafana/agent/internal/component/common/kubernetes" - flow_relabel "github.com/grafana/agent/internal/component/common/relabel" - "github.com/grafana/agent/internal/component/prometheus/operator" + k8sConfig "github.com/grafana/alloy/internal/component/common/kubernetes" + flow_relabel "github.com/grafana/alloy/internal/component/common/relabel" + "github.com/grafana/alloy/internal/component/prometheus/operator" promopv1 "github.com/prometheus-operator/prometheus-operator/pkg/apis/monitoring/v1" commonConfig "github.com/prometheus/common/config" "github.com/prometheus/common/model" diff --git a/internal/component/prometheus/operator/configgen/config_gen_podmonitor_test.go b/internal/component/prometheus/operator/configgen/config_gen_podmonitor_test.go index fca82b1d1d..7f9a6b68f5 100644 --- a/internal/component/prometheus/operator/configgen/config_gen_podmonitor_test.go +++ b/internal/component/prometheus/operator/configgen/config_gen_podmonitor_test.go @@ -7,10 +7,10 @@ import ( "testing" "time" - "github.com/grafana/agent/internal/component/common/kubernetes" - flow_relabel "github.com/grafana/agent/internal/component/common/relabel" - "github.com/grafana/agent/internal/component/prometheus/operator" - "github.com/grafana/agent/internal/util" + "github.com/grafana/alloy/internal/component/common/kubernetes" + flow_relabel "github.com/grafana/alloy/internal/component/common/relabel" + "github.com/grafana/alloy/internal/component/prometheus/operator" + "github.com/grafana/alloy/internal/util" promopv1 "github.com/prometheus-operator/prometheus-operator/pkg/apis/monitoring/v1" commonConfig "github.com/prometheus/common/config" "github.com/prometheus/common/model" diff --git a/internal/component/prometheus/operator/configgen/config_gen_probe_test.go b/internal/component/prometheus/operator/configgen/config_gen_probe_test.go index 25b2910a3f..41d59321aa 100644 --- a/internal/component/prometheus/operator/configgen/config_gen_probe_test.go +++ b/internal/component/prometheus/operator/configgen/config_gen_probe_test.go @@ -7,9 +7,9 @@ import ( "testing" "time" - "github.com/grafana/agent/internal/component/common/kubernetes" - flow_relabel "github.com/grafana/agent/internal/component/common/relabel" - "github.com/grafana/agent/internal/util" + "github.com/grafana/alloy/internal/component/common/kubernetes" + flow_relabel "github.com/grafana/alloy/internal/component/common/relabel" + "github.com/grafana/alloy/internal/util" promopv1 "github.com/prometheus-operator/prometheus-operator/pkg/apis/monitoring/v1" commonConfig "github.com/prometheus/common/config" "github.com/prometheus/common/model" diff --git a/internal/component/prometheus/operator/configgen/config_gen_servicemonitor_test.go b/internal/component/prometheus/operator/configgen/config_gen_servicemonitor_test.go index 80fb36e87e..1e77469bf1 100644 --- a/internal/component/prometheus/operator/configgen/config_gen_servicemonitor_test.go +++ 
@@ -7,9 +7,9 @@ import (
 	"testing"
 	"time"

-	"github.com/grafana/agent/internal/component/common/kubernetes"
-	flow_relabel "github.com/grafana/agent/internal/component/common/relabel"
-	"github.com/grafana/agent/internal/util"
+	"github.com/grafana/alloy/internal/component/common/kubernetes"
+	flow_relabel "github.com/grafana/alloy/internal/component/common/relabel"
+	"github.com/grafana/alloy/internal/util"
 	promopv1 "github.com/prometheus-operator/prometheus-operator/pkg/apis/monitoring/v1"
 	commonConfig "github.com/prometheus/common/config"
 	"github.com/prometheus/common/model"
diff --git a/internal/component/prometheus/operator/configgen/config_gen_test.go b/internal/component/prometheus/operator/configgen/config_gen_test.go
index e399b1ba4b..fad7bfcc83 100644
--- a/internal/component/prometheus/operator/configgen/config_gen_test.go
+++ b/internal/component/prometheus/operator/configgen/config_gen_test.go
@@ -6,10 +6,10 @@ import (
 	"testing"
 	"time"

-	"github.com/grafana/agent/internal/component/common/config"
-	"github.com/grafana/agent/internal/component/common/kubernetes"
-	flow_relabel "github.com/grafana/agent/internal/component/common/relabel"
-	"github.com/grafana/agent/internal/component/prometheus/operator"
+	"github.com/grafana/alloy/internal/component/common/config"
+	"github.com/grafana/alloy/internal/component/common/kubernetes"
+	flow_relabel "github.com/grafana/alloy/internal/component/common/relabel"
+	"github.com/grafana/alloy/internal/component/prometheus/operator"
 	promopv1 "github.com/prometheus-operator/prometheus-operator/pkg/apis/monitoring/v1"
 	promConfig "github.com/prometheus/common/config"
 	"github.com/prometheus/common/model"
diff --git a/internal/component/prometheus/operator/podmonitors/operator.go b/internal/component/prometheus/operator/podmonitors/operator.go
index b420b404d4..ed3cf1bec1 100644
--- a/internal/component/prometheus/operator/podmonitors/operator.go
+++ b/internal/component/prometheus/operator/podmonitors/operator.go
@@ -1,10 +1,10 @@
 package podmonitors

 import (
-	"github.com/grafana/agent/internal/component"
-	"github.com/grafana/agent/internal/component/prometheus/operator"
-	"github.com/grafana/agent/internal/component/prometheus/operator/common"
-	"github.com/grafana/agent/internal/featuregate"
+	"github.com/grafana/alloy/internal/component"
+	"github.com/grafana/alloy/internal/component/prometheus/operator"
+	"github.com/grafana/alloy/internal/component/prometheus/operator/common"
+	"github.com/grafana/alloy/internal/featuregate"
 )

 func init() {
diff --git a/internal/component/prometheus/operator/probes/probes.go b/internal/component/prometheus/operator/probes/probes.go
index 42fb6ace17..be75ba0c9a 100644
--- a/internal/component/prometheus/operator/probes/probes.go
+++ b/internal/component/prometheus/operator/probes/probes.go
@@ -1,10 +1,10 @@
 package probes

 import (
-	"github.com/grafana/agent/internal/component"
-	"github.com/grafana/agent/internal/component/prometheus/operator"
-	"github.com/grafana/agent/internal/component/prometheus/operator/common"
-	"github.com/grafana/agent/internal/featuregate"
+	"github.com/grafana/alloy/internal/component"
+	"github.com/grafana/alloy/internal/component/prometheus/operator"
+	"github.com/grafana/alloy/internal/component/prometheus/operator/common"
+	"github.com/grafana/alloy/internal/featuregate"
 )

 func init() {
diff --git a/internal/component/prometheus/operator/servicemonitors/servicemonitors.go b/internal/component/prometheus/operator/servicemonitors/servicemonitors.go
index 8c86c8d07d..d6231b92ec 100644
--- a/internal/component/prometheus/operator/servicemonitors/servicemonitors.go
+++ b/internal/component/prometheus/operator/servicemonitors/servicemonitors.go
@@ -1,10 +1,10 @@
 package servicemonitors

 import (
-	"github.com/grafana/agent/internal/component"
-	"github.com/grafana/agent/internal/component/prometheus/operator"
-	"github.com/grafana/agent/internal/component/prometheus/operator/common"
-	"github.com/grafana/agent/internal/featuregate"
+	"github.com/grafana/alloy/internal/component"
+	"github.com/grafana/alloy/internal/component/prometheus/operator"
+	"github.com/grafana/alloy/internal/component/prometheus/operator/common"
+	"github.com/grafana/alloy/internal/featuregate"
 )

 func init() {
diff --git a/internal/component/prometheus/operator/types.go b/internal/component/prometheus/operator/types.go
index 7f06f2f040..14c42410f6 100644
--- a/internal/component/prometheus/operator/types.go
+++ b/internal/component/prometheus/operator/types.go
@@ -3,11 +3,11 @@ package operator
 import (
 	"time"

-	"github.com/grafana/agent/internal/component/common/config"
-	"github.com/grafana/agent/internal/component/common/kubernetes"
-	flow_relabel "github.com/grafana/agent/internal/component/common/relabel"
-	"github.com/grafana/agent/internal/component/prometheus/scrape"
-	"github.com/grafana/agent/internal/service/cluster"
+	"github.com/grafana/alloy/internal/component/common/config"
+	"github.com/grafana/alloy/internal/component/common/kubernetes"
+	flow_relabel "github.com/grafana/alloy/internal/component/common/relabel"
+	"github.com/grafana/alloy/internal/component/prometheus/scrape"
+	"github.com/grafana/alloy/internal/service/cluster"
 	"github.com/prometheus/common/model"
 	promconfig "github.com/prometheus/prometheus/config"
 	"github.com/prometheus/prometheus/storage"
diff --git a/internal/component/prometheus/receive_http/receive_http.go b/internal/component/prometheus/receive_http/receive_http.go
index 54200481bd..c6c511fff8 100644
--- a/internal/component/prometheus/receive_http/receive_http.go
+++ b/internal/component/prometheus/receive_http/receive_http.go
@@ -8,13 +8,13 @@ import (
 	"sync"

 	"github.com/gorilla/mux"
-	"github.com/grafana/agent/internal/component"
-	fnet "github.com/grafana/agent/internal/component/common/net"
-	agentprom "github.com/grafana/agent/internal/component/prometheus"
-	"github.com/grafana/agent/internal/featuregate"
-	"github.com/grafana/agent/internal/flow/logging/level"
-	"github.com/grafana/agent/internal/service/labelstore"
-	"github.com/grafana/agent/internal/util"
+	"github.com/grafana/alloy/internal/component"
+	fnet "github.com/grafana/alloy/internal/component/common/net"
+	agentprom "github.com/grafana/alloy/internal/component/prometheus"
+	"github.com/grafana/alloy/internal/featuregate"
+	"github.com/grafana/alloy/internal/flow/logging/level"
+	"github.com/grafana/alloy/internal/service/labelstore"
+	"github.com/grafana/alloy/internal/util"
 	"github.com/prometheus/client_golang/prometheus"
 	"github.com/prometheus/prometheus/storage"
 	"github.com/prometheus/prometheus/storage/remote"
diff --git a/internal/component/prometheus/receive_http/receive_http_test.go b/internal/component/prometheus/receive_http/receive_http_test.go
index 9d72dae277..37a1dd2e37 100644
--- a/internal/component/prometheus/receive_http/receive_http_test.go
+++ b/internal/component/prometheus/receive_http/receive_http_test.go
@@ -9,11 +9,11 @@ import (
 	"testing"
 	"time"
"github.com/golang/snappy" - "github.com/grafana/agent/internal/component" - fnet "github.com/grafana/agent/internal/component/common/net" - agentprom "github.com/grafana/agent/internal/component/prometheus" - "github.com/grafana/agent/internal/service/labelstore" - "github.com/grafana/agent/internal/util" + "github.com/grafana/alloy/internal/component" + fnet "github.com/grafana/alloy/internal/component/common/net" + agentprom "github.com/grafana/alloy/internal/component/prometheus" + "github.com/grafana/alloy/internal/service/labelstore" + "github.com/grafana/alloy/internal/util" "github.com/phayes/freeport" "github.com/prometheus/client_golang/prometheus" "github.com/prometheus/common/config" diff --git a/internal/component/prometheus/relabel/relabel.go b/internal/component/prometheus/relabel/relabel.go index 0a4a81510c..9db46df950 100644 --- a/internal/component/prometheus/relabel/relabel.go +++ b/internal/component/prometheus/relabel/relabel.go @@ -5,11 +5,11 @@ import ( "fmt" "sync" - "github.com/grafana/agent/internal/component" - flow_relabel "github.com/grafana/agent/internal/component/common/relabel" - "github.com/grafana/agent/internal/component/prometheus" - "github.com/grafana/agent/internal/featuregate" - "github.com/grafana/agent/internal/service/labelstore" + "github.com/grafana/alloy/internal/component" + flow_relabel "github.com/grafana/alloy/internal/component/common/relabel" + "github.com/grafana/alloy/internal/component/prometheus" + "github.com/grafana/alloy/internal/featuregate" + "github.com/grafana/alloy/internal/service/labelstore" lru "github.com/hashicorp/golang-lru/v2" prometheus_client "github.com/prometheus/client_golang/prometheus" "github.com/prometheus/prometheus/model/exemplar" diff --git a/internal/component/prometheus/relabel/relabel_test.go b/internal/component/prometheus/relabel/relabel_test.go index c7d0a04442..69692050ad 100644 --- a/internal/component/prometheus/relabel/relabel_test.go +++ b/internal/component/prometheus/relabel/relabel_test.go @@ -8,12 +8,12 @@ import ( "context" - "github.com/grafana/agent/internal/component" - flow_relabel "github.com/grafana/agent/internal/component/common/relabel" - "github.com/grafana/agent/internal/component/prometheus" - "github.com/grafana/agent/internal/flow/componenttest" - "github.com/grafana/agent/internal/service/labelstore" - "github.com/grafana/agent/internal/util" + "github.com/grafana/alloy/internal/component" + flow_relabel "github.com/grafana/alloy/internal/component/common/relabel" + "github.com/grafana/alloy/internal/component/prometheus" + "github.com/grafana/alloy/internal/flow/componenttest" + "github.com/grafana/alloy/internal/service/labelstore" + "github.com/grafana/alloy/internal/util" "github.com/grafana/alloy/syntax" prom "github.com/prometheus/client_golang/prometheus" "github.com/prometheus/prometheus/model/labels" diff --git a/internal/component/prometheus/remotewrite/cli.go b/internal/component/prometheus/remotewrite/cli.go index df6dfee346..74a21ee2dd 100644 --- a/internal/component/prometheus/remotewrite/cli.go +++ b/internal/component/prometheus/remotewrite/cli.go @@ -6,7 +6,7 @@ import ( "path/filepath" "sort" - "github.com/grafana/agent/internal/static/agentctl/waltools" + "github.com/grafana/alloy/internal/static/agentctl/waltools" "github.com/olekukonko/tablewriter" "github.com/spf13/cobra" ) diff --git a/internal/component/prometheus/remotewrite/remote_write.go b/internal/component/prometheus/remotewrite/remote_write.go index b5b00b78b8..14bf08c8b4 100644 --- 
+++ b/internal/component/prometheus/remotewrite/remote_write.go
@@ -10,14 +10,14 @@ import (
 	"time"

 	"github.com/go-kit/log"
-	"github.com/grafana/agent/internal/agentseed"
-	"github.com/grafana/agent/internal/component"
-	"github.com/grafana/agent/internal/component/prometheus"
-	"github.com/grafana/agent/internal/featuregate"
-	"github.com/grafana/agent/internal/flow/logging/level"
-	"github.com/grafana/agent/internal/service/labelstore"
-	"github.com/grafana/agent/internal/static/metrics/wal"
-	"github.com/grafana/agent/internal/useragent"
+	"github.com/grafana/alloy/internal/agentseed"
+	"github.com/grafana/alloy/internal/component"
+	"github.com/grafana/alloy/internal/component/prometheus"
+	"github.com/grafana/alloy/internal/featuregate"
+	"github.com/grafana/alloy/internal/flow/logging/level"
+	"github.com/grafana/alloy/internal/service/labelstore"
+	"github.com/grafana/alloy/internal/static/metrics/wal"
+	"github.com/grafana/alloy/internal/useragent"
 	"github.com/prometheus/prometheus/model/exemplar"
 	"github.com/prometheus/prometheus/model/histogram"
 	"github.com/prometheus/prometheus/model/labels"
diff --git a/internal/component/prometheus/remotewrite/remote_write_test.go b/internal/component/prometheus/remotewrite/remote_write_test.go
index 51f528d6a4..654cb06a83 100644
--- a/internal/component/prometheus/remotewrite/remote_write_test.go
+++ b/internal/component/prometheus/remotewrite/remote_write_test.go
@@ -8,9 +8,9 @@ import (
 	"testing"
 	"time"

-	"github.com/grafana/agent/internal/component/prometheus/remotewrite"
-	"github.com/grafana/agent/internal/flow/componenttest"
-	"github.com/grafana/agent/internal/util"
+	"github.com/grafana/alloy/internal/component/prometheus/remotewrite"
+	"github.com/grafana/alloy/internal/flow/componenttest"
+	"github.com/grafana/alloy/internal/util"
 	"github.com/grafana/alloy/syntax"
 	"github.com/prometheus/prometheus/model/labels"
 	"github.com/prometheus/prometheus/prompb"
diff --git a/internal/component/prometheus/remotewrite/types.go b/internal/component/prometheus/remotewrite/types.go
index f678e0ca51..6d365270a7 100644
--- a/internal/component/prometheus/remotewrite/types.go
+++ b/internal/component/prometheus/remotewrite/types.go
@@ -6,8 +6,8 @@ import (
 	"sort"
 	"time"

-	types "github.com/grafana/agent/internal/component/common/config"
-	flow_relabel "github.com/grafana/agent/internal/component/common/relabel"
+	types "github.com/grafana/alloy/internal/component/common/config"
+	flow_relabel "github.com/grafana/alloy/internal/component/common/relabel"
 	"github.com/grafana/alloy/syntax/alloytypes"

 	"github.com/google/uuid"
diff --git a/internal/component/prometheus/scrape/scrape.go b/internal/component/prometheus/scrape/scrape.go
index 31ea701251..fe3a8b46c9 100644
--- a/internal/component/prometheus/scrape/scrape.go
+++ b/internal/component/prometheus/scrape/scrape.go
@@ -8,16 +8,16 @@ import (
 	"time"

 	"github.com/alecthomas/units"
-	"github.com/grafana/agent/internal/component"
-	component_config "github.com/grafana/agent/internal/component/common/config"
-	"github.com/grafana/agent/internal/component/discovery"
-	"github.com/grafana/agent/internal/component/prometheus"
-	"github.com/grafana/agent/internal/featuregate"
-	"github.com/grafana/agent/internal/flow/logging/level"
-	"github.com/grafana/agent/internal/service/cluster"
-	"github.com/grafana/agent/internal/service/http"
-	"github.com/grafana/agent/internal/service/labelstore"
-	"github.com/grafana/agent/internal/useragent"
+	"github.com/grafana/alloy/internal/component"
"github.com/grafana/alloy/internal/component" + component_config "github.com/grafana/alloy/internal/component/common/config" + "github.com/grafana/alloy/internal/component/discovery" + "github.com/grafana/alloy/internal/component/prometheus" + "github.com/grafana/alloy/internal/featuregate" + "github.com/grafana/alloy/internal/flow/logging/level" + "github.com/grafana/alloy/internal/service/cluster" + "github.com/grafana/alloy/internal/service/http" + "github.com/grafana/alloy/internal/service/labelstore" + "github.com/grafana/alloy/internal/useragent" client_prometheus "github.com/prometheus/client_golang/prometheus" config_util "github.com/prometheus/common/config" "github.com/prometheus/common/model" diff --git a/internal/component/prometheus/scrape/scrape_test.go b/internal/component/prometheus/scrape/scrape_test.go index 630a3d7f5c..c700be4a3d 100644 --- a/internal/component/prometheus/scrape/scrape_test.go +++ b/internal/component/prometheus/scrape/scrape_test.go @@ -8,12 +8,12 @@ import ( "testing" "time" - "github.com/grafana/agent/internal/component" - "github.com/grafana/agent/internal/component/prometheus" - "github.com/grafana/agent/internal/service/cluster" - http_service "github.com/grafana/agent/internal/service/http" - "github.com/grafana/agent/internal/service/labelstore" - "github.com/grafana/agent/internal/util" + "github.com/grafana/alloy/internal/component" + "github.com/grafana/alloy/internal/component/prometheus" + "github.com/grafana/alloy/internal/service/cluster" + http_service "github.com/grafana/alloy/internal/service/http" + "github.com/grafana/alloy/internal/service/labelstore" + "github.com/grafana/alloy/internal/util" "github.com/grafana/alloy/syntax" "github.com/grafana/ckit/memconn" prometheus_client "github.com/prometheus/client_golang/prometheus" diff --git a/internal/component/pyroscope/ebpf/args.go b/internal/component/pyroscope/ebpf/args.go index 808a121d82..d01840f427 100644 --- a/internal/component/pyroscope/ebpf/args.go +++ b/internal/component/pyroscope/ebpf/args.go @@ -3,8 +3,8 @@ package ebpf import ( "time" - "github.com/grafana/agent/internal/component/discovery" - "github.com/grafana/agent/internal/component/pyroscope" + "github.com/grafana/alloy/internal/component/discovery" + "github.com/grafana/alloy/internal/component/pyroscope" ) type Arguments struct { diff --git a/internal/component/pyroscope/ebpf/ebpf_linux.go b/internal/component/pyroscope/ebpf/ebpf_linux.go index 7416ee8854..bc5c16f56d 100644 --- a/internal/component/pyroscope/ebpf/ebpf_linux.go +++ b/internal/component/pyroscope/ebpf/ebpf_linux.go @@ -10,10 +10,10 @@ import ( "sync" "time" - "github.com/grafana/agent/internal/component" - "github.com/grafana/agent/internal/component/pyroscope" - "github.com/grafana/agent/internal/featuregate" - "github.com/grafana/agent/internal/flow/logging/level" + "github.com/grafana/alloy/internal/component" + "github.com/grafana/alloy/internal/component/pyroscope" + "github.com/grafana/alloy/internal/featuregate" + "github.com/grafana/alloy/internal/flow/logging/level" ebpfspy "github.com/grafana/pyroscope/ebpf" demangle2 "github.com/grafana/pyroscope/ebpf/cpp/demangle" "github.com/grafana/pyroscope/ebpf/pprof" diff --git a/internal/component/pyroscope/ebpf/ebpf_linux_test.go b/internal/component/pyroscope/ebpf/ebpf_linux_test.go index 71e49f53c2..97baf8f8f6 100644 --- a/internal/component/pyroscope/ebpf/ebpf_linux_test.go +++ b/internal/component/pyroscope/ebpf/ebpf_linux_test.go @@ -9,9 +9,9 @@ import ( "testing" "time" - 
"github.com/grafana/agent/internal/component" - "github.com/grafana/agent/internal/component/pyroscope" - "github.com/grafana/agent/internal/util" + "github.com/grafana/alloy/internal/component" + "github.com/grafana/alloy/internal/component/pyroscope" + "github.com/grafana/alloy/internal/util" "github.com/grafana/alloy/syntax" ebpfspy "github.com/grafana/pyroscope/ebpf" "github.com/grafana/pyroscope/ebpf/pprof" diff --git a/internal/component/pyroscope/ebpf/ebpf_placeholder.go b/internal/component/pyroscope/ebpf/ebpf_placeholder.go index 18e6aabe5c..b7da8f531a 100644 --- a/internal/component/pyroscope/ebpf/ebpf_placeholder.go +++ b/internal/component/pyroscope/ebpf/ebpf_placeholder.go @@ -5,9 +5,9 @@ package ebpf import ( "context" - "github.com/grafana/agent/internal/component" - "github.com/grafana/agent/internal/featuregate" - "github.com/grafana/agent/internal/flow/logging/level" + "github.com/grafana/alloy/internal/component" + "github.com/grafana/alloy/internal/featuregate" + "github.com/grafana/alloy/internal/flow/logging/level" ) func init() { diff --git a/internal/component/pyroscope/java/args.go b/internal/component/pyroscope/java/args.go index b6529ef368..85ba52b77a 100644 --- a/internal/component/pyroscope/java/args.go +++ b/internal/component/pyroscope/java/args.go @@ -3,8 +3,8 @@ package java import ( "time" - "github.com/grafana/agent/internal/component/discovery" - "github.com/grafana/agent/internal/component/pyroscope" + "github.com/grafana/alloy/internal/component/discovery" + "github.com/grafana/alloy/internal/component/pyroscope" ) type Arguments struct { diff --git a/internal/component/pyroscope/java/java.go b/internal/component/pyroscope/java/java.go index d2832df519..27e489780f 100644 --- a/internal/component/pyroscope/java/java.go +++ b/internal/component/pyroscope/java/java.go @@ -9,11 +9,11 @@ import ( "strconv" "sync" - "github.com/grafana/agent/internal/component" - "github.com/grafana/agent/internal/component/pyroscope" - "github.com/grafana/agent/internal/component/pyroscope/java/asprof" - "github.com/grafana/agent/internal/featuregate" - "github.com/grafana/agent/internal/flow/logging/level" + "github.com/grafana/alloy/internal/component" + "github.com/grafana/alloy/internal/component/pyroscope" + "github.com/grafana/alloy/internal/component/pyroscope/java/asprof" + "github.com/grafana/alloy/internal/featuregate" + "github.com/grafana/alloy/internal/flow/logging/level" ) const ( diff --git a/internal/component/pyroscope/java/java_stub.go b/internal/component/pyroscope/java/java_stub.go index 25daeaf182..6548e77715 100644 --- a/internal/component/pyroscope/java/java_stub.go +++ b/internal/component/pyroscope/java/java_stub.go @@ -5,9 +5,9 @@ package java import ( "context" - "github.com/grafana/agent/internal/component" - "github.com/grafana/agent/internal/featuregate" - "github.com/grafana/agent/internal/flow/logging/level" + "github.com/grafana/alloy/internal/component" + "github.com/grafana/alloy/internal/featuregate" + "github.com/grafana/alloy/internal/flow/logging/level" ) func init() { diff --git a/internal/component/pyroscope/java/loop.go b/internal/component/pyroscope/java/loop.go index 0e591b9501..3fd7f1c271 100644 --- a/internal/component/pyroscope/java/loop.go +++ b/internal/component/pyroscope/java/loop.go @@ -13,10 +13,10 @@ import ( "time" "github.com/go-kit/log" - "github.com/grafana/agent/internal/component/discovery" - "github.com/grafana/agent/internal/component/pyroscope" - 
"github.com/grafana/agent/internal/component/pyroscope/java/asprof" - "github.com/grafana/agent/internal/flow/logging/level" + "github.com/grafana/alloy/internal/component/discovery" + "github.com/grafana/alloy/internal/component/pyroscope" + "github.com/grafana/alloy/internal/component/pyroscope/java/asprof" + "github.com/grafana/alloy/internal/flow/logging/level" jfrpprof "github.com/grafana/jfr-parser/pprof" jfrpprofPyroscope "github.com/grafana/jfr-parser/pprof/pyroscope" "github.com/prometheus/prometheus/model/labels" diff --git a/internal/component/pyroscope/java/target.go b/internal/component/pyroscope/java/target.go index 52fa2d7a97..cffdf967b5 100644 --- a/internal/component/pyroscope/java/target.go +++ b/internal/component/pyroscope/java/target.go @@ -3,7 +3,7 @@ package java import ( "fmt" - "github.com/grafana/agent/internal/component/discovery" + "github.com/grafana/alloy/internal/component/discovery" ) const ( diff --git a/internal/component/pyroscope/scrape/delta_profiles.go b/internal/component/pyroscope/scrape/delta_profiles.go index 96ac80e5ae..926b7179cc 100644 --- a/internal/component/pyroscope/scrape/delta_profiles.go +++ b/internal/component/pyroscope/scrape/delta_profiles.go @@ -8,8 +8,8 @@ import ( "io" "sync" - "github.com/grafana/agent/internal/component/pyroscope" - "github.com/grafana/agent/internal/component/pyroscope/scrape/internal/fastdelta" + "github.com/grafana/alloy/internal/component/pyroscope" + "github.com/grafana/alloy/internal/component/pyroscope/scrape/internal/fastdelta" "github.com/klauspost/compress/gzip" "github.com/prometheus/common/model" "github.com/prometheus/prometheus/model/labels" diff --git a/internal/component/pyroscope/scrape/delta_profiles_test.go b/internal/component/pyroscope/scrape/delta_profiles_test.go index 40d807c87c..29cbfd86f2 100644 --- a/internal/component/pyroscope/scrape/delta_profiles_test.go +++ b/internal/component/pyroscope/scrape/delta_profiles_test.go @@ -9,7 +9,7 @@ import ( googlev1 "github.com/grafana/pyroscope/api/gen/proto/go/google/v1" - "github.com/grafana/agent/internal/component/pyroscope" + "github.com/grafana/alloy/internal/component/pyroscope" "github.com/klauspost/compress/gzip" "github.com/prometheus/common/model" "github.com/prometheus/prometheus/model/labels" diff --git a/internal/component/pyroscope/scrape/internal/fastdelta/delta_map.go b/internal/component/pyroscope/scrape/internal/fastdelta/delta_map.go index 9d247bcc8b..94db57720a 100644 --- a/internal/component/pyroscope/scrape/internal/fastdelta/delta_map.go +++ b/internal/component/pyroscope/scrape/internal/fastdelta/delta_map.go @@ -10,7 +10,7 @@ import ( "github.com/spaolacci/murmur3" - "github.com/grafana/agent/internal/component/pyroscope/scrape/internal/pproflite" + "github.com/grafana/alloy/internal/component/pyroscope/scrape/internal/pproflite" ) // As of Go 1.19, the Go heap profile has 4 values per sample, with 2 of them diff --git a/internal/component/pyroscope/scrape/internal/fastdelta/fd.go b/internal/component/pyroscope/scrape/internal/fastdelta/fd.go index d956e5f0fa..d1e1699c49 100644 --- a/internal/component/pyroscope/scrape/internal/fastdelta/fd.go +++ b/internal/component/pyroscope/scrape/internal/fastdelta/fd.go @@ -67,7 +67,7 @@ import ( "github.com/spaolacci/murmur3" - "github.com/grafana/agent/internal/component/pyroscope/scrape/internal/pproflite" + "github.com/grafana/alloy/internal/component/pyroscope/scrape/internal/pproflite" ) // ValueType describes the type and unit of a value. 
diff --git a/internal/component/pyroscope/scrape/internal/fastdelta/fuzz_test.go b/internal/component/pyroscope/scrape/internal/fastdelta/fuzz_test.go index eeef83459d..432ed0b3be 100644 --- a/internal/component/pyroscope/scrape/internal/fastdelta/fuzz_test.go +++ b/internal/component/pyroscope/scrape/internal/fastdelta/fuzz_test.go @@ -11,7 +11,7 @@ import ( "io" "testing" - "github.com/grafana/agent/internal/component/pyroscope/scrape/internal/fastdelta" + "github.com/grafana/alloy/internal/component/pyroscope/scrape/internal/fastdelta" ) // FuzzDelta looks for inputs to delta which cause crashes. This is to account diff --git a/internal/component/pyroscope/scrape/internal/fastdelta/hasher.go b/internal/component/pyroscope/scrape/internal/fastdelta/hasher.go index 52ede6de9f..dd96ed8042 100644 --- a/internal/component/pyroscope/scrape/internal/fastdelta/hasher.go +++ b/internal/component/pyroscope/scrape/internal/fastdelta/hasher.go @@ -13,7 +13,7 @@ import ( "github.com/spaolacci/murmur3" - "github.com/grafana/agent/internal/component/pyroscope/scrape/internal/pproflite" + "github.com/grafana/alloy/internal/component/pyroscope/scrape/internal/pproflite" ) // Hash is a 128-bit hash representing sample identity diff --git a/internal/component/pyroscope/scrape/internal/pproflite/pproflite_test.go b/internal/component/pyroscope/scrape/internal/pproflite/pproflite_test.go index b240b1dd61..7c93708912 100644 --- a/internal/component/pyroscope/scrape/internal/pproflite/pproflite_test.go +++ b/internal/component/pyroscope/scrape/internal/pproflite/pproflite_test.go @@ -14,7 +14,7 @@ import ( "github.com/google/pprof/profile" "github.com/stretchr/testify/require" - "github.com/grafana/agent/internal/component/pyroscope/scrape/internal/pproflite" + "github.com/grafana/alloy/internal/component/pyroscope/scrape/internal/pproflite" ) func TestDecoderEncoder(t *testing.T) { diff --git a/internal/component/pyroscope/scrape/manager.go b/internal/component/pyroscope/scrape/manager.go index 7eddeb31cd..d9f67b8c06 100644 --- a/internal/component/pyroscope/scrape/manager.go +++ b/internal/component/pyroscope/scrape/manager.go @@ -6,8 +6,8 @@ import ( "time" "github.com/go-kit/log" - "github.com/grafana/agent/internal/component/pyroscope" - "github.com/grafana/agent/internal/flow/logging/level" + "github.com/grafana/alloy/internal/component/pyroscope" + "github.com/grafana/alloy/internal/flow/logging/level" "github.com/prometheus/prometheus/discovery/targetgroup" ) diff --git a/internal/component/pyroscope/scrape/manager_test.go b/internal/component/pyroscope/scrape/manager_test.go index 8b56cf2cbc..4ea5035216 100644 --- a/internal/component/pyroscope/scrape/manager_test.go +++ b/internal/component/pyroscope/scrape/manager_test.go @@ -5,8 +5,8 @@ import ( "testing" "time" - "github.com/grafana/agent/internal/component/pyroscope" - "github.com/grafana/agent/internal/util" + "github.com/grafana/alloy/internal/component/pyroscope" + "github.com/grafana/alloy/internal/util" "github.com/prometheus/common/model" "github.com/prometheus/prometheus/discovery/targetgroup" "github.com/prometheus/prometheus/model/labels" diff --git a/internal/component/pyroscope/scrape/scrape.go b/internal/component/pyroscope/scrape/scrape.go index 9507eced24..7c5f00662a 100644 --- a/internal/component/pyroscope/scrape/scrape.go +++ b/internal/component/pyroscope/scrape/scrape.go @@ -7,17 +7,17 @@ import ( "sync" "time" - "github.com/grafana/agent/internal/component/pyroscope" - "github.com/grafana/agent/internal/featuregate" - 
"github.com/grafana/agent/internal/flow/logging/level" - "github.com/grafana/agent/internal/service/cluster" + "github.com/grafana/alloy/internal/component/pyroscope" + "github.com/grafana/alloy/internal/featuregate" + "github.com/grafana/alloy/internal/flow/logging/level" + "github.com/grafana/alloy/internal/service/cluster" "github.com/prometheus/common/model" "github.com/prometheus/prometheus/discovery/targetgroup" - "github.com/grafana/agent/internal/component" - component_config "github.com/grafana/agent/internal/component/common/config" - "github.com/grafana/agent/internal/component/discovery" - "github.com/grafana/agent/internal/component/prometheus/scrape" + "github.com/grafana/alloy/internal/component" + component_config "github.com/grafana/alloy/internal/component/common/config" + "github.com/grafana/alloy/internal/component/discovery" + "github.com/grafana/alloy/internal/component/prometheus/scrape" ) const ( diff --git a/internal/component/pyroscope/scrape/scrape_loop.go b/internal/component/pyroscope/scrape/scrape_loop.go index 8851249927..34d94a7b30 100644 --- a/internal/component/pyroscope/scrape/scrape_loop.go +++ b/internal/component/pyroscope/scrape/scrape_loop.go @@ -11,9 +11,9 @@ import ( "time" "github.com/go-kit/log" - "github.com/grafana/agent/internal/component/pyroscope" - "github.com/grafana/agent/internal/flow/logging/level" - "github.com/grafana/agent/internal/useragent" + "github.com/grafana/alloy/internal/component/pyroscope" + "github.com/grafana/alloy/internal/flow/logging/level" + "github.com/grafana/alloy/internal/useragent" commonconfig "github.com/prometheus/common/config" "github.com/prometheus/prometheus/discovery/targetgroup" "github.com/prometheus/prometheus/util/pool" diff --git a/internal/component/pyroscope/scrape/scrape_loop_test.go b/internal/component/pyroscope/scrape/scrape_loop_test.go index 2b2ea5278d..075051adf5 100644 --- a/internal/component/pyroscope/scrape/scrape_loop_test.go +++ b/internal/component/pyroscope/scrape/scrape_loop_test.go @@ -11,9 +11,9 @@ import ( "time" "github.com/go-kit/log" - "github.com/grafana/agent/internal/component/discovery" - "github.com/grafana/agent/internal/component/pyroscope" - "github.com/grafana/agent/internal/util" + "github.com/grafana/alloy/internal/component/discovery" + "github.com/grafana/alloy/internal/component/pyroscope" + "github.com/grafana/alloy/internal/util" "github.com/prometheus/common/model" "github.com/prometheus/prometheus/discovery/targetgroup" "github.com/prometheus/prometheus/model/labels" diff --git a/internal/component/pyroscope/scrape/scrape_test.go b/internal/component/pyroscope/scrape/scrape_test.go index 1193b0b37c..57c5c8d6bb 100644 --- a/internal/component/pyroscope/scrape/scrape_test.go +++ b/internal/component/pyroscope/scrape/scrape_test.go @@ -9,12 +9,12 @@ import ( "testing" "time" - "github.com/grafana/agent/internal/component" - "github.com/grafana/agent/internal/component/discovery" - "github.com/grafana/agent/internal/component/prometheus/scrape" - "github.com/grafana/agent/internal/component/pyroscope" - "github.com/grafana/agent/internal/service/cluster" - "github.com/grafana/agent/internal/util" + "github.com/grafana/alloy/internal/component" + "github.com/grafana/alloy/internal/component/discovery" + "github.com/grafana/alloy/internal/component/prometheus/scrape" + "github.com/grafana/alloy/internal/component/pyroscope" + "github.com/grafana/alloy/internal/service/cluster" + "github.com/grafana/alloy/internal/util" "github.com/grafana/alloy/syntax" 
"github.com/prometheus/client_golang/prometheus" "github.com/prometheus/common/model" diff --git a/internal/component/pyroscope/write/write.go b/internal/component/pyroscope/write/write.go index 7ecd1b20fe..ff51ede6d2 100644 --- a/internal/component/pyroscope/write/write.go +++ b/internal/component/pyroscope/write/write.go @@ -7,19 +7,19 @@ import ( "time" "connectrpc.com/connect" - "github.com/grafana/agent/internal/agentseed" - "github.com/grafana/agent/internal/component/pyroscope" - "github.com/grafana/agent/internal/featuregate" - "github.com/grafana/agent/internal/flow/logging/level" - "github.com/grafana/agent/internal/useragent" + "github.com/grafana/alloy/internal/agentseed" + "github.com/grafana/alloy/internal/component/pyroscope" + "github.com/grafana/alloy/internal/featuregate" + "github.com/grafana/alloy/internal/flow/logging/level" + "github.com/grafana/alloy/internal/useragent" "github.com/oklog/run" commonconfig "github.com/prometheus/common/config" "github.com/prometheus/common/model" "github.com/prometheus/prometheus/model/labels" "go.uber.org/multierr" - "github.com/grafana/agent/internal/component" - "github.com/grafana/agent/internal/component/common/config" + "github.com/grafana/alloy/internal/component" + "github.com/grafana/alloy/internal/component/common/config" "github.com/grafana/dskit/backoff" pushv1 "github.com/grafana/pyroscope/api/gen/proto/go/push/v1" "github.com/grafana/pyroscope/api/gen/proto/go/push/v1/pushv1connect" diff --git a/internal/component/pyroscope/write/write_test.go b/internal/component/pyroscope/write/write_test.go index d803c3a4d7..73172bb99d 100644 --- a/internal/component/pyroscope/write/write_test.go +++ b/internal/component/pyroscope/write/write_test.go @@ -10,9 +10,9 @@ import ( "time" "connectrpc.com/connect" - "github.com/grafana/agent/internal/component" - "github.com/grafana/agent/internal/component/pyroscope" - "github.com/grafana/agent/internal/util" + "github.com/grafana/alloy/internal/component" + "github.com/grafana/alloy/internal/component/pyroscope" + "github.com/grafana/alloy/internal/util" "github.com/grafana/alloy/syntax" pushv1 "github.com/grafana/pyroscope/api/gen/proto/go/push/v1" "github.com/grafana/pyroscope/api/gen/proto/go/push/v1/pushv1connect" diff --git a/internal/component/registry.go b/internal/component/registry.go index b382719bcd..47695215a5 100644 --- a/internal/component/registry.go +++ b/internal/component/registry.go @@ -7,7 +7,7 @@ import ( "strings" "github.com/go-kit/log" - "github.com/grafana/agent/internal/featuregate" + "github.com/grafana/alloy/internal/featuregate" "github.com/grafana/regexp" "github.com/prometheus/client_golang/prometheus" "go.opentelemetry.io/otel/trace" diff --git a/internal/component/remote/http/http.go b/internal/component/remote/http/http.go index 3f9669bbc8..8e22254dad 100644 --- a/internal/component/remote/http/http.go +++ b/internal/component/remote/http/http.go @@ -11,11 +11,11 @@ import ( "time" "github.com/go-kit/log" - "github.com/grafana/agent/internal/component" - common_config "github.com/grafana/agent/internal/component/common/config" - "github.com/grafana/agent/internal/featuregate" - "github.com/grafana/agent/internal/flow/logging/level" - "github.com/grafana/agent/internal/useragent" + "github.com/grafana/alloy/internal/component" + common_config "github.com/grafana/alloy/internal/component/common/config" + "github.com/grafana/alloy/internal/featuregate" + "github.com/grafana/alloy/internal/flow/logging/level" + "github.com/grafana/alloy/internal/useragent" 
"github.com/grafana/alloy/syntax/alloytypes" prom_config "github.com/prometheus/common/config" ) diff --git a/internal/component/remote/http/http_test.go b/internal/component/remote/http/http_test.go index ec89669436..dcb0529582 100644 --- a/internal/component/remote/http/http_test.go +++ b/internal/component/remote/http/http_test.go @@ -10,10 +10,10 @@ import ( "testing" "time" - http_component "github.com/grafana/agent/internal/component/remote/http" - "github.com/grafana/agent/internal/flow/componenttest" - "github.com/grafana/agent/internal/flow/logging/level" - "github.com/grafana/agent/internal/util" + http_component "github.com/grafana/alloy/internal/component/remote/http" + "github.com/grafana/alloy/internal/flow/componenttest" + "github.com/grafana/alloy/internal/flow/logging/level" + "github.com/grafana/alloy/internal/util" "github.com/grafana/alloy/syntax" "github.com/grafana/alloy/syntax/alloytypes" "github.com/grafana/dskit/backoff" diff --git a/internal/component/remote/kubernetes/configmap/configmap.go b/internal/component/remote/kubernetes/configmap/configmap.go index bb45426238..5489f2dcd2 100644 --- a/internal/component/remote/kubernetes/configmap/configmap.go +++ b/internal/component/remote/kubernetes/configmap/configmap.go @@ -1,9 +1,9 @@ package configmap import ( - "github.com/grafana/agent/internal/component" - "github.com/grafana/agent/internal/component/remote/kubernetes" - "github.com/grafana/agent/internal/featuregate" + "github.com/grafana/alloy/internal/component" + "github.com/grafana/alloy/internal/component/remote/kubernetes" + "github.com/grafana/alloy/internal/featuregate" ) func init() { diff --git a/internal/component/remote/kubernetes/kubernetes.go b/internal/component/remote/kubernetes/kubernetes.go index 251cc2d9ae..d0bb046b73 100644 --- a/internal/component/remote/kubernetes/kubernetes.go +++ b/internal/component/remote/kubernetes/kubernetes.go @@ -10,8 +10,8 @@ import ( "github.com/go-kit/log" - "github.com/grafana/agent/internal/component" - "github.com/grafana/agent/internal/component/common/kubernetes" + "github.com/grafana/alloy/internal/component" + "github.com/grafana/alloy/internal/component/common/kubernetes" "github.com/grafana/alloy/syntax/alloytypes" v1 "k8s.io/apimachinery/pkg/apis/meta/v1" diff --git a/internal/component/remote/kubernetes/secret/secret.go b/internal/component/remote/kubernetes/secret/secret.go index f7b3d14758..203889ce4f 100644 --- a/internal/component/remote/kubernetes/secret/secret.go +++ b/internal/component/remote/kubernetes/secret/secret.go @@ -1,9 +1,9 @@ package secret import ( - "github.com/grafana/agent/internal/component" - "github.com/grafana/agent/internal/component/remote/kubernetes" - "github.com/grafana/agent/internal/featuregate" + "github.com/grafana/alloy/internal/component" + "github.com/grafana/alloy/internal/component/remote/kubernetes" + "github.com/grafana/alloy/internal/featuregate" ) func init() { diff --git a/internal/component/remote/s3/s3.go b/internal/component/remote/s3/s3.go index d6ae76810c..cf1820b9b5 100644 --- a/internal/component/remote/s3/s3.go +++ b/internal/component/remote/s3/s3.go @@ -12,8 +12,8 @@ import ( "github.com/aws/aws-sdk-go-v2/aws" aws_config "github.com/aws/aws-sdk-go-v2/config" "github.com/aws/aws-sdk-go-v2/service/s3" - "github.com/grafana/agent/internal/component" - "github.com/grafana/agent/internal/featuregate" + "github.com/grafana/alloy/internal/component" + "github.com/grafana/alloy/internal/featuregate" "github.com/grafana/alloy/syntax/alloytypes" 
"github.com/prometheus/client_golang/prometheus" ) diff --git a/internal/component/remote/s3/s3_test.go b/internal/component/remote/s3/s3_test.go index 2672f8da3a..37d711bc4c 100644 --- a/internal/component/remote/s3/s3_test.go +++ b/internal/component/remote/s3/s3_test.go @@ -8,7 +8,7 @@ import ( "github.com/prometheus/client_golang/prometheus" - "github.com/grafana/agent/internal/component" + "github.com/grafana/alloy/internal/component" "github.com/stretchr/testify/require" ) diff --git a/internal/component/remote/vault/refresher.go b/internal/component/remote/vault/refresher.go index 479e7184eb..70338304a9 100644 --- a/internal/component/remote/vault/refresher.go +++ b/internal/component/remote/vault/refresher.go @@ -7,8 +7,8 @@ import ( "time" "github.com/go-kit/log" - "github.com/grafana/agent/internal/component" - "github.com/grafana/agent/internal/flow/logging/level" + "github.com/grafana/alloy/internal/component" + "github.com/grafana/alloy/internal/flow/logging/level" vault "github.com/hashicorp/vault/api" "github.com/prometheus/client_golang/prometheus" ) diff --git a/internal/component/remote/vault/vault.go b/internal/component/remote/vault/vault.go index cfe4fe85ee..0438d3d491 100644 --- a/internal/component/remote/vault/vault.go +++ b/internal/component/remote/vault/vault.go @@ -7,9 +7,9 @@ import ( "time" "github.com/go-kit/log" - "github.com/grafana/agent/internal/component" - "github.com/grafana/agent/internal/featuregate" - "github.com/grafana/agent/internal/flow/logging/level" + "github.com/grafana/alloy/internal/component" + "github.com/grafana/alloy/internal/featuregate" + "github.com/grafana/alloy/internal/flow/logging/level" "github.com/grafana/alloy/syntax/alloytypes" "github.com/oklog/run" diff --git a/internal/component/remote/vault/vault_test.go b/internal/component/remote/vault/vault_test.go index bf26c4d06f..4920c13dd4 100644 --- a/internal/component/remote/vault/vault_test.go +++ b/internal/component/remote/vault/vault_test.go @@ -12,8 +12,8 @@ import ( "github.com/docker/go-connections/nat" "github.com/go-kit/log" - "github.com/grafana/agent/internal/flow/componenttest" - "github.com/grafana/agent/internal/util" + "github.com/grafana/alloy/internal/flow/componenttest" + "github.com/grafana/alloy/internal/util" "github.com/grafana/alloy/syntax" "github.com/grafana/alloy/syntax/alloytypes" "github.com/stretchr/testify/require" diff --git a/internal/converter/converter.go b/internal/converter/converter.go index 3b9cf459bb..124a1c0513 100644 --- a/internal/converter/converter.go +++ b/internal/converter/converter.go @@ -5,10 +5,10 @@ package converter import ( "fmt" - "github.com/grafana/agent/internal/converter/diag" - "github.com/grafana/agent/internal/converter/internal/prometheusconvert" - "github.com/grafana/agent/internal/converter/internal/promtailconvert" - "github.com/grafana/agent/internal/converter/internal/staticconvert" + "github.com/grafana/alloy/internal/converter/diag" + "github.com/grafana/alloy/internal/converter/internal/prometheusconvert" + "github.com/grafana/alloy/internal/converter/internal/promtailconvert" + "github.com/grafana/alloy/internal/converter/internal/staticconvert" ) // Input represents the type of config file being fed into the converter. 
diff --git a/internal/converter/internal/common/convert_logs_receiver.go b/internal/converter/internal/common/convert_logs_receiver.go index b3110af4d8..ed0aacb8a2 100644 --- a/internal/converter/internal/common/convert_logs_receiver.go +++ b/internal/converter/internal/common/convert_logs_receiver.go @@ -1,7 +1,7 @@ package common import ( - "github.com/grafana/agent/internal/component/common/loki" + "github.com/grafana/alloy/internal/component/common/loki" "github.com/grafana/alloy/syntax" "github.com/grafana/alloy/syntax/token" "github.com/grafana/alloy/syntax/token/builder" diff --git a/internal/converter/internal/common/convert_targets.go b/internal/converter/internal/common/convert_targets.go index 15a7116deb..a10d3bfb48 100644 --- a/internal/converter/internal/common/convert_targets.go +++ b/internal/converter/internal/common/convert_targets.go @@ -1,7 +1,7 @@ package common import ( - "github.com/grafana/agent/internal/component/discovery" + "github.com/grafana/alloy/internal/component/discovery" "github.com/grafana/alloy/syntax" "github.com/grafana/alloy/syntax/token" "github.com/grafana/alloy/syntax/token/builder" diff --git a/internal/converter/internal/common/convert_targets_test.go b/internal/converter/internal/common/convert_targets_test.go index ecc847f2d8..cfcb20bae9 100644 --- a/internal/converter/internal/common/convert_targets_test.go +++ b/internal/converter/internal/common/convert_targets_test.go @@ -3,8 +3,8 @@ package common_test import ( "testing" - "github.com/grafana/agent/internal/component/discovery" - "github.com/grafana/agent/internal/converter/internal/common" + "github.com/grafana/alloy/internal/component/discovery" + "github.com/grafana/alloy/internal/converter/internal/common" "github.com/grafana/alloy/syntax/token/builder" "github.com/stretchr/testify/require" ) diff --git a/internal/converter/internal/common/http_client_config.go b/internal/converter/internal/common/http_client_config.go index f144f748fc..8c267d2a3a 100644 --- a/internal/converter/internal/common/http_client_config.go +++ b/internal/converter/internal/common/http_client_config.go @@ -3,8 +3,8 @@ package common import ( "reflect" - "github.com/grafana/agent/internal/component/common/config" - "github.com/grafana/agent/internal/converter/diag" + "github.com/grafana/alloy/internal/component/common/config" + "github.com/grafana/alloy/internal/converter/diag" "github.com/grafana/alloy/syntax/alloytypes" prom_config "github.com/prometheus/common/config" ) diff --git a/internal/converter/internal/common/river_utils.go b/internal/converter/internal/common/river_utils.go index 4ee518babc..81e1a38ca5 100644 --- a/internal/converter/internal/common/river_utils.go +++ b/internal/converter/internal/common/river_utils.go @@ -11,10 +11,10 @@ import ( "github.com/grafana/alloy/syntax/printer" "github.com/grafana/alloy/syntax/scanner" - "github.com/grafana/agent/internal/component" - flow_relabel "github.com/grafana/agent/internal/component/common/relabel" - "github.com/grafana/agent/internal/component/discovery" - "github.com/grafana/agent/internal/converter/diag" + "github.com/grafana/alloy/internal/component" + flow_relabel "github.com/grafana/alloy/internal/component/common/relabel" + "github.com/grafana/alloy/internal/component/discovery" + "github.com/grafana/alloy/internal/converter/diag" "github.com/grafana/alloy/syntax/token/builder" ) diff --git a/internal/converter/internal/common/river_utils_test.go b/internal/converter/internal/common/river_utils_test.go index 57ccd53fb8..509b03904f 100644 
--- a/internal/converter/internal/common/river_utils_test.go +++ b/internal/converter/internal/common/river_utils_test.go @@ -3,7 +3,7 @@ package common_test import ( "testing" - "github.com/grafana/agent/internal/converter/internal/common" + "github.com/grafana/alloy/internal/converter/internal/common" "github.com/grafana/alloy/syntax" "github.com/stretchr/testify/require" ) diff --git a/internal/converter/internal/common/validate.go b/internal/converter/internal/common/validate.go index 57e750f6c1..9921ee1dcd 100644 --- a/internal/converter/internal/common/validate.go +++ b/internal/converter/internal/common/validate.go @@ -5,7 +5,7 @@ import ( "reflect" "strings" - "github.com/grafana/agent/internal/converter/diag" + "github.com/grafana/alloy/internal/converter/diag" "github.com/grafana/alloy/syntax/token/builder" ) diff --git a/internal/converter/internal/common/validate_test.go b/internal/converter/internal/common/validate_test.go index 64b95e4de9..b693b6eed8 100644 --- a/internal/converter/internal/common/validate_test.go +++ b/internal/converter/internal/common/validate_test.go @@ -4,8 +4,8 @@ import ( "fmt" "testing" - "github.com/grafana/agent/internal/converter/diag" - "github.com/grafana/agent/internal/converter/internal/common" + "github.com/grafana/alloy/internal/converter/diag" + "github.com/grafana/alloy/internal/converter/internal/common" "github.com/stretchr/testify/require" ) diff --git a/internal/converter/internal/common/weaveworks_server.go b/internal/converter/internal/common/weaveworks_server.go index b0ed5f038a..6730e0c55f 100644 --- a/internal/converter/internal/common/weaveworks_server.go +++ b/internal/converter/internal/common/weaveworks_server.go @@ -3,8 +3,8 @@ package common import ( "github.com/grafana/dskit/server" - fnet "github.com/grafana/agent/internal/component/common/net" - "github.com/grafana/agent/internal/converter/diag" + fnet "github.com/grafana/alloy/internal/component/common/net" + "github.com/grafana/alloy/internal/converter/diag" ) func DefaultWeaveWorksServerCfg() server.Config { diff --git a/internal/converter/internal/otelcolconvert/converter.go b/internal/converter/internal/otelcolconvert/converter.go index 147a18ad65..128fb1683d 100644 --- a/internal/converter/internal/otelcolconvert/converter.go +++ b/internal/converter/internal/otelcolconvert/converter.go @@ -4,8 +4,8 @@ import ( "fmt" "strings" - "github.com/grafana/agent/internal/converter/diag" - "github.com/grafana/agent/internal/converter/internal/common" + "github.com/grafana/alloy/internal/converter/diag" + "github.com/grafana/alloy/internal/converter/internal/common" "github.com/grafana/alloy/syntax/token/builder" "go.opentelemetry.io/collector/component" "go.opentelemetry.io/collector/otelcol" diff --git a/internal/converter/internal/otelcolconvert/converter_attributesprocessor.go b/internal/converter/internal/otelcolconvert/converter_attributesprocessor.go index c9b9486b26..7c6b9371df 100644 --- a/internal/converter/internal/otelcolconvert/converter_attributesprocessor.go +++ b/internal/converter/internal/otelcolconvert/converter_attributesprocessor.go @@ -3,10 +3,10 @@ package otelcolconvert import ( "fmt" - "github.com/grafana/agent/internal/component/otelcol" - "github.com/grafana/agent/internal/component/otelcol/processor/attributes" - "github.com/grafana/agent/internal/converter/diag" - "github.com/grafana/agent/internal/converter/internal/common" + "github.com/grafana/alloy/internal/component/otelcol" + 
"github.com/grafana/alloy/internal/component/otelcol/processor/attributes" + "github.com/grafana/alloy/internal/converter/diag" + "github.com/grafana/alloy/internal/converter/internal/common" "github.com/open-telemetry/opentelemetry-collector-contrib/processor/attributesprocessor" "go.opentelemetry.io/collector/component" ) diff --git a/internal/converter/internal/otelcolconvert/converter_basicauthextension.go b/internal/converter/internal/otelcolconvert/converter_basicauthextension.go index 9bbd6c29d2..ce95cfb82c 100644 --- a/internal/converter/internal/otelcolconvert/converter_basicauthextension.go +++ b/internal/converter/internal/otelcolconvert/converter_basicauthextension.go @@ -3,9 +3,9 @@ package otelcolconvert import ( "fmt" - "github.com/grafana/agent/internal/component/otelcol/auth/basic" - "github.com/grafana/agent/internal/converter/diag" - "github.com/grafana/agent/internal/converter/internal/common" + "github.com/grafana/alloy/internal/component/otelcol/auth/basic" + "github.com/grafana/alloy/internal/converter/diag" + "github.com/grafana/alloy/internal/converter/internal/common" "github.com/grafana/alloy/syntax/alloytypes" "github.com/open-telemetry/opentelemetry-collector-contrib/extension/basicauthextension" "go.opentelemetry.io/collector/component" diff --git a/internal/converter/internal/otelcolconvert/converter_batchprocessor.go b/internal/converter/internal/otelcolconvert/converter_batchprocessor.go index df234aee2f..b6c24fb914 100644 --- a/internal/converter/internal/otelcolconvert/converter_batchprocessor.go +++ b/internal/converter/internal/otelcolconvert/converter_batchprocessor.go @@ -3,10 +3,10 @@ package otelcolconvert import ( "fmt" - "github.com/grafana/agent/internal/component/otelcol" - "github.com/grafana/agent/internal/component/otelcol/processor/batch" - "github.com/grafana/agent/internal/converter/diag" - "github.com/grafana/agent/internal/converter/internal/common" + "github.com/grafana/alloy/internal/component/otelcol" + "github.com/grafana/alloy/internal/component/otelcol/processor/batch" + "github.com/grafana/alloy/internal/converter/diag" + "github.com/grafana/alloy/internal/converter/internal/common" "go.opentelemetry.io/collector/component" "go.opentelemetry.io/collector/processor/batchprocessor" ) diff --git a/internal/converter/internal/otelcolconvert/converter_bearertokenauthextension.go b/internal/converter/internal/otelcolconvert/converter_bearertokenauthextension.go index 5b498b4fa5..1c68636636 100644 --- a/internal/converter/internal/otelcolconvert/converter_bearertokenauthextension.go +++ b/internal/converter/internal/otelcolconvert/converter_bearertokenauthextension.go @@ -4,10 +4,10 @@ import ( "fmt" "time" - "github.com/grafana/agent/internal/component/local/file" - "github.com/grafana/agent/internal/component/otelcol/auth/bearer" - "github.com/grafana/agent/internal/converter/diag" - "github.com/grafana/agent/internal/converter/internal/common" + "github.com/grafana/alloy/internal/component/local/file" + "github.com/grafana/alloy/internal/component/otelcol/auth/bearer" + "github.com/grafana/alloy/internal/converter/diag" + "github.com/grafana/alloy/internal/converter/internal/common" "github.com/grafana/alloy/syntax/alloytypes" "github.com/grafana/alloy/syntax/token/builder" "github.com/open-telemetry/opentelemetry-collector-contrib/extension/bearertokenauthextension" diff --git a/internal/converter/internal/otelcolconvert/converter_filterprocessor.go b/internal/converter/internal/otelcolconvert/converter_filterprocessor.go index 
71cb6749d3..06ea80f2c1 100644 --- a/internal/converter/internal/otelcolconvert/converter_filterprocessor.go +++ b/internal/converter/internal/otelcolconvert/converter_filterprocessor.go @@ -3,10 +3,10 @@ package otelcolconvert import ( "fmt" - "github.com/grafana/agent/internal/component/otelcol" - "github.com/grafana/agent/internal/component/otelcol/processor/filter" - "github.com/grafana/agent/internal/converter/diag" - "github.com/grafana/agent/internal/converter/internal/common" + "github.com/grafana/alloy/internal/component/otelcol" + "github.com/grafana/alloy/internal/component/otelcol/processor/filter" + "github.com/grafana/alloy/internal/converter/diag" + "github.com/grafana/alloy/internal/converter/internal/common" "github.com/open-telemetry/opentelemetry-collector-contrib/processor/filterprocessor" "go.opentelemetry.io/collector/component" ) diff --git a/internal/converter/internal/otelcolconvert/converter_headerssetterextension.go b/internal/converter/internal/otelcolconvert/converter_headerssetterextension.go index 658270ff1e..64e8bb4cce 100644 --- a/internal/converter/internal/otelcolconvert/converter_headerssetterextension.go +++ b/internal/converter/internal/otelcolconvert/converter_headerssetterextension.go @@ -3,9 +3,9 @@ package otelcolconvert import ( "fmt" - "github.com/grafana/agent/internal/component/otelcol/auth/headers" - "github.com/grafana/agent/internal/converter/diag" - "github.com/grafana/agent/internal/converter/internal/common" + "github.com/grafana/alloy/internal/component/otelcol/auth/headers" + "github.com/grafana/alloy/internal/converter/diag" + "github.com/grafana/alloy/internal/converter/internal/common" "github.com/grafana/alloy/syntax/alloytypes" "github.com/open-telemetry/opentelemetry-collector-contrib/extension/headerssetterextension" "go.opentelemetry.io/collector/component" diff --git a/internal/converter/internal/otelcolconvert/converter_helpers.go b/internal/converter/internal/otelcolconvert/converter_helpers.go index 7942e74254..2a33faf56c 100644 --- a/internal/converter/internal/otelcolconvert/converter_helpers.go +++ b/internal/converter/internal/otelcolconvert/converter_helpers.go @@ -4,7 +4,7 @@ import ( "fmt" "strings" - "github.com/grafana/agent/internal/component/otelcol" + "github.com/grafana/alloy/internal/component/otelcol" "github.com/grafana/alloy/syntax/token" "github.com/grafana/alloy/syntax/token/builder" "github.com/mitchellh/mapstructure" diff --git a/internal/converter/internal/otelcolconvert/converter_jaegerreceiver.go b/internal/converter/internal/otelcolconvert/converter_jaegerreceiver.go index c7cb41c5ea..9ee1a2723c 100644 --- a/internal/converter/internal/otelcolconvert/converter_jaegerreceiver.go +++ b/internal/converter/internal/otelcolconvert/converter_jaegerreceiver.go @@ -4,10 +4,10 @@ import ( "fmt" "github.com/alecthomas/units" - "github.com/grafana/agent/internal/component/otelcol" - "github.com/grafana/agent/internal/component/otelcol/receiver/jaeger" - "github.com/grafana/agent/internal/converter/diag" - "github.com/grafana/agent/internal/converter/internal/common" + "github.com/grafana/alloy/internal/component/otelcol" + "github.com/grafana/alloy/internal/component/otelcol/receiver/jaeger" + "github.com/grafana/alloy/internal/converter/diag" + "github.com/grafana/alloy/internal/converter/internal/common" "github.com/open-telemetry/opentelemetry-collector-contrib/receiver/jaegerreceiver" "go.opentelemetry.io/collector/component" "go.opentelemetry.io/collector/config/configgrpc" diff --git 
a/internal/converter/internal/otelcolconvert/converter_jaegerremotesamplingextension.go b/internal/converter/internal/otelcolconvert/converter_jaegerremotesamplingextension.go index 2076a7290d..51e66fa29e 100644 --- a/internal/converter/internal/otelcolconvert/converter_jaegerremotesamplingextension.go +++ b/internal/converter/internal/otelcolconvert/converter_jaegerremotesamplingextension.go @@ -3,9 +3,9 @@ package otelcolconvert import ( "fmt" - "github.com/grafana/agent/internal/component/otelcol/extension/jaeger_remote_sampling" - "github.com/grafana/agent/internal/converter/diag" - "github.com/grafana/agent/internal/converter/internal/common" + "github.com/grafana/alloy/internal/component/otelcol/extension/jaeger_remote_sampling" + "github.com/grafana/alloy/internal/converter/diag" + "github.com/grafana/alloy/internal/converter/internal/common" "github.com/open-telemetry/opentelemetry-collector-contrib/extension/jaegerremotesampling" "go.opentelemetry.io/collector/component" ) diff --git a/internal/converter/internal/otelcolconvert/converter_k8sattributesprocessor.go b/internal/converter/internal/otelcolconvert/converter_k8sattributesprocessor.go index abd109bbd9..bb0db334e9 100644 --- a/internal/converter/internal/otelcolconvert/converter_k8sattributesprocessor.go +++ b/internal/converter/internal/otelcolconvert/converter_k8sattributesprocessor.go @@ -3,10 +3,10 @@ package otelcolconvert import ( "fmt" - "github.com/grafana/agent/internal/component/otelcol" - "github.com/grafana/agent/internal/component/otelcol/processor/k8sattributes" - "github.com/grafana/agent/internal/converter/diag" - "github.com/grafana/agent/internal/converter/internal/common" + "github.com/grafana/alloy/internal/component/otelcol" + "github.com/grafana/alloy/internal/component/otelcol/processor/k8sattributes" + "github.com/grafana/alloy/internal/converter/diag" + "github.com/grafana/alloy/internal/converter/internal/common" "github.com/open-telemetry/opentelemetry-collector-contrib/processor/k8sattributesprocessor" "go.opentelemetry.io/collector/component" ) diff --git a/internal/converter/internal/otelcolconvert/converter_kafkareceiver.go b/internal/converter/internal/otelcolconvert/converter_kafkareceiver.go index 7236438908..dee9d99aef 100644 --- a/internal/converter/internal/otelcolconvert/converter_kafkareceiver.go +++ b/internal/converter/internal/otelcolconvert/converter_kafkareceiver.go @@ -4,10 +4,10 @@ import ( "fmt" "github.com/davecgh/go-spew/spew" - "github.com/grafana/agent/internal/component/otelcol" - "github.com/grafana/agent/internal/component/otelcol/receiver/kafka" - "github.com/grafana/agent/internal/converter/diag" - "github.com/grafana/agent/internal/converter/internal/common" + "github.com/grafana/alloy/internal/component/otelcol" + "github.com/grafana/alloy/internal/component/otelcol/receiver/kafka" + "github.com/grafana/alloy/internal/converter/diag" + "github.com/grafana/alloy/internal/converter/internal/common" "github.com/grafana/alloy/syntax/alloytypes" "github.com/mitchellh/mapstructure" "github.com/open-telemetry/opentelemetry-collector-contrib/exporter/kafkaexporter" diff --git a/internal/converter/internal/otelcolconvert/converter_loadbalancingexporter.go b/internal/converter/internal/otelcolconvert/converter_loadbalancingexporter.go index a01136e1d2..961c170832 100644 --- a/internal/converter/internal/otelcolconvert/converter_loadbalancingexporter.go +++ b/internal/converter/internal/otelcolconvert/converter_loadbalancingexporter.go @@ -5,11 +5,11 @@ import ( "strings" 
"github.com/alecthomas/units" - "github.com/grafana/agent/internal/component/otelcol" - "github.com/grafana/agent/internal/component/otelcol/auth" - "github.com/grafana/agent/internal/component/otelcol/exporter/loadbalancing" - "github.com/grafana/agent/internal/converter/diag" - "github.com/grafana/agent/internal/converter/internal/common" + "github.com/grafana/alloy/internal/component/otelcol" + "github.com/grafana/alloy/internal/component/otelcol/auth" + "github.com/grafana/alloy/internal/component/otelcol/exporter/loadbalancing" + "github.com/grafana/alloy/internal/converter/diag" + "github.com/grafana/alloy/internal/converter/internal/common" "github.com/open-telemetry/opentelemetry-collector-contrib/exporter/loadbalancingexporter" "go.opentelemetry.io/collector/component" ) diff --git a/internal/converter/internal/otelcolconvert/converter_loggingexporter.go b/internal/converter/internal/otelcolconvert/converter_loggingexporter.go index 76d85cd2f0..226fc9d28f 100644 --- a/internal/converter/internal/otelcolconvert/converter_loggingexporter.go +++ b/internal/converter/internal/otelcolconvert/converter_loggingexporter.go @@ -3,9 +3,9 @@ package otelcolconvert import ( "fmt" - "github.com/grafana/agent/internal/component/otelcol/exporter/logging" - "github.com/grafana/agent/internal/converter/diag" - "github.com/grafana/agent/internal/converter/internal/common" + "github.com/grafana/alloy/internal/component/otelcol/exporter/logging" + "github.com/grafana/alloy/internal/converter/diag" + "github.com/grafana/alloy/internal/converter/internal/common" "go.opentelemetry.io/collector/component" "go.opentelemetry.io/collector/exporter/loggingexporter" "go.uber.org/zap/zapcore" diff --git a/internal/converter/internal/otelcolconvert/converter_memorylimiterprocessor.go b/internal/converter/internal/otelcolconvert/converter_memorylimiterprocessor.go index f870cf4848..f9e39a7b08 100644 --- a/internal/converter/internal/otelcolconvert/converter_memorylimiterprocessor.go +++ b/internal/converter/internal/otelcolconvert/converter_memorylimiterprocessor.go @@ -4,10 +4,10 @@ import ( "fmt" "github.com/alecthomas/units" - "github.com/grafana/agent/internal/component/otelcol" - "github.com/grafana/agent/internal/component/otelcol/processor/memorylimiter" - "github.com/grafana/agent/internal/converter/diag" - "github.com/grafana/agent/internal/converter/internal/common" + "github.com/grafana/alloy/internal/component/otelcol" + "github.com/grafana/alloy/internal/component/otelcol/processor/memorylimiter" + "github.com/grafana/alloy/internal/converter/diag" + "github.com/grafana/alloy/internal/converter/internal/common" "go.opentelemetry.io/collector/component" "go.opentelemetry.io/collector/processor/memorylimiterprocessor" ) diff --git a/internal/converter/internal/otelcolconvert/converter_oauth2clientauthextension.go b/internal/converter/internal/otelcolconvert/converter_oauth2clientauthextension.go index 94eda15664..95f82414be 100644 --- a/internal/converter/internal/otelcolconvert/converter_oauth2clientauthextension.go +++ b/internal/converter/internal/otelcolconvert/converter_oauth2clientauthextension.go @@ -3,9 +3,9 @@ package otelcolconvert import ( "fmt" - "github.com/grafana/agent/internal/component/otelcol/auth/oauth2" - "github.com/grafana/agent/internal/converter/diag" - "github.com/grafana/agent/internal/converter/internal/common" + "github.com/grafana/alloy/internal/component/otelcol/auth/oauth2" + "github.com/grafana/alloy/internal/converter/diag" + 
"github.com/grafana/alloy/internal/converter/internal/common" "github.com/grafana/alloy/syntax/alloytypes" "github.com/open-telemetry/opentelemetry-collector-contrib/extension/oauth2clientauthextension" "go.opentelemetry.io/collector/component" diff --git a/internal/converter/internal/otelcolconvert/converter_opencensusreceiver.go b/internal/converter/internal/otelcolconvert/converter_opencensusreceiver.go index 78ae892ce4..4745582e30 100644 --- a/internal/converter/internal/otelcolconvert/converter_opencensusreceiver.go +++ b/internal/converter/internal/otelcolconvert/converter_opencensusreceiver.go @@ -3,10 +3,10 @@ package otelcolconvert import ( "fmt" - "github.com/grafana/agent/internal/component/otelcol" - "github.com/grafana/agent/internal/component/otelcol/receiver/opencensus" - "github.com/grafana/agent/internal/converter/diag" - "github.com/grafana/agent/internal/converter/internal/common" + "github.com/grafana/alloy/internal/component/otelcol" + "github.com/grafana/alloy/internal/component/otelcol/receiver/opencensus" + "github.com/grafana/alloy/internal/converter/diag" + "github.com/grafana/alloy/internal/converter/internal/common" "github.com/open-telemetry/opentelemetry-collector-contrib/receiver/opencensusreceiver" "go.opentelemetry.io/collector/component" ) diff --git a/internal/converter/internal/otelcolconvert/converter_otlpexporter.go b/internal/converter/internal/otelcolconvert/converter_otlpexporter.go index 230478144c..5584ed8686 100644 --- a/internal/converter/internal/otelcolconvert/converter_otlpexporter.go +++ b/internal/converter/internal/otelcolconvert/converter_otlpexporter.go @@ -5,11 +5,11 @@ import ( "strings" "github.com/alecthomas/units" - "github.com/grafana/agent/internal/component/otelcol" - "github.com/grafana/agent/internal/component/otelcol/auth" - "github.com/grafana/agent/internal/component/otelcol/exporter/otlp" - "github.com/grafana/agent/internal/converter/diag" - "github.com/grafana/agent/internal/converter/internal/common" + "github.com/grafana/alloy/internal/component/otelcol" + "github.com/grafana/alloy/internal/component/otelcol/auth" + "github.com/grafana/alloy/internal/component/otelcol/exporter/otlp" + "github.com/grafana/alloy/internal/converter/diag" + "github.com/grafana/alloy/internal/converter/internal/common" "go.opentelemetry.io/collector/component" "go.opentelemetry.io/collector/config/configgrpc" "go.opentelemetry.io/collector/config/configopaque" diff --git a/internal/converter/internal/otelcolconvert/converter_otlphttpexporter.go b/internal/converter/internal/otelcolconvert/converter_otlphttpexporter.go index d978e89c6a..abc363514e 100644 --- a/internal/converter/internal/otelcolconvert/converter_otlphttpexporter.go +++ b/internal/converter/internal/otelcolconvert/converter_otlphttpexporter.go @@ -6,11 +6,11 @@ import ( "time" "github.com/alecthomas/units" - "github.com/grafana/agent/internal/component/otelcol" - "github.com/grafana/agent/internal/component/otelcol/auth" - "github.com/grafana/agent/internal/component/otelcol/exporter/otlphttp" - "github.com/grafana/agent/internal/converter/diag" - "github.com/grafana/agent/internal/converter/internal/common" + "github.com/grafana/alloy/internal/component/otelcol" + "github.com/grafana/alloy/internal/component/otelcol/auth" + "github.com/grafana/alloy/internal/component/otelcol/exporter/otlphttp" + "github.com/grafana/alloy/internal/converter/diag" + "github.com/grafana/alloy/internal/converter/internal/common" "go.opentelemetry.io/collector/component" 
"go.opentelemetry.io/collector/config/confighttp" "go.opentelemetry.io/collector/exporter/otlphttpexporter" diff --git a/internal/converter/internal/otelcolconvert/converter_otlpreceiver.go b/internal/converter/internal/otelcolconvert/converter_otlpreceiver.go index 6e5202aeca..7e23bab1a9 100644 --- a/internal/converter/internal/otelcolconvert/converter_otlpreceiver.go +++ b/internal/converter/internal/otelcolconvert/converter_otlpreceiver.go @@ -4,10 +4,10 @@ import ( "fmt" "github.com/alecthomas/units" - "github.com/grafana/agent/internal/component/otelcol" - "github.com/grafana/agent/internal/component/otelcol/receiver/otlp" - "github.com/grafana/agent/internal/converter/diag" - "github.com/grafana/agent/internal/converter/internal/common" + "github.com/grafana/alloy/internal/component/otelcol" + "github.com/grafana/alloy/internal/component/otelcol/receiver/otlp" + "github.com/grafana/alloy/internal/converter/diag" + "github.com/grafana/alloy/internal/converter/internal/common" "github.com/grafana/alloy/syntax/alloytypes" "go.opentelemetry.io/collector/component" "go.opentelemetry.io/collector/config/configgrpc" diff --git a/internal/converter/internal/otelcolconvert/converter_probabilisticsamplerprocessor.go b/internal/converter/internal/otelcolconvert/converter_probabilisticsamplerprocessor.go index de800410ae..f7ef7d482f 100644 --- a/internal/converter/internal/otelcolconvert/converter_probabilisticsamplerprocessor.go +++ b/internal/converter/internal/otelcolconvert/converter_probabilisticsamplerprocessor.go @@ -3,10 +3,10 @@ package otelcolconvert import ( "fmt" - "github.com/grafana/agent/internal/component/otelcol" - "github.com/grafana/agent/internal/component/otelcol/processor/probabilistic_sampler" - "github.com/grafana/agent/internal/converter/diag" - "github.com/grafana/agent/internal/converter/internal/common" + "github.com/grafana/alloy/internal/component/otelcol" + "github.com/grafana/alloy/internal/component/otelcol/processor/probabilistic_sampler" + "github.com/grafana/alloy/internal/converter/diag" + "github.com/grafana/alloy/internal/converter/internal/common" "github.com/open-telemetry/opentelemetry-collector-contrib/processor/probabilisticsamplerprocessor" "go.opentelemetry.io/collector/component" ) diff --git a/internal/converter/internal/otelcolconvert/converter_spanmetricsconnector.go b/internal/converter/internal/otelcolconvert/converter_spanmetricsconnector.go index 5b26f953e2..d72ce66d1b 100644 --- a/internal/converter/internal/otelcolconvert/converter_spanmetricsconnector.go +++ b/internal/converter/internal/otelcolconvert/converter_spanmetricsconnector.go @@ -4,10 +4,10 @@ import ( "fmt" "time" - "github.com/grafana/agent/internal/component/otelcol" - "github.com/grafana/agent/internal/component/otelcol/connector/spanmetrics" - "github.com/grafana/agent/internal/converter/diag" - "github.com/grafana/agent/internal/converter/internal/common" + "github.com/grafana/alloy/internal/component/otelcol" + "github.com/grafana/alloy/internal/component/otelcol/connector/spanmetrics" + "github.com/grafana/alloy/internal/converter/diag" + "github.com/grafana/alloy/internal/converter/internal/common" "github.com/open-telemetry/opentelemetry-collector-contrib/connector/spanmetricsconnector" "go.opentelemetry.io/collector/component" ) diff --git a/internal/converter/internal/otelcolconvert/converter_spanprocessor.go b/internal/converter/internal/otelcolconvert/converter_spanprocessor.go index 6604fc9601..fe6f887699 100644 --- 
a/internal/converter/internal/otelcolconvert/converter_spanprocessor.go +++ b/internal/converter/internal/otelcolconvert/converter_spanprocessor.go @@ -3,10 +3,10 @@ package otelcolconvert import ( "fmt" - "github.com/grafana/agent/internal/component/otelcol" - "github.com/grafana/agent/internal/component/otelcol/processor/span" - "github.com/grafana/agent/internal/converter/diag" - "github.com/grafana/agent/internal/converter/internal/common" + "github.com/grafana/alloy/internal/component/otelcol" + "github.com/grafana/alloy/internal/component/otelcol/processor/span" + "github.com/grafana/alloy/internal/converter/diag" + "github.com/grafana/alloy/internal/converter/internal/common" "github.com/open-telemetry/opentelemetry-collector-contrib/processor/spanprocessor" "go.opentelemetry.io/collector/component" "go.opentelemetry.io/collector/pdata/plog" diff --git a/internal/converter/internal/otelcolconvert/converter_tailsamplingprocessor.go b/internal/converter/internal/otelcolconvert/converter_tailsamplingprocessor.go index 1bd2828c5f..b292d00d66 100644 --- a/internal/converter/internal/otelcolconvert/converter_tailsamplingprocessor.go +++ b/internal/converter/internal/otelcolconvert/converter_tailsamplingprocessor.go @@ -4,10 +4,10 @@ import ( "fmt" "github.com/davecgh/go-spew/spew" - "github.com/grafana/agent/internal/component/otelcol" - "github.com/grafana/agent/internal/component/otelcol/processor/tail_sampling" - "github.com/grafana/agent/internal/converter/diag" - "github.com/grafana/agent/internal/converter/internal/common" + "github.com/grafana/alloy/internal/component/otelcol" + "github.com/grafana/alloy/internal/component/otelcol/processor/tail_sampling" + "github.com/grafana/alloy/internal/converter/diag" + "github.com/grafana/alloy/internal/converter/internal/common" "github.com/open-telemetry/opentelemetry-collector-contrib/processor/tailsamplingprocessor" "go.opentelemetry.io/collector/component" ) diff --git a/internal/converter/internal/otelcolconvert/converter_transformprocessor.go b/internal/converter/internal/otelcolconvert/converter_transformprocessor.go index 694046bb21..dec37c3c1d 100644 --- a/internal/converter/internal/otelcolconvert/converter_transformprocessor.go +++ b/internal/converter/internal/otelcolconvert/converter_transformprocessor.go @@ -3,10 +3,10 @@ package otelcolconvert import ( "fmt" - "github.com/grafana/agent/internal/component/otelcol" - "github.com/grafana/agent/internal/component/otelcol/processor/transform" - "github.com/grafana/agent/internal/converter/diag" - "github.com/grafana/agent/internal/converter/internal/common" + "github.com/grafana/alloy/internal/component/otelcol" + "github.com/grafana/alloy/internal/component/otelcol/processor/transform" + "github.com/grafana/alloy/internal/converter/diag" + "github.com/grafana/alloy/internal/converter/internal/common" "github.com/open-telemetry/opentelemetry-collector-contrib/processor/transformprocessor" "go.opentelemetry.io/collector/component" ) diff --git a/internal/converter/internal/otelcolconvert/converter_zipkinreceiver.go b/internal/converter/internal/otelcolconvert/converter_zipkinreceiver.go index 66c0f7d52e..28ca8126c9 100644 --- a/internal/converter/internal/otelcolconvert/converter_zipkinreceiver.go +++ b/internal/converter/internal/otelcolconvert/converter_zipkinreceiver.go @@ -3,10 +3,10 @@ package otelcolconvert import ( "fmt" - "github.com/grafana/agent/internal/component/otelcol" - "github.com/grafana/agent/internal/component/otelcol/receiver/zipkin" - 
"github.com/grafana/agent/internal/converter/diag" - "github.com/grafana/agent/internal/converter/internal/common" + "github.com/grafana/alloy/internal/component/otelcol" + "github.com/grafana/alloy/internal/component/otelcol/receiver/zipkin" + "github.com/grafana/alloy/internal/converter/diag" + "github.com/grafana/alloy/internal/converter/internal/common" "github.com/open-telemetry/opentelemetry-collector-contrib/receiver/zipkinreceiver" "go.opentelemetry.io/collector/component" ) diff --git a/internal/converter/internal/otelcolconvert/otelcolconvert.go b/internal/converter/internal/otelcolconvert/otelcolconvert.go index 97ae03a387..d6b04c0e9d 100644 --- a/internal/converter/internal/otelcolconvert/otelcolconvert.go +++ b/internal/converter/internal/otelcolconvert/otelcolconvert.go @@ -6,8 +6,8 @@ import ( "fmt" "strings" - "github.com/grafana/agent/internal/converter/diag" - "github.com/grafana/agent/internal/converter/internal/common" + "github.com/grafana/alloy/internal/converter/diag" + "github.com/grafana/alloy/internal/converter/internal/common" "github.com/grafana/alloy/syntax/token/builder" "go.opentelemetry.io/collector/component" "go.opentelemetry.io/collector/confmap" diff --git a/internal/converter/internal/otelcolconvert/otelcolconvert_test.go b/internal/converter/internal/otelcolconvert/otelcolconvert_test.go index 570a5dc08a..fa5255cf9a 100644 --- a/internal/converter/internal/otelcolconvert/otelcolconvert_test.go +++ b/internal/converter/internal/otelcolconvert/otelcolconvert_test.go @@ -3,8 +3,8 @@ package otelcolconvert_test import ( "testing" - "github.com/grafana/agent/internal/converter/internal/otelcolconvert" - "github.com/grafana/agent/internal/converter/internal/test_common" + "github.com/grafana/alloy/internal/converter/internal/otelcolconvert" + "github.com/grafana/alloy/internal/converter/internal/test_common" ) func TestConvert(t *testing.T) { diff --git a/internal/converter/internal/prometheusconvert/build/prometheus_blocks.go b/internal/converter/internal/prometheusconvert/build/prometheus_blocks.go index 69e3493186..176ac89ef7 100644 --- a/internal/converter/internal/prometheusconvert/build/prometheus_blocks.go +++ b/internal/converter/internal/prometheusconvert/build/prometheus_blocks.go @@ -4,7 +4,7 @@ import ( "fmt" "strings" - "github.com/grafana/agent/internal/converter/diag" + "github.com/grafana/alloy/internal/converter/diag" "github.com/grafana/alloy/syntax/token/builder" ) diff --git a/internal/converter/internal/prometheusconvert/component/azure.go b/internal/converter/internal/prometheusconvert/component/azure.go index 5f98bbee56..47505a5e09 100644 --- a/internal/converter/internal/prometheusconvert/component/azure.go +++ b/internal/converter/internal/prometheusconvert/component/azure.go @@ -3,11 +3,11 @@ package component import ( "time" - "github.com/grafana/agent/internal/component/discovery" - "github.com/grafana/agent/internal/component/discovery/azure" - "github.com/grafana/agent/internal/converter/diag" - "github.com/grafana/agent/internal/converter/internal/common" - "github.com/grafana/agent/internal/converter/internal/prometheusconvert/build" + "github.com/grafana/alloy/internal/component/discovery" + "github.com/grafana/alloy/internal/component/discovery/azure" + "github.com/grafana/alloy/internal/converter/diag" + "github.com/grafana/alloy/internal/converter/internal/common" + "github.com/grafana/alloy/internal/converter/internal/prometheusconvert/build" "github.com/grafana/alloy/syntax/alloytypes" prom_azure 
"github.com/prometheus/prometheus/discovery/azure" ) diff --git a/internal/converter/internal/prometheusconvert/component/consul.go b/internal/converter/internal/prometheusconvert/component/consul.go index 6e6bf672e8..39ac98321c 100644 --- a/internal/converter/internal/prometheusconvert/component/consul.go +++ b/internal/converter/internal/prometheusconvert/component/consul.go @@ -3,11 +3,11 @@ package component import ( "time" - "github.com/grafana/agent/internal/component/discovery" - "github.com/grafana/agent/internal/component/discovery/consul" - "github.com/grafana/agent/internal/converter/diag" - "github.com/grafana/agent/internal/converter/internal/common" - "github.com/grafana/agent/internal/converter/internal/prometheusconvert/build" + "github.com/grafana/alloy/internal/component/discovery" + "github.com/grafana/alloy/internal/component/discovery/consul" + "github.com/grafana/alloy/internal/converter/diag" + "github.com/grafana/alloy/internal/converter/internal/common" + "github.com/grafana/alloy/internal/converter/internal/prometheusconvert/build" "github.com/grafana/alloy/syntax/alloytypes" prom_consul "github.com/prometheus/prometheus/discovery/consul" ) diff --git a/internal/converter/internal/prometheusconvert/component/digitalocean.go b/internal/converter/internal/prometheusconvert/component/digitalocean.go index 2d5d653d97..6ab669e51c 100644 --- a/internal/converter/internal/prometheusconvert/component/digitalocean.go +++ b/internal/converter/internal/prometheusconvert/component/digitalocean.go @@ -3,11 +3,11 @@ package component import ( "time" - "github.com/grafana/agent/internal/component/discovery" - "github.com/grafana/agent/internal/component/discovery/digitalocean" - "github.com/grafana/agent/internal/converter/diag" - "github.com/grafana/agent/internal/converter/internal/common" - "github.com/grafana/agent/internal/converter/internal/prometheusconvert/build" + "github.com/grafana/alloy/internal/component/discovery" + "github.com/grafana/alloy/internal/component/discovery/digitalocean" + "github.com/grafana/alloy/internal/converter/diag" + "github.com/grafana/alloy/internal/converter/internal/common" + "github.com/grafana/alloy/internal/converter/internal/prometheusconvert/build" "github.com/grafana/alloy/syntax/alloytypes" prom_config "github.com/prometheus/common/config" prom_digitalocean "github.com/prometheus/prometheus/discovery/digitalocean" diff --git a/internal/converter/internal/prometheusconvert/component/dns.go b/internal/converter/internal/prometheusconvert/component/dns.go index 995d57d10f..5e487c48f1 100644 --- a/internal/converter/internal/prometheusconvert/component/dns.go +++ b/internal/converter/internal/prometheusconvert/component/dns.go @@ -3,11 +3,11 @@ package component import ( "time" - "github.com/grafana/agent/internal/component/discovery" - "github.com/grafana/agent/internal/component/discovery/dns" - "github.com/grafana/agent/internal/converter/diag" - "github.com/grafana/agent/internal/converter/internal/common" - "github.com/grafana/agent/internal/converter/internal/prometheusconvert/build" + "github.com/grafana/alloy/internal/component/discovery" + "github.com/grafana/alloy/internal/component/discovery/dns" + "github.com/grafana/alloy/internal/converter/diag" + "github.com/grafana/alloy/internal/converter/internal/common" + "github.com/grafana/alloy/internal/converter/internal/prometheusconvert/build" prom_dns "github.com/prometheus/prometheus/discovery/dns" ) diff --git a/internal/converter/internal/prometheusconvert/component/docker.go 
b/internal/converter/internal/prometheusconvert/component/docker.go index 8523d39484..2fb0be2894 100644 --- a/internal/converter/internal/prometheusconvert/component/docker.go +++ b/internal/converter/internal/prometheusconvert/component/docker.go @@ -3,11 +3,11 @@ package component import ( "time" - "github.com/grafana/agent/internal/component/discovery" - "github.com/grafana/agent/internal/component/discovery/docker" - "github.com/grafana/agent/internal/converter/diag" - "github.com/grafana/agent/internal/converter/internal/common" - "github.com/grafana/agent/internal/converter/internal/prometheusconvert/build" + "github.com/grafana/alloy/internal/component/discovery" + "github.com/grafana/alloy/internal/component/discovery/docker" + "github.com/grafana/alloy/internal/converter/diag" + "github.com/grafana/alloy/internal/converter/internal/common" + "github.com/grafana/alloy/internal/converter/internal/prometheusconvert/build" prom_moby "github.com/prometheus/prometheus/discovery/moby" ) diff --git a/internal/converter/internal/prometheusconvert/component/dockerswarm.go b/internal/converter/internal/prometheusconvert/component/dockerswarm.go index 46d4ce9af3..852f720af1 100644 --- a/internal/converter/internal/prometheusconvert/component/dockerswarm.go +++ b/internal/converter/internal/prometheusconvert/component/dockerswarm.go @@ -3,11 +3,11 @@ package component import ( "time" - "github.com/grafana/agent/internal/component/discovery" - "github.com/grafana/agent/internal/component/discovery/dockerswarm" - "github.com/grafana/agent/internal/converter/diag" - "github.com/grafana/agent/internal/converter/internal/common" - "github.com/grafana/agent/internal/converter/internal/prometheusconvert/build" + "github.com/grafana/alloy/internal/component/discovery" + "github.com/grafana/alloy/internal/component/discovery/dockerswarm" + "github.com/grafana/alloy/internal/converter/diag" + "github.com/grafana/alloy/internal/converter/internal/common" + "github.com/grafana/alloy/internal/converter/internal/prometheusconvert/build" prom_moby "github.com/prometheus/prometheus/discovery/moby" ) diff --git a/internal/converter/internal/prometheusconvert/component/ec2.go b/internal/converter/internal/prometheusconvert/component/ec2.go index 46bf78ff11..7afa8996d5 100644 --- a/internal/converter/internal/prometheusconvert/component/ec2.go +++ b/internal/converter/internal/prometheusconvert/component/ec2.go @@ -3,11 +3,11 @@ package component import ( "time" - "github.com/grafana/agent/internal/component/discovery" - "github.com/grafana/agent/internal/component/discovery/aws" - "github.com/grafana/agent/internal/converter/diag" - "github.com/grafana/agent/internal/converter/internal/common" - "github.com/grafana/agent/internal/converter/internal/prometheusconvert/build" + "github.com/grafana/alloy/internal/component/discovery" + "github.com/grafana/alloy/internal/component/discovery/aws" + "github.com/grafana/alloy/internal/converter/diag" + "github.com/grafana/alloy/internal/converter/internal/common" + "github.com/grafana/alloy/internal/converter/internal/prometheusconvert/build" "github.com/grafana/alloy/syntax/alloytypes" prom_aws "github.com/prometheus/prometheus/discovery/aws" ) diff --git a/internal/converter/internal/prometheusconvert/component/file.go b/internal/converter/internal/prometheusconvert/component/file.go index 0ff6f32fc6..396b016212 100644 --- a/internal/converter/internal/prometheusconvert/component/file.go +++ b/internal/converter/internal/prometheusconvert/component/file.go @@ -3,11 
+3,11 @@ package component import ( "time" - "github.com/grafana/agent/internal/component/discovery" - "github.com/grafana/agent/internal/component/discovery/file" - "github.com/grafana/agent/internal/converter/diag" - "github.com/grafana/agent/internal/converter/internal/common" - "github.com/grafana/agent/internal/converter/internal/prometheusconvert/build" + "github.com/grafana/alloy/internal/component/discovery" + "github.com/grafana/alloy/internal/component/discovery/file" + "github.com/grafana/alloy/internal/converter/diag" + "github.com/grafana/alloy/internal/converter/internal/common" + "github.com/grafana/alloy/internal/converter/internal/prometheusconvert/build" prom_file "github.com/prometheus/prometheus/discovery/file" ) diff --git a/internal/converter/internal/prometheusconvert/component/gce.go b/internal/converter/internal/prometheusconvert/component/gce.go index 755dab6995..7cf8cd6b23 100644 --- a/internal/converter/internal/prometheusconvert/component/gce.go +++ b/internal/converter/internal/prometheusconvert/component/gce.go @@ -3,11 +3,11 @@ package component import ( "time" - "github.com/grafana/agent/internal/component/discovery" - "github.com/grafana/agent/internal/component/discovery/gce" - "github.com/grafana/agent/internal/converter/diag" - "github.com/grafana/agent/internal/converter/internal/common" - "github.com/grafana/agent/internal/converter/internal/prometheusconvert/build" + "github.com/grafana/alloy/internal/component/discovery" + "github.com/grafana/alloy/internal/component/discovery/gce" + "github.com/grafana/alloy/internal/converter/diag" + "github.com/grafana/alloy/internal/converter/internal/common" + "github.com/grafana/alloy/internal/converter/internal/prometheusconvert/build" prom_gce "github.com/prometheus/prometheus/discovery/gce" ) diff --git a/internal/converter/internal/prometheusconvert/component/http.go b/internal/converter/internal/prometheusconvert/component/http.go index 5a39a97657..922cd90019 100644 --- a/internal/converter/internal/prometheusconvert/component/http.go +++ b/internal/converter/internal/prometheusconvert/component/http.go @@ -4,12 +4,12 @@ import ( "net/url" "time" - "github.com/grafana/agent/internal/component/common/config" - "github.com/grafana/agent/internal/component/discovery" - "github.com/grafana/agent/internal/component/discovery/http" - "github.com/grafana/agent/internal/converter/diag" - "github.com/grafana/agent/internal/converter/internal/common" - "github.com/grafana/agent/internal/converter/internal/prometheusconvert/build" + "github.com/grafana/alloy/internal/component/common/config" + "github.com/grafana/alloy/internal/component/discovery" + "github.com/grafana/alloy/internal/component/discovery/http" + "github.com/grafana/alloy/internal/converter/diag" + "github.com/grafana/alloy/internal/converter/internal/common" + "github.com/grafana/alloy/internal/converter/internal/prometheusconvert/build" prom_http "github.com/prometheus/prometheus/discovery/http" ) diff --git a/internal/converter/internal/prometheusconvert/component/ionos.go b/internal/converter/internal/prometheusconvert/component/ionos.go index 76a34c587a..aa236dca4a 100644 --- a/internal/converter/internal/prometheusconvert/component/ionos.go +++ b/internal/converter/internal/prometheusconvert/component/ionos.go @@ -3,11 +3,11 @@ package component import ( "time" - "github.com/grafana/agent/internal/component/discovery" - "github.com/grafana/agent/internal/component/discovery/ionos" - "github.com/grafana/agent/internal/converter/diag" - 
"github.com/grafana/agent/internal/converter/internal/common" - "github.com/grafana/agent/internal/converter/internal/prometheusconvert/build" + "github.com/grafana/alloy/internal/component/discovery" + "github.com/grafana/alloy/internal/component/discovery/ionos" + "github.com/grafana/alloy/internal/converter/diag" + "github.com/grafana/alloy/internal/converter/internal/common" + "github.com/grafana/alloy/internal/converter/internal/prometheusconvert/build" prom_ionos "github.com/prometheus/prometheus/discovery/ionos" ) diff --git a/internal/converter/internal/prometheusconvert/component/kubernetes.go b/internal/converter/internal/prometheusconvert/component/kubernetes.go index bd3595dc6e..06e9a5b353 100644 --- a/internal/converter/internal/prometheusconvert/component/kubernetes.go +++ b/internal/converter/internal/prometheusconvert/component/kubernetes.go @@ -1,12 +1,12 @@ package component import ( - "github.com/grafana/agent/internal/component/common/config" - "github.com/grafana/agent/internal/component/discovery" - "github.com/grafana/agent/internal/component/discovery/kubernetes" - "github.com/grafana/agent/internal/converter/diag" - "github.com/grafana/agent/internal/converter/internal/common" - "github.com/grafana/agent/internal/converter/internal/prometheusconvert/build" + "github.com/grafana/alloy/internal/component/common/config" + "github.com/grafana/alloy/internal/component/discovery" + "github.com/grafana/alloy/internal/component/discovery/kubernetes" + "github.com/grafana/alloy/internal/converter/diag" + "github.com/grafana/alloy/internal/converter/internal/common" + "github.com/grafana/alloy/internal/converter/internal/prometheusconvert/build" prom_kubernetes "github.com/prometheus/prometheus/discovery/kubernetes" ) diff --git a/internal/converter/internal/prometheusconvert/component/kuma.go b/internal/converter/internal/prometheusconvert/component/kuma.go index 6de9b6a26e..a4ea2cfa6f 100644 --- a/internal/converter/internal/prometheusconvert/component/kuma.go +++ b/internal/converter/internal/prometheusconvert/component/kuma.go @@ -3,11 +3,11 @@ package component import ( "time" - "github.com/grafana/agent/internal/component/discovery" - "github.com/grafana/agent/internal/component/discovery/kuma" - "github.com/grafana/agent/internal/converter/diag" - "github.com/grafana/agent/internal/converter/internal/common" - "github.com/grafana/agent/internal/converter/internal/prometheusconvert/build" + "github.com/grafana/alloy/internal/component/discovery" + "github.com/grafana/alloy/internal/component/discovery/kuma" + "github.com/grafana/alloy/internal/converter/diag" + "github.com/grafana/alloy/internal/converter/internal/common" + "github.com/grafana/alloy/internal/converter/internal/prometheusconvert/build" prom_kuma "github.com/prometheus/prometheus/discovery/xds" ) diff --git a/internal/converter/internal/prometheusconvert/component/lightsail.go b/internal/converter/internal/prometheusconvert/component/lightsail.go index 765d504ad8..a0fcfd1f7c 100644 --- a/internal/converter/internal/prometheusconvert/component/lightsail.go +++ b/internal/converter/internal/prometheusconvert/component/lightsail.go @@ -3,11 +3,11 @@ package component import ( "time" - "github.com/grafana/agent/internal/component/discovery" - "github.com/grafana/agent/internal/component/discovery/aws" - "github.com/grafana/agent/internal/converter/diag" - "github.com/grafana/agent/internal/converter/internal/common" - "github.com/grafana/agent/internal/converter/internal/prometheusconvert/build" + 
"github.com/grafana/alloy/internal/component/discovery" + "github.com/grafana/alloy/internal/component/discovery/aws" + "github.com/grafana/alloy/internal/converter/diag" + "github.com/grafana/alloy/internal/converter/internal/common" + "github.com/grafana/alloy/internal/converter/internal/prometheusconvert/build" "github.com/grafana/alloy/syntax/alloytypes" prom_aws "github.com/prometheus/prometheus/discovery/aws" ) diff --git a/internal/converter/internal/prometheusconvert/component/linode.go b/internal/converter/internal/prometheusconvert/component/linode.go index 42d00efc03..074d058e70 100644 --- a/internal/converter/internal/prometheusconvert/component/linode.go +++ b/internal/converter/internal/prometheusconvert/component/linode.go @@ -3,11 +3,11 @@ package component import ( "time" - "github.com/grafana/agent/internal/component/discovery" - "github.com/grafana/agent/internal/component/discovery/linode" - "github.com/grafana/agent/internal/converter/diag" - "github.com/grafana/agent/internal/converter/internal/common" - "github.com/grafana/agent/internal/converter/internal/prometheusconvert/build" + "github.com/grafana/alloy/internal/component/discovery" + "github.com/grafana/alloy/internal/component/discovery/linode" + "github.com/grafana/alloy/internal/converter/diag" + "github.com/grafana/alloy/internal/converter/internal/common" + "github.com/grafana/alloy/internal/converter/internal/prometheusconvert/build" prom_linode "github.com/prometheus/prometheus/discovery/linode" ) diff --git a/internal/converter/internal/prometheusconvert/component/marathon.go b/internal/converter/internal/prometheusconvert/component/marathon.go index 21cb6be3ef..fc4d116234 100644 --- a/internal/converter/internal/prometheusconvert/component/marathon.go +++ b/internal/converter/internal/prometheusconvert/component/marathon.go @@ -3,11 +3,11 @@ package component import ( "time" - "github.com/grafana/agent/internal/component/discovery" - "github.com/grafana/agent/internal/component/discovery/marathon" - "github.com/grafana/agent/internal/converter/diag" - "github.com/grafana/agent/internal/converter/internal/common" - "github.com/grafana/agent/internal/converter/internal/prometheusconvert/build" + "github.com/grafana/alloy/internal/component/discovery" + "github.com/grafana/alloy/internal/component/discovery/marathon" + "github.com/grafana/alloy/internal/converter/diag" + "github.com/grafana/alloy/internal/converter/internal/common" + "github.com/grafana/alloy/internal/converter/internal/prometheusconvert/build" "github.com/grafana/alloy/syntax/alloytypes" prom_marathon "github.com/prometheus/prometheus/discovery/marathon" ) diff --git a/internal/converter/internal/prometheusconvert/component/nerve.go b/internal/converter/internal/prometheusconvert/component/nerve.go index 499228a3ad..5f1bc9d4b7 100644 --- a/internal/converter/internal/prometheusconvert/component/nerve.go +++ b/internal/converter/internal/prometheusconvert/component/nerve.go @@ -3,11 +3,11 @@ package component import ( "time" - "github.com/grafana/agent/internal/component/discovery" - "github.com/grafana/agent/internal/component/discovery/nerve" - "github.com/grafana/agent/internal/converter/diag" - "github.com/grafana/agent/internal/converter/internal/common" - "github.com/grafana/agent/internal/converter/internal/prometheusconvert/build" + "github.com/grafana/alloy/internal/component/discovery" + "github.com/grafana/alloy/internal/component/discovery/nerve" + "github.com/grafana/alloy/internal/converter/diag" + 
"github.com/grafana/alloy/internal/converter/internal/common" + "github.com/grafana/alloy/internal/converter/internal/prometheusconvert/build" prom_nerve "github.com/prometheus/prometheus/discovery/zookeeper" ) diff --git a/internal/converter/internal/prometheusconvert/component/openstack.go b/internal/converter/internal/prometheusconvert/component/openstack.go index 0ae31971ea..6e6f537d25 100644 --- a/internal/converter/internal/prometheusconvert/component/openstack.go +++ b/internal/converter/internal/prometheusconvert/component/openstack.go @@ -3,11 +3,11 @@ package component import ( "time" - "github.com/grafana/agent/internal/component/discovery" - "github.com/grafana/agent/internal/component/discovery/openstack" - "github.com/grafana/agent/internal/converter/diag" - "github.com/grafana/agent/internal/converter/internal/common" - "github.com/grafana/agent/internal/converter/internal/prometheusconvert/build" + "github.com/grafana/alloy/internal/component/discovery" + "github.com/grafana/alloy/internal/component/discovery/openstack" + "github.com/grafana/alloy/internal/converter/diag" + "github.com/grafana/alloy/internal/converter/internal/common" + "github.com/grafana/alloy/internal/converter/internal/prometheusconvert/build" "github.com/grafana/alloy/syntax/alloytypes" prom_openstack "github.com/prometheus/prometheus/discovery/openstack" ) diff --git a/internal/converter/internal/prometheusconvert/component/ovhcloud.go b/internal/converter/internal/prometheusconvert/component/ovhcloud.go index cef30cd058..67a0a04a71 100644 --- a/internal/converter/internal/prometheusconvert/component/ovhcloud.go +++ b/internal/converter/internal/prometheusconvert/component/ovhcloud.go @@ -3,11 +3,11 @@ package component import ( "time" - "github.com/grafana/agent/internal/component/discovery" - "github.com/grafana/agent/internal/component/discovery/ovhcloud" - "github.com/grafana/agent/internal/converter/diag" - "github.com/grafana/agent/internal/converter/internal/common" - "github.com/grafana/agent/internal/converter/internal/prometheusconvert/build" + "github.com/grafana/alloy/internal/component/discovery" + "github.com/grafana/alloy/internal/component/discovery/ovhcloud" + "github.com/grafana/alloy/internal/converter/diag" + "github.com/grafana/alloy/internal/converter/internal/common" + "github.com/grafana/alloy/internal/converter/internal/prometheusconvert/build" "github.com/grafana/alloy/syntax/alloytypes" prom_discovery "github.com/prometheus/prometheus/discovery/ovhcloud" ) diff --git a/internal/converter/internal/prometheusconvert/component/relabel.go b/internal/converter/internal/prometheusconvert/component/relabel.go index 3102bb576d..7e8264473d 100644 --- a/internal/converter/internal/prometheusconvert/component/relabel.go +++ b/internal/converter/internal/prometheusconvert/component/relabel.go @@ -3,12 +3,12 @@ package component import ( "fmt" - flow_relabel "github.com/grafana/agent/internal/component/common/relabel" - "github.com/grafana/agent/internal/component/discovery" - disc_relabel "github.com/grafana/agent/internal/component/discovery/relabel" - "github.com/grafana/agent/internal/component/prometheus/relabel" - "github.com/grafana/agent/internal/converter/internal/common" - "github.com/grafana/agent/internal/converter/internal/prometheusconvert/build" + flow_relabel "github.com/grafana/alloy/internal/component/common/relabel" + "github.com/grafana/alloy/internal/component/discovery" + disc_relabel "github.com/grafana/alloy/internal/component/discovery/relabel" + 
"github.com/grafana/alloy/internal/component/prometheus/relabel" + "github.com/grafana/alloy/internal/converter/internal/common" + "github.com/grafana/alloy/internal/converter/internal/prometheusconvert/build" prom_relabel "github.com/prometheus/prometheus/model/relabel" "github.com/prometheus/prometheus/storage" ) diff --git a/internal/converter/internal/prometheusconvert/component/remote_write.go b/internal/converter/internal/prometheusconvert/component/remote_write.go index e2f44ad947..4b1156babd 100644 --- a/internal/converter/internal/prometheusconvert/component/remote_write.go +++ b/internal/converter/internal/prometheusconvert/component/remote_write.go @@ -5,10 +5,10 @@ import ( "strings" "time" - "github.com/grafana/agent/internal/component/prometheus/remotewrite" - "github.com/grafana/agent/internal/converter/diag" - "github.com/grafana/agent/internal/converter/internal/common" - "github.com/grafana/agent/internal/converter/internal/prometheusconvert/build" + "github.com/grafana/alloy/internal/component/prometheus/remotewrite" + "github.com/grafana/alloy/internal/converter/diag" + "github.com/grafana/alloy/internal/converter/internal/common" + "github.com/grafana/alloy/internal/converter/internal/prometheusconvert/build" "github.com/grafana/alloy/syntax/alloytypes" "github.com/prometheus/common/sigv4" prom_config "github.com/prometheus/prometheus/config" diff --git a/internal/converter/internal/prometheusconvert/component/scaleway.go b/internal/converter/internal/prometheusconvert/component/scaleway.go index aef4bc0187..98e17ae468 100644 --- a/internal/converter/internal/prometheusconvert/component/scaleway.go +++ b/internal/converter/internal/prometheusconvert/component/scaleway.go @@ -3,11 +3,11 @@ package component import ( "time" - "github.com/grafana/agent/internal/component/discovery" - "github.com/grafana/agent/internal/component/discovery/scaleway" - "github.com/grafana/agent/internal/converter/diag" - "github.com/grafana/agent/internal/converter/internal/common" - "github.com/grafana/agent/internal/converter/internal/prometheusconvert/build" + "github.com/grafana/alloy/internal/component/discovery" + "github.com/grafana/alloy/internal/component/discovery/scaleway" + "github.com/grafana/alloy/internal/converter/diag" + "github.com/grafana/alloy/internal/converter/internal/common" + "github.com/grafana/alloy/internal/converter/internal/prometheusconvert/build" "github.com/grafana/alloy/syntax/alloytypes" prom_scaleway "github.com/prometheus/prometheus/discovery/scaleway" ) diff --git a/internal/converter/internal/prometheusconvert/component/scrape.go b/internal/converter/internal/prometheusconvert/component/scrape.go index c22b8470da..a5f6da29ad 100644 --- a/internal/converter/internal/prometheusconvert/component/scrape.go +++ b/internal/converter/internal/prometheusconvert/component/scrape.go @@ -7,12 +7,12 @@ import ( "golang.org/x/exp/maps" - "github.com/grafana/agent/internal/component/discovery" - "github.com/grafana/agent/internal/component/prometheus/scrape" - "github.com/grafana/agent/internal/converter/diag" - "github.com/grafana/agent/internal/converter/internal/common" - "github.com/grafana/agent/internal/converter/internal/prometheusconvert/build" - "github.com/grafana/agent/internal/service/cluster" + "github.com/grafana/alloy/internal/component/discovery" + "github.com/grafana/alloy/internal/component/prometheus/scrape" + "github.com/grafana/alloy/internal/converter/diag" + "github.com/grafana/alloy/internal/converter/internal/common" + 
"github.com/grafana/alloy/internal/converter/internal/prometheusconvert/build" + "github.com/grafana/alloy/internal/service/cluster" prom_config "github.com/prometheus/prometheus/config" prom_discovery "github.com/prometheus/prometheus/discovery" "github.com/prometheus/prometheus/storage" diff --git a/internal/converter/internal/prometheusconvert/component/serverset.go b/internal/converter/internal/prometheusconvert/component/serverset.go index 6cea232a09..d0351ab1a9 100644 --- a/internal/converter/internal/prometheusconvert/component/serverset.go +++ b/internal/converter/internal/prometheusconvert/component/serverset.go @@ -3,11 +3,11 @@ package component import ( "time" - "github.com/grafana/agent/internal/component/discovery" - "github.com/grafana/agent/internal/component/discovery/serverset" - "github.com/grafana/agent/internal/converter/diag" - "github.com/grafana/agent/internal/converter/internal/common" - "github.com/grafana/agent/internal/converter/internal/prometheusconvert/build" + "github.com/grafana/alloy/internal/component/discovery" + "github.com/grafana/alloy/internal/component/discovery/serverset" + "github.com/grafana/alloy/internal/converter/diag" + "github.com/grafana/alloy/internal/converter/internal/common" + "github.com/grafana/alloy/internal/converter/internal/prometheusconvert/build" prom_zk "github.com/prometheus/prometheus/discovery/zookeeper" ) diff --git a/internal/converter/internal/prometheusconvert/component/service_discovery.go b/internal/converter/internal/prometheusconvert/component/service_discovery.go index 566af1ef56..6dcca0fac8 100644 --- a/internal/converter/internal/prometheusconvert/component/service_discovery.go +++ b/internal/converter/internal/prometheusconvert/component/service_discovery.go @@ -3,10 +3,10 @@ package component import ( "fmt" - "github.com/grafana/agent/internal/component/discovery" - "github.com/grafana/agent/internal/converter/diag" - "github.com/grafana/agent/internal/converter/internal/common" - "github.com/grafana/agent/internal/converter/internal/prometheusconvert/build" + "github.com/grafana/alloy/internal/component/discovery" + "github.com/grafana/alloy/internal/converter/diag" + "github.com/grafana/alloy/internal/converter/internal/common" + "github.com/grafana/alloy/internal/converter/internal/prometheusconvert/build" prom_discover "github.com/prometheus/prometheus/discovery" prom_http "github.com/prometheus/prometheus/discovery/http" diff --git a/internal/converter/internal/prometheusconvert/component/triton.go b/internal/converter/internal/prometheusconvert/component/triton.go index 4868327ca3..40ecae8015 100644 --- a/internal/converter/internal/prometheusconvert/component/triton.go +++ b/internal/converter/internal/prometheusconvert/component/triton.go @@ -3,11 +3,11 @@ package component import ( "time" - "github.com/grafana/agent/internal/component/discovery" - "github.com/grafana/agent/internal/component/discovery/triton" - "github.com/grafana/agent/internal/converter/diag" - "github.com/grafana/agent/internal/converter/internal/common" - "github.com/grafana/agent/internal/converter/internal/prometheusconvert/build" + "github.com/grafana/alloy/internal/component/discovery" + "github.com/grafana/alloy/internal/component/discovery/triton" + "github.com/grafana/alloy/internal/converter/diag" + "github.com/grafana/alloy/internal/converter/internal/common" + "github.com/grafana/alloy/internal/converter/internal/prometheusconvert/build" prom_triton "github.com/prometheus/prometheus/discovery/triton" ) diff --git 
a/internal/converter/internal/prometheusconvert/prometheusconvert.go b/internal/converter/internal/prometheusconvert/prometheusconvert.go index 328da9903d..7c6d00897f 100644 --- a/internal/converter/internal/prometheusconvert/prometheusconvert.go +++ b/internal/converter/internal/prometheusconvert/prometheusconvert.go @@ -5,12 +5,12 @@ import ( "fmt" "github.com/go-kit/log" - "github.com/grafana/agent/internal/component/discovery" - "github.com/grafana/agent/internal/component/prometheus/remotewrite" - "github.com/grafana/agent/internal/converter/diag" - "github.com/grafana/agent/internal/converter/internal/common" - "github.com/grafana/agent/internal/converter/internal/prometheusconvert/build" - "github.com/grafana/agent/internal/converter/internal/prometheusconvert/component" + "github.com/grafana/alloy/internal/component/discovery" + "github.com/grafana/alloy/internal/component/prometheus/remotewrite" + "github.com/grafana/alloy/internal/converter/diag" + "github.com/grafana/alloy/internal/converter/internal/common" + "github.com/grafana/alloy/internal/converter/internal/prometheusconvert/build" + "github.com/grafana/alloy/internal/converter/internal/prometheusconvert/component" prom_config "github.com/prometheus/prometheus/config" prom_discover "github.com/prometheus/prometheus/discovery" "github.com/prometheus/prometheus/storage" diff --git a/internal/converter/internal/prometheusconvert/prometheusconvert_test.go b/internal/converter/internal/prometheusconvert/prometheusconvert_test.go index 0e7b88fd35..f9c13ce874 100644 --- a/internal/converter/internal/prometheusconvert/prometheusconvert_test.go +++ b/internal/converter/internal/prometheusconvert/prometheusconvert_test.go @@ -3,9 +3,9 @@ package prometheusconvert_test import ( "testing" - "github.com/grafana/agent/internal/converter/internal/prometheusconvert" - "github.com/grafana/agent/internal/converter/internal/test_common" - _ "github.com/grafana/agent/internal/static/metrics/instance" + "github.com/grafana/alloy/internal/converter/internal/prometheusconvert" + "github.com/grafana/alloy/internal/converter/internal/test_common" + _ "github.com/grafana/alloy/internal/static/metrics/instance" ) func TestConvert(t *testing.T) { diff --git a/internal/converter/internal/prometheusconvert/validate.go b/internal/converter/internal/prometheusconvert/validate.go index 0bca959958..e5c6f7941f 100644 --- a/internal/converter/internal/prometheusconvert/validate.go +++ b/internal/converter/internal/prometheusconvert/validate.go @@ -1,9 +1,9 @@ package prometheusconvert import ( - "github.com/grafana/agent/internal/converter/diag" - "github.com/grafana/agent/internal/converter/internal/common" - "github.com/grafana/agent/internal/converter/internal/prometheusconvert/component" + "github.com/grafana/alloy/internal/converter/diag" + "github.com/grafana/alloy/internal/converter/internal/common" + "github.com/grafana/alloy/internal/converter/internal/prometheusconvert/component" prom_config "github.com/prometheus/prometheus/config" prom_discover "github.com/prometheus/prometheus/discovery" diff --git a/internal/converter/internal/promtailconvert/internal/build/azure_event_hub.go b/internal/converter/internal/promtailconvert/internal/build/azure_event_hub.go index 417f54fa3c..f7a3f7ff0d 100644 --- a/internal/converter/internal/promtailconvert/internal/build/azure_event_hub.go +++ b/internal/converter/internal/promtailconvert/internal/build/azure_event_hub.go @@ -1,9 +1,9 @@ package build import ( - 
"github.com/grafana/agent/internal/component/common/relabel" - "github.com/grafana/agent/internal/component/loki/source/azure_event_hubs" - "github.com/grafana/agent/internal/converter/internal/common" + "github.com/grafana/alloy/internal/component/common/relabel" + "github.com/grafana/alloy/internal/component/loki/source/azure_event_hubs" + "github.com/grafana/alloy/internal/converter/internal/common" ) func (s *ScrapeConfigBuilder) AppendAzureEventHubs() { diff --git a/internal/converter/internal/promtailconvert/internal/build/cloudflare.go b/internal/converter/internal/promtailconvert/internal/build/cloudflare.go index 00a6cd0f54..05100b94f7 100644 --- a/internal/converter/internal/promtailconvert/internal/build/cloudflare.go +++ b/internal/converter/internal/promtailconvert/internal/build/cloudflare.go @@ -3,9 +3,9 @@ package build import ( "time" - "github.com/grafana/agent/internal/component/common/loki" - "github.com/grafana/agent/internal/component/loki/source/cloudflare" - "github.com/grafana/agent/internal/converter/internal/common" + "github.com/grafana/alloy/internal/component/common/loki" + "github.com/grafana/alloy/internal/component/loki/source/cloudflare" + "github.com/grafana/alloy/internal/converter/internal/common" "github.com/grafana/alloy/syntax/alloytypes" ) diff --git a/internal/converter/internal/promtailconvert/internal/build/consul_agent.go b/internal/converter/internal/promtailconvert/internal/build/consul_agent.go index 2b7d29a1e1..f97544f157 100644 --- a/internal/converter/internal/promtailconvert/internal/build/consul_agent.go +++ b/internal/converter/internal/promtailconvert/internal/build/consul_agent.go @@ -3,9 +3,9 @@ package build import ( "time" - "github.com/grafana/agent/internal/component/discovery/consulagent" - "github.com/grafana/agent/internal/converter/diag" - "github.com/grafana/agent/internal/converter/internal/common" + "github.com/grafana/alloy/internal/component/discovery/consulagent" + "github.com/grafana/alloy/internal/converter/diag" + "github.com/grafana/alloy/internal/converter/internal/common" "github.com/grafana/alloy/syntax/alloytypes" promtail_consulagent "github.com/grafana/loki/clients/pkg/promtail/discovery/consulagent" ) diff --git a/internal/converter/internal/promtailconvert/internal/build/docker_sd.go b/internal/converter/internal/promtailconvert/internal/build/docker_sd.go index 8a337faf6a..f50d92634c 100644 --- a/internal/converter/internal/promtailconvert/internal/build/docker_sd.go +++ b/internal/converter/internal/promtailconvert/internal/build/docker_sd.go @@ -3,12 +3,12 @@ package build import ( "time" - "github.com/grafana/agent/internal/component/common/loki" - flow_relabel "github.com/grafana/agent/internal/component/common/relabel" - "github.com/grafana/agent/internal/component/discovery" - "github.com/grafana/agent/internal/component/discovery/docker" - loki_docker "github.com/grafana/agent/internal/component/loki/source/docker" - "github.com/grafana/agent/internal/converter/internal/common" + "github.com/grafana/alloy/internal/component/common/loki" + flow_relabel "github.com/grafana/alloy/internal/component/common/relabel" + "github.com/grafana/alloy/internal/component/discovery" + "github.com/grafana/alloy/internal/component/discovery/docker" + loki_docker "github.com/grafana/alloy/internal/component/loki/source/docker" + "github.com/grafana/alloy/internal/converter/internal/common" "github.com/prometheus/prometheus/discovery/moby" ) diff --git 
a/internal/converter/internal/promtailconvert/internal/build/gcplog.go b/internal/converter/internal/promtailconvert/internal/build/gcplog.go index b45efa2d51..71af0e89de 100644 --- a/internal/converter/internal/promtailconvert/internal/build/gcplog.go +++ b/internal/converter/internal/promtailconvert/internal/build/gcplog.go @@ -1,11 +1,11 @@ package build import ( - "github.com/grafana/agent/internal/component/common/relabel" - "github.com/grafana/agent/internal/component/loki/source/gcplog" - "github.com/grafana/agent/internal/component/loki/source/gcplog/gcptypes" - "github.com/grafana/agent/internal/converter/diag" - "github.com/grafana/agent/internal/converter/internal/common" + "github.com/grafana/alloy/internal/component/common/relabel" + "github.com/grafana/alloy/internal/component/loki/source/gcplog" + "github.com/grafana/alloy/internal/component/loki/source/gcplog/gcptypes" + "github.com/grafana/alloy/internal/converter/diag" + "github.com/grafana/alloy/internal/converter/internal/common" ) func (s *ScrapeConfigBuilder) AppendGCPLog() { diff --git a/internal/converter/internal/promtailconvert/internal/build/gelf.go b/internal/converter/internal/promtailconvert/internal/build/gelf.go index 2aed0bc6aa..43d9e7bd78 100644 --- a/internal/converter/internal/promtailconvert/internal/build/gelf.go +++ b/internal/converter/internal/promtailconvert/internal/build/gelf.go @@ -1,9 +1,9 @@ package build import ( - "github.com/grafana/agent/internal/component/common/relabel" - "github.com/grafana/agent/internal/component/loki/source/gelf" - "github.com/grafana/agent/internal/converter/internal/common" + "github.com/grafana/alloy/internal/component/common/relabel" + "github.com/grafana/alloy/internal/component/loki/source/gelf" + "github.com/grafana/alloy/internal/converter/internal/common" ) func (s *ScrapeConfigBuilder) AppendGelfConfig() { diff --git a/internal/converter/internal/promtailconvert/internal/build/global_context.go b/internal/converter/internal/promtailconvert/internal/build/global_context.go index a47ad122ce..1589ce2884 100644 --- a/internal/converter/internal/promtailconvert/internal/build/global_context.go +++ b/internal/converter/internal/promtailconvert/internal/build/global_context.go @@ -3,7 +3,7 @@ package build import ( "time" - "github.com/grafana/agent/internal/component/common/loki" + "github.com/grafana/alloy/internal/component/common/loki" ) type GlobalContext struct { diff --git a/internal/converter/internal/promtailconvert/internal/build/herokudrain.go b/internal/converter/internal/promtailconvert/internal/build/herokudrain.go index 59b7d197e9..efe009e32b 100644 --- a/internal/converter/internal/promtailconvert/internal/build/herokudrain.go +++ b/internal/converter/internal/promtailconvert/internal/build/herokudrain.go @@ -1,9 +1,9 @@ package build import ( - "github.com/grafana/agent/internal/component/common/relabel" - "github.com/grafana/agent/internal/component/loki/source/heroku" - "github.com/grafana/agent/internal/converter/internal/common" + "github.com/grafana/alloy/internal/component/common/relabel" + "github.com/grafana/alloy/internal/component/loki/source/heroku" + "github.com/grafana/alloy/internal/converter/internal/common" ) func (s *ScrapeConfigBuilder) AppendHerokuDrainConfig() { diff --git a/internal/converter/internal/promtailconvert/internal/build/journal.go b/internal/converter/internal/promtailconvert/internal/build/journal.go index e693f310b4..4d1b617527 100644 --- a/internal/converter/internal/promtailconvert/internal/build/journal.go +++ 
b/internal/converter/internal/promtailconvert/internal/build/journal.go @@ -4,10 +4,10 @@ import ( "fmt" "time" - flowrelabel "github.com/grafana/agent/internal/component/common/relabel" - "github.com/grafana/agent/internal/component/loki/source/journal" - "github.com/grafana/agent/internal/converter/diag" - "github.com/grafana/agent/internal/converter/internal/common" + flowrelabel "github.com/grafana/alloy/internal/component/common/relabel" + "github.com/grafana/alloy/internal/component/loki/source/journal" + "github.com/grafana/alloy/internal/converter/diag" + "github.com/grafana/alloy/internal/converter/internal/common" ) func (s *ScrapeConfigBuilder) AppendJournalConfig() { diff --git a/internal/converter/internal/promtailconvert/internal/build/kafka.go b/internal/converter/internal/promtailconvert/internal/build/kafka.go index d00a6c98c4..e40332ea2f 100644 --- a/internal/converter/internal/promtailconvert/internal/build/kafka.go +++ b/internal/converter/internal/promtailconvert/internal/build/kafka.go @@ -1,9 +1,9 @@ package build import ( - "github.com/grafana/agent/internal/component/common/relabel" - "github.com/grafana/agent/internal/component/loki/source/kafka" - "github.com/grafana/agent/internal/converter/internal/common" + "github.com/grafana/alloy/internal/component/common/relabel" + "github.com/grafana/alloy/internal/component/loki/source/kafka" + "github.com/grafana/alloy/internal/converter/internal/common" "github.com/grafana/alloy/syntax/alloytypes" "github.com/grafana/loki/clients/pkg/promtail/scrapeconfig" ) diff --git a/internal/converter/internal/promtailconvert/internal/build/loki_write.go b/internal/converter/internal/promtailconvert/internal/build/loki_write.go index acca65d958..208249f843 100644 --- a/internal/converter/internal/promtailconvert/internal/build/loki_write.go +++ b/internal/converter/internal/promtailconvert/internal/build/loki_write.go @@ -4,10 +4,10 @@ import ( "fmt" "github.com/alecthomas/units" - "github.com/grafana/agent/internal/component/common/loki" - lokiwrite "github.com/grafana/agent/internal/component/loki/write" - "github.com/grafana/agent/internal/converter/diag" - "github.com/grafana/agent/internal/converter/internal/common" + "github.com/grafana/alloy/internal/component/common/loki" + lokiwrite "github.com/grafana/alloy/internal/component/loki/write" + "github.com/grafana/alloy/internal/converter/diag" + "github.com/grafana/alloy/internal/converter/internal/common" "github.com/grafana/alloy/syntax/token/builder" "github.com/grafana/loki/clients/pkg/promtail/client" lokiflag "github.com/grafana/loki/pkg/util/flagext" diff --git a/internal/converter/internal/promtailconvert/internal/build/push_api.go b/internal/converter/internal/promtailconvert/internal/build/push_api.go index 849066a733..12f20df51e 100644 --- a/internal/converter/internal/promtailconvert/internal/build/push_api.go +++ b/internal/converter/internal/promtailconvert/internal/build/push_api.go @@ -3,10 +3,10 @@ package build import ( "github.com/grafana/loki/clients/pkg/promtail/scrapeconfig" - "github.com/grafana/agent/internal/component/common/loki" - "github.com/grafana/agent/internal/component/common/relabel" - "github.com/grafana/agent/internal/component/loki/source/api" - "github.com/grafana/agent/internal/converter/internal/common" + "github.com/grafana/alloy/internal/component/common/loki" + "github.com/grafana/alloy/internal/component/common/relabel" + "github.com/grafana/alloy/internal/component/loki/source/api" + 
"github.com/grafana/alloy/internal/converter/internal/common" ) func (s *ScrapeConfigBuilder) AppendPushAPI() { diff --git a/internal/converter/internal/promtailconvert/internal/build/scrape_builder.go b/internal/converter/internal/promtailconvert/internal/build/scrape_builder.go index 4db2f44abc..2efb4a1271 100644 --- a/internal/converter/internal/promtailconvert/internal/build/scrape_builder.go +++ b/internal/converter/internal/promtailconvert/internal/build/scrape_builder.go @@ -5,17 +5,17 @@ import ( "fmt" "strings" - "github.com/grafana/agent/internal/component/common/loki" - "github.com/grafana/agent/internal/component/discovery" - "github.com/grafana/agent/internal/component/discovery/relabel" - filematch "github.com/grafana/agent/internal/component/local/file_match" - "github.com/grafana/agent/internal/component/loki/process" - "github.com/grafana/agent/internal/component/loki/process/stages" - lokirelabel "github.com/grafana/agent/internal/component/loki/relabel" - lokisourcefile "github.com/grafana/agent/internal/component/loki/source/file" - "github.com/grafana/agent/internal/converter/diag" - "github.com/grafana/agent/internal/converter/internal/common" - "github.com/grafana/agent/internal/converter/internal/prometheusconvert/component" + "github.com/grafana/alloy/internal/component/common/loki" + "github.com/grafana/alloy/internal/component/discovery" + "github.com/grafana/alloy/internal/component/discovery/relabel" + filematch "github.com/grafana/alloy/internal/component/local/file_match" + "github.com/grafana/alloy/internal/component/loki/process" + "github.com/grafana/alloy/internal/component/loki/process/stages" + lokirelabel "github.com/grafana/alloy/internal/component/loki/relabel" + lokisourcefile "github.com/grafana/alloy/internal/component/loki/source/file" + "github.com/grafana/alloy/internal/converter/diag" + "github.com/grafana/alloy/internal/converter/internal/common" + "github.com/grafana/alloy/internal/converter/internal/prometheusconvert/component" "github.com/grafana/alloy/syntax/scanner" "github.com/grafana/alloy/syntax/token/builder" "github.com/grafana/loki/clients/pkg/promtail/scrapeconfig" diff --git a/internal/converter/internal/promtailconvert/internal/build/service_discovery.go b/internal/converter/internal/promtailconvert/internal/build/service_discovery.go index 533f5c8c2b..717cfda439 100644 --- a/internal/converter/internal/promtailconvert/internal/build/service_discovery.go +++ b/internal/converter/internal/promtailconvert/internal/build/service_discovery.go @@ -3,11 +3,11 @@ package build import ( "fmt" - "github.com/grafana/agent/internal/component/discovery" - "github.com/grafana/agent/internal/converter/diag" - "github.com/grafana/agent/internal/converter/internal/common" - "github.com/grafana/agent/internal/converter/internal/prometheusconvert" - "github.com/grafana/agent/internal/converter/internal/prometheusconvert/build" + "github.com/grafana/alloy/internal/component/discovery" + "github.com/grafana/alloy/internal/converter/diag" + "github.com/grafana/alloy/internal/converter/internal/common" + "github.com/grafana/alloy/internal/converter/internal/prometheusconvert" + "github.com/grafana/alloy/internal/converter/internal/prometheusconvert/build" "github.com/grafana/loki/clients/pkg/promtail/scrapeconfig" "github.com/prometheus/common/model" diff --git a/internal/converter/internal/promtailconvert/internal/build/stages.go b/internal/converter/internal/promtailconvert/internal/build/stages.go index a303949e18..294c786fe0 100644 --- 
a/internal/converter/internal/promtailconvert/internal/build/stages.go +++ b/internal/converter/internal/promtailconvert/internal/build/stages.go @@ -12,9 +12,9 @@ import ( "github.com/grafana/loki/pkg/util/flagext" "github.com/mitchellh/mapstructure" - "github.com/grafana/agent/internal/component/loki/process/metric" - "github.com/grafana/agent/internal/component/loki/process/stages" - "github.com/grafana/agent/internal/converter/diag" + "github.com/grafana/alloy/internal/component/loki/process/metric" + "github.com/grafana/alloy/internal/component/loki/process/stages" + "github.com/grafana/alloy/internal/converter/diag" ) func convertStage(st interface{}, diags *diag.Diagnostics) (stages.StageConfig, bool) { diff --git a/internal/converter/internal/promtailconvert/internal/build/syslog.go b/internal/converter/internal/promtailconvert/internal/build/syslog.go index 31ac4983c4..8dfcae1847 100644 --- a/internal/converter/internal/promtailconvert/internal/build/syslog.go +++ b/internal/converter/internal/promtailconvert/internal/build/syslog.go @@ -1,9 +1,9 @@ package build import ( - "github.com/grafana/agent/internal/component/common/relabel" - "github.com/grafana/agent/internal/component/loki/source/syslog" - "github.com/grafana/agent/internal/converter/internal/common" + "github.com/grafana/alloy/internal/component/common/relabel" + "github.com/grafana/alloy/internal/component/loki/source/syslog" + "github.com/grafana/alloy/internal/converter/internal/common" ) func (s *ScrapeConfigBuilder) AppendSyslogConfig() { diff --git a/internal/converter/internal/promtailconvert/internal/build/windows_events.go b/internal/converter/internal/promtailconvert/internal/build/windows_events.go index e1784535b3..c3bcd81858 100644 --- a/internal/converter/internal/promtailconvert/internal/build/windows_events.go +++ b/internal/converter/internal/promtailconvert/internal/build/windows_events.go @@ -1,9 +1,9 @@ package build import ( - "github.com/grafana/agent/internal/component/common/loki" - "github.com/grafana/agent/internal/component/loki/source/windowsevent" - "github.com/grafana/agent/internal/converter/internal/common" + "github.com/grafana/alloy/internal/component/common/loki" + "github.com/grafana/alloy/internal/component/loki/source/windowsevent" + "github.com/grafana/alloy/internal/converter/internal/common" ) func (s *ScrapeConfigBuilder) AppendWindowsEventsConfig() { diff --git a/internal/converter/internal/promtailconvert/promtailconvert.go b/internal/converter/internal/promtailconvert/promtailconvert.go index de30dd7d17..b99db61bdc 100644 --- a/internal/converter/internal/promtailconvert/promtailconvert.go +++ b/internal/converter/internal/promtailconvert/promtailconvert.go @@ -5,10 +5,10 @@ import ( "flag" "fmt" - "github.com/grafana/agent/internal/component/common/loki" - "github.com/grafana/agent/internal/converter/diag" - "github.com/grafana/agent/internal/converter/internal/common" - "github.com/grafana/agent/internal/converter/internal/promtailconvert/internal/build" + "github.com/grafana/alloy/internal/component/common/loki" + "github.com/grafana/alloy/internal/converter/diag" + "github.com/grafana/alloy/internal/converter/internal/common" + "github.com/grafana/alloy/internal/converter/internal/promtailconvert/internal/build" "github.com/grafana/alloy/syntax/token/builder" "github.com/grafana/dskit/flagext" promtailcfg "github.com/grafana/loki/clients/pkg/promtail/config" diff --git a/internal/converter/internal/promtailconvert/promtailconvert_test.go 
b/internal/converter/internal/promtailconvert/promtailconvert_test.go index 63ea75afbe..985efa49ee 100644 --- a/internal/converter/internal/promtailconvert/promtailconvert_test.go +++ b/internal/converter/internal/promtailconvert/promtailconvert_test.go @@ -3,9 +3,9 @@ package promtailconvert_test import ( "testing" - "github.com/grafana/agent/internal/converter/internal/promtailconvert" - "github.com/grafana/agent/internal/converter/internal/test_common" - _ "github.com/grafana/agent/internal/static/metrics/instance" // Imported to override default values via the init function. + "github.com/grafana/alloy/internal/converter/internal/promtailconvert" + "github.com/grafana/alloy/internal/converter/internal/test_common" + _ "github.com/grafana/alloy/internal/static/metrics/instance" // Imported to override default values via the init function. ) func TestConvert(t *testing.T) { diff --git a/internal/converter/internal/promtailconvert/validate.go b/internal/converter/internal/promtailconvert/validate.go index 182203e287..8833b36c57 100644 --- a/internal/converter/internal/promtailconvert/validate.go +++ b/internal/converter/internal/promtailconvert/validate.go @@ -1,7 +1,7 @@ package promtailconvert import ( - "github.com/grafana/agent/internal/converter/diag" + "github.com/grafana/alloy/internal/converter/diag" promtailcfg "github.com/grafana/loki/clients/pkg/promtail/config" ) diff --git a/internal/converter/internal/staticconvert/internal/build/apache_exporter.go b/internal/converter/internal/staticconvert/internal/build/apache_exporter.go index df368bb744..a8a1aaef8e 100644 --- a/internal/converter/internal/staticconvert/internal/build/apache_exporter.go +++ b/internal/converter/internal/staticconvert/internal/build/apache_exporter.go @@ -1,10 +1,10 @@ package build import ( - "github.com/grafana/agent/internal/component/discovery" - "github.com/grafana/agent/internal/component/prometheus/exporter/apache" - "github.com/grafana/agent/internal/static/integrations/apache_http" - apache_exporter_v2 "github.com/grafana/agent/internal/static/integrations/v2/apache_http" + "github.com/grafana/alloy/internal/component/discovery" + "github.com/grafana/alloy/internal/component/prometheus/exporter/apache" + "github.com/grafana/alloy/internal/static/integrations/apache_http" + apache_exporter_v2 "github.com/grafana/alloy/internal/static/integrations/v2/apache_http" ) func (b *ConfigBuilder) appendApacheExporter(config *apache_http.Config) discovery.Exports { diff --git a/internal/converter/internal/staticconvert/internal/build/app_agent_receiver.go b/internal/converter/internal/staticconvert/internal/build/app_agent_receiver.go index f3ebf96185..a3e2b1c9bb 100644 --- a/internal/converter/internal/staticconvert/internal/build/app_agent_receiver.go +++ b/internal/converter/internal/staticconvert/internal/build/app_agent_receiver.go @@ -4,12 +4,12 @@ import ( "fmt" "github.com/alecthomas/units" - "github.com/grafana/agent/internal/component/common/loki" - "github.com/grafana/agent/internal/component/faro/receiver" - "github.com/grafana/agent/internal/component/otelcol" - "github.com/grafana/agent/internal/converter/diag" - "github.com/grafana/agent/internal/converter/internal/common" - app_agent_receiver_v2 "github.com/grafana/agent/internal/static/integrations/v2/app_agent_receiver" + "github.com/grafana/alloy/internal/component/common/loki" + "github.com/grafana/alloy/internal/component/faro/receiver" + "github.com/grafana/alloy/internal/component/otelcol" + 
"github.com/grafana/alloy/internal/converter/diag" + "github.com/grafana/alloy/internal/converter/internal/common" + app_agent_receiver_v2 "github.com/grafana/alloy/internal/static/integrations/v2/app_agent_receiver" "github.com/grafana/alloy/syntax/alloytypes" "github.com/grafana/alloy/syntax/scanner" ) diff --git a/internal/converter/internal/staticconvert/internal/build/azure_exporter.go b/internal/converter/internal/staticconvert/internal/build/azure_exporter.go index d099c67849..fad38b1c2d 100644 --- a/internal/converter/internal/staticconvert/internal/build/azure_exporter.go +++ b/internal/converter/internal/staticconvert/internal/build/azure_exporter.go @@ -1,9 +1,9 @@ package build import ( - "github.com/grafana/agent/internal/component/discovery" - "github.com/grafana/agent/internal/component/prometheus/exporter/azure" - "github.com/grafana/agent/internal/static/integrations/azure_exporter" + "github.com/grafana/alloy/internal/component/discovery" + "github.com/grafana/alloy/internal/component/prometheus/exporter/azure" + "github.com/grafana/alloy/internal/static/integrations/azure_exporter" ) func (b *ConfigBuilder) appendAzureExporter(config *azure_exporter.Config, instanceKey *string) discovery.Exports { diff --git a/internal/converter/internal/staticconvert/internal/build/blackbox_exporter.go b/internal/converter/internal/staticconvert/internal/build/blackbox_exporter.go index 2de0c0de82..f355221ba0 100644 --- a/internal/converter/internal/staticconvert/internal/build/blackbox_exporter.go +++ b/internal/converter/internal/staticconvert/internal/build/blackbox_exporter.go @@ -3,10 +3,10 @@ package build import ( "time" - "github.com/grafana/agent/internal/component/discovery" - "github.com/grafana/agent/internal/component/prometheus/exporter/blackbox" - "github.com/grafana/agent/internal/static/integrations/blackbox_exporter" - blackbox_exporter_v2 "github.com/grafana/agent/internal/static/integrations/v2/blackbox_exporter" + "github.com/grafana/alloy/internal/component/discovery" + "github.com/grafana/alloy/internal/component/prometheus/exporter/blackbox" + "github.com/grafana/alloy/internal/static/integrations/blackbox_exporter" + blackbox_exporter_v2 "github.com/grafana/alloy/internal/static/integrations/v2/blackbox_exporter" "github.com/grafana/alloy/syntax/alloytypes" ) diff --git a/internal/converter/internal/staticconvert/internal/build/builder.go b/internal/converter/internal/staticconvert/internal/build/builder.go index c36d51fa30..21276d59f6 100644 --- a/internal/converter/internal/staticconvert/internal/build/builder.go +++ b/internal/converter/internal/staticconvert/internal/build/builder.go @@ -3,8 +3,8 @@ package build import ( "strings" - "github.com/grafana/agent/internal/converter/diag" - "github.com/grafana/agent/internal/static/config" + "github.com/grafana/alloy/internal/converter/diag" + "github.com/grafana/alloy/internal/static/config" "github.com/grafana/alloy/syntax/token/builder" ) diff --git a/internal/converter/internal/staticconvert/internal/build/builder_integrations.go b/internal/converter/internal/staticconvert/internal/build/builder_integrations.go index 369210bcd7..f5bf89c9a0 100644 --- a/internal/converter/internal/staticconvert/internal/build/builder_integrations.go +++ b/internal/converter/internal/staticconvert/internal/build/builder_integrations.go @@ -4,48 +4,48 @@ import ( "fmt" "strings" - "github.com/grafana/agent/internal/component" - "github.com/grafana/agent/internal/component/discovery" - 
"github.com/grafana/agent/internal/component/prometheus/remotewrite" - "github.com/grafana/agent/internal/converter/diag" - "github.com/grafana/agent/internal/converter/internal/common" - "github.com/grafana/agent/internal/converter/internal/prometheusconvert" - "github.com/grafana/agent/internal/static/config" - agent_exporter "github.com/grafana/agent/internal/static/integrations/agent" - "github.com/grafana/agent/internal/static/integrations/apache_http" - "github.com/grafana/agent/internal/static/integrations/azure_exporter" - "github.com/grafana/agent/internal/static/integrations/blackbox_exporter" - "github.com/grafana/agent/internal/static/integrations/cadvisor" - "github.com/grafana/agent/internal/static/integrations/cloudwatch_exporter" - int_config "github.com/grafana/agent/internal/static/integrations/config" - "github.com/grafana/agent/internal/static/integrations/consul_exporter" - "github.com/grafana/agent/internal/static/integrations/dnsmasq_exporter" - "github.com/grafana/agent/internal/static/integrations/elasticsearch_exporter" - "github.com/grafana/agent/internal/static/integrations/gcp_exporter" - "github.com/grafana/agent/internal/static/integrations/github_exporter" - "github.com/grafana/agent/internal/static/integrations/kafka_exporter" - "github.com/grafana/agent/internal/static/integrations/memcached_exporter" - "github.com/grafana/agent/internal/static/integrations/mongodb_exporter" - mssql_exporter "github.com/grafana/agent/internal/static/integrations/mssql" - "github.com/grafana/agent/internal/static/integrations/mysqld_exporter" - "github.com/grafana/agent/internal/static/integrations/node_exporter" - "github.com/grafana/agent/internal/static/integrations/oracledb_exporter" - "github.com/grafana/agent/internal/static/integrations/postgres_exporter" - "github.com/grafana/agent/internal/static/integrations/process_exporter" - "github.com/grafana/agent/internal/static/integrations/redis_exporter" - "github.com/grafana/agent/internal/static/integrations/snmp_exporter" - "github.com/grafana/agent/internal/static/integrations/snowflake_exporter" - "github.com/grafana/agent/internal/static/integrations/squid_exporter" - "github.com/grafana/agent/internal/static/integrations/statsd_exporter" - agent_exporter_v2 "github.com/grafana/agent/internal/static/integrations/v2/agent" - apache_exporter_v2 "github.com/grafana/agent/internal/static/integrations/v2/apache_http" - app_agent_receiver_v2 "github.com/grafana/agent/internal/static/integrations/v2/app_agent_receiver" - blackbox_exporter_v2 "github.com/grafana/agent/internal/static/integrations/v2/blackbox_exporter" - common_v2 "github.com/grafana/agent/internal/static/integrations/v2/common" - eventhandler_v2 "github.com/grafana/agent/internal/static/integrations/v2/eventhandler" - metricsutils_v2 "github.com/grafana/agent/internal/static/integrations/v2/metricsutils" - snmp_exporter_v2 "github.com/grafana/agent/internal/static/integrations/v2/snmp_exporter" - "github.com/grafana/agent/internal/static/integrations/windows_exporter" + "github.com/grafana/alloy/internal/component" + "github.com/grafana/alloy/internal/component/discovery" + "github.com/grafana/alloy/internal/component/prometheus/remotewrite" + "github.com/grafana/alloy/internal/converter/diag" + "github.com/grafana/alloy/internal/converter/internal/common" + "github.com/grafana/alloy/internal/converter/internal/prometheusconvert" + "github.com/grafana/alloy/internal/static/config" + agent_exporter 
"github.com/grafana/alloy/internal/static/integrations/agent" + "github.com/grafana/alloy/internal/static/integrations/apache_http" + "github.com/grafana/alloy/internal/static/integrations/azure_exporter" + "github.com/grafana/alloy/internal/static/integrations/blackbox_exporter" + "github.com/grafana/alloy/internal/static/integrations/cadvisor" + "github.com/grafana/alloy/internal/static/integrations/cloudwatch_exporter" + int_config "github.com/grafana/alloy/internal/static/integrations/config" + "github.com/grafana/alloy/internal/static/integrations/consul_exporter" + "github.com/grafana/alloy/internal/static/integrations/dnsmasq_exporter" + "github.com/grafana/alloy/internal/static/integrations/elasticsearch_exporter" + "github.com/grafana/alloy/internal/static/integrations/gcp_exporter" + "github.com/grafana/alloy/internal/static/integrations/github_exporter" + "github.com/grafana/alloy/internal/static/integrations/kafka_exporter" + "github.com/grafana/alloy/internal/static/integrations/memcached_exporter" + "github.com/grafana/alloy/internal/static/integrations/mongodb_exporter" + mssql_exporter "github.com/grafana/alloy/internal/static/integrations/mssql" + "github.com/grafana/alloy/internal/static/integrations/mysqld_exporter" + "github.com/grafana/alloy/internal/static/integrations/node_exporter" + "github.com/grafana/alloy/internal/static/integrations/oracledb_exporter" + "github.com/grafana/alloy/internal/static/integrations/postgres_exporter" + "github.com/grafana/alloy/internal/static/integrations/process_exporter" + "github.com/grafana/alloy/internal/static/integrations/redis_exporter" + "github.com/grafana/alloy/internal/static/integrations/snmp_exporter" + "github.com/grafana/alloy/internal/static/integrations/snowflake_exporter" + "github.com/grafana/alloy/internal/static/integrations/squid_exporter" + "github.com/grafana/alloy/internal/static/integrations/statsd_exporter" + agent_exporter_v2 "github.com/grafana/alloy/internal/static/integrations/v2/agent" + apache_exporter_v2 "github.com/grafana/alloy/internal/static/integrations/v2/apache_http" + app_agent_receiver_v2 "github.com/grafana/alloy/internal/static/integrations/v2/app_agent_receiver" + blackbox_exporter_v2 "github.com/grafana/alloy/internal/static/integrations/v2/blackbox_exporter" + common_v2 "github.com/grafana/alloy/internal/static/integrations/v2/common" + eventhandler_v2 "github.com/grafana/alloy/internal/static/integrations/v2/eventhandler" + metricsutils_v2 "github.com/grafana/alloy/internal/static/integrations/v2/metricsutils" + snmp_exporter_v2 "github.com/grafana/alloy/internal/static/integrations/v2/snmp_exporter" + "github.com/grafana/alloy/internal/static/integrations/windows_exporter" "github.com/grafana/alloy/syntax/scanner" "github.com/prometheus/common/model" prom_config "github.com/prometheus/prometheus/config" diff --git a/internal/converter/internal/staticconvert/internal/build/builder_logging.go b/internal/converter/internal/staticconvert/internal/build/builder_logging.go index cda77f4849..6862146cc0 100644 --- a/internal/converter/internal/staticconvert/internal/build/builder_logging.go +++ b/internal/converter/internal/staticconvert/internal/build/builder_logging.go @@ -3,9 +3,9 @@ package build import ( "reflect" - "github.com/grafana/agent/internal/converter/internal/common" - "github.com/grafana/agent/internal/flow/logging" - "github.com/grafana/agent/internal/static/server" + "github.com/grafana/alloy/internal/converter/internal/common" + 
"github.com/grafana/alloy/internal/flow/logging" + "github.com/grafana/alloy/internal/static/server" ) func (b *ConfigBuilder) appendLogging(config *server.Config) { diff --git a/internal/converter/internal/staticconvert/internal/build/builder_server.go b/internal/converter/internal/staticconvert/internal/build/builder_server.go index be742a448b..ed2c3a4f5a 100644 --- a/internal/converter/internal/staticconvert/internal/build/builder_server.go +++ b/internal/converter/internal/staticconvert/internal/build/builder_server.go @@ -3,9 +3,9 @@ package build import ( "reflect" - "github.com/grafana/agent/internal/converter/internal/common" - "github.com/grafana/agent/internal/service/http" - "github.com/grafana/agent/internal/static/server" + "github.com/grafana/alloy/internal/converter/internal/common" + "github.com/grafana/alloy/internal/service/http" + "github.com/grafana/alloy/internal/static/server" ) func (b *ConfigBuilder) appendServer(config *server.Config) { diff --git a/internal/converter/internal/staticconvert/internal/build/builder_traces.go b/internal/converter/internal/staticconvert/internal/build/builder_traces.go index b37b658924..8573ddeae4 100644 --- a/internal/converter/internal/staticconvert/internal/build/builder_traces.go +++ b/internal/converter/internal/staticconvert/internal/build/builder_traces.go @@ -4,9 +4,9 @@ import ( "fmt" "reflect" - "github.com/grafana/agent/internal/converter/diag" - "github.com/grafana/agent/internal/converter/internal/otelcolconvert" - "github.com/grafana/agent/internal/static/traces" + "github.com/grafana/alloy/internal/converter/diag" + "github.com/grafana/alloy/internal/converter/internal/otelcolconvert" + "github.com/grafana/alloy/internal/static/traces" otel_component "go.opentelemetry.io/collector/component" "go.opentelemetry.io/collector/exporter/loggingexporter" "go.opentelemetry.io/collector/otelcol" diff --git a/internal/converter/internal/staticconvert/internal/build/cadvisor_exporter.go b/internal/converter/internal/staticconvert/internal/build/cadvisor_exporter.go index eab148d9f0..474dbdbcc4 100644 --- a/internal/converter/internal/staticconvert/internal/build/cadvisor_exporter.go +++ b/internal/converter/internal/staticconvert/internal/build/cadvisor_exporter.go @@ -3,9 +3,9 @@ package build import ( "time" - "github.com/grafana/agent/internal/component/discovery" - "github.com/grafana/agent/internal/component/prometheus/exporter/cadvisor" - cadvisor_integration "github.com/grafana/agent/internal/static/integrations/cadvisor" + "github.com/grafana/alloy/internal/component/discovery" + "github.com/grafana/alloy/internal/component/prometheus/exporter/cadvisor" + cadvisor_integration "github.com/grafana/alloy/internal/static/integrations/cadvisor" ) func (b *ConfigBuilder) appendCadvisorExporter(config *cadvisor_integration.Config, instanceKey *string) discovery.Exports { diff --git a/internal/converter/internal/staticconvert/internal/build/cloudwatch_exporter.go b/internal/converter/internal/staticconvert/internal/build/cloudwatch_exporter.go index 3e35cc4d4e..9879208d32 100644 --- a/internal/converter/internal/staticconvert/internal/build/cloudwatch_exporter.go +++ b/internal/converter/internal/staticconvert/internal/build/cloudwatch_exporter.go @@ -1,9 +1,9 @@ package build import ( - "github.com/grafana/agent/internal/component/discovery" - "github.com/grafana/agent/internal/component/prometheus/exporter/cloudwatch" - "github.com/grafana/agent/internal/static/integrations/cloudwatch_exporter" + 
"github.com/grafana/alloy/internal/component/discovery" + "github.com/grafana/alloy/internal/component/prometheus/exporter/cloudwatch" + "github.com/grafana/alloy/internal/static/integrations/cloudwatch_exporter" ) func (b *ConfigBuilder) appendCloudwatchExporter(config *cloudwatch_exporter.Config, instanceKey *string) discovery.Exports { diff --git a/internal/converter/internal/staticconvert/internal/build/consul_exporter.go b/internal/converter/internal/staticconvert/internal/build/consul_exporter.go index e6c5231a9c..87b7880ddd 100644 --- a/internal/converter/internal/staticconvert/internal/build/consul_exporter.go +++ b/internal/converter/internal/staticconvert/internal/build/consul_exporter.go @@ -1,9 +1,9 @@ package build import ( - "github.com/grafana/agent/internal/component/discovery" - "github.com/grafana/agent/internal/component/prometheus/exporter/consul" - "github.com/grafana/agent/internal/static/integrations/consul_exporter" + "github.com/grafana/alloy/internal/component/discovery" + "github.com/grafana/alloy/internal/component/prometheus/exporter/consul" + "github.com/grafana/alloy/internal/static/integrations/consul_exporter" ) func (b *ConfigBuilder) appendConsulExporter(config *consul_exporter.Config, instanceKey *string) discovery.Exports { diff --git a/internal/converter/internal/staticconvert/internal/build/dnsmasq_exporter.go b/internal/converter/internal/staticconvert/internal/build/dnsmasq_exporter.go index a3cc9edfdd..8f40954809 100644 --- a/internal/converter/internal/staticconvert/internal/build/dnsmasq_exporter.go +++ b/internal/converter/internal/staticconvert/internal/build/dnsmasq_exporter.go @@ -1,9 +1,9 @@ package build import ( - "github.com/grafana/agent/internal/component/discovery" - "github.com/grafana/agent/internal/component/prometheus/exporter/dnsmasq" - "github.com/grafana/agent/internal/static/integrations/dnsmasq_exporter" + "github.com/grafana/alloy/internal/component/discovery" + "github.com/grafana/alloy/internal/component/prometheus/exporter/dnsmasq" + "github.com/grafana/alloy/internal/static/integrations/dnsmasq_exporter" ) func (b *ConfigBuilder) appendDnsmasqExporter(config *dnsmasq_exporter.Config, instanceKey *string) discovery.Exports { diff --git a/internal/converter/internal/staticconvert/internal/build/elasticsearch_exporter.go b/internal/converter/internal/staticconvert/internal/build/elasticsearch_exporter.go index 4fa85919ad..141820d760 100644 --- a/internal/converter/internal/staticconvert/internal/build/elasticsearch_exporter.go +++ b/internal/converter/internal/staticconvert/internal/build/elasticsearch_exporter.go @@ -1,10 +1,10 @@ package build import ( - commonCfg "github.com/grafana/agent/internal/component/common/config" - "github.com/grafana/agent/internal/component/discovery" - "github.com/grafana/agent/internal/component/prometheus/exporter/elasticsearch" - "github.com/grafana/agent/internal/static/integrations/elasticsearch_exporter" + commonCfg "github.com/grafana/alloy/internal/component/common/config" + "github.com/grafana/alloy/internal/component/discovery" + "github.com/grafana/alloy/internal/component/prometheus/exporter/elasticsearch" + "github.com/grafana/alloy/internal/static/integrations/elasticsearch_exporter" "github.com/grafana/alloy/syntax/alloytypes" ) diff --git a/internal/converter/internal/staticconvert/internal/build/eventhandler.go b/internal/converter/internal/staticconvert/internal/build/eventhandler.go index a99bff8e62..e0485eb9c0 100644 --- 
a/internal/converter/internal/staticconvert/internal/build/eventhandler.go +++ b/internal/converter/internal/staticconvert/internal/build/eventhandler.go @@ -3,13 +3,13 @@ package build import ( "fmt" - "github.com/grafana/agent/internal/component/common/loki" - flow_relabel "github.com/grafana/agent/internal/component/common/relabel" - "github.com/grafana/agent/internal/component/loki/relabel" - "github.com/grafana/agent/internal/component/loki/source/kubernetes_events" - "github.com/grafana/agent/internal/converter/diag" - "github.com/grafana/agent/internal/converter/internal/common" - eventhandler_v2 "github.com/grafana/agent/internal/static/integrations/v2/eventhandler" + "github.com/grafana/alloy/internal/component/common/loki" + flow_relabel "github.com/grafana/alloy/internal/component/common/relabel" + "github.com/grafana/alloy/internal/component/loki/relabel" + "github.com/grafana/alloy/internal/component/loki/source/kubernetes_events" + "github.com/grafana/alloy/internal/converter/diag" + "github.com/grafana/alloy/internal/converter/internal/common" + eventhandler_v2 "github.com/grafana/alloy/internal/static/integrations/v2/eventhandler" "github.com/grafana/alloy/syntax/scanner" ) diff --git a/internal/converter/internal/staticconvert/internal/build/gcp_exporter.go b/internal/converter/internal/staticconvert/internal/build/gcp_exporter.go index 27a984fac0..51a51f1fdb 100644 --- a/internal/converter/internal/staticconvert/internal/build/gcp_exporter.go +++ b/internal/converter/internal/staticconvert/internal/build/gcp_exporter.go @@ -1,9 +1,9 @@ package build import ( - "github.com/grafana/agent/internal/component/discovery" - "github.com/grafana/agent/internal/component/prometheus/exporter/gcp" - "github.com/grafana/agent/internal/static/integrations/gcp_exporter" + "github.com/grafana/alloy/internal/component/discovery" + "github.com/grafana/alloy/internal/component/prometheus/exporter/gcp" + "github.com/grafana/alloy/internal/static/integrations/gcp_exporter" ) func (b *ConfigBuilder) appendGcpExporter(config *gcp_exporter.Config, instanceKey *string) discovery.Exports { diff --git a/internal/converter/internal/staticconvert/internal/build/github_exporter.go b/internal/converter/internal/staticconvert/internal/build/github_exporter.go index 860f0bde0d..eadf179377 100644 --- a/internal/converter/internal/staticconvert/internal/build/github_exporter.go +++ b/internal/converter/internal/staticconvert/internal/build/github_exporter.go @@ -1,9 +1,9 @@ package build import ( - "github.com/grafana/agent/internal/component/discovery" - "github.com/grafana/agent/internal/component/prometheus/exporter/github" - "github.com/grafana/agent/internal/static/integrations/github_exporter" + "github.com/grafana/alloy/internal/component/discovery" + "github.com/grafana/alloy/internal/component/prometheus/exporter/github" + "github.com/grafana/alloy/internal/static/integrations/github_exporter" "github.com/grafana/alloy/syntax/alloytypes" ) diff --git a/internal/converter/internal/staticconvert/internal/build/global_context.go b/internal/converter/internal/staticconvert/internal/build/global_context.go index 9ffaceaa6a..0f59ef5e0e 100644 --- a/internal/converter/internal/staticconvert/internal/build/global_context.go +++ b/internal/converter/internal/staticconvert/internal/build/global_context.go @@ -1,8 +1,8 @@ package build import ( - "github.com/grafana/agent/internal/component/prometheus/remotewrite" - "github.com/grafana/agent/internal/converter/internal/common" + 
"github.com/grafana/alloy/internal/component/prometheus/remotewrite" + "github.com/grafana/alloy/internal/converter/internal/common" ) type GlobalContext struct { diff --git a/internal/converter/internal/staticconvert/internal/build/kafka_exporter.go b/internal/converter/internal/staticconvert/internal/build/kafka_exporter.go index 4842e936b7..89e0cf572a 100644 --- a/internal/converter/internal/staticconvert/internal/build/kafka_exporter.go +++ b/internal/converter/internal/staticconvert/internal/build/kafka_exporter.go @@ -1,9 +1,9 @@ package build import ( - "github.com/grafana/agent/internal/component/discovery" - "github.com/grafana/agent/internal/component/prometheus/exporter/kafka" - "github.com/grafana/agent/internal/static/integrations/kafka_exporter" + "github.com/grafana/alloy/internal/component/discovery" + "github.com/grafana/alloy/internal/component/prometheus/exporter/kafka" + "github.com/grafana/alloy/internal/static/integrations/kafka_exporter" "github.com/grafana/alloy/syntax/alloytypes" ) diff --git a/internal/converter/internal/staticconvert/internal/build/memcached_exporter.go b/internal/converter/internal/staticconvert/internal/build/memcached_exporter.go index fd9b428aab..23c3b136e7 100644 --- a/internal/converter/internal/staticconvert/internal/build/memcached_exporter.go +++ b/internal/converter/internal/staticconvert/internal/build/memcached_exporter.go @@ -1,10 +1,10 @@ package build import ( - "github.com/grafana/agent/internal/component/discovery" - "github.com/grafana/agent/internal/component/prometheus/exporter/memcached" - "github.com/grafana/agent/internal/converter/internal/common" - "github.com/grafana/agent/internal/static/integrations/memcached_exporter" + "github.com/grafana/alloy/internal/component/discovery" + "github.com/grafana/alloy/internal/component/prometheus/exporter/memcached" + "github.com/grafana/alloy/internal/converter/internal/common" + "github.com/grafana/alloy/internal/static/integrations/memcached_exporter" ) func (b *ConfigBuilder) appendMemcachedExporter(config *memcached_exporter.Config, instanceKey *string) discovery.Exports { diff --git a/internal/converter/internal/staticconvert/internal/build/mongodb_exporter.go b/internal/converter/internal/staticconvert/internal/build/mongodb_exporter.go index 0dfb2328b8..d872ce8f98 100644 --- a/internal/converter/internal/staticconvert/internal/build/mongodb_exporter.go +++ b/internal/converter/internal/staticconvert/internal/build/mongodb_exporter.go @@ -1,9 +1,9 @@ package build import ( - "github.com/grafana/agent/internal/component/discovery" - "github.com/grafana/agent/internal/component/prometheus/exporter/mongodb" - "github.com/grafana/agent/internal/static/integrations/mongodb_exporter" + "github.com/grafana/alloy/internal/component/discovery" + "github.com/grafana/alloy/internal/component/prometheus/exporter/mongodb" + "github.com/grafana/alloy/internal/static/integrations/mongodb_exporter" "github.com/grafana/alloy/syntax/alloytypes" ) diff --git a/internal/converter/internal/staticconvert/internal/build/mssql_exporter.go b/internal/converter/internal/staticconvert/internal/build/mssql_exporter.go index 6f5b4beb85..5151c5e2e0 100644 --- a/internal/converter/internal/staticconvert/internal/build/mssql_exporter.go +++ b/internal/converter/internal/staticconvert/internal/build/mssql_exporter.go @@ -1,9 +1,9 @@ package build import ( - "github.com/grafana/agent/internal/component/discovery" - "github.com/grafana/agent/internal/component/prometheus/exporter/mssql" - mssql_exporter 
"github.com/grafana/agent/internal/static/integrations/mssql" + "github.com/grafana/alloy/internal/component/discovery" + "github.com/grafana/alloy/internal/component/prometheus/exporter/mssql" + mssql_exporter "github.com/grafana/alloy/internal/static/integrations/mssql" "github.com/grafana/alloy/syntax/alloytypes" ) diff --git a/internal/converter/internal/staticconvert/internal/build/mysqld_exporter.go b/internal/converter/internal/staticconvert/internal/build/mysqld_exporter.go index 91cfee1ae0..11ec660854 100644 --- a/internal/converter/internal/staticconvert/internal/build/mysqld_exporter.go +++ b/internal/converter/internal/staticconvert/internal/build/mysqld_exporter.go @@ -1,9 +1,9 @@ package build import ( - "github.com/grafana/agent/internal/component/discovery" - "github.com/grafana/agent/internal/component/prometheus/exporter/mysql" - "github.com/grafana/agent/internal/static/integrations/mysqld_exporter" + "github.com/grafana/alloy/internal/component/discovery" + "github.com/grafana/alloy/internal/component/prometheus/exporter/mysql" + "github.com/grafana/alloy/internal/static/integrations/mysqld_exporter" "github.com/grafana/alloy/syntax/alloytypes" ) diff --git a/internal/converter/internal/staticconvert/internal/build/node_exporter.go b/internal/converter/internal/staticconvert/internal/build/node_exporter.go index 59a4762f30..cd9174a7f4 100644 --- a/internal/converter/internal/staticconvert/internal/build/node_exporter.go +++ b/internal/converter/internal/staticconvert/internal/build/node_exporter.go @@ -1,9 +1,9 @@ package build import ( - "github.com/grafana/agent/internal/component/discovery" - "github.com/grafana/agent/internal/component/prometheus/exporter/unix" - "github.com/grafana/agent/internal/static/integrations/node_exporter" + "github.com/grafana/alloy/internal/component/discovery" + "github.com/grafana/alloy/internal/component/prometheus/exporter/unix" + "github.com/grafana/alloy/internal/static/integrations/node_exporter" ) func (b *ConfigBuilder) appendNodeExporter(config *node_exporter.Config, instanceKey *string) discovery.Exports { diff --git a/internal/converter/internal/staticconvert/internal/build/oracledb_exporter.go b/internal/converter/internal/staticconvert/internal/build/oracledb_exporter.go index b1bd031ce1..a5b41c4f73 100644 --- a/internal/converter/internal/staticconvert/internal/build/oracledb_exporter.go +++ b/internal/converter/internal/staticconvert/internal/build/oracledb_exporter.go @@ -1,9 +1,9 @@ package build import ( - "github.com/grafana/agent/internal/component/discovery" - "github.com/grafana/agent/internal/component/prometheus/exporter/oracledb" - "github.com/grafana/agent/internal/static/integrations/oracledb_exporter" + "github.com/grafana/alloy/internal/component/discovery" + "github.com/grafana/alloy/internal/component/prometheus/exporter/oracledb" + "github.com/grafana/alloy/internal/static/integrations/oracledb_exporter" "github.com/grafana/alloy/syntax/alloytypes" ) diff --git a/internal/converter/internal/staticconvert/internal/build/postgres_exporter.go b/internal/converter/internal/staticconvert/internal/build/postgres_exporter.go index 13bc519b4a..8689bcdfe8 100644 --- a/internal/converter/internal/staticconvert/internal/build/postgres_exporter.go +++ b/internal/converter/internal/staticconvert/internal/build/postgres_exporter.go @@ -1,9 +1,9 @@ package build import ( - "github.com/grafana/agent/internal/component/discovery" - "github.com/grafana/agent/internal/component/prometheus/exporter/postgres" - 
"github.com/grafana/agent/internal/static/integrations/postgres_exporter" + "github.com/grafana/alloy/internal/component/discovery" + "github.com/grafana/alloy/internal/component/prometheus/exporter/postgres" + "github.com/grafana/alloy/internal/static/integrations/postgres_exporter" "github.com/grafana/alloy/syntax/alloytypes" ) diff --git a/internal/converter/internal/staticconvert/internal/build/process_exporter.go b/internal/converter/internal/staticconvert/internal/build/process_exporter.go index d8136cfa55..8474b7be7b 100644 --- a/internal/converter/internal/staticconvert/internal/build/process_exporter.go +++ b/internal/converter/internal/staticconvert/internal/build/process_exporter.go @@ -1,9 +1,9 @@ package build import ( - "github.com/grafana/agent/internal/component/discovery" - "github.com/grafana/agent/internal/component/prometheus/exporter/process" - "github.com/grafana/agent/internal/static/integrations/process_exporter" + "github.com/grafana/alloy/internal/component/discovery" + "github.com/grafana/alloy/internal/component/prometheus/exporter/process" + "github.com/grafana/alloy/internal/static/integrations/process_exporter" ) func (b *ConfigBuilder) appendProcessExporter(config *process_exporter.Config, instanceKey *string) discovery.Exports { diff --git a/internal/converter/internal/staticconvert/internal/build/redis_exporter.go b/internal/converter/internal/staticconvert/internal/build/redis_exporter.go index 4d77d009c6..f8975ac340 100644 --- a/internal/converter/internal/staticconvert/internal/build/redis_exporter.go +++ b/internal/converter/internal/staticconvert/internal/build/redis_exporter.go @@ -1,9 +1,9 @@ package build import ( - "github.com/grafana/agent/internal/component/discovery" - "github.com/grafana/agent/internal/component/prometheus/exporter/redis" - "github.com/grafana/agent/internal/static/integrations/redis_exporter" + "github.com/grafana/alloy/internal/component/discovery" + "github.com/grafana/alloy/internal/component/prometheus/exporter/redis" + "github.com/grafana/alloy/internal/static/integrations/redis_exporter" "github.com/grafana/alloy/syntax/alloytypes" ) diff --git a/internal/converter/internal/staticconvert/internal/build/self_exporter.go b/internal/converter/internal/staticconvert/internal/build/self_exporter.go index 31e7b50551..3c2cea7867 100644 --- a/internal/converter/internal/staticconvert/internal/build/self_exporter.go +++ b/internal/converter/internal/staticconvert/internal/build/self_exporter.go @@ -1,10 +1,10 @@ package build import ( - "github.com/grafana/agent/internal/component/discovery" - "github.com/grafana/agent/internal/component/prometheus/exporter/self" - agent_exporter "github.com/grafana/agent/internal/static/integrations/agent" - agent_exporter_v2 "github.com/grafana/agent/internal/static/integrations/v2/agent" + "github.com/grafana/alloy/internal/component/discovery" + "github.com/grafana/alloy/internal/component/prometheus/exporter/self" + agent_exporter "github.com/grafana/alloy/internal/static/integrations/agent" + agent_exporter_v2 "github.com/grafana/alloy/internal/static/integrations/v2/agent" ) func (b *ConfigBuilder) appendAgentExporter(config *agent_exporter.Config) discovery.Exports { diff --git a/internal/converter/internal/staticconvert/internal/build/snmp_exporter.go b/internal/converter/internal/staticconvert/internal/build/snmp_exporter.go index 1f58d1097e..29004aa9bb 100644 --- a/internal/converter/internal/staticconvert/internal/build/snmp_exporter.go +++ 
b/internal/converter/internal/staticconvert/internal/build/snmp_exporter.go @@ -1,11 +1,11 @@ package build import ( - "github.com/grafana/agent/internal/component/discovery" - "github.com/grafana/agent/internal/component/prometheus/exporter/snmp" - "github.com/grafana/agent/internal/converter/internal/common" - "github.com/grafana/agent/internal/static/integrations/snmp_exporter" - snmp_exporter_v2 "github.com/grafana/agent/internal/static/integrations/v2/snmp_exporter" + "github.com/grafana/alloy/internal/component/discovery" + "github.com/grafana/alloy/internal/component/prometheus/exporter/snmp" + "github.com/grafana/alloy/internal/converter/internal/common" + "github.com/grafana/alloy/internal/static/integrations/snmp_exporter" + snmp_exporter_v2 "github.com/grafana/alloy/internal/static/integrations/v2/snmp_exporter" "github.com/grafana/alloy/syntax/alloytypes" snmp_config "github.com/prometheus/snmp_exporter/config" ) diff --git a/internal/converter/internal/staticconvert/internal/build/snowflake_exporter.go b/internal/converter/internal/staticconvert/internal/build/snowflake_exporter.go index 16eff2c7d7..66858b0a01 100644 --- a/internal/converter/internal/staticconvert/internal/build/snowflake_exporter.go +++ b/internal/converter/internal/staticconvert/internal/build/snowflake_exporter.go @@ -1,9 +1,9 @@ package build import ( - "github.com/grafana/agent/internal/component/discovery" - "github.com/grafana/agent/internal/component/prometheus/exporter/snowflake" - "github.com/grafana/agent/internal/static/integrations/snowflake_exporter" + "github.com/grafana/alloy/internal/component/discovery" + "github.com/grafana/alloy/internal/component/prometheus/exporter/snowflake" + "github.com/grafana/alloy/internal/static/integrations/snowflake_exporter" "github.com/grafana/alloy/syntax/alloytypes" ) diff --git a/internal/converter/internal/staticconvert/internal/build/squid_exporter.go b/internal/converter/internal/staticconvert/internal/build/squid_exporter.go index 5e485b4972..6ce80e8607 100644 --- a/internal/converter/internal/staticconvert/internal/build/squid_exporter.go +++ b/internal/converter/internal/staticconvert/internal/build/squid_exporter.go @@ -1,9 +1,9 @@ package build import ( - "github.com/grafana/agent/internal/component/discovery" - "github.com/grafana/agent/internal/component/prometheus/exporter/squid" - "github.com/grafana/agent/internal/static/integrations/squid_exporter" + "github.com/grafana/alloy/internal/component/discovery" + "github.com/grafana/alloy/internal/component/prometheus/exporter/squid" + "github.com/grafana/alloy/internal/static/integrations/squid_exporter" "github.com/grafana/alloy/syntax/alloytypes" ) diff --git a/internal/converter/internal/staticconvert/internal/build/statsd_exporter.go b/internal/converter/internal/staticconvert/internal/build/statsd_exporter.go index 78aca3ec37..3ec90e3009 100644 --- a/internal/converter/internal/staticconvert/internal/build/statsd_exporter.go +++ b/internal/converter/internal/staticconvert/internal/build/statsd_exporter.go @@ -1,10 +1,10 @@ package build import ( - "github.com/grafana/agent/internal/component/discovery" - "github.com/grafana/agent/internal/component/prometheus/exporter/statsd" - "github.com/grafana/agent/internal/converter/diag" - "github.com/grafana/agent/internal/static/integrations/statsd_exporter" + "github.com/grafana/alloy/internal/component/discovery" + "github.com/grafana/alloy/internal/component/prometheus/exporter/statsd" + "github.com/grafana/alloy/internal/converter/diag" + 
"github.com/grafana/alloy/internal/static/integrations/statsd_exporter" ) func (b *ConfigBuilder) appendStatsdExporter(config *statsd_exporter.Config, instanceKey *string) discovery.Exports { diff --git a/internal/converter/internal/staticconvert/internal/build/windows_exporter.go b/internal/converter/internal/staticconvert/internal/build/windows_exporter.go index 079c68d489..a2e1e98853 100644 --- a/internal/converter/internal/staticconvert/internal/build/windows_exporter.go +++ b/internal/converter/internal/staticconvert/internal/build/windows_exporter.go @@ -3,9 +3,9 @@ package build import ( "strings" - "github.com/grafana/agent/internal/component/discovery" - "github.com/grafana/agent/internal/component/prometheus/exporter/windows" - "github.com/grafana/agent/internal/static/integrations/windows_exporter" + "github.com/grafana/alloy/internal/component/discovery" + "github.com/grafana/alloy/internal/component/prometheus/exporter/windows" + "github.com/grafana/alloy/internal/static/integrations/windows_exporter" ) func (b *ConfigBuilder) appendWindowsExporter(config *windows_exporter.Config, instanceKey *string) discovery.Exports { diff --git a/internal/converter/internal/staticconvert/staticconvert.go b/internal/converter/internal/staticconvert/staticconvert.go index f2529386e3..844537200b 100644 --- a/internal/converter/internal/staticconvert/staticconvert.go +++ b/internal/converter/internal/staticconvert/staticconvert.go @@ -5,14 +5,14 @@ import ( "flag" "fmt" - "github.com/grafana/agent/internal/component/discovery" - "github.com/grafana/agent/internal/converter/diag" - "github.com/grafana/agent/internal/converter/internal/common" - "github.com/grafana/agent/internal/converter/internal/prometheusconvert" - "github.com/grafana/agent/internal/converter/internal/promtailconvert" - "github.com/grafana/agent/internal/converter/internal/staticconvert/internal/build" - "github.com/grafana/agent/internal/static/config" - "github.com/grafana/agent/internal/static/logs" + "github.com/grafana/alloy/internal/component/discovery" + "github.com/grafana/alloy/internal/converter/diag" + "github.com/grafana/alloy/internal/converter/internal/common" + "github.com/grafana/alloy/internal/converter/internal/prometheusconvert" + "github.com/grafana/alloy/internal/converter/internal/promtailconvert" + "github.com/grafana/alloy/internal/converter/internal/staticconvert/internal/build" + "github.com/grafana/alloy/internal/static/config" + "github.com/grafana/alloy/internal/static/logs" "github.com/grafana/alloy/syntax/scanner" "github.com/grafana/alloy/syntax/token/builder" promtail_config "github.com/grafana/loki/clients/pkg/promtail/config" @@ -20,7 +20,7 @@ import ( "github.com/grafana/loki/clients/pkg/promtail/targets/file" prom_config "github.com/prometheus/prometheus/config" - _ "github.com/grafana/agent/internal/static/integrations/install" // Install integrations + _ "github.com/grafana/alloy/internal/static/integrations/install" // Install integrations ) // Convert implements a Static config converter. 
diff --git a/internal/converter/internal/staticconvert/staticconvert_test.go b/internal/converter/internal/staticconvert/staticconvert_test.go
index a38de5d018..c9c2a54346 100644
--- a/internal/converter/internal/staticconvert/staticconvert_test.go
+++ b/internal/converter/internal/staticconvert/staticconvert_test.go
@@ -4,9 +4,9 @@ import (
 	"runtime"
 	"testing"
 
-	"github.com/grafana/agent/internal/converter/internal/staticconvert"
-	"github.com/grafana/agent/internal/converter/internal/test_common"
-	_ "github.com/grafana/agent/internal/static/metrics/instance" // Imported to override default values via the init function.
+	"github.com/grafana/alloy/internal/converter/internal/staticconvert"
+	"github.com/grafana/alloy/internal/converter/internal/test_common"
+	_ "github.com/grafana/alloy/internal/static/metrics/instance" // Imported to override default values via the init function.
 )
 
 func TestConvert(t *testing.T) {
diff --git a/internal/converter/internal/staticconvert/validate.go b/internal/converter/internal/staticconvert/validate.go
index 216b791f59..36e2937528 100644
--- a/internal/converter/internal/staticconvert/validate.go
+++ b/internal/converter/internal/staticconvert/validate.go
@@ -3,49 +3,49 @@ package staticconvert
 import (
 	"fmt"
 
-	"github.com/grafana/agent/internal/converter/diag"
-	"github.com/grafana/agent/internal/converter/internal/common"
-	"github.com/grafana/agent/internal/static/config"
-	v1 "github.com/grafana/agent/internal/static/integrations"
-	agent_exporter "github.com/grafana/agent/internal/static/integrations/agent"
-	"github.com/grafana/agent/internal/static/integrations/apache_http"
-	"github.com/grafana/agent/internal/static/integrations/azure_exporter"
-	"github.com/grafana/agent/internal/static/integrations/blackbox_exporter"
-	"github.com/grafana/agent/internal/static/integrations/cadvisor"
-	"github.com/grafana/agent/internal/static/integrations/cloudwatch_exporter"
-	"github.com/grafana/agent/internal/static/integrations/consul_exporter"
-	"github.com/grafana/agent/internal/static/integrations/dnsmasq_exporter"
-	"github.com/grafana/agent/internal/static/integrations/elasticsearch_exporter"
-	"github.com/grafana/agent/internal/static/integrations/gcp_exporter"
-	"github.com/grafana/agent/internal/static/integrations/github_exporter"
-	"github.com/grafana/agent/internal/static/integrations/kafka_exporter"
-	"github.com/grafana/agent/internal/static/integrations/memcached_exporter"
-	"github.com/grafana/agent/internal/static/integrations/mongodb_exporter"
-	mssql_exporter "github.com/grafana/agent/internal/static/integrations/mssql"
-	"github.com/grafana/agent/internal/static/integrations/mysqld_exporter"
-	"github.com/grafana/agent/internal/static/integrations/node_exporter"
-	"github.com/grafana/agent/internal/static/integrations/oracledb_exporter"
-	"github.com/grafana/agent/internal/static/integrations/postgres_exporter"
-	"github.com/grafana/agent/internal/static/integrations/process_exporter"
-	"github.com/grafana/agent/internal/static/integrations/redis_exporter"
-	"github.com/grafana/agent/internal/static/integrations/snmp_exporter"
-	"github.com/grafana/agent/internal/static/integrations/snowflake_exporter"
-	"github.com/grafana/agent/internal/static/integrations/squid_exporter"
-	"github.com/grafana/agent/internal/static/integrations/statsd_exporter"
-	v2 "github.com/grafana/agent/internal/static/integrations/v2"
-	agent_exporter_v2 "github.com/grafana/agent/internal/static/integrations/v2/agent"
-	apache_exporter_v2 "github.com/grafana/agent/internal/static/integrations/v2/apache_http"
-	app_agent_receiver_v2 "github.com/grafana/agent/internal/static/integrations/v2/app_agent_receiver"
-	blackbox_exporter_v2 "github.com/grafana/agent/internal/static/integrations/v2/blackbox_exporter"
-	eventhandler_v2 "github.com/grafana/agent/internal/static/integrations/v2/eventhandler"
-	metricsutils_v2 "github.com/grafana/agent/internal/static/integrations/v2/metricsutils"
-	snmp_exporter_v2 "github.com/grafana/agent/internal/static/integrations/v2/snmp_exporter"
-	vmware_exporter_v2 "github.com/grafana/agent/internal/static/integrations/v2/vmware_exporter"
-	"github.com/grafana/agent/internal/static/integrations/windows_exporter"
-	"github.com/grafana/agent/internal/static/logs"
-	"github.com/grafana/agent/internal/static/metrics"
-	"github.com/grafana/agent/internal/static/server"
-	"github.com/grafana/agent/internal/static/traces"
+	"github.com/grafana/alloy/internal/converter/diag"
+	"github.com/grafana/alloy/internal/converter/internal/common"
+	"github.com/grafana/alloy/internal/static/config"
+	v1 "github.com/grafana/alloy/internal/static/integrations"
+	agent_exporter "github.com/grafana/alloy/internal/static/integrations/agent"
+	"github.com/grafana/alloy/internal/static/integrations/apache_http"
+	"github.com/grafana/alloy/internal/static/integrations/azure_exporter"
+	"github.com/grafana/alloy/internal/static/integrations/blackbox_exporter"
+	"github.com/grafana/alloy/internal/static/integrations/cadvisor"
+	"github.com/grafana/alloy/internal/static/integrations/cloudwatch_exporter"
+	"github.com/grafana/alloy/internal/static/integrations/consul_exporter"
+	"github.com/grafana/alloy/internal/static/integrations/dnsmasq_exporter"
+	"github.com/grafana/alloy/internal/static/integrations/elasticsearch_exporter"
+	"github.com/grafana/alloy/internal/static/integrations/gcp_exporter"
+	"github.com/grafana/alloy/internal/static/integrations/github_exporter"
+	"github.com/grafana/alloy/internal/static/integrations/kafka_exporter"
+	"github.com/grafana/alloy/internal/static/integrations/memcached_exporter"
+	"github.com/grafana/alloy/internal/static/integrations/mongodb_exporter"
+	mssql_exporter "github.com/grafana/alloy/internal/static/integrations/mssql"
+	"github.com/grafana/alloy/internal/static/integrations/mysqld_exporter"
+	"github.com/grafana/alloy/internal/static/integrations/node_exporter"
+	"github.com/grafana/alloy/internal/static/integrations/oracledb_exporter"
+	"github.com/grafana/alloy/internal/static/integrations/postgres_exporter"
+	"github.com/grafana/alloy/internal/static/integrations/process_exporter"
+	"github.com/grafana/alloy/internal/static/integrations/redis_exporter"
+	"github.com/grafana/alloy/internal/static/integrations/snmp_exporter"
+	"github.com/grafana/alloy/internal/static/integrations/snowflake_exporter"
+	"github.com/grafana/alloy/internal/static/integrations/squid_exporter"
+	"github.com/grafana/alloy/internal/static/integrations/statsd_exporter"
+	v2 "github.com/grafana/alloy/internal/static/integrations/v2"
+	agent_exporter_v2 "github.com/grafana/alloy/internal/static/integrations/v2/agent"
+	apache_exporter_v2 "github.com/grafana/alloy/internal/static/integrations/v2/apache_http"
+	app_agent_receiver_v2 "github.com/grafana/alloy/internal/static/integrations/v2/app_agent_receiver"
+	blackbox_exporter_v2 "github.com/grafana/alloy/internal/static/integrations/v2/blackbox_exporter"
+	eventhandler_v2 "github.com/grafana/alloy/internal/static/integrations/v2/eventhandler"
+	metricsutils_v2 "github.com/grafana/alloy/internal/static/integrations/v2/metricsutils"
+	snmp_exporter_v2 "github.com/grafana/alloy/internal/static/integrations/v2/snmp_exporter"
+	vmware_exporter_v2 "github.com/grafana/alloy/internal/static/integrations/v2/vmware_exporter"
+	"github.com/grafana/alloy/internal/static/integrations/windows_exporter"
+	"github.com/grafana/alloy/internal/static/logs"
+	"github.com/grafana/alloy/internal/static/metrics"
+	"github.com/grafana/alloy/internal/static/server"
+	"github.com/grafana/alloy/internal/static/traces"
 
 	_ "github.com/prometheus/prometheus/discovery/install" // Register Prometheus SDs
 )
diff --git a/internal/converter/internal/test_common/testing.go b/internal/converter/internal/test_common/testing.go
index fec09f30af..f81130d729 100644
--- a/internal/converter/internal/test_common/testing.go
+++ b/internal/converter/internal/test_common/testing.go
@@ -12,14 +12,14 @@ import (
 	"strings"
 	"testing"
 
-	"github.com/grafana/agent/internal/converter/diag"
-	"github.com/grafana/agent/internal/featuregate"
-	"github.com/grafana/agent/internal/flow"
-	"github.com/grafana/agent/internal/flow/logging"
-	"github.com/grafana/agent/internal/service"
-	cluster_service "github.com/grafana/agent/internal/service/cluster"
-	http_service "github.com/grafana/agent/internal/service/http"
-	"github.com/grafana/agent/internal/service/labelstore"
+	"github.com/grafana/alloy/internal/converter/diag"
+	"github.com/grafana/alloy/internal/featuregate"
+	"github.com/grafana/alloy/internal/flow"
+	"github.com/grafana/alloy/internal/flow/logging"
+	"github.com/grafana/alloy/internal/service"
+	cluster_service "github.com/grafana/alloy/internal/service/cluster"
+	http_service "github.com/grafana/alloy/internal/service/http"
+	"github.com/grafana/alloy/internal/service/labelstore"
 	"github.com/prometheus/client_golang/prometheus"
 	"github.com/stretchr/testify/require"
 )
diff --git a/internal/filedetector/detector.go b/internal/filedetector/detector.go
index 7379527fc7..58cbfb6635 100644
--- a/internal/filedetector/detector.go
+++ b/internal/filedetector/detector.go
@@ -9,7 +9,7 @@ import (
 	"github.com/fsnotify/fsnotify"
 	"github.com/go-kit/log"
-	"github.com/grafana/agent/internal/flow/logging/level"
+	"github.com/grafana/alloy/internal/flow/logging/level"
 )
 
 // Detector is used to specify how changes to the file should be detected.
diff --git a/internal/flow/componenttest/componenttest.go b/internal/flow/componenttest/componenttest.go
index be3a117d40..b3b4de7af4 100644
--- a/internal/flow/componenttest/componenttest.go
+++ b/internal/flow/componenttest/componenttest.go
@@ -9,13 +9,13 @@ import (
 	"sync"
 	"time"
 
-	"github.com/grafana/agent/internal/service/labelstore"
+	"github.com/grafana/alloy/internal/service/labelstore"
 	"github.com/prometheus/client_golang/prometheus"
 	"go.uber.org/atomic"
 
 	"github.com/go-kit/log"
-	"github.com/grafana/agent/internal/component"
-	"github.com/grafana/agent/internal/flow/logging"
+	"github.com/grafana/alloy/internal/component"
+	"github.com/grafana/alloy/internal/flow/logging"
 	"go.opentelemetry.io/otel/trace/noop"
 )
diff --git a/internal/flow/componenttest/testfailmodule.go b/internal/flow/componenttest/testfailmodule.go
index ddd5c4ae18..996467a337 100644
--- a/internal/flow/componenttest/testfailmodule.go
+++ b/internal/flow/componenttest/testfailmodule.go
@@ -4,9 +4,9 @@ import (
 	"context"
 	"fmt"
 
-	"github.com/grafana/agent/internal/component"
-	"github.com/grafana/agent/internal/featuregate"
-	mod "github.com/grafana/agent/internal/flow/internal/testcomponents/module"
+	"github.com/grafana/alloy/internal/component"
+	"github.com/grafana/alloy/internal/featuregate"
+	mod "github.com/grafana/alloy/internal/flow/internal/testcomponents/module"
 )
 
 func init() {
diff --git a/internal/flow/declare_test.go b/internal/flow/declare_test.go
index d66505c8fb..20fca527bf 100644
--- a/internal/flow/declare_test.go
+++ b/internal/flow/declare_test.go
@@ -7,11 +7,11 @@ import (
 	"testing"
 	"time"
 
-	"github.com/grafana/agent/internal/featuregate"
-	"github.com/grafana/agent/internal/flow"
-	"github.com/grafana/agent/internal/flow/internal/testcomponents"
-	"github.com/grafana/agent/internal/flow/logging"
-	"github.com/grafana/agent/internal/service"
+	"github.com/grafana/alloy/internal/featuregate"
+	"github.com/grafana/alloy/internal/flow"
+	"github.com/grafana/alloy/internal/flow/internal/testcomponents"
+	"github.com/grafana/alloy/internal/flow/logging"
+	"github.com/grafana/alloy/internal/service"
 	"github.com/stretchr/testify/require"
 )
diff --git a/internal/flow/flow.go b/internal/flow/flow.go
index 7fbc10ac30..2bf4048259 100644
--- a/internal/flow/flow.go
+++ b/internal/flow/flow.go
@@ -51,13 +51,13 @@ import (
 	"sync"
 	"time"
 
-	"github.com/grafana/agent/internal/featuregate"
-	"github.com/grafana/agent/internal/flow/internal/controller"
-	"github.com/grafana/agent/internal/flow/internal/worker"
-	"github.com/grafana/agent/internal/flow/logging"
-	"github.com/grafana/agent/internal/flow/logging/level"
-	"github.com/grafana/agent/internal/flow/tracing"
-	"github.com/grafana/agent/internal/service"
+	"github.com/grafana/alloy/internal/featuregate"
+	"github.com/grafana/alloy/internal/flow/internal/controller"
+	"github.com/grafana/alloy/internal/flow/internal/worker"
+	"github.com/grafana/alloy/internal/flow/logging"
+	"github.com/grafana/alloy/internal/flow/logging/level"
+	"github.com/grafana/alloy/internal/flow/tracing"
+	"github.com/grafana/alloy/internal/service"
 	"github.com/prometheus/client_golang/prometheus"
 	"go.uber.org/atomic"
 )
diff --git a/internal/flow/flow_components.go b/internal/flow/flow_components.go
index d23b1da6b5..63ea62fa89 100644
--- a/internal/flow/flow_components.go
+++ b/internal/flow/flow_components.go
@@ -3,9 +3,9 @@ package flow
 import (
 	"fmt"
 
-	"github.com/grafana/agent/internal/component"
-	"github.com/grafana/agent/internal/flow/internal/controller"
-	"github.com/grafana/agent/internal/flow/internal/dag"
+	"github.com/grafana/alloy/internal/component"
+	"github.com/grafana/alloy/internal/flow/internal/controller"
+	"github.com/grafana/alloy/internal/flow/internal/dag"
 )
 
 // GetComponent implements [component.Provider].
diff --git a/internal/flow/flow_services.go b/internal/flow/flow_services.go
index f55380118c..d04298ef39 100644
--- a/internal/flow/flow_services.go
+++ b/internal/flow/flow_services.go
@@ -3,10 +3,10 @@ package flow
 import (
 	"context"
 
-	"github.com/grafana/agent/internal/flow/internal/controller"
-	"github.com/grafana/agent/internal/flow/internal/dag"
-	"github.com/grafana/agent/internal/flow/internal/worker"
-	"github.com/grafana/agent/internal/service"
+	"github.com/grafana/alloy/internal/flow/internal/controller"
+	"github.com/grafana/alloy/internal/flow/internal/dag"
+	"github.com/grafana/alloy/internal/flow/internal/worker"
+	"github.com/grafana/alloy/internal/service"
 )
 
 // GetServiceConsumers implements [service.Host]. It returns a slice of
diff --git a/internal/flow/flow_services_test.go b/internal/flow/flow_services_test.go
index b6674ad9dc..4ec5d6d359 100644
--- a/internal/flow/flow_services_test.go
+++ b/internal/flow/flow_services_test.go
@@ -5,13 +5,13 @@ import (
 	"testing"
 	"time"
 
-	"github.com/grafana/agent/internal/component"
-	"github.com/grafana/agent/internal/featuregate"
-	"github.com/grafana/agent/internal/flow/internal/controller"
-	"github.com/grafana/agent/internal/flow/internal/testcomponents"
-	"github.com/grafana/agent/internal/flow/internal/testservices"
-	"github.com/grafana/agent/internal/service"
-	"github.com/grafana/agent/internal/util"
+	"github.com/grafana/alloy/internal/component"
+	"github.com/grafana/alloy/internal/featuregate"
+	"github.com/grafana/alloy/internal/flow/internal/controller"
+	"github.com/grafana/alloy/internal/flow/internal/testcomponents"
+	"github.com/grafana/alloy/internal/flow/internal/testservices"
+	"github.com/grafana/alloy/internal/service"
+	"github.com/grafana/alloy/internal/util"
 	"github.com/stretchr/testify/require"
 	"go.uber.org/atomic"
 )
diff --git a/internal/flow/flow_test.go b/internal/flow/flow_test.go
index d750568303..7a29e48d36 100644
--- a/internal/flow/flow_test.go
+++ b/internal/flow/flow_test.go
@@ -5,12 +5,12 @@ import (
 	"os"
 	"testing"
 
-	"github.com/grafana/agent/internal/component"
-	"github.com/grafana/agent/internal/featuregate"
-	"github.com/grafana/agent/internal/flow/internal/controller"
-	"github.com/grafana/agent/internal/flow/internal/dag"
-	"github.com/grafana/agent/internal/flow/internal/testcomponents"
-	"github.com/grafana/agent/internal/flow/logging"
+	"github.com/grafana/alloy/internal/component"
+	"github.com/grafana/alloy/internal/featuregate"
+	"github.com/grafana/alloy/internal/flow/internal/controller"
+	"github.com/grafana/alloy/internal/flow/internal/dag"
+	"github.com/grafana/alloy/internal/flow/internal/testcomponents"
+	"github.com/grafana/alloy/internal/flow/logging"
 	"github.com/stretchr/testify/require"
 	"go.uber.org/goleak"
 )
diff --git a/internal/flow/flow_updates_test.go b/internal/flow/flow_updates_test.go
index 4eb8935478..fa8efdd915 100644
--- a/internal/flow/flow_updates_test.go
+++ b/internal/flow/flow_updates_test.go
@@ -5,8 +5,8 @@ import (
 	"testing"
 	"time"
 
-	"github.com/grafana/agent/internal/flow/internal/testcomponents"
-	"github.com/grafana/agent/internal/flow/internal/worker"
+	"github.com/grafana/alloy/internal/flow/internal/testcomponents"
+	"github.com/grafana/alloy/internal/flow/internal/worker"
"github.com/stretchr/testify/require" ) diff --git a/internal/flow/import_test.go b/internal/flow/import_test.go index 89fddafc46..707c203867 100644 --- a/internal/flow/import_test.go +++ b/internal/flow/import_test.go @@ -10,15 +10,15 @@ import ( "testing" "time" - "github.com/grafana/agent/internal/featuregate" - "github.com/grafana/agent/internal/flow" - "github.com/grafana/agent/internal/flow/internal/testcomponents" - "github.com/grafana/agent/internal/flow/logging" - "github.com/grafana/agent/internal/service" + "github.com/grafana/alloy/internal/featuregate" + "github.com/grafana/alloy/internal/flow" + "github.com/grafana/alloy/internal/flow/internal/testcomponents" + "github.com/grafana/alloy/internal/flow/logging" + "github.com/grafana/alloy/internal/service" "github.com/stretchr/testify/require" "golang.org/x/tools/txtar" - _ "github.com/grafana/agent/internal/flow/internal/testcomponents/module/string" + _ "github.com/grafana/alloy/internal/flow/internal/testcomponents/module/string" ) // use const to avoid lint error diff --git a/internal/flow/internal/controller/block_node.go b/internal/flow/internal/controller/block_node.go index b43e639a5b..83d0ed1263 100644 --- a/internal/flow/internal/controller/block_node.go +++ b/internal/flow/internal/controller/block_node.go @@ -1,7 +1,7 @@ package controller import ( - "github.com/grafana/agent/internal/flow/internal/dag" + "github.com/grafana/alloy/internal/flow/internal/dag" "github.com/grafana/alloy/syntax/ast" "github.com/grafana/alloy/syntax/vm" ) diff --git a/internal/flow/internal/controller/component_node.go b/internal/flow/internal/controller/component_node.go index 1deb18df35..8c797b6100 100644 --- a/internal/flow/internal/controller/component_node.go +++ b/internal/flow/internal/controller/component_node.go @@ -1,7 +1,7 @@ package controller import ( - "github.com/grafana/agent/internal/component" + "github.com/grafana/alloy/internal/component" ) // ComponentNode is a generic representation of a Flow component. diff --git a/internal/flow/internal/controller/component_references.go b/internal/flow/internal/controller/component_references.go index c767535d96..7f505a0b35 100644 --- a/internal/flow/internal/controller/component_references.go +++ b/internal/flow/internal/controller/component_references.go @@ -3,7 +3,7 @@ package controller import ( "fmt" - "github.com/grafana/agent/internal/flow/internal/dag" + "github.com/grafana/alloy/internal/flow/internal/dag" "github.com/grafana/alloy/syntax/ast" "github.com/grafana/alloy/syntax/diag" "github.com/grafana/alloy/syntax/vm" diff --git a/internal/flow/internal/controller/component_registry.go b/internal/flow/internal/controller/component_registry.go index f5359e6308..df75f1655e 100644 --- a/internal/flow/internal/controller/component_registry.go +++ b/internal/flow/internal/controller/component_registry.go @@ -3,8 +3,8 @@ package controller import ( "fmt" - "github.com/grafana/agent/internal/component" - "github.com/grafana/agent/internal/featuregate" + "github.com/grafana/alloy/internal/component" + "github.com/grafana/alloy/internal/featuregate" ) // ComponentRegistry is a collection of registered components. 
diff --git a/internal/flow/internal/controller/loader.go b/internal/flow/internal/controller/loader.go
index b265eb9408..d99a9c5256 100644
--- a/internal/flow/internal/controller/loader.go
+++ b/internal/flow/internal/controller/loader.go
@@ -10,12 +10,12 @@ import (
 	"time"
 
 	"github.com/go-kit/log"
-	"github.com/grafana/agent/internal/featuregate"
-	"github.com/grafana/agent/internal/flow/internal/dag"
-	"github.com/grafana/agent/internal/flow/internal/worker"
-	"github.com/grafana/agent/internal/flow/logging/level"
-	"github.com/grafana/agent/internal/flow/tracing"
-	"github.com/grafana/agent/internal/service"
+	"github.com/grafana/alloy/internal/featuregate"
+	"github.com/grafana/alloy/internal/flow/internal/dag"
+	"github.com/grafana/alloy/internal/flow/internal/worker"
+	"github.com/grafana/alloy/internal/flow/logging/level"
+	"github.com/grafana/alloy/internal/flow/tracing"
+	"github.com/grafana/alloy/internal/service"
 	"github.com/grafana/alloy/syntax/ast"
 	"github.com/grafana/alloy/syntax/diag"
 	"github.com/grafana/dskit/backoff"
diff --git a/internal/flow/internal/controller/loader_test.go b/internal/flow/internal/controller/loader_test.go
index 1fdd42b0ee..dec59e98ce 100644
--- a/internal/flow/internal/controller/loader_test.go
+++ b/internal/flow/internal/controller/loader_test.go
@@ -7,12 +7,12 @@ import (
 	"strings"
 	"testing"
 
-	"github.com/grafana/agent/internal/component"
-	"github.com/grafana/agent/internal/featuregate"
-	"github.com/grafana/agent/internal/flow/internal/controller"
-	"github.com/grafana/agent/internal/flow/internal/dag"
-	"github.com/grafana/agent/internal/flow/logging"
-	"github.com/grafana/agent/internal/service"
+	"github.com/grafana/alloy/internal/component"
+	"github.com/grafana/alloy/internal/featuregate"
+	"github.com/grafana/alloy/internal/flow/internal/controller"
+	"github.com/grafana/alloy/internal/flow/internal/dag"
+	"github.com/grafana/alloy/internal/flow/logging"
+	"github.com/grafana/alloy/internal/service"
 	"github.com/grafana/alloy/syntax/ast"
 	"github.com/grafana/alloy/syntax/diag"
 	"github.com/grafana/alloy/syntax/parser"
@@ -20,7 +20,7 @@ import (
 	"github.com/stretchr/testify/require"
 	"go.opentelemetry.io/otel/trace/noop"
 
-	_ "github.com/grafana/agent/internal/flow/internal/testcomponents" // Include test components
+	_ "github.com/grafana/alloy/internal/flow/internal/testcomponents" // Include test components
 )
 
 func TestLoader(t *testing.T) {
diff --git a/internal/flow/internal/controller/module.go b/internal/flow/internal/controller/module.go
index 25a639e399..b514b65e5e 100644
--- a/internal/flow/internal/controller/module.go
+++ b/internal/flow/internal/controller/module.go
@@ -3,7 +3,7 @@ package controller
 import (
 	"context"
 
-	"github.com/grafana/agent/internal/component"
+	"github.com/grafana/alloy/internal/component"
 	"github.com/grafana/alloy/syntax/ast"
 )
diff --git a/internal/flow/internal/controller/node_builtin_component.go b/internal/flow/internal/controller/node_builtin_component.go
index 2ba6a9b158..313a77f9a5 100644
--- a/internal/flow/internal/controller/node_builtin_component.go
+++ b/internal/flow/internal/controller/node_builtin_component.go
@@ -13,11 +13,11 @@ import (
 	"time"
 
 	"github.com/go-kit/log"
-	"github.com/grafana/agent/internal/component"
-	"github.com/grafana/agent/internal/featuregate"
-	"github.com/grafana/agent/internal/flow/logging"
-	"github.com/grafana/agent/internal/flow/logging/level"
-	"github.com/grafana/agent/internal/flow/tracing"
+	"github.com/grafana/alloy/internal/component"
+	"github.com/grafana/alloy/internal/featuregate"
+	"github.com/grafana/alloy/internal/flow/logging"
+	"github.com/grafana/alloy/internal/flow/logging/level"
+	"github.com/grafana/alloy/internal/flow/tracing"
 	"github.com/grafana/alloy/syntax/ast"
 	"github.com/grafana/alloy/syntax/vm"
 	"github.com/prometheus/client_golang/prometheus"
diff --git a/internal/flow/internal/controller/node_builtin_component_test.go b/internal/flow/internal/controller/node_builtin_component_test.go
index 3be8307dd8..feb87de377 100644
--- a/internal/flow/internal/controller/node_builtin_component_test.go
+++ b/internal/flow/internal/controller/node_builtin_component_test.go
@@ -4,7 +4,7 @@ import (
 	"path/filepath"
 	"testing"
 
-	"github.com/grafana/agent/internal/featuregate"
+	"github.com/grafana/alloy/internal/featuregate"
 	"github.com/stretchr/testify/require"
 )
diff --git a/internal/flow/internal/controller/node_config.go b/internal/flow/internal/controller/node_config.go
index 8bd233f517..f10d02a899 100644
--- a/internal/flow/internal/controller/node_config.go
+++ b/internal/flow/internal/controller/node_config.go
@@ -3,7 +3,7 @@ package controller
 import (
 	"fmt"
 
-	"github.com/grafana/agent/internal/flow/internal/importsource"
+	"github.com/grafana/alloy/internal/flow/internal/importsource"
 	"github.com/grafana/alloy/syntax/ast"
 	"github.com/grafana/alloy/syntax/diag"
 )
diff --git a/internal/flow/internal/controller/node_config_import.go b/internal/flow/internal/controller/node_config_import.go
index 25efab3f86..4c268b5857 100644
--- a/internal/flow/internal/controller/node_config_import.go
+++ b/internal/flow/internal/controller/node_config_import.go
@@ -14,11 +14,11 @@ import (
 	"go.uber.org/atomic"
 
 	"github.com/go-kit/log"
-	"github.com/grafana/agent/internal/component"
-	"github.com/grafana/agent/internal/flow/internal/importsource"
-	"github.com/grafana/agent/internal/flow/logging/level"
-	"github.com/grafana/agent/internal/flow/tracing"
-	"github.com/grafana/agent/internal/runner"
+	"github.com/grafana/alloy/internal/component"
+	"github.com/grafana/alloy/internal/flow/internal/importsource"
+	"github.com/grafana/alloy/internal/flow/logging/level"
+	"github.com/grafana/alloy/internal/flow/tracing"
+	"github.com/grafana/alloy/internal/runner"
 	"github.com/grafana/alloy/syntax/ast"
 	"github.com/grafana/alloy/syntax/parser"
 	"github.com/grafana/alloy/syntax/vm"
diff --git a/internal/flow/internal/controller/node_config_logging.go b/internal/flow/internal/controller/node_config_logging.go
index dd923e01fd..e18532988b 100644
--- a/internal/flow/internal/controller/node_config_logging.go
+++ b/internal/flow/internal/controller/node_config_logging.go
@@ -6,7 +6,7 @@ import (
 	"sync"
 
 	"github.com/go-kit/log"
-	"github.com/grafana/agent/internal/flow/logging"
+	"github.com/grafana/alloy/internal/flow/logging"
 	"github.com/grafana/alloy/syntax/ast"
 	"github.com/grafana/alloy/syntax/vm"
 )
diff --git a/internal/flow/internal/controller/node_config_tracing.go b/internal/flow/internal/controller/node_config_tracing.go
index 8b4fe17ddf..c87fa4dfa9 100644
--- a/internal/flow/internal/controller/node_config_tracing.go
+++ b/internal/flow/internal/controller/node_config_tracing.go
@@ -5,7 +5,7 @@ import (
 	"strings"
 	"sync"
 
-	"github.com/grafana/agent/internal/flow/tracing"
+	"github.com/grafana/alloy/internal/flow/tracing"
 	"github.com/grafana/alloy/syntax/ast"
 	"github.com/grafana/alloy/syntax/vm"
 	"go.opentelemetry.io/otel/trace"
diff --git a/internal/flow/internal/controller/node_custom_component.go b/internal/flow/internal/controller/node_custom_component.go
index d53457c6e3..d8a50d94a6 100644
--- a/internal/flow/internal/controller/node_custom_component.go
+++ b/internal/flow/internal/controller/node_custom_component.go
@@ -10,8 +10,8 @@ import (
 	"time"
 
 	"github.com/go-kit/log"
-	"github.com/grafana/agent/internal/component"
-	"github.com/grafana/agent/internal/flow/logging/level"
+	"github.com/grafana/alloy/internal/component"
+	"github.com/grafana/alloy/internal/flow/logging/level"
 	"github.com/grafana/alloy/syntax/ast"
 	"github.com/grafana/alloy/syntax/vm"
 )
diff --git a/internal/flow/internal/controller/node_service.go b/internal/flow/internal/controller/node_service.go
index c5f2bfc034..504b647e63 100644
--- a/internal/flow/internal/controller/node_service.go
+++ b/internal/flow/internal/controller/node_service.go
@@ -6,8 +6,8 @@ import (
 	"reflect"
 	"sync"
 
-	"github.com/grafana/agent/internal/component"
-	"github.com/grafana/agent/internal/service"
+	"github.com/grafana/alloy/internal/component"
+	"github.com/grafana/alloy/internal/service"
 	"github.com/grafana/alloy/syntax/ast"
 	"github.com/grafana/alloy/syntax/vm"
 )
diff --git a/internal/flow/internal/controller/scheduler_test.go b/internal/flow/internal/controller/scheduler_test.go
index aaf809a8f0..41561733eb 100644
--- a/internal/flow/internal/controller/scheduler_test.go
+++ b/internal/flow/internal/controller/scheduler_test.go
@@ -5,8 +5,8 @@ import (
 	"sync"
 	"testing"
 
-	"github.com/grafana/agent/internal/component"
-	"github.com/grafana/agent/internal/flow/internal/controller"
+	"github.com/grafana/alloy/internal/component"
+	"github.com/grafana/alloy/internal/flow/internal/controller"
 	"github.com/grafana/alloy/syntax/ast"
 	"github.com/grafana/alloy/syntax/vm"
 	"github.com/stretchr/testify/require"
diff --git a/internal/flow/internal/controller/service_map.go b/internal/flow/internal/controller/service_map.go
index a6dfc55a50..dd7424de57 100644
--- a/internal/flow/internal/controller/service_map.go
+++ b/internal/flow/internal/controller/service_map.go
@@ -1,7 +1,7 @@
 package controller
 
 import (
-	"github.com/grafana/agent/internal/service"
+	"github.com/grafana/alloy/internal/service"
 	"golang.org/x/exp/maps"
 )
diff --git a/internal/flow/internal/controller/value_cache.go b/internal/flow/internal/controller/value_cache.go
index b9e66aca38..76bfd05f7a 100644
--- a/internal/flow/internal/controller/value_cache.go
+++ b/internal/flow/internal/controller/value_cache.go
@@ -4,7 +4,7 @@ import (
 	"reflect"
 	"sync"
 
-	"github.com/grafana/agent/internal/component"
+	"github.com/grafana/alloy/internal/component"
 	"github.com/grafana/alloy/syntax/vm"
 )
diff --git a/internal/flow/internal/importsource/import_file.go b/internal/flow/internal/importsource/import_file.go
index 06e83bf166..a5106a132f 100644
--- a/internal/flow/internal/importsource/import_file.go
+++ b/internal/flow/internal/importsource/import_file.go
@@ -13,9 +13,9 @@ import (
 	"time"
 
 	"github.com/go-kit/log"
-	"github.com/grafana/agent/internal/component"
-	filedetector "github.com/grafana/agent/internal/filedetector"
-	"github.com/grafana/agent/internal/flow/logging/level"
+	"github.com/grafana/alloy/internal/component"
+	filedetector "github.com/grafana/alloy/internal/filedetector"
+	"github.com/grafana/alloy/internal/flow/logging/level"
 	"github.com/grafana/alloy/syntax/vm"
 )
diff --git a/internal/flow/internal/importsource/import_git.go b/internal/flow/internal/importsource/import_git.go
index d0eec75a5e..6bb3a02fea 100644
--- a/internal/flow/internal/importsource/import_git.go
+++ b/internal/flow/internal/importsource/import_git.go
@@ -12,9 +12,9 @@ import (
 
 	"github.com/go-kit/log"
 
-	"github.com/grafana/agent/internal/component"
-	"github.com/grafana/agent/internal/flow/logging/level"
-	"github.com/grafana/agent/internal/vcs"
+	"github.com/grafana/alloy/internal/component"
+	"github.com/grafana/alloy/internal/flow/logging/level"
+	"github.com/grafana/alloy/internal/vcs"
 	"github.com/grafana/alloy/syntax/vm"
 )
diff --git a/internal/flow/internal/importsource/import_http.go b/internal/flow/internal/importsource/import_http.go
index 30c2e19338..383fe85db2 100644
--- a/internal/flow/internal/importsource/import_http.go
+++ b/internal/flow/internal/importsource/import_http.go
@@ -7,9 +7,9 @@ import (
 	"reflect"
 	"time"
 
-	"github.com/grafana/agent/internal/component"
-	common_config "github.com/grafana/agent/internal/component/common/config"
-	remote_http "github.com/grafana/agent/internal/component/remote/http"
+	"github.com/grafana/alloy/internal/component"
+	common_config "github.com/grafana/alloy/internal/component/common/config"
+	remote_http "github.com/grafana/alloy/internal/component/remote/http"
 	"github.com/grafana/alloy/syntax/vm"
 )
diff --git a/internal/flow/internal/importsource/import_source.go b/internal/flow/internal/importsource/import_source.go
index 87faa86f35..99f51c6241 100644
--- a/internal/flow/internal/importsource/import_source.go
+++ b/internal/flow/internal/importsource/import_source.go
@@ -4,7 +4,7 @@ import (
 	"context"
 	"fmt"
 
-	"github.com/grafana/agent/internal/component"
+	"github.com/grafana/alloy/internal/component"
 	"github.com/grafana/alloy/syntax/vm"
 )
diff --git a/internal/flow/internal/importsource/import_string.go b/internal/flow/internal/importsource/import_string.go
index b66970821f..91057f9994 100644
--- a/internal/flow/internal/importsource/import_string.go
+++ b/internal/flow/internal/importsource/import_string.go
@@ -5,7 +5,7 @@ import (
 	"fmt"
 	"reflect"
 
-	"github.com/grafana/agent/internal/component"
+	"github.com/grafana/alloy/internal/component"
 	"github.com/grafana/alloy/syntax/alloytypes"
 	"github.com/grafana/alloy/syntax/vm"
 )
diff --git a/internal/flow/internal/testcomponents/count.go b/internal/flow/internal/testcomponents/count.go
index fb7d81a87f..0cb1f11a43 100644
--- a/internal/flow/internal/testcomponents/count.go
+++ b/internal/flow/internal/testcomponents/count.go
@@ -7,9 +7,9 @@ import (
 	"time"
 
 	"github.com/go-kit/log"
-	"github.com/grafana/agent/internal/component"
-	"github.com/grafana/agent/internal/featuregate"
-	"github.com/grafana/agent/internal/flow/logging/level"
+	"github.com/grafana/alloy/internal/component"
+	"github.com/grafana/alloy/internal/featuregate"
+	"github.com/grafana/alloy/internal/flow/logging/level"
 	"go.uber.org/atomic"
 )
diff --git a/internal/flow/internal/testcomponents/experimental.go b/internal/flow/internal/testcomponents/experimental.go
index 73721d2394..677794a886 100644
--- a/internal/flow/internal/testcomponents/experimental.go
+++ b/internal/flow/internal/testcomponents/experimental.go
@@ -4,8 +4,8 @@ import (
 	"context"
 
 	"github.com/go-kit/log"
-	"github.com/grafana/agent/internal/component"
-	"github.com/grafana/agent/internal/featuregate"
+	"github.com/grafana/alloy/internal/component"
+	"github.com/grafana/alloy/internal/featuregate"
 )
 
 func init() {
diff --git a/internal/flow/internal/testcomponents/fake.go b/internal/flow/internal/testcomponents/fake.go
index c75ea304a8..c94502c7fe 100644
--- 
a/internal/flow/internal/testcomponents/fake.go +++ b/internal/flow/internal/testcomponents/fake.go @@ -3,7 +3,7 @@ package testcomponents import ( "context" - "github.com/grafana/agent/internal/component" + "github.com/grafana/alloy/internal/component" ) // Fake is a fake component instance which invokes fields when its methods are diff --git a/internal/flow/internal/testcomponents/module/file/file.go b/internal/flow/internal/testcomponents/module/file/file.go index bb223f53be..0941df8418 100644 --- a/internal/flow/internal/testcomponents/module/file/file.go +++ b/internal/flow/internal/testcomponents/module/file/file.go @@ -6,10 +6,10 @@ import ( "go.uber.org/atomic" - "github.com/grafana/agent/internal/component" - "github.com/grafana/agent/internal/component/local/file" - "github.com/grafana/agent/internal/featuregate" - "github.com/grafana/agent/internal/flow/internal/testcomponents/module" + "github.com/grafana/alloy/internal/component" + "github.com/grafana/alloy/internal/component/local/file" + "github.com/grafana/alloy/internal/featuregate" + "github.com/grafana/alloy/internal/flow/internal/testcomponents/module" "github.com/grafana/alloy/syntax/alloytypes" ) diff --git a/internal/flow/internal/testcomponents/module/git/git.go b/internal/flow/internal/testcomponents/module/git/git.go index 5fafe6894a..d5e9a2ec4e 100644 --- a/internal/flow/internal/testcomponents/module/git/git.go +++ b/internal/flow/internal/testcomponents/module/git/git.go @@ -10,11 +10,11 @@ import ( "time" "github.com/go-kit/log" - "github.com/grafana/agent/internal/component" - "github.com/grafana/agent/internal/featuregate" - "github.com/grafana/agent/internal/flow/internal/testcomponents/module" - "github.com/grafana/agent/internal/flow/logging/level" - "github.com/grafana/agent/internal/vcs" + "github.com/grafana/alloy/internal/component" + "github.com/grafana/alloy/internal/featuregate" + "github.com/grafana/alloy/internal/flow/internal/testcomponents/module" + "github.com/grafana/alloy/internal/flow/logging/level" + "github.com/grafana/alloy/internal/vcs" ) func init() { diff --git a/internal/flow/internal/testcomponents/module/http/http.go b/internal/flow/internal/testcomponents/module/http/http.go index fd69a6cd64..ffb18d4e17 100644 --- a/internal/flow/internal/testcomponents/module/http/http.go +++ b/internal/flow/internal/testcomponents/module/http/http.go @@ -6,10 +6,10 @@ import ( "go.uber.org/atomic" - "github.com/grafana/agent/internal/component" - remote_http "github.com/grafana/agent/internal/component/remote/http" - "github.com/grafana/agent/internal/featuregate" - "github.com/grafana/agent/internal/flow/internal/testcomponents/module" + "github.com/grafana/alloy/internal/component" + remote_http "github.com/grafana/alloy/internal/component/remote/http" + "github.com/grafana/alloy/internal/featuregate" + "github.com/grafana/alloy/internal/flow/internal/testcomponents/module" "github.com/grafana/alloy/syntax/alloytypes" ) diff --git a/internal/flow/internal/testcomponents/module/module.go b/internal/flow/internal/testcomponents/module/module.go index e83df417a8..2fe70e5e8e 100644 --- a/internal/flow/internal/testcomponents/module/module.go +++ b/internal/flow/internal/testcomponents/module/module.go @@ -7,8 +7,8 @@ import ( "sync" "time" - "github.com/grafana/agent/internal/component" - "github.com/grafana/agent/internal/flow/logging/level" + "github.com/grafana/alloy/internal/component" + "github.com/grafana/alloy/internal/flow/logging/level" ) // ModuleComponent holds the common properties for 
module components. diff --git a/internal/flow/internal/testcomponents/module/string/string.go b/internal/flow/internal/testcomponents/module/string/string.go index e5b92416d8..7012aa1b9a 100644 --- a/internal/flow/internal/testcomponents/module/string/string.go +++ b/internal/flow/internal/testcomponents/module/string/string.go @@ -3,9 +3,9 @@ package string import ( "context" - "github.com/grafana/agent/internal/component" - "github.com/grafana/agent/internal/featuregate" - "github.com/grafana/agent/internal/flow/internal/testcomponents/module" + "github.com/grafana/alloy/internal/component" + "github.com/grafana/alloy/internal/featuregate" + "github.com/grafana/alloy/internal/flow/internal/testcomponents/module" "github.com/grafana/alloy/syntax/alloytypes" ) diff --git a/internal/flow/internal/testcomponents/passthrough.go b/internal/flow/internal/testcomponents/passthrough.go index 4a079a4f53..278f35b578 100644 --- a/internal/flow/internal/testcomponents/passthrough.go +++ b/internal/flow/internal/testcomponents/passthrough.go @@ -5,9 +5,9 @@ import ( "time" "github.com/go-kit/log" - "github.com/grafana/agent/internal/component" - "github.com/grafana/agent/internal/featuregate" - "github.com/grafana/agent/internal/flow/logging/level" + "github.com/grafana/alloy/internal/component" + "github.com/grafana/alloy/internal/featuregate" + "github.com/grafana/alloy/internal/flow/logging/level" ) func init() { diff --git a/internal/flow/internal/testcomponents/sumation.go b/internal/flow/internal/testcomponents/sumation.go index 7df8fb0f2f..c4207c2892 100644 --- a/internal/flow/internal/testcomponents/sumation.go +++ b/internal/flow/internal/testcomponents/sumation.go @@ -4,9 +4,9 @@ import ( "context" "github.com/go-kit/log" - "github.com/grafana/agent/internal/component" - "github.com/grafana/agent/internal/featuregate" - "github.com/grafana/agent/internal/flow/logging/level" + "github.com/grafana/alloy/internal/component" + "github.com/grafana/alloy/internal/featuregate" + "github.com/grafana/alloy/internal/flow/logging/level" "go.uber.org/atomic" ) diff --git a/internal/flow/internal/testcomponents/tick.go b/internal/flow/internal/testcomponents/tick.go index 1e16f60cab..1be12fda39 100644 --- a/internal/flow/internal/testcomponents/tick.go +++ b/internal/flow/internal/testcomponents/tick.go @@ -7,9 +7,9 @@ import ( "time" "github.com/go-kit/log" - "github.com/grafana/agent/internal/component" - "github.com/grafana/agent/internal/featuregate" - "github.com/grafana/agent/internal/flow/logging/level" + "github.com/grafana/alloy/internal/component" + "github.com/grafana/alloy/internal/featuregate" + "github.com/grafana/alloy/internal/flow/logging/level" ) func init() { diff --git a/internal/flow/internal/testservices/fake.go b/internal/flow/internal/testservices/fake.go index 2963f1057b..eae4ad495c 100644 --- a/internal/flow/internal/testservices/fake.go +++ b/internal/flow/internal/testservices/fake.go @@ -3,7 +3,7 @@ package testservices import ( "context" - "github.com/grafana/agent/internal/service" + "github.com/grafana/alloy/internal/service" ) // The Fake service allows injecting custom behavior for interface methods. 
diff --git a/internal/flow/logging/level/level.go b/internal/flow/logging/level/level.go
index d343a1b9da..0cce098d37 100644
--- a/internal/flow/logging/level/level.go
+++ b/internal/flow/logging/level/level.go
@@ -6,7 +6,7 @@ import (
 "github.com/go-kit/log"
 gokitlevel "github.com/go-kit/log/level"
- "github.com/grafana/agent/internal/flow/logging"
+ "github.com/grafana/alloy/internal/flow/logging"
 )
 const (
diff --git a/internal/flow/logging/logger.go b/internal/flow/logging/logger.go
index 39fbab54e8..2a81d57b41 100644
--- a/internal/flow/logging/logger.go
+++ b/internal/flow/logging/logger.go
@@ -8,8 +8,8 @@ import (
 "sync"
 "time"
- "github.com/grafana/agent/internal/component/common/loki"
- "github.com/grafana/agent/internal/slogadapter"
+ "github.com/grafana/alloy/internal/component/common/loki"
+ "github.com/grafana/alloy/internal/slogadapter"
 "github.com/grafana/loki/pkg/logproto"
 "github.com/prometheus/common/model"
 )
diff --git a/internal/flow/logging/logger_test.go b/internal/flow/logging/logger_test.go
index 79c84d6307..cb6823fe4a 100644
--- a/internal/flow/logging/logger_test.go
+++ b/internal/flow/logging/logger_test.go
@@ -11,9 +11,9 @@ import (
 "github.com/go-kit/log"
 gokitlevel "github.com/go-kit/log/level"
- "github.com/grafana/agent/internal/component/common/loki"
- "github.com/grafana/agent/internal/flow/logging"
- flowlevel "github.com/grafana/agent/internal/flow/logging/level"
+ "github.com/grafana/alloy/internal/component/common/loki"
+ "github.com/grafana/alloy/internal/flow/logging"
+ flowlevel "github.com/grafana/alloy/internal/flow/logging/level"
 "github.com/stretchr/testify/require"
 )
diff --git a/internal/flow/logging/options.go b/internal/flow/logging/options.go
index 0fbb576456..1d084bd2e7 100644
--- a/internal/flow/logging/options.go
+++ b/internal/flow/logging/options.go
@@ -6,7 +6,7 @@ import (
 "log/slog"
 "math"
- "github.com/grafana/agent/internal/component/common/loki"
+ "github.com/grafana/alloy/internal/component/common/loki"
 "github.com/grafana/alloy/syntax"
 )
diff --git a/internal/flow/module.go b/internal/flow/module.go
index 3744187beb..9684e631cd 100644
--- a/internal/flow/module.go
+++ b/internal/flow/module.go
@@ -6,13 +6,13 @@ import (
 "path"
 "sync"
- "github.com/grafana/agent/internal/component"
- "github.com/grafana/agent/internal/featuregate"
- "github.com/grafana/agent/internal/flow/internal/controller"
- "github.com/grafana/agent/internal/flow/internal/worker"
- "github.com/grafana/agent/internal/flow/logging"
- "github.com/grafana/agent/internal/flow/logging/level"
- "github.com/grafana/agent/internal/flow/tracing"
+ "github.com/grafana/alloy/internal/component"
+ "github.com/grafana/alloy/internal/featuregate"
+ "github.com/grafana/alloy/internal/flow/internal/controller"
+ "github.com/grafana/alloy/internal/flow/internal/worker"
+ "github.com/grafana/alloy/internal/flow/logging"
+ "github.com/grafana/alloy/internal/flow/logging/level"
+ "github.com/grafana/alloy/internal/flow/tracing"
 "github.com/grafana/alloy/syntax/ast"
 "github.com/grafana/alloy/syntax/scanner"
 "github.com/prometheus/client_golang/prometheus"
diff --git a/internal/flow/module_eval_test.go b/internal/flow/module_eval_test.go
index 7e4c632e1a..0eeb41b58c 100644
--- a/internal/flow/module_eval_test.go
+++ b/internal/flow/module_eval_test.go
@@ -10,21 +10,21 @@ import (
 "testing"
 "time"
- "github.com/grafana/agent/internal/component"
- "github.com/grafana/agent/internal/featuregate"
- "github.com/grafana/agent/internal/flow"
- "github.com/grafana/agent/internal/flow/internal/testcomponents"
- "github.com/grafana/agent/internal/flow/logging"
- "github.com/grafana/agent/internal/service"
- cluster_service "github.com/grafana/agent/internal/service/cluster"
- http_service "github.com/grafana/agent/internal/service/http"
- "github.com/grafana/agent/internal/service/labelstore"
- otel_service "github.com/grafana/agent/internal/service/otel"
+ "github.com/grafana/alloy/internal/component"
+ "github.com/grafana/alloy/internal/featuregate"
+ "github.com/grafana/alloy/internal/flow"
+ "github.com/grafana/alloy/internal/flow/internal/testcomponents"
+ "github.com/grafana/alloy/internal/flow/logging"
+ "github.com/grafana/alloy/internal/service"
+ cluster_service "github.com/grafana/alloy/internal/service/cluster"
+ http_service "github.com/grafana/alloy/internal/service/http"
+ "github.com/grafana/alloy/internal/service/labelstore"
+ otel_service "github.com/grafana/alloy/internal/service/otel"
 "github.com/prometheus/client_golang/prometheus"
 "github.com/stretchr/testify/require"
 "go.uber.org/goleak"
- _ "github.com/grafana/agent/internal/flow/internal/testcomponents/module/string"
+ _ "github.com/grafana/alloy/internal/flow/internal/testcomponents/module/string"
 )
 func TestUpdates_EmptyModule(t *testing.T) {
diff --git a/internal/flow/module_fail_test.go b/internal/flow/module_fail_test.go
index a6ec2eeec6..0c300b71e7 100644
--- a/internal/flow/module_fail_test.go
+++ b/internal/flow/module_fail_test.go
@@ -5,8 +5,8 @@ import (
 "testing"
 "time"
- "github.com/grafana/agent/internal/flow/componenttest"
- "github.com/grafana/agent/internal/flow/internal/controller"
+ "github.com/grafana/alloy/internal/flow/componenttest"
+ "github.com/grafana/alloy/internal/flow/internal/controller"
 "github.com/stretchr/testify/require"
 )
diff --git a/internal/flow/module_test.go b/internal/flow/module_test.go
index 6b2b25981c..3f262e9308 100644
--- a/internal/flow/module_test.go
+++ b/internal/flow/module_test.go
@@ -6,12 +6,12 @@ import (
 "testing"
 "time"
- "github.com/grafana/agent/internal/component"
- "github.com/grafana/agent/internal/featuregate"
- "github.com/grafana/agent/internal/flow/internal/controller"
- "github.com/grafana/agent/internal/flow/internal/worker"
- "github.com/grafana/agent/internal/flow/logging"
- "github.com/grafana/agent/internal/service"
+ "github.com/grafana/alloy/internal/component"
+ "github.com/grafana/alloy/internal/featuregate"
+ "github.com/grafana/alloy/internal/flow/internal/controller"
+ "github.com/grafana/alloy/internal/flow/internal/worker"
+ "github.com/grafana/alloy/internal/flow/logging"
+ "github.com/grafana/alloy/internal/service"
 "github.com/prometheus/client_golang/prometheus"
 "github.com/stretchr/testify/require"
 )
diff --git a/internal/flow/source.go b/internal/flow/source.go
index 2ed0eee186..70953591e1 100644
--- a/internal/flow/source.go
+++ b/internal/flow/source.go
@@ -6,7 +6,7 @@ import (
 "sort"
 "strings"
- "github.com/grafana/agent/internal/static/config/encoder"
+ "github.com/grafana/alloy/internal/static/config/encoder"
 "github.com/grafana/alloy/syntax/ast"
 "github.com/grafana/alloy/syntax/diag"
 "github.com/grafana/alloy/syntax/parser"
diff --git a/internal/flow/source_test.go b/internal/flow/source_test.go
index 054e3bbcec..d453d2b530 100644
--- a/internal/flow/source_test.go
+++ b/internal/flow/source_test.go
@@ -8,7 +8,7 @@ import (
 "github.com/grafana/alloy/syntax/diag"
 "github.com/stretchr/testify/require"
- _ "github.com/grafana/agent/internal/flow/internal/testcomponents" // Include test components
+ _ "github.com/grafana/alloy/internal/flow/internal/testcomponents" // Include test components
 )
 func TestParseSource(t *testing.T) {
diff --git a/internal/flow/tracing/internal/jaegerremote/sampler.go b/internal/flow/tracing/internal/jaegerremote/sampler.go
index 9d24aaa899..a21359cbb5 100644
--- a/internal/flow/tracing/internal/jaegerremote/sampler.go
+++ b/internal/flow/tracing/internal/jaegerremote/sampler.go
@@ -23,7 +23,7 @@ import (
 "math"
 "sync"
- "github.com/grafana/agent/internal/flow/tracing/internal/jaegerremote/utils"
+ "github.com/grafana/alloy/internal/flow/tracing/internal/jaegerremote/utils"
 jaeger_api_v2 "github.com/jaegertracing/jaeger/proto-gen/api_v2"
 "go.opentelemetry.io/otel/sdk/trace"
 oteltrace "go.opentelemetry.io/otel/trace"
diff --git a/internal/flow/tracing/otelcol_client.go b/internal/flow/tracing/otelcol_client.go
index 13d0c6631b..8d9e3163d6 100644
--- a/internal/flow/tracing/otelcol_client.go
+++ b/internal/flow/tracing/otelcol_client.go
@@ -5,7 +5,7 @@ import (
 "fmt"
 "sync"
- "github.com/grafana/agent/internal/component/otelcol"
+ "github.com/grafana/alloy/internal/component/otelcol"
 "github.com/hashicorp/go-multierror"
 "go.opentelemetry.io/collector/pdata/pcommon"
 "go.opentelemetry.io/collector/pdata/ptrace"
diff --git a/internal/flow/tracing/tracing.go b/internal/flow/tracing/tracing.go
index 1da4c26d1b..69b29a41c5 100644
--- a/internal/flow/tracing/tracing.go
+++ b/internal/flow/tracing/tracing.go
@@ -8,9 +8,9 @@ import (
 "sync"
 "time"
- "github.com/grafana/agent/internal/build"
- "github.com/grafana/agent/internal/component/otelcol"
- "github.com/grafana/agent/internal/flow/tracing/internal/jaegerremote"
+ "github.com/grafana/alloy/internal/build"
+ "github.com/grafana/alloy/internal/component/otelcol"
+ "github.com/grafana/alloy/internal/flow/tracing/internal/jaegerremote"
 "go.opentelemetry.io/otel/exporters/otlp/otlptrace"
 "go.opentelemetry.io/otel/sdk/resource"
 tracesdk "go.opentelemetry.io/otel/sdk/trace"
diff --git a/internal/flowmode/cluster_builder.go b/internal/flowmode/cluster_builder.go
index 70422c4574..292f04c947 100644
--- a/internal/flowmode/cluster_builder.go
+++ b/internal/flowmode/cluster_builder.go
@@ -9,8 +9,8 @@ import (
 "time"
 "github.com/go-kit/log"
- "github.com/grafana/agent/internal/flow/logging/level"
- "github.com/grafana/agent/internal/service/cluster"
+ "github.com/grafana/alloy/internal/flow/logging/level"
+ "github.com/grafana/alloy/internal/service/cluster"
 "github.com/grafana/ckit/advertise"
 "github.com/hashicorp/go-discover"
 "github.com/hashicorp/go-discover/provider/k8s"
diff --git a/internal/flowmode/cmd_convert.go b/internal/flowmode/cmd_convert.go
index fa9fe81a95..e7c77b82e4 100644
--- a/internal/flowmode/cmd_convert.go
+++ b/internal/flowmode/cmd_convert.go
@@ -11,8 +11,8 @@ import (
 "github.com/spf13/cobra"
 "github.com/spf13/pflag"
- "github.com/grafana/agent/internal/converter"
- convert_diag "github.com/grafana/agent/internal/converter/diag"
+ "github.com/grafana/alloy/internal/converter"
+ convert_diag "github.com/grafana/alloy/internal/converter/diag"
 "github.com/grafana/alloy/syntax/diag"
 )
diff --git a/internal/flowmode/cmd_run.go b/internal/flowmode/cmd_run.go
index c9d7585e5c..fd4238d7a6 100644
--- a/internal/flowmode/cmd_run.go
+++ b/internal/flowmode/cmd_run.go
@@ -17,24 +17,24 @@ import (
 "github.com/fatih/color"
 "github.com/go-kit/log"
- "github.com/grafana/agent/internal/agentseed"
- "github.com/grafana/agent/internal/boringcrypto"
- "github.com/grafana/agent/internal/component"
- "github.com/grafana/agent/internal/converter"
- convert_diag "github.com/grafana/agent/internal/converter/diag"
- "github.com/grafana/agent/internal/featuregate"
- "github.com/grafana/agent/internal/flow"
- "github.com/grafana/agent/internal/flow/logging"
- "github.com/grafana/agent/internal/flow/logging/level"
- "github.com/grafana/agent/internal/flow/tracing"
- "github.com/grafana/agent/internal/service"
- httpservice "github.com/grafana/agent/internal/service/http"
- "github.com/grafana/agent/internal/service/labelstore"
- otel_service "github.com/grafana/agent/internal/service/otel"
- remotecfgservice "github.com/grafana/agent/internal/service/remotecfg"
- uiservice "github.com/grafana/agent/internal/service/ui"
- "github.com/grafana/agent/internal/static/config/instrumentation"
- "github.com/grafana/agent/internal/usagestats"
+ "github.com/grafana/alloy/internal/agentseed"
+ "github.com/grafana/alloy/internal/boringcrypto"
+ "github.com/grafana/alloy/internal/component"
+ "github.com/grafana/alloy/internal/converter"
+ convert_diag "github.com/grafana/alloy/internal/converter/diag"
+ "github.com/grafana/alloy/internal/featuregate"
+ "github.com/grafana/alloy/internal/flow"
+ "github.com/grafana/alloy/internal/flow/logging"
+ "github.com/grafana/alloy/internal/flow/logging/level"
+ "github.com/grafana/alloy/internal/flow/tracing"
+ "github.com/grafana/alloy/internal/service"
+ httpservice "github.com/grafana/alloy/internal/service/http"
+ "github.com/grafana/alloy/internal/service/labelstore"
+ otel_service "github.com/grafana/alloy/internal/service/otel"
+ remotecfgservice "github.com/grafana/alloy/internal/service/remotecfg"
+ uiservice "github.com/grafana/alloy/internal/service/ui"
+ "github.com/grafana/alloy/internal/static/config/instrumentation"
+ "github.com/grafana/alloy/internal/usagestats"
 "github.com/grafana/alloy/syntax/diag"
 "github.com/grafana/ckit/advertise"
 "github.com/grafana/ckit/peer"
@@ -44,7 +44,7 @@ import (
 "golang.org/x/exp/maps"
 // Install Components
- _ "github.com/grafana/agent/internal/component/all"
+ _ "github.com/grafana/alloy/internal/component/all"
 )
 func runCommand() *cobra.Command {
diff --git a/internal/flowmode/cmd_tools.go b/internal/flowmode/cmd_tools.go
index 7224668ab0..3fdca9d1e0 100644
--- a/internal/flowmode/cmd_tools.go
+++ b/internal/flowmode/cmd_tools.go
@@ -3,7 +3,7 @@ package flowmode
 import (
 "fmt"
- "github.com/grafana/agent/internal/component/prometheus/remotewrite"
+ "github.com/grafana/alloy/internal/component/prometheus/remotewrite"
 "github.com/spf13/cobra"
 )
diff --git a/internal/flowmode/flowmode.go b/internal/flowmode/flowmode.go
index d5edcb48b4..0c588fa15f 100644
--- a/internal/flowmode/flowmode.go
+++ b/internal/flowmode/flowmode.go
@@ -5,7 +5,7 @@ import (
 "fmt"
 "os"
- "github.com/grafana/agent/internal/build"
+ "github.com/grafana/alloy/internal/build"
 "github.com/spf13/cobra"
 )
diff --git a/internal/flowmode/resources_collector.go b/internal/flowmode/resources_collector.go
index f4a1123734..c972c2cd1d 100644
--- a/internal/flowmode/resources_collector.go
+++ b/internal/flowmode/resources_collector.go
@@ -5,7 +5,7 @@ import (
 "time"
 "github.com/go-kit/log"
- "github.com/grafana/agent/internal/flow/logging/level"
+ "github.com/grafana/alloy/internal/flow/logging/level"
 "github.com/prometheus/client_golang/prometheus"
 "github.com/shirou/gopsutil/v3/net"
 "github.com/shirou/gopsutil/v3/process"
diff --git a/internal/loki/client/client.go b/internal/loki/client/client.go
index 46c40bbdfc..d52022939d 100644
--- a/internal/loki/client/client.go
+++ b/internal/loki/client/client.go
@@ -12,7 +12,7 @@ import (
 "strings"
 log "github.com/go-kit/log"
- "github.com/grafana/agent/internal/loki/client/internal"
+ "github.com/grafana/alloy/internal/loki/client/internal"
 "github.com/grafana/dskit/instrument"
 "github.com/grafana/dskit/user"
 "github.com/prometheus/client_golang/prometheus"
diff --git a/internal/mimir/client/client.go b/internal/mimir/client/client.go
index 802c40d6ac..afa2516cf0 100644
--- a/internal/mimir/client/client.go
+++ b/internal/mimir/client/client.go
@@ -12,7 +12,7 @@ import (
 "strings"
 log "github.com/go-kit/log"
- "github.com/grafana/agent/internal/mimir/client/internal"
+ "github.com/grafana/alloy/internal/mimir/client/internal"
 "github.com/grafana/dskit/instrument"
 "github.com/grafana/dskit/user"
 "github.com/prometheus/client_golang/prometheus"
diff --git a/internal/runner/runner_test.go b/internal/runner/runner_test.go
index 60fc84068c..ac636dc3cb 100644
--- a/internal/runner/runner_test.go
+++ b/internal/runner/runner_test.go
@@ -6,8 +6,8 @@ import (
 "time"
 "github.com/cespare/xxhash/v2"
- "github.com/grafana/agent/internal/runner"
- "github.com/grafana/agent/internal/util"
+ "github.com/grafana/alloy/internal/runner"
+ "github.com/grafana/alloy/internal/util"
 "github.com/stretchr/testify/require"
 "go.uber.org/atomic"
 )
diff --git a/internal/service/cluster/cluster.go b/internal/service/cluster/cluster.go
index d98b5faf69..dfc4efe35b 100644
--- a/internal/service/cluster/cluster.go
+++ b/internal/service/cluster/cluster.go
@@ -14,11 +14,11 @@ import (
 "time"
 "github.com/go-kit/log"
- "github.com/grafana/agent/internal/component"
- "github.com/grafana/agent/internal/featuregate"
- "github.com/grafana/agent/internal/flow/logging/level"
- "github.com/grafana/agent/internal/service"
- http_service "github.com/grafana/agent/internal/service/http"
+ "github.com/grafana/alloy/internal/component"
+ "github.com/grafana/alloy/internal/featuregate"
+ "github.com/grafana/alloy/internal/flow/logging/level"
+ "github.com/grafana/alloy/internal/service"
+ http_service "github.com/grafana/alloy/internal/service/http"
 "github.com/grafana/ckit"
 "github.com/grafana/ckit/peer"
 "github.com/grafana/ckit/shard"
diff --git a/internal/service/http/handler.go b/internal/service/http/handler.go
index 98796530dd..434e0ab28d 100644
--- a/internal/service/http/handler.go
+++ b/internal/service/http/handler.go
@@ -6,7 +6,7 @@ import (
 "crypto/tls"
 "fmt"
- "github.com/grafana/agent/internal/static/server"
+ "github.com/grafana/alloy/internal/static/server"
 )
 // tlsConfig generates a tls.Config from args.
diff --git a/internal/service/http/handler_windows.go b/internal/service/http/handler_windows.go
index 2b5cb4efab..6759abae1a 100644
--- a/internal/service/http/handler_windows.go
+++ b/internal/service/http/handler_windows.go
@@ -3,7 +3,7 @@ package http
 import (
 "crypto/tls"
- "github.com/grafana/agent/internal/static/server"
+ "github.com/grafana/alloy/internal/static/server"
 )
 // tlsConfig generates a tls.Config from args.
diff --git a/internal/service/http/http.go b/internal/service/http/http.go
index 516d037a7a..3cdb2549bd 100644
--- a/internal/service/http/http.go
+++ b/internal/service/http/http.go
@@ -15,12 +15,12 @@ import (
 "github.com/go-kit/log"
 "github.com/gorilla/mux"
- "github.com/grafana/agent/internal/component"
- "github.com/grafana/agent/internal/featuregate"
- "github.com/grafana/agent/internal/flow"
- "github.com/grafana/agent/internal/flow/logging/level"
- "github.com/grafana/agent/internal/service"
- "github.com/grafana/agent/internal/static/server"
+ "github.com/grafana/alloy/internal/component"
+ "github.com/grafana/alloy/internal/featuregate"
+ "github.com/grafana/alloy/internal/flow"
+ "github.com/grafana/alloy/internal/flow/logging/level"
+ "github.com/grafana/alloy/internal/service"
+ "github.com/grafana/alloy/internal/static/server"
 "github.com/grafana/ckit/memconn"
 _ "github.com/grafana/pyroscope-go/godeltaprof/http/pprof" // Register godeltaprof handler
 "github.com/prometheus/client_golang/prometheus"
diff --git a/internal/service/http/http_test.go b/internal/service/http/http_test.go
index b34d66f927..a46292f0fc 100644
--- a/internal/service/http/http_test.go
+++ b/internal/service/http/http_test.go
@@ -6,11 +6,11 @@ import (
 "net/http"
 "testing"
- "github.com/grafana/agent/internal/component"
- "github.com/grafana/agent/internal/flow"
- "github.com/grafana/agent/internal/flow/componenttest"
- "github.com/grafana/agent/internal/service"
- "github.com/grafana/agent/internal/util"
+ "github.com/grafana/alloy/internal/component"
+ "github.com/grafana/alloy/internal/flow"
+ "github.com/grafana/alloy/internal/flow/componenttest"
+ "github.com/grafana/alloy/internal/service"
+ "github.com/grafana/alloy/internal/util"
 "github.com/grafana/alloy/syntax"
 "github.com/phayes/freeport"
 "github.com/prometheus/client_golang/prometheus"
diff --git a/internal/service/http/split_path.go b/internal/service/http/split_path.go
index 93cdfbda05..d96834ae77 100644
--- a/internal/service/http/split_path.go
+++ b/internal/service/http/split_path.go
@@ -5,8 +5,8 @@ import (
 "fmt"
 "strings"
- "github.com/grafana/agent/internal/component"
- "github.com/grafana/agent/internal/service"
+ "github.com/grafana/alloy/internal/component"
+ "github.com/grafana/alloy/internal/service"
 )
 // splitURLPath splits a path from a URL into two parts: a component ID and the
diff --git a/internal/service/http/split_path_test.go b/internal/service/http/split_path_test.go
index c580f830dd..5b881ce347 100644
--- a/internal/service/http/split_path_test.go
+++ b/internal/service/http/split_path_test.go
@@ -3,8 +3,8 @@ package http
 import (
 "testing"
- "github.com/grafana/agent/internal/component"
- "github.com/grafana/agent/internal/service"
+ "github.com/grafana/alloy/internal/component"
+ "github.com/grafana/alloy/internal/service"
 "github.com/stretchr/testify/assert"
 "github.com/stretchr/testify/require"
 )
diff --git a/internal/service/labelstore/service.go b/internal/service/labelstore/service.go
index 3a536ff2dc..3749e5b7bf 100644
--- a/internal/service/labelstore/service.go
+++ b/internal/service/labelstore/service.go
@@ -6,10 +6,10 @@ import (
 "time"
 "github.com/go-kit/log"
- "github.com/grafana/agent/internal/featuregate"
- "github.com/grafana/agent/internal/flow/logging/level"
- agent_service "github.com/grafana/agent/internal/service"
- flow_service "github.com/grafana/agent/internal/service"
+ "github.com/grafana/alloy/internal/featuregate"
+ "github.com/grafana/alloy/internal/flow/logging/level"
+ agent_service "github.com/grafana/alloy/internal/service"
+ flow_service "github.com/grafana/alloy/internal/service"
 "github.com/prometheus/client_golang/prometheus"
 "github.com/prometheus/prometheus/model/labels"
 "github.com/prometheus/prometheus/model/value"
diff --git a/internal/service/otel/otel.go b/internal/service/otel/otel.go
index 4713abaeaf..5415277c10 100644
--- a/internal/service/otel/otel.go
+++ b/internal/service/otel/otel.go
@@ -8,9 +8,9 @@ import (
 "fmt"
 "github.com/go-kit/log"
- "github.com/grafana/agent/internal/featuregate"
- "github.com/grafana/agent/internal/service"
- "github.com/grafana/agent/internal/util"
+ "github.com/grafana/alloy/internal/featuregate"
+ "github.com/grafana/alloy/internal/service"
+ "github.com/grafana/alloy/internal/util"
 )
 // ServiceName defines the name used for the otel service.
diff --git a/internal/service/remotecfg/remotecfg.go b/internal/service/remotecfg/remotecfg.go
index 75bafd8071..8d9d7296a5 100644
--- a/internal/service/remotecfg/remotecfg.go
+++ b/internal/service/remotecfg/remotecfg.go
@@ -15,11 +15,11 @@ import (
 "github.com/go-kit/log"
 agentv1 "github.com/grafana/agent-remote-config/api/gen/proto/go/agent/v1"
 "github.com/grafana/agent-remote-config/api/gen/proto/go/agent/v1/agentv1connect"
- "github.com/grafana/agent/internal/agentseed"
- "github.com/grafana/agent/internal/component/common/config"
- "github.com/grafana/agent/internal/featuregate"
- "github.com/grafana/agent/internal/flow/logging/level"
- "github.com/grafana/agent/internal/service"
+ "github.com/grafana/alloy/internal/agentseed"
+ "github.com/grafana/alloy/internal/component/common/config"
+ "github.com/grafana/alloy/internal/featuregate"
+ "github.com/grafana/alloy/internal/flow/logging/level"
+ "github.com/grafana/alloy/internal/service"
 "github.com/grafana/alloy/syntax"
 commonconfig "github.com/prometheus/common/config"
 )
diff --git a/internal/service/remotecfg/remotecfg_test.go b/internal/service/remotecfg/remotecfg_test.go
index e8d8f7c61f..143eb0905d 100644
--- a/internal/service/remotecfg/remotecfg_test.go
+++ b/internal/service/remotecfg/remotecfg_test.go
@@ -11,14 +11,14 @@ import (
 "connectrpc.com/connect"
 agentv1 "github.com/grafana/agent-remote-config/api/gen/proto/go/agent/v1"
- "github.com/grafana/agent/internal/component"
- _ "github.com/grafana/agent/internal/component/loki/process"
- "github.com/grafana/agent/internal/featuregate"
- "github.com/grafana/agent/internal/flow"
- "github.com/grafana/agent/internal/flow/componenttest"
- "github.com/grafana/agent/internal/flow/logging"
- "github.com/grafana/agent/internal/service"
- "github.com/grafana/agent/internal/util"
+ "github.com/grafana/alloy/internal/component"
+ _ "github.com/grafana/alloy/internal/component/loki/process"
+ "github.com/grafana/alloy/internal/featuregate"
+ "github.com/grafana/alloy/internal/flow"
+ "github.com/grafana/alloy/internal/flow/componenttest"
+ "github.com/grafana/alloy/internal/flow/logging"
+ "github.com/grafana/alloy/internal/service"
+ "github.com/grafana/alloy/internal/util"
 "github.com/grafana/alloy/syntax"
 "github.com/prometheus/client_golang/prometheus"
 "github.com/stretchr/testify/assert"
diff --git a/internal/service/service.go b/internal/service/service.go
index 62751d464c..8901947813 100644
--- a/internal/service/service.go
+++ b/internal/service/service.go
@@ -10,8 +10,8 @@ import (
 "context"
 "fmt"
- "github.com/grafana/agent/internal/component"
- "github.com/grafana/agent/internal/featuregate"
+ "github.com/grafana/alloy/internal/component"
+ "github.com/grafana/alloy/internal/featuregate"
 )
 // Definition describes an individual Flow service. Services have unique names
diff --git a/internal/service/ui/ui.go b/internal/service/ui/ui.go
index ebc56943f8..6ba32ef415 100644
--- a/internal/service/ui/ui.go
+++ b/internal/service/ui/ui.go
@@ -8,11 +8,11 @@ import (
 "path"
 "github.com/gorilla/mux"
- "github.com/grafana/agent/internal/featuregate"
- "github.com/grafana/agent/internal/service"
- http_service "github.com/grafana/agent/internal/service/http"
- "github.com/grafana/agent/internal/web/api"
- "github.com/grafana/agent/internal/web/ui"
+ "github.com/grafana/alloy/internal/featuregate"
+ "github.com/grafana/alloy/internal/service"
+ http_service "github.com/grafana/alloy/internal/service/http"
+ "github.com/grafana/alloy/internal/web/api"
+ "github.com/grafana/alloy/internal/web/ui"
 )
 // ServiceName defines the name used for the UI service.
diff --git a/internal/static/config/config.go b/internal/static/config/config.go
index 2ec8f4f590..411360f848 100644
--- a/internal/static/config/config.go
+++ b/internal/static/config/config.go
@@ -10,14 +10,14 @@ import (
 "unicode"
 "github.com/drone/envsubst/v2"
- "github.com/grafana/agent/internal/build"
- "github.com/grafana/agent/internal/static/config/encoder"
- "github.com/grafana/agent/internal/static/config/features"
- "github.com/grafana/agent/internal/static/logs"
- "github.com/grafana/agent/internal/static/metrics"
- "github.com/grafana/agent/internal/static/server"
- "github.com/grafana/agent/internal/static/traces"
- "github.com/grafana/agent/internal/util"
+ "github.com/grafana/alloy/internal/build"
+ "github.com/grafana/alloy/internal/static/config/encoder"
+ "github.com/grafana/alloy/internal/static/config/features"
+ "github.com/grafana/alloy/internal/static/logs"
+ "github.com/grafana/alloy/internal/static/metrics"
+ "github.com/grafana/alloy/internal/static/server"
+ "github.com/grafana/alloy/internal/static/traces"
+ "github.com/grafana/alloy/internal/util"
 "github.com/stretchr/testify/require"
 "gopkg.in/yaml.v2"
 )
diff --git a/internal/static/config/config_test.go b/internal/static/config/config_test.go
index 364f2f2513..7affe45129 100644
--- a/internal/static/config/config_test.go
+++ b/internal/static/config/config_test.go
@@ -10,10 +10,10 @@ import (
 "testing"
 "time"
- "github.com/grafana/agent/internal/static/config/encoder"
- "github.com/grafana/agent/internal/static/metrics"
- "github.com/grafana/agent/internal/static/metrics/instance"
- "github.com/grafana/agent/internal/util"
+ "github.com/grafana/alloy/internal/static/config/encoder"
+ "github.com/grafana/alloy/internal/static/metrics"
+ "github.com/grafana/alloy/internal/static/metrics/instance"
+ "github.com/grafana/alloy/internal/util"
 commonCfg "github.com/prometheus/common/config"
 "github.com/prometheus/common/model"
 promCfg "github.com/prometheus/prometheus/config"
diff --git a/internal/static/config/integrations.go b/internal/static/config/integrations.go
index f0d2652e6d..c35fdba61d 100644
--- a/internal/static/config/integrations.go
+++ b/internal/static/config/integrations.go
@@ -4,11 +4,11 @@ import (
 "fmt"
 "reflect"
- v1 "github.com/grafana/agent/internal/static/integrations"
- v2 "github.com/grafana/agent/internal/static/integrations/v2"
- "github.com/grafana/agent/internal/static/metrics"
- "github.com/grafana/agent/internal/static/server"
- "github.com/grafana/agent/internal/util"
+ v1 "github.com/grafana/alloy/internal/static/integrations"
+ v2 "github.com/grafana/alloy/internal/static/integrations/v2"
+ "github.com/grafana/alloy/internal/static/metrics"
+ "github.com/grafana/alloy/internal/static/server"
+ "github.com/grafana/alloy/internal/util"
 "gopkg.in/yaml.v2"
 )
diff --git a/internal/static/config/integrations_test.go b/internal/static/config/integrations_test.go
index d3537997e0..0a0c159dee 100644
--- a/internal/static/config/integrations_test.go
+++ b/internal/static/config/integrations_test.go
@@ -6,8 +6,8 @@ import (
 "github.com/stretchr/testify/require"
- _ "github.com/grafana/agent/internal/static/integrations/install" // Install integrations for tests
- "github.com/grafana/agent/internal/util"
+ _ "github.com/grafana/alloy/internal/static/integrations/install" // Install integrations for tests
+ "github.com/grafana/alloy/internal/util"
 )
 func TestIntegrations_v1(t *testing.T) {
diff --git a/internal/static/integrations/agent/agent.go b/internal/static/integrations/agent/agent.go
index 4134dc13c3..72d274ecbc 100644
--- a/internal/static/integrations/agent/agent.go
+++ b/internal/static/integrations/agent/agent.go
@@ -8,8 +8,8 @@ import (
 "net/http"
 "github.com/go-kit/log"
- "github.com/grafana/agent/internal/static/integrations"
- "github.com/grafana/agent/internal/static/integrations/config"
+ "github.com/grafana/alloy/internal/static/integrations"
+ "github.com/grafana/alloy/internal/static/integrations/config"
 "github.com/prometheus/client_golang/prometheus/promhttp"
 )
diff --git a/internal/static/integrations/apache_http/apache_http.go b/internal/static/integrations/apache_http/apache_http.go
index 522de99aef..7d3295a14d 100644
--- a/internal/static/integrations/apache_http/apache_http.go
+++ b/internal/static/integrations/apache_http/apache_http.go
@@ -7,7 +7,7 @@ import (
 ae "github.com/Lusitaniae/apache_exporter/collector"
 "github.com/go-kit/log"
 "github.com/go-kit/log/level"
- "github.com/grafana/agent/internal/static/integrations"
+ "github.com/grafana/alloy/internal/static/integrations"
 )
 // DefaultConfig holds the default settings for the apache_http integration
diff --git a/internal/static/integrations/azure_exporter/azure_exporter.go b/internal/static/integrations/azure_exporter/azure_exporter.go
index dd8e6b2677..d7103f004d 100644
--- a/internal/static/integrations/azure_exporter/azure_exporter.go
+++ b/internal/static/integrations/azure_exporter/azure_exporter.go
@@ -14,7 +14,7 @@ import (
 "github.com/webdevops/azure-metrics-exporter/metrics"
 "github.com/webdevops/go-common/azuresdk/armclient"
- "github.com/grafana/agent/internal/static/integrations/config"
+ "github.com/grafana/alloy/internal/static/integrations/config"
 )
 type Exporter struct {
diff --git a/internal/static/integrations/azure_exporter/config.go b/internal/static/integrations/azure_exporter/config.go
index 3cef29da56..d3dfb791d6 100644
--- a/internal/static/integrations/azure_exporter/config.go
+++ b/internal/static/integrations/azure_exporter/config.go
@@ -16,10 +16,10 @@ import (
 "github.com/webdevops/go-common/azuresdk/cloudconfig"
 "gopkg.in/yaml.v3"
- "github.com/grafana/agent/internal/static/integrations"
- integrations_v2 "github.com/grafana/agent/internal/static/integrations/v2"
- "github.com/grafana/agent/internal/static/integrations/v2/metricsutils"
- "github.com/grafana/agent/internal/util/zapadapter"
+ "github.com/grafana/alloy/internal/static/integrations"
+ integrations_v2 "github.com/grafana/alloy/internal/static/integrations/v2"
+ "github.com/grafana/alloy/internal/static/integrations/v2/metricsutils"
+ "github.com/grafana/alloy/internal/util/zapadapter"
 )
 func init() {
diff --git a/internal/static/integrations/azure_exporter/config_test.go b/internal/static/integrations/azure_exporter/config_test.go
index 1065286ffc..75f9fd037f 100644
--- a/internal/static/integrations/azure_exporter/config_test.go
+++ b/internal/static/integrations/azure_exporter/config_test.go
@@ -9,7 +9,7 @@ import (
 "github.com/stretchr/testify/require"
 "github.com/webdevops/azure-metrics-exporter/metrics"
- "github.com/grafana/agent/internal/static/integrations/azure_exporter"
+ "github.com/grafana/alloy/internal/static/integrations/azure_exporter"
 )
 func TestConfig_ToScrapeSettings(t *testing.T) {
diff --git a/internal/static/integrations/blackbox_exporter/blackbox_exporter.go b/internal/static/integrations/blackbox_exporter/blackbox_exporter.go
index 28ffdaf95c..6f20fa5db7 100644
--- a/internal/static/integrations/blackbox_exporter/blackbox_exporter.go
+++ b/internal/static/integrations/blackbox_exporter/blackbox_exporter.go
@@ -7,9 +7,9 @@ import (
 "net/url"
 "github.com/go-kit/log"
- "github.com/grafana/agent/internal/static/integrations"
- "github.com/grafana/agent/internal/static/integrations/config"
- "github.com/grafana/agent/internal/util"
+ "github.com/grafana/alloy/internal/static/integrations"
+ "github.com/grafana/alloy/internal/static/integrations/config"
+ "github.com/grafana/alloy/internal/util"
 blackbox_config "github.com/prometheus/blackbox_exporter/config"
 "github.com/prometheus/blackbox_exporter/prober"
 "github.com/prometheus/client_golang/prometheus"
diff --git a/internal/static/integrations/blackbox_exporter/blackbox_exporter_test.go b/internal/static/integrations/blackbox_exporter/blackbox_exporter_test.go
index eb3bfb22bc..d2b69b5cc3 100644
--- a/internal/static/integrations/blackbox_exporter/blackbox_exporter_test.go
+++ b/internal/static/integrations/blackbox_exporter/blackbox_exporter_test.go
@@ -4,7 +4,7 @@ import (
 "net/url"
 "testing"
- integrations "github.com/grafana/agent/internal/static/integrations/config"
+ integrations "github.com/grafana/alloy/internal/static/integrations/config"
 "github.com/stretchr/testify/require"
 "gopkg.in/yaml.v2"
 )
diff --git a/internal/static/integrations/cadvisor/cadvisor.go b/internal/static/integrations/cadvisor/cadvisor.go
index ec1094ab68..7b0b998bab 100644
--- a/internal/static/integrations/cadvisor/cadvisor.go
+++ b/internal/static/integrations/cadvisor/cadvisor.go
@@ -20,7 +20,7 @@ import (
 "k8s.io/klog/v2"
 "k8s.io/utils/clock"
- "github.com/grafana/agent/internal/static/integrations"
+ "github.com/grafana/alloy/internal/static/integrations"
 // Register container providers
diff --git a/internal/static/integrations/cadvisor/cadvisor_stub.go b/internal/static/integrations/cadvisor/cadvisor_stub.go
index 35bdd3455b..b1aa592459 100644
--- a/internal/static/integrations/cadvisor/cadvisor_stub.go
+++ b/internal/static/integrations/cadvisor/cadvisor_stub.go
@@ -6,8 +6,8 @@ import (
 "context"
 "net/http"
- "github.com/grafana/agent/internal/static/integrations"
- "github.com/grafana/agent/internal/static/integrations/config"
+ "github.com/grafana/alloy/internal/static/integrations"
+ "github.com/grafana/alloy/internal/static/integrations/config"
 "github.com/go-kit/log"
 "github.com/go-kit/log/level"
diff --git a/internal/static/integrations/cadvisor/cadvisor_test.go b/internal/static/integrations/cadvisor/cadvisor_test.go
index 17d2ebc49c..4d28723987 100644
--- a/internal/static/integrations/cadvisor/cadvisor_test.go
+++ b/internal/static/integrations/cadvisor/cadvisor_test.go
@@ -6,7 +6,7 @@ import (
 "context"
 "testing"
- "github.com/grafana/agent/internal/util"
+ "github.com/grafana/alloy/internal/util"
 "github.com/stretchr/testify/require"
 "gopkg.in/yaml.v3"
 )
diff --git a/internal/static/integrations/cadvisor/common.go b/internal/static/integrations/cadvisor/common.go
index 979f3e0f13..4959090418 100644
--- a/internal/static/integrations/cadvisor/common.go
+++ b/internal/static/integrations/cadvisor/common.go
@@ -4,9 +4,9 @@ import (
 "time"
 "github.com/go-kit/log"
- "github.com/grafana/agent/internal/static/integrations"
- integrations_v2 "github.com/grafana/agent/internal/static/integrations/v2"
- "github.com/grafana/agent/internal/static/integrations/v2/metricsutils"
+ "github.com/grafana/alloy/internal/static/integrations"
+ integrations_v2 "github.com/grafana/alloy/internal/static/integrations/v2"
+ "github.com/grafana/alloy/internal/static/integrations/v2/metricsutils"
 )
 const name = "cadvisor"
diff --git a/internal/static/integrations/cloudwatch_exporter/cloudwatch_exporter.go b/internal/static/integrations/cloudwatch_exporter/cloudwatch_exporter.go
index 81a2d876be..9aa97b6163 100644
--- a/internal/static/integrations/cloudwatch_exporter/cloudwatch_exporter.go
+++ b/internal/static/integrations/cloudwatch_exporter/cloudwatch_exporter.go
@@ -13,7 +13,7 @@ import (
 "github.com/prometheus/client_golang/prometheus"
 "github.com/prometheus/client_golang/prometheus/promhttp"
- "github.com/grafana/agent/internal/static/integrations/config"
+ "github.com/grafana/alloy/internal/static/integrations/config"
 )
 type cachingFactory interface {
diff --git a/internal/static/integrations/cloudwatch_exporter/cloudwatch_exporter_decoupled.go b/internal/static/integrations/cloudwatch_exporter/cloudwatch_exporter_decoupled.go
index cd418c4eeb..c813fad492 100644
--- a/internal/static/integrations/cloudwatch_exporter/cloudwatch_exporter_decoupled.go
+++ b/internal/static/integrations/cloudwatch_exporter/cloudwatch_exporter_decoupled.go
@@ -13,7 +13,7 @@ import (
 "github.com/prometheus/client_golang/prometheus/promhttp"
 "go.uber.org/atomic"
- "github.com/grafana/agent/internal/static/integrations/config"
+ "github.com/grafana/alloy/internal/static/integrations/config"
 )
 // asyncExporter wraps YACE entrypoint around an Integration implementation
diff --git a/internal/static/integrations/cloudwatch_exporter/config.go b/internal/static/integrations/cloudwatch_exporter/config.go
index 04390c1d36..eebe985f8b 100644
--- a/internal/static/integrations/cloudwatch_exporter/config.go
+++ b/internal/static/integrations/cloudwatch_exporter/config.go
@@ -11,9 +11,9 @@ import (
 yaceModel "github.com/nerdswords/yet-another-cloudwatch-exporter/pkg/model"
 "gopkg.in/yaml.v2"
- "github.com/grafana/agent/internal/static/integrations"
- integrations_v2 "github.com/grafana/agent/internal/static/integrations/v2"
- "github.com/grafana/agent/internal/static/integrations/v2/metricsutils"
+ "github.com/grafana/alloy/internal/static/integrations"
+ integrations_v2 "github.com/grafana/alloy/internal/static/integrations/v2"
+ "github.com/grafana/alloy/internal/static/integrations/v2/metricsutils"
 )
 const (
diff --git a/internal/static/integrations/collector_integration.go b/internal/static/integrations/collector_integration.go
index 54c3e4ffed..76c7197d80 100644
--- a/internal/static/integrations/collector_integration.go
+++ b/internal/static/integrations/collector_integration.go
@@ -5,8 +5,8 @@ import (
 "fmt"
 "net/http"
- "github.com/grafana/agent/internal/build"
- "github.com/grafana/agent/internal/static/integrations/config"
+ "github.com/grafana/alloy/internal/build"
+ "github.com/grafana/alloy/internal/static/integrations/config"
 "github.com/prometheus/client_golang/prometheus"
 "github.com/prometheus/client_golang/prometheus/promhttp"
 )
diff --git a/internal/static/integrations/consul_exporter/consul_exporter.go b/internal/static/integrations/consul_exporter/consul_exporter.go
index 83518f5bb9..42a279b2fc 100644
--- a/internal/static/integrations/consul_exporter/consul_exporter.go
+++ b/internal/static/integrations/consul_exporter/consul_exporter.go
@@ -7,9 +7,9 @@ import (
 "time"
 "github.com/go-kit/log"
- "github.com/grafana/agent/internal/static/integrations"
- integrations_v2 "github.com/grafana/agent/internal/static/integrations/v2"
- "github.com/grafana/agent/internal/static/integrations/v2/metricsutils"
+ "github.com/grafana/alloy/internal/static/integrations"
+ integrations_v2 "github.com/grafana/alloy/internal/static/integrations/v2"
+ "github.com/grafana/alloy/internal/static/integrations/v2/metricsutils"
 consul_api "github.com/hashicorp/consul/api"
 "github.com/prometheus/consul_exporter/pkg/exporter"
 )
diff --git a/internal/static/integrations/dnsmasq_exporter/dnsmasq_exporter.go b/internal/static/integrations/dnsmasq_exporter/dnsmasq_exporter.go
index 67bdd03f6b..abe471ef3a 100644
--- a/internal/static/integrations/dnsmasq_exporter/dnsmasq_exporter.go
+++ b/internal/static/integrations/dnsmasq_exporter/dnsmasq_exporter.go
@@ -4,9 +4,9 @@ package dnsmasq_exporter
 import (
 "github.com/go-kit/log"
 "github.com/google/dnsmasq_exporter/collector"
- "github.com/grafana/agent/internal/static/integrations"
- integrations_v2 "github.com/grafana/agent/internal/static/integrations/v2"
- "github.com/grafana/agent/internal/static/integrations/v2/metricsutils"
+ "github.com/grafana/alloy/internal/static/integrations"
+ integrations_v2 "github.com/grafana/alloy/internal/static/integrations/v2"
+ "github.com/grafana/alloy/internal/static/integrations/v2/metricsutils"
 "github.com/miekg/dns"
 )
diff --git a/internal/static/integrations/elasticsearch_exporter/elasticsearch_exporter.go b/internal/static/integrations/elasticsearch_exporter/elasticsearch_exporter.go
index 980d491cd0..a824a1ae66 100644
--- a/internal/static/integrations/elasticsearch_exporter/elasticsearch_exporter.go
+++ b/internal/static/integrations/elasticsearch_exporter/elasticsearch_exporter.go
@@ -14,9 +14,9 @@ import (
 "github.com/go-kit/log"
 "github.com/go-kit/log/level"
- "github.com/grafana/agent/internal/static/integrations"
- integrations_v2 "github.com/grafana/agent/internal/static/integrations/v2"
- "github.com/grafana/agent/internal/static/integrations/v2/metricsutils"
+ "github.com/grafana/alloy/internal/static/integrations"
+ integrations_v2 "github.com/grafana/alloy/internal/static/integrations/v2"
+ "github.com/grafana/alloy/internal/static/integrations/v2/metricsutils"
 "github.com/prometheus/client_golang/prometheus"
 promCfg "github.com/prometheus/common/config"
diff --git a/internal/static/integrations/gcp_exporter/gcp_exporter.go b/internal/static/integrations/gcp_exporter/gcp_exporter.go
index 3b684e402f..fed0251416 100644
--- a/internal/static/integrations/gcp_exporter/gcp_exporter.go
+++ b/internal/static/integrations/gcp_exporter/gcp_exporter.go
@@ -22,9 +22,9 @@ import (
 "google.golang.org/api/option"
 "gopkg.in/yaml.v2"
- "github.com/grafana/agent/internal/static/integrations"
- integrations_v2 "github.com/grafana/agent/internal/static/integrations/v2"
- "github.com/grafana/agent/internal/static/integrations/v2/metricsutils"
+ "github.com/grafana/alloy/internal/static/integrations"
+ integrations_v2 "github.com/grafana/alloy/internal/static/integrations/v2"
+ "github.com/grafana/alloy/internal/static/integrations/v2/metricsutils"
 )
 func init() {
diff --git a/internal/static/integrations/gcp_exporter/gcp_exporter_test.go b/internal/static/integrations/gcp_exporter/gcp_exporter_test.go
index 075fd267b6..ff3dfbb2c7 100644
--- a/internal/static/integrations/gcp_exporter/gcp_exporter_test.go
+++ b/internal/static/integrations/gcp_exporter/gcp_exporter_test.go
@@ -5,7 +5,7 @@ import (
 "github.com/stretchr/testify/require"
- "github.com/grafana/agent/internal/static/integrations/gcp_exporter"
+ "github.com/grafana/alloy/internal/static/integrations/gcp_exporter"
 )
 func TestConfig_Validate(t *testing.T) {
diff --git a/internal/static/integrations/github_exporter/github_exporter.go b/internal/static/integrations/github_exporter/github_exporter.go
index a28e8226ab..2b495390d1 100644
--- a/internal/static/integrations/github_exporter/github_exporter.go
+++ b/internal/static/integrations/github_exporter/github_exporter.go
@@ -8,9 +8,9 @@ import (
 "github.com/githubexporter/github-exporter/exporter"
 "github.com/go-kit/log"
 "github.com/go-kit/log/level"
- "github.com/grafana/agent/internal/static/integrations"
- integrations_v2 "github.com/grafana/agent/internal/static/integrations/v2"
- "github.com/grafana/agent/internal/static/integrations/v2/metricsutils"
+ "github.com/grafana/alloy/internal/static/integrations"
+ integrations_v2 "github.com/grafana/alloy/internal/static/integrations/v2"
+ "github.com/grafana/alloy/internal/static/integrations/v2/metricsutils"
 config_util "github.com/prometheus/common/config"
 )
diff --git a/internal/static/integrations/github_exporter/github_test.go b/internal/static/integrations/github_exporter/github_test.go
index 146db3f660..1ecc8b1f98 100644
--- a/internal/static/integrations/github_exporter/github_test.go
+++ b/internal/static/integrations/github_exporter/github_test.go
@@ -3,7 +3,7 @@ package github_exporter //nolint:golint
 import (
 "testing"
- "github.com/grafana/agent/internal/static/config"
+ "github.com/grafana/alloy/internal/static/config"
 // register github_exporter
 )
diff --git a/internal/static/integrations/handler_integration.go b/internal/static/integrations/handler_integration.go
index c3d02388d4..b6b137cda1 100644
--- a/internal/static/integrations/handler_integration.go
+++ b/internal/static/integrations/handler_integration.go
@@ -4,7 +4,7 @@ import (
 "context"
 "net/http"
- "github.com/grafana/agent/internal/static/integrations/config"
+ "github.com/grafana/alloy/internal/static/integrations/config"
 )
 // NewHandlerIntegration creates a new named integration that will call handler
diff --git a/internal/static/integrations/install/install.go b/internal/static/integrations/install/install.go
index 8ce2be4a3b..c96ceebf57 100644
--- a/internal/static/integrations/install/install.go
+++ b/internal/static/integrations/install/install.go
@@ -6,43 +6,43 @@ import (
 // v1 integrations
 //
- _ "github.com/grafana/agent/internal/static/integrations/agent" // register agent
- _ "github.com/grafana/agent/internal/static/integrations/apache_http" // register apache_exporter
- _ "github.com/grafana/agent/internal/static/integrations/azure_exporter" // register azure_exporter
- _ "github.com/grafana/agent/internal/static/integrations/blackbox_exporter" // register blackbox_exporter
- _ "github.com/grafana/agent/internal/static/integrations/cadvisor" // register cadvisor
- _ "github.com/grafana/agent/internal/static/integrations/cloudwatch_exporter" // register cloudwatch_exporter
- _ "github.com/grafana/agent/internal/static/integrations/consul_exporter" // register consul_exporter
- _ "github.com/grafana/agent/internal/static/integrations/dnsmasq_exporter" // register dnsmasq_exporter
- _ "github.com/grafana/agent/internal/static/integrations/elasticsearch_exporter" // register elasticsearch_exporter
- _ "github.com/grafana/agent/internal/static/integrations/gcp_exporter" // register gcp_exporter
- _ "github.com/grafana/agent/internal/static/integrations/github_exporter" // register github_exporter
- _ "github.com/grafana/agent/internal/static/integrations/kafka_exporter" // register kafka_exporter
- _ "github.com/grafana/agent/internal/static/integrations/memcached_exporter" // register memcached_exporter
- _ "github.com/grafana/agent/internal/static/integrations/mongodb_exporter" // register mongodb_exporter
- _ "github.com/grafana/agent/internal/static/integrations/mssql" // register mssql
- _ "github.com/grafana/agent/internal/static/integrations/mysqld_exporter" // register mysqld_exporter
- _ "github.com/grafana/agent/internal/static/integrations/node_exporter" // register node_exporter
- _ "github.com/grafana/agent/internal/static/integrations/oracledb_exporter" // register oracledb_exporter
- _ "github.com/grafana/agent/internal/static/integrations/postgres_exporter" // register postgres_exporter
- _ "github.com/grafana/agent/internal/static/integrations/process_exporter" // register process_exporter
- _ "github.com/grafana/agent/internal/static/integrations/redis_exporter" // register redis_exporter
- _ "github.com/grafana/agent/internal/static/integrations/snmp_exporter" // register snmp_exporter
- _ "github.com/grafana/agent/internal/static/integrations/snowflake_exporter" // register snowflake_exporter
- _ "github.com/grafana/agent/internal/static/integrations/squid_exporter" // register squid_exporter
- _ "github.com/grafana/agent/internal/static/integrations/statsd_exporter" // register statsd_exporter
- _ "github.com/grafana/agent/internal/static/integrations/vmware_exporter" // register vmware_exporter
- _ "github.com/grafana/agent/internal/static/integrations/windows_exporter" // register windows_exporter
+ _ "github.com/grafana/alloy/internal/static/integrations/agent" // register agent
+ _ "github.com/grafana/alloy/internal/static/integrations/apache_http" // register apache_exporter
+ _ "github.com/grafana/alloy/internal/static/integrations/azure_exporter" // register azure_exporter
+ _ "github.com/grafana/alloy/internal/static/integrations/blackbox_exporter" // register blackbox_exporter
+ _ "github.com/grafana/alloy/internal/static/integrations/cadvisor" // register cadvisor
+ _ "github.com/grafana/alloy/internal/static/integrations/cloudwatch_exporter" // register cloudwatch_exporter
+ _ "github.com/grafana/alloy/internal/static/integrations/consul_exporter" // register consul_exporter
+ _ "github.com/grafana/alloy/internal/static/integrations/dnsmasq_exporter" // register dnsmasq_exporter
+ _ "github.com/grafana/alloy/internal/static/integrations/elasticsearch_exporter" // register elasticsearch_exporter
+ _ "github.com/grafana/alloy/internal/static/integrations/gcp_exporter" // register gcp_exporter
+ _ "github.com/grafana/alloy/internal/static/integrations/github_exporter" // register github_exporter
+ _ "github.com/grafana/alloy/internal/static/integrations/kafka_exporter" // register kafka_exporter
+ _ "github.com/grafana/alloy/internal/static/integrations/memcached_exporter" // register memcached_exporter
+ _ "github.com/grafana/alloy/internal/static/integrations/mongodb_exporter" // register mongodb_exporter
+ _ "github.com/grafana/alloy/internal/static/integrations/mssql" // register mssql
+ _ "github.com/grafana/alloy/internal/static/integrations/mysqld_exporter" // register mysqld_exporter
+ _ "github.com/grafana/alloy/internal/static/integrations/node_exporter" // register node_exporter
+ _ "github.com/grafana/alloy/internal/static/integrations/oracledb_exporter" // register oracledb_exporter
+ _ "github.com/grafana/alloy/internal/static/integrations/postgres_exporter" // register postgres_exporter
+ _ "github.com/grafana/alloy/internal/static/integrations/process_exporter" // register process_exporter
+ _ "github.com/grafana/alloy/internal/static/integrations/redis_exporter" // register redis_exporter
+ _ "github.com/grafana/alloy/internal/static/integrations/snmp_exporter" // register snmp_exporter
+ _ "github.com/grafana/alloy/internal/static/integrations/snowflake_exporter" // register snowflake_exporter
+ _ "github.com/grafana/alloy/internal/static/integrations/squid_exporter" // register squid_exporter
+ _ "github.com/grafana/alloy/internal/static/integrations/statsd_exporter" // register statsd_exporter
+ _ "github.com/grafana/alloy/internal/static/integrations/vmware_exporter" // register vmware_exporter
+ _ "github.com/grafana/alloy/internal/static/integrations/windows_exporter" // register windows_exporter
 //
 // v2 integrations
 //
- _ "github.com/grafana/agent/internal/static/integrations/v2/agent" // register agent
- _ "github.com/grafana/agent/internal/static/integrations/v2/apache_http" // register apache_exporter
- _ "github.com/grafana/agent/internal/static/integrations/v2/app_agent_receiver" // register app_agent_receiver
- _ "github.com/grafana/agent/internal/static/integrations/v2/blackbox_exporter" // register blackbox_exporter
- _ "github.com/grafana/agent/internal/static/integrations/v2/eventhandler" // register eventhandler
- _ "github.com/grafana/agent/internal/static/integrations/v2/snmp_exporter" // register snmp_exporter
- _ "github.com/grafana/agent/internal/static/integrations/v2/vmware_exporter" // register vmware_exporter
+ _ "github.com/grafana/alloy/internal/static/integrations/v2/agent" // register agent
+ _ "github.com/grafana/alloy/internal/static/integrations/v2/apache_http" // register apache_exporter
+ _ "github.com/grafana/alloy/internal/static/integrations/v2/app_agent_receiver" // register app_agent_receiver
+ _ "github.com/grafana/alloy/internal/static/integrations/v2/blackbox_exporter" // register blackbox_exporter
+ _ "github.com/grafana/alloy/internal/static/integrations/v2/eventhandler" // register eventhandler
+ _ "github.com/grafana/alloy/internal/static/integrations/v2/snmp_exporter" // register snmp_exporter
+ _ "github.com/grafana/alloy/internal/static/integrations/v2/vmware_exporter" // register vmware_exporter
 )
diff --git a/internal/static/integrations/install/install_test.go b/internal/static/integrations/install/install_test.go
index fd7c2da767..3f5f640e27 100644
--- a/internal/static/integrations/install/install_test.go
+++ b/internal/static/integrations/install/install_test.go
@@ -5,8 +5,8 @@ import (
 "strings"
 "testing"
- v1 "github.com/grafana/agent/internal/static/integrations"
- v2
"github.com/grafana/agent/internal/static/integrations/v2" + v1 "github.com/grafana/alloy/internal/static/integrations" + v2 "github.com/grafana/alloy/internal/static/integrations/v2" "github.com/stretchr/testify/require" ) diff --git a/internal/static/integrations/integration.go b/internal/static/integrations/integration.go index d8ebe03875..c65616ccac 100644 --- a/internal/static/integrations/integration.go +++ b/internal/static/integrations/integration.go @@ -5,7 +5,7 @@ import ( "net/http" "github.com/go-kit/log" - "github.com/grafana/agent/internal/static/integrations/config" + "github.com/grafana/alloy/internal/static/integrations/config" ) // Config provides the configuration and constructor for an integration. diff --git a/internal/static/integrations/kafka_exporter/kafka_exporter.go b/internal/static/integrations/kafka_exporter/kafka_exporter.go index e0e354f74c..c864212614 100644 --- a/internal/static/integrations/kafka_exporter/kafka_exporter.go +++ b/internal/static/integrations/kafka_exporter/kafka_exporter.go @@ -8,9 +8,9 @@ import ( "github.com/IBM/sarama" kafka_exporter "github.com/davidmparrott/kafka_exporter/v2/exporter" "github.com/go-kit/log" - "github.com/grafana/agent/internal/static/integrations" - integrations_v2 "github.com/grafana/agent/internal/static/integrations/v2" - "github.com/grafana/agent/internal/static/integrations/v2/metricsutils" + "github.com/grafana/alloy/internal/static/integrations" + integrations_v2 "github.com/grafana/alloy/internal/static/integrations/v2" + "github.com/grafana/alloy/internal/static/integrations/v2/metricsutils" ) // DefaultConfig holds the default settings for the kafka_lag_exporter diff --git a/internal/static/integrations/kafka_exporter/kafka_test.go b/internal/static/integrations/kafka_exporter/kafka_test.go index f8a67d2658..a0f4cdb8cb 100644 --- a/internal/static/integrations/kafka_exporter/kafka_test.go +++ b/internal/static/integrations/kafka_exporter/kafka_test.go @@ -3,7 +3,7 @@ package kafka_exporter //nolint:golint import ( "testing" - "github.com/grafana/agent/internal/static/config" + "github.com/grafana/alloy/internal/static/config" ) func TestConfig_SecretKafkaPassword(t *testing.T) { diff --git a/internal/static/integrations/manager.go b/internal/static/integrations/manager.go index 59760b9c7f..b43322bffb 100644 --- a/internal/static/integrations/manager.go +++ b/internal/static/integrations/manager.go @@ -6,8 +6,8 @@ import ( config_util "github.com/prometheus/common/config" - "github.com/grafana/agent/internal/static/metrics" - "github.com/grafana/agent/internal/static/server" + "github.com/grafana/alloy/internal/static/metrics" + "github.com/grafana/alloy/internal/static/server" "github.com/prometheus/common/model" promConfig "github.com/prometheus/prometheus/config" "github.com/prometheus/prometheus/model/relabel" diff --git a/internal/static/integrations/memcached_exporter/memcached_exporter.go b/internal/static/integrations/memcached_exporter/memcached_exporter.go index 8d8e591568..24b83f7fda 100644 --- a/internal/static/integrations/memcached_exporter/memcached_exporter.go +++ b/internal/static/integrations/memcached_exporter/memcached_exporter.go @@ -10,9 +10,9 @@ import ( "github.com/go-kit/log" "github.com/go-kit/log/level" - "github.com/grafana/agent/internal/static/integrations" - integrations_v2 "github.com/grafana/agent/internal/static/integrations/v2" - "github.com/grafana/agent/internal/static/integrations/v2/metricsutils" + "github.com/grafana/alloy/internal/static/integrations" + integrations_v2 
"github.com/grafana/alloy/internal/static/integrations/v2" + "github.com/grafana/alloy/internal/static/integrations/v2/metricsutils" "github.com/prometheus/memcached_exporter/pkg/exporter" ) diff --git a/internal/static/integrations/mongodb_exporter/mongodb_exporter.go b/internal/static/integrations/mongodb_exporter/mongodb_exporter.go index a07c45071d..5f5cf352a6 100644 --- a/internal/static/integrations/mongodb_exporter/mongodb_exporter.go +++ b/internal/static/integrations/mongodb_exporter/mongodb_exporter.go @@ -10,9 +10,9 @@ import ( "github.com/percona/mongodb_exporter/exporter" config_util "github.com/prometheus/common/config" - "github.com/grafana/agent/internal/static/integrations" - integrations_v2 "github.com/grafana/agent/internal/static/integrations/v2" - "github.com/grafana/agent/internal/static/integrations/v2/metricsutils" + "github.com/grafana/alloy/internal/static/integrations" + integrations_v2 "github.com/grafana/alloy/internal/static/integrations/v2" + "github.com/grafana/alloy/internal/static/integrations/v2/metricsutils" ) var DefaultConfig = Config{ diff --git a/internal/static/integrations/mongodb_exporter/mongodb_test.go b/internal/static/integrations/mongodb_exporter/mongodb_test.go index b1fac94934..cd4e052427 100644 --- a/internal/static/integrations/mongodb_exporter/mongodb_test.go +++ b/internal/static/integrations/mongodb_exporter/mongodb_test.go @@ -3,7 +3,7 @@ package mongodb_exporter //nolint:golint import ( "testing" - "github.com/grafana/agent/internal/static/config" + "github.com/grafana/alloy/internal/static/config" ) func TestConfig_SecretMongoDB(t *testing.T) { diff --git a/internal/static/integrations/mssql/sql_exporter.go b/internal/static/integrations/mssql/sql_exporter.go index f378b7f90f..6e7cd41402 100644 --- a/internal/static/integrations/mssql/sql_exporter.go +++ b/internal/static/integrations/mssql/sql_exporter.go @@ -13,10 +13,10 @@ import ( "github.com/burningalchemist/sql_exporter" "github.com/burningalchemist/sql_exporter/config" - "github.com/grafana/agent/internal/static/integrations" - integrations_v2 "github.com/grafana/agent/internal/static/integrations/v2" - "github.com/grafana/agent/internal/static/integrations/v2/metricsutils" - "github.com/grafana/agent/internal/util" + "github.com/grafana/alloy/internal/static/integrations" + integrations_v2 "github.com/grafana/alloy/internal/static/integrations/v2" + "github.com/grafana/alloy/internal/static/integrations/v2/metricsutils" + "github.com/grafana/alloy/internal/util" "github.com/prometheus/common/model" ) diff --git a/internal/static/integrations/mysqld_exporter/mysqld-exporter.go b/internal/static/integrations/mysqld_exporter/mysqld-exporter.go index eae4294fda..eb01ab285c 100644 --- a/internal/static/integrations/mysqld_exporter/mysqld-exporter.go +++ b/internal/static/integrations/mysqld_exporter/mysqld-exporter.go @@ -11,9 +11,9 @@ import ( "github.com/go-kit/log" "github.com/go-kit/log/level" "github.com/go-sql-driver/mysql" - "github.com/grafana/agent/internal/static/integrations" - integrations_v2 "github.com/grafana/agent/internal/static/integrations/v2" - "github.com/grafana/agent/internal/static/integrations/v2/metricsutils" + "github.com/grafana/alloy/internal/static/integrations" + integrations_v2 "github.com/grafana/alloy/internal/static/integrations/v2" + "github.com/grafana/alloy/internal/static/integrations/v2/metricsutils" "github.com/prometheus/mysqld_exporter/collector" ) diff --git a/internal/static/integrations/mysqld_exporter/mysqld_test.go 
b/internal/static/integrations/mysqld_exporter/mysqld_test.go index 7c620ee409..ade15a3c66 100644 --- a/internal/static/integrations/mysqld_exporter/mysqld_test.go +++ b/internal/static/integrations/mysqld_exporter/mysqld_test.go @@ -3,7 +3,7 @@ package mysqld_exporter //nolint:golint import ( "testing" - "github.com/grafana/agent/internal/static/config" + "github.com/grafana/alloy/internal/static/config" ) func TestConfig_SecretMysqlD(t *testing.T) { diff --git a/internal/static/integrations/node_exporter/config.go b/internal/static/integrations/node_exporter/config.go index 4dda0d7266..b49f46dcc1 100644 --- a/internal/static/integrations/node_exporter/config.go +++ b/internal/static/integrations/node_exporter/config.go @@ -8,9 +8,9 @@ import ( "time" "github.com/go-kit/log" - "github.com/grafana/agent/internal/static/integrations" - integrations_v2 "github.com/grafana/agent/internal/static/integrations/v2" - "github.com/grafana/agent/internal/static/integrations/v2/metricsutils" + "github.com/grafana/alloy/internal/static/integrations" + integrations_v2 "github.com/grafana/alloy/internal/static/integrations/v2" + "github.com/grafana/alloy/internal/static/integrations/v2/metricsutils" "github.com/grafana/dskit/flagext" "github.com/prometheus/node_exporter/collector" "github.com/prometheus/procfs" diff --git a/internal/static/integrations/node_exporter/node_exporter.go b/internal/static/integrations/node_exporter/node_exporter.go index 3833c5103e..92f4cbea3b 100644 --- a/internal/static/integrations/node_exporter/node_exporter.go +++ b/internal/static/integrations/node_exporter/node_exporter.go @@ -10,8 +10,8 @@ import ( "github.com/go-kit/log" "github.com/go-kit/log/level" - "github.com/grafana/agent/internal/build" - "github.com/grafana/agent/internal/static/integrations/config" + "github.com/grafana/alloy/internal/build" + "github.com/grafana/alloy/internal/static/integrations/config" "github.com/prometheus/client_golang/prometheus" "github.com/prometheus/client_golang/prometheus/promhttp" "github.com/prometheus/node_exporter/collector" diff --git a/internal/static/integrations/node_exporter/node_exporter_windows.go b/internal/static/integrations/node_exporter/node_exporter_windows.go index 0dfdf44601..be3f5fdfea 100644 --- a/internal/static/integrations/node_exporter/node_exporter_windows.go +++ b/internal/static/integrations/node_exporter/node_exporter_windows.go @@ -6,7 +6,7 @@ import ( "github.com/go-kit/log" "github.com/go-kit/log/level" - "github.com/grafana/agent/internal/static/integrations/config" + "github.com/grafana/alloy/internal/static/integrations/config" ) // Integration is the node_exporter integration. 
On Windows platforms, diff --git a/internal/static/integrations/oracledb_exporter/oracledb_exporter.go b/internal/static/integrations/oracledb_exporter/oracledb_exporter.go index c4385e5aa2..8c967d3549 100644 --- a/internal/static/integrations/oracledb_exporter/oracledb_exporter.go +++ b/internal/static/integrations/oracledb_exporter/oracledb_exporter.go @@ -7,14 +7,14 @@ import ( "os" "github.com/go-kit/log" - "github.com/grafana/agent/internal/static/integrations" + "github.com/grafana/alloy/internal/static/integrations" oe "github.com/iamseth/oracledb_exporter/collector" // required driver for integration _ "github.com/sijms/go-ora/v2" - integrations_v2 "github.com/grafana/agent/internal/static/integrations/v2" - "github.com/grafana/agent/internal/static/integrations/v2/metricsutils" + integrations_v2 "github.com/grafana/alloy/internal/static/integrations/v2" + "github.com/grafana/alloy/internal/static/integrations/v2/metricsutils" config_util "github.com/prometheus/common/config" ) diff --git a/internal/static/integrations/postgres_exporter/postgres_exporter.go b/internal/static/integrations/postgres_exporter/postgres_exporter.go index aa375be8c4..44b52405b1 100644 --- a/internal/static/integrations/postgres_exporter/postgres_exporter.go +++ b/internal/static/integrations/postgres_exporter/postgres_exporter.go @@ -9,9 +9,9 @@ import ( config_util "github.com/prometheus/common/config" "github.com/go-kit/log" - "github.com/grafana/agent/internal/static/integrations" - integrations_v2 "github.com/grafana/agent/internal/static/integrations/v2" - "github.com/grafana/agent/internal/static/integrations/v2/metricsutils" + "github.com/grafana/alloy/internal/static/integrations" + integrations_v2 "github.com/grafana/alloy/internal/static/integrations/v2" + "github.com/grafana/alloy/internal/static/integrations/v2/metricsutils" "github.com/lib/pq" "github.com/prometheus-community/postgres_exporter/exporter" ) diff --git a/internal/static/integrations/postgres_exporter/postgres_test.go b/internal/static/integrations/postgres_exporter/postgres_test.go index cb189ee719..b1112ee27b 100644 --- a/internal/static/integrations/postgres_exporter/postgres_test.go +++ b/internal/static/integrations/postgres_exporter/postgres_test.go @@ -3,7 +3,7 @@ package postgres_exporter //nolint:golint import ( "testing" - "github.com/grafana/agent/internal/static/config" + "github.com/grafana/alloy/internal/static/config" ) func TestConfig_SecretPostgres(t *testing.T) { diff --git a/internal/static/integrations/process_exporter/config.go b/internal/static/integrations/process_exporter/config.go index a43bb45074..121d8208d4 100644 --- a/internal/static/integrations/process_exporter/config.go +++ b/internal/static/integrations/process_exporter/config.go @@ -3,9 +3,9 @@ package process_exporter //nolint:golint import ( "github.com/go-kit/log" - "github.com/grafana/agent/internal/static/integrations" - integrations_v2 "github.com/grafana/agent/internal/static/integrations/v2" - "github.com/grafana/agent/internal/static/integrations/v2/metricsutils" + "github.com/grafana/alloy/internal/static/integrations" + integrations_v2 "github.com/grafana/alloy/internal/static/integrations/v2" + "github.com/grafana/alloy/internal/static/integrations/v2/metricsutils" exporter_config "github.com/ncabatoff/process-exporter/config" ) diff --git a/internal/static/integrations/process_exporter/process-exporter.go b/internal/static/integrations/process_exporter/process-exporter.go index 14f9d14db9..9f64560f5d 100644 --- 
a/internal/static/integrations/process_exporter/process-exporter.go +++ b/internal/static/integrations/process_exporter/process-exporter.go @@ -9,7 +9,7 @@ import ( "github.com/go-kit/log" "github.com/go-kit/log/level" - "github.com/grafana/agent/internal/static/integrations/config" + "github.com/grafana/alloy/internal/static/integrations/config" ) // Integration is the process_exporter integration. On non-Linux platforms, diff --git a/internal/static/integrations/process_exporter/process-exporter_linux.go b/internal/static/integrations/process_exporter/process-exporter_linux.go index 1194f7e11c..d45d82f7ff 100644 --- a/internal/static/integrations/process_exporter/process-exporter_linux.go +++ b/internal/static/integrations/process_exporter/process-exporter_linux.go @@ -7,8 +7,8 @@ import ( "net/http" "github.com/go-kit/log" - "github.com/grafana/agent/internal/build" - "github.com/grafana/agent/internal/static/integrations/config" + "github.com/grafana/alloy/internal/build" + "github.com/grafana/alloy/internal/static/integrations/config" "github.com/prometheus/client_golang/prometheus" "github.com/prometheus/client_golang/prometheus/promhttp" diff --git a/internal/static/integrations/redis_exporter/redis_exporter.go b/internal/static/integrations/redis_exporter/redis_exporter.go index 3b34b20079..8335162ab0 100644 --- a/internal/static/integrations/redis_exporter/redis_exporter.go +++ b/internal/static/integrations/redis_exporter/redis_exporter.go @@ -8,9 +8,9 @@ import ( "strings" "time" - "github.com/grafana/agent/internal/static/integrations" - integrations_v2 "github.com/grafana/agent/internal/static/integrations/v2" - "github.com/grafana/agent/internal/static/integrations/v2/metricsutils" + "github.com/grafana/alloy/internal/static/integrations" + integrations_v2 "github.com/grafana/alloy/internal/static/integrations/v2" + "github.com/grafana/alloy/internal/static/integrations/v2/metricsutils" "github.com/go-kit/log" "github.com/go-kit/log/level" diff --git a/internal/static/integrations/redis_exporter/redis_exporter_test.go b/internal/static/integrations/redis_exporter/redis_exporter_test.go index 6b10be315f..47965e5050 100644 --- a/internal/static/integrations/redis_exporter/redis_exporter_test.go +++ b/internal/static/integrations/redis_exporter/redis_exporter_test.go @@ -8,7 +8,7 @@ import ( "net/http/httptest" "testing" - "github.com/grafana/agent/internal/static/config" + "github.com/grafana/alloy/internal/static/config" "gopkg.in/yaml.v2" "github.com/go-kit/log" diff --git a/internal/static/integrations/register.go b/internal/static/integrations/register.go index 9a5c4734ee..006ca47347 100644 --- a/internal/static/integrations/register.go +++ b/internal/static/integrations/register.go @@ -5,8 +5,8 @@ import ( "reflect" "strings" - "github.com/grafana/agent/internal/static/integrations/config" - "github.com/grafana/agent/internal/util" + "github.com/grafana/alloy/internal/static/integrations/config" + "github.com/grafana/alloy/internal/util" "gopkg.in/yaml.v2" ) diff --git a/internal/static/integrations/snmp_exporter/snmp_exporter.go b/internal/static/integrations/snmp_exporter/snmp_exporter.go index c03ead4a47..de2246101f 100644 --- a/internal/static/integrations/snmp_exporter/snmp_exporter.go +++ b/internal/static/integrations/snmp_exporter/snmp_exporter.go @@ -8,9 +8,9 @@ import ( "net/url" "github.com/go-kit/log" - "github.com/grafana/agent/internal/static/integrations" - "github.com/grafana/agent/internal/static/integrations/config" - snmp_common 
"github.com/grafana/agent/internal/static/integrations/snmp_exporter/common" + "github.com/grafana/alloy/internal/static/integrations" + "github.com/grafana/alloy/internal/static/integrations/config" + snmp_common "github.com/grafana/alloy/internal/static/integrations/snmp_exporter/common" "github.com/prometheus/client_golang/prometheus" "github.com/prometheus/client_golang/prometheus/promauto" "github.com/prometheus/snmp_exporter/collector" diff --git a/internal/static/integrations/snowflake_exporter/snowflake_exporter.go b/internal/static/integrations/snowflake_exporter/snowflake_exporter.go index ef01a7cf38..6013e07f4b 100644 --- a/internal/static/integrations/snowflake_exporter/snowflake_exporter.go +++ b/internal/static/integrations/snowflake_exporter/snowflake_exporter.go @@ -2,9 +2,9 @@ package snowflake_exporter import ( "github.com/go-kit/log" - "github.com/grafana/agent/internal/static/integrations" - integrations_v2 "github.com/grafana/agent/internal/static/integrations/v2" - "github.com/grafana/agent/internal/static/integrations/v2/metricsutils" + "github.com/grafana/alloy/internal/static/integrations" + integrations_v2 "github.com/grafana/alloy/internal/static/integrations/v2" + "github.com/grafana/alloy/internal/static/integrations/v2/metricsutils" "github.com/grafana/snowflake-prometheus-exporter/collector" config_util "github.com/prometheus/common/config" ) diff --git a/internal/static/integrations/squid_exporter/squid_exporter.go b/internal/static/integrations/squid_exporter/squid_exporter.go index 9ec368f70b..b596ef11f8 100644 --- a/internal/static/integrations/squid_exporter/squid_exporter.go +++ b/internal/static/integrations/squid_exporter/squid_exporter.go @@ -8,11 +8,11 @@ import ( se "github.com/boynux/squid-exporter/collector" "github.com/go-kit/log" - "github.com/grafana/agent/internal/static/integrations" + "github.com/grafana/alloy/internal/static/integrations" config_util "github.com/prometheus/common/config" - integrations_v2 "github.com/grafana/agent/internal/static/integrations/v2" - "github.com/grafana/agent/internal/static/integrations/v2/metricsutils" + integrations_v2 "github.com/grafana/alloy/internal/static/integrations/v2" + "github.com/grafana/alloy/internal/static/integrations/v2/metricsutils" ) var ( diff --git a/internal/static/integrations/statsd_exporter/statsd_exporter.go b/internal/static/integrations/statsd_exporter/statsd_exporter.go index 6b0f7a8fbe..1ead241def 100644 --- a/internal/static/integrations/statsd_exporter/statsd_exporter.go +++ b/internal/static/integrations/statsd_exporter/statsd_exporter.go @@ -12,11 +12,11 @@ import ( "github.com/go-kit/log" "github.com/go-kit/log/level" - "github.com/grafana/agent/internal/build" - "github.com/grafana/agent/internal/static/integrations" - "github.com/grafana/agent/internal/static/integrations/config" - integrations_v2 "github.com/grafana/agent/internal/static/integrations/v2" - "github.com/grafana/agent/internal/static/integrations/v2/metricsutils" + "github.com/grafana/alloy/internal/build" + "github.com/grafana/alloy/internal/static/integrations" + "github.com/grafana/alloy/internal/static/integrations/config" + integrations_v2 "github.com/grafana/alloy/internal/static/integrations/v2" + "github.com/grafana/alloy/internal/static/integrations/v2/metricsutils" "github.com/prometheus/client_golang/prometheus" "github.com/prometheus/client_golang/prometheus/promhttp" "github.com/prometheus/statsd_exporter/pkg/address" diff --git a/internal/static/integrations/v2/agent/agent.go 
b/internal/static/integrations/v2/agent/agent.go index deeaa1d8ca..2e669b3c15 100644 --- a/internal/static/integrations/v2/agent/agent.go +++ b/internal/static/integrations/v2/agent/agent.go @@ -5,9 +5,9 @@ package agent import ( "github.com/go-kit/log" - "github.com/grafana/agent/internal/static/integrations/v2" - "github.com/grafana/agent/internal/static/integrations/v2/common" - "github.com/grafana/agent/internal/static/integrations/v2/metricsutils" + "github.com/grafana/alloy/internal/static/integrations/v2" + "github.com/grafana/alloy/internal/static/integrations/v2/common" + "github.com/grafana/alloy/internal/static/integrations/v2/metricsutils" "github.com/prometheus/client_golang/prometheus/promhttp" ) diff --git a/internal/static/integrations/v2/apache_http/apache_http.go b/internal/static/integrations/v2/apache_http/apache_http.go index cdffaebab2..69160bb4ca 100644 --- a/internal/static/integrations/v2/apache_http/apache_http.go +++ b/internal/static/integrations/v2/apache_http/apache_http.go @@ -8,9 +8,9 @@ import ( ae "github.com/Lusitaniae/apache_exporter/collector" "github.com/go-kit/log" "github.com/go-kit/log/level" - integrations_v2 "github.com/grafana/agent/internal/static/integrations/v2" - "github.com/grafana/agent/internal/static/integrations/v2/common" - "github.com/grafana/agent/internal/static/integrations/v2/metricsutils" + integrations_v2 "github.com/grafana/alloy/internal/static/integrations/v2" + "github.com/grafana/alloy/internal/static/integrations/v2/common" + "github.com/grafana/alloy/internal/static/integrations/v2/metricsutils" "github.com/prometheus/client_golang/prometheus" "github.com/prometheus/client_golang/prometheus/promhttp" ) diff --git a/internal/static/integrations/v2/apache_http/apache_http_test.go b/internal/static/integrations/v2/apache_http/apache_http_test.go index 8480eb1b56..3e3e1f6306 100644 --- a/internal/static/integrations/v2/apache_http/apache_http_test.go +++ b/internal/static/integrations/v2/apache_http/apache_http_test.go @@ -4,7 +4,7 @@ import ( "fmt" "testing" - integrations_v2 "github.com/grafana/agent/internal/static/integrations/v2" + integrations_v2 "github.com/grafana/alloy/internal/static/integrations/v2" "github.com/stretchr/testify/require" ) diff --git a/internal/static/integrations/v2/app_agent_receiver/app_agent_receiver.go b/internal/static/integrations/v2/app_agent_receiver/app_agent_receiver.go index 9145115fd5..83a0bcc958 100644 --- a/internal/static/integrations/v2/app_agent_receiver/app_agent_receiver.go +++ b/internal/static/integrations/v2/app_agent_receiver/app_agent_receiver.go @@ -4,7 +4,7 @@ import ( "fmt" "github.com/go-kit/log" - "github.com/grafana/agent/internal/static/integrations/v2" + "github.com/grafana/alloy/internal/static/integrations/v2" ) func init() { diff --git a/internal/static/integrations/v2/app_agent_receiver/config.go b/internal/static/integrations/v2/app_agent_receiver/config.go index 32d1b12389..fd49908ca2 100644 --- a/internal/static/integrations/v2/app_agent_receiver/config.go +++ b/internal/static/integrations/v2/app_agent_receiver/config.go @@ -3,8 +3,8 @@ package app_agent_receiver import ( "time" - "github.com/grafana/agent/internal/static/integrations/v2" - "github.com/grafana/agent/internal/static/integrations/v2/common" + "github.com/grafana/alloy/internal/static/integrations/v2" + "github.com/grafana/alloy/internal/static/integrations/v2/common" ) const ( diff --git a/internal/static/integrations/v2/blackbox_exporter/blackbox.go 
b/internal/static/integrations/v2/blackbox_exporter/blackbox.go index e06a20d968..f59e571932 100644 --- a/internal/static/integrations/v2/blackbox_exporter/blackbox.go +++ b/internal/static/integrations/v2/blackbox_exporter/blackbox.go @@ -8,10 +8,10 @@ import ( "github.com/go-kit/log" "github.com/gorilla/mux" - "github.com/grafana/agent/internal/static/integrations/blackbox_exporter" - "github.com/grafana/agent/internal/static/integrations/v2" - "github.com/grafana/agent/internal/static/integrations/v2/autoscrape" - "github.com/grafana/agent/internal/static/integrations/v2/metricsutils" + "github.com/grafana/alloy/internal/static/integrations/blackbox_exporter" + "github.com/grafana/alloy/internal/static/integrations/v2" + "github.com/grafana/alloy/internal/static/integrations/v2/autoscrape" + "github.com/grafana/alloy/internal/static/integrations/v2/metricsutils" blackbox_config "github.com/prometheus/blackbox_exporter/config" "github.com/prometheus/blackbox_exporter/prober" "github.com/prometheus/common/model" diff --git a/internal/static/integrations/v2/blackbox_exporter/blackbox_exporter.go b/internal/static/integrations/v2/blackbox_exporter/blackbox_exporter.go index 1f4ffdc489..73db0f49f3 100644 --- a/internal/static/integrations/v2/blackbox_exporter/blackbox_exporter.go +++ b/internal/static/integrations/v2/blackbox_exporter/blackbox_exporter.go @@ -2,10 +2,10 @@ package blackbox_exporter_v2 import ( "github.com/go-kit/log" - "github.com/grafana/agent/internal/static/integrations/blackbox_exporter" - integrations_v2 "github.com/grafana/agent/internal/static/integrations/v2" - "github.com/grafana/agent/internal/static/integrations/v2/common" - "github.com/grafana/agent/internal/util" + "github.com/grafana/alloy/internal/static/integrations/blackbox_exporter" + integrations_v2 "github.com/grafana/alloy/internal/static/integrations/v2" + "github.com/grafana/alloy/internal/static/integrations/v2/common" + "github.com/grafana/alloy/internal/util" blackbox_config "github.com/prometheus/blackbox_exporter/config" "gopkg.in/yaml.v3" ) diff --git a/internal/static/integrations/v2/blackbox_exporter/blackbox_test.go b/internal/static/integrations/v2/blackbox_exporter/blackbox_test.go index 9b798f1ad3..e346bb31bc 100644 --- a/internal/static/integrations/v2/blackbox_exporter/blackbox_test.go +++ b/internal/static/integrations/v2/blackbox_exporter/blackbox_test.go @@ -3,11 +3,11 @@ package blackbox_exporter_v2 import ( "testing" - "github.com/grafana/agent/internal/static/integrations/blackbox_exporter" - "github.com/grafana/agent/internal/static/integrations/v2" - integrations_v2 "github.com/grafana/agent/internal/static/integrations/v2" - autoscrape "github.com/grafana/agent/internal/static/integrations/v2/autoscrape" - "github.com/grafana/agent/internal/static/integrations/v2/common" + "github.com/grafana/alloy/internal/static/integrations/blackbox_exporter" + "github.com/grafana/alloy/internal/static/integrations/v2" + integrations_v2 "github.com/grafana/alloy/internal/static/integrations/v2" + autoscrape "github.com/grafana/alloy/internal/static/integrations/v2/autoscrape" + "github.com/grafana/alloy/internal/static/integrations/v2/common" "github.com/prometheus/common/model" "github.com/prometheus/prometheus/discovery/targetgroup" "github.com/stretchr/testify/require" diff --git a/internal/static/integrations/v2/common/metrics.go b/internal/static/integrations/v2/common/metrics.go index 1e83a8bb94..14289acebb 100644 --- a/internal/static/integrations/v2/common/metrics.go +++ 
b/internal/static/integrations/v2/common/metrics.go @@ -1,7 +1,7 @@ package common import ( - "github.com/grafana/agent/internal/static/integrations/v2/autoscrape" + "github.com/grafana/alloy/internal/static/integrations/v2/autoscrape" "github.com/prometheus/prometheus/model/labels" ) diff --git a/internal/static/integrations/v2/eventhandler/integration.go b/internal/static/integrations/v2/eventhandler/integration.go index 4453aeefce..78e33e0741 100644 --- a/internal/static/integrations/v2/eventhandler/integration.go +++ b/internal/static/integrations/v2/eventhandler/integration.go @@ -4,7 +4,7 @@ import ( "context" "github.com/go-kit/log" - "github.com/grafana/agent/internal/static/integrations/v2" + "github.com/grafana/alloy/internal/static/integrations/v2" "github.com/prometheus/prometheus/model/labels" ) diff --git a/internal/static/integrations/v2/integrations.go b/internal/static/integrations/v2/integrations.go index 3c5ba8d3ab..5a5348b233 100644 --- a/internal/static/integrations/v2/integrations.go +++ b/internal/static/integrations/v2/integrations.go @@ -25,8 +25,8 @@ import ( "net/url" "github.com/go-kit/log" - "github.com/grafana/agent/internal/static/integrations/v2/autoscrape" - "github.com/grafana/agent/internal/static/server" + "github.com/grafana/alloy/internal/static/integrations/v2/autoscrape" + "github.com/grafana/alloy/internal/static/server" "github.com/prometheus/prometheus/discovery" "github.com/prometheus/prometheus/discovery/targetgroup" ) diff --git a/internal/static/integrations/v2/metricsutils/metricshandler_integration.go b/internal/static/integrations/v2/metricsutils/metricshandler_integration.go index 1978619091..fe9cbeba7e 100644 --- a/internal/static/integrations/v2/metricsutils/metricshandler_integration.go +++ b/internal/static/integrations/v2/metricsutils/metricshandler_integration.go @@ -8,9 +8,9 @@ import ( "github.com/go-kit/log" "github.com/gorilla/mux" - "github.com/grafana/agent/internal/static/integrations/v2" - "github.com/grafana/agent/internal/static/integrations/v2/autoscrape" - "github.com/grafana/agent/internal/static/integrations/v2/common" + "github.com/grafana/alloy/internal/static/integrations/v2" + "github.com/grafana/alloy/internal/static/integrations/v2/autoscrape" + "github.com/grafana/alloy/internal/static/integrations/v2/common" "github.com/prometheus/common/model" "github.com/prometheus/prometheus/config" "github.com/prometheus/prometheus/discovery" diff --git a/internal/static/integrations/v2/metricsutils/metricshandler_integration_test.go b/internal/static/integrations/v2/metricsutils/metricshandler_integration_test.go index 643b2ff25b..c45d653d52 100644 --- a/internal/static/integrations/v2/metricsutils/metricshandler_integration_test.go +++ b/internal/static/integrations/v2/metricsutils/metricshandler_integration_test.go @@ -7,8 +7,8 @@ import ( "testing" "github.com/go-kit/log" - "github.com/grafana/agent/internal/static/integrations/v2" - "github.com/grafana/agent/internal/static/integrations/v2/common" + "github.com/grafana/alloy/internal/static/integrations/v2" + "github.com/grafana/alloy/internal/static/integrations/v2/common" "github.com/prometheus/common/model" "github.com/prometheus/prometheus/discovery/targetgroup" "github.com/prometheus/prometheus/model/labels" diff --git a/internal/static/integrations/v2/metricsutils/versionshim.go b/internal/static/integrations/v2/metricsutils/versionshim.go index 75908ec9ce..230cdbeeca 100644 --- a/internal/static/integrations/v2/metricsutils/versionshim.go +++ 
b/internal/static/integrations/v2/metricsutils/versionshim.go @@ -9,10 +9,10 @@ import ( "github.com/go-kit/log" "github.com/prometheus/common/model" - v1 "github.com/grafana/agent/internal/static/integrations" - v2 "github.com/grafana/agent/internal/static/integrations/v2" - "github.com/grafana/agent/internal/static/integrations/v2/common" - "github.com/grafana/agent/internal/util" + v1 "github.com/grafana/alloy/internal/static/integrations" + v2 "github.com/grafana/alloy/internal/static/integrations/v2" + "github.com/grafana/alloy/internal/static/integrations/v2/common" + "github.com/grafana/alloy/internal/util" ) // NewNamedShim returns a v2.UpgradeFunc which will upgrade a v1.Config to a diff --git a/internal/static/integrations/v2/register.go b/internal/static/integrations/v2/register.go index 30e8064174..5b68b1b3b0 100644 --- a/internal/static/integrations/v2/register.go +++ b/internal/static/integrations/v2/register.go @@ -8,9 +8,9 @@ import ( "gopkg.in/yaml.v2" - v1 "github.com/grafana/agent/internal/static/integrations" - "github.com/grafana/agent/internal/static/integrations/v2/common" - "github.com/grafana/agent/internal/util" + v1 "github.com/grafana/alloy/internal/static/integrations" + "github.com/grafana/alloy/internal/static/integrations/v2/common" + "github.com/grafana/alloy/internal/util" ) var ( diff --git a/internal/static/integrations/v2/register_test.go b/internal/static/integrations/v2/register_test.go index 5515910229..a3962a2d23 100644 --- a/internal/static/integrations/v2/register_test.go +++ b/internal/static/integrations/v2/register_test.go @@ -5,8 +5,8 @@ import ( "time" "github.com/go-kit/log" - v1 "github.com/grafana/agent/internal/static/integrations" - "github.com/grafana/agent/internal/static/integrations/v2/common" + v1 "github.com/grafana/alloy/internal/static/integrations" + "github.com/grafana/alloy/internal/static/integrations/v2/common" "github.com/stretchr/testify/require" "gopkg.in/yaml.v2" ) diff --git a/internal/static/integrations/v2/snmp_exporter/snmp.go b/internal/static/integrations/v2/snmp_exporter/snmp.go index dc78dad5ed..df6bee98b9 100644 --- a/internal/static/integrations/v2/snmp_exporter/snmp.go +++ b/internal/static/integrations/v2/snmp_exporter/snmp.go @@ -7,10 +7,10 @@ import ( "path" "github.com/gorilla/mux" - "github.com/grafana/agent/internal/static/integrations/snmp_exporter" - "github.com/grafana/agent/internal/static/integrations/v2" - "github.com/grafana/agent/internal/static/integrations/v2/autoscrape" - "github.com/grafana/agent/internal/static/integrations/v2/metricsutils" + "github.com/grafana/alloy/internal/static/integrations/snmp_exporter" + "github.com/grafana/alloy/internal/static/integrations/v2" + "github.com/grafana/alloy/internal/static/integrations/v2/autoscrape" + "github.com/grafana/alloy/internal/static/integrations/v2/metricsutils" "github.com/prometheus/common/model" "github.com/prometheus/prometheus/config" "github.com/prometheus/prometheus/discovery" diff --git a/internal/static/integrations/v2/snmp_exporter/snmp_exporter.go b/internal/static/integrations/v2/snmp_exporter/snmp_exporter.go index bea9d944de..842dfc014b 100644 --- a/internal/static/integrations/v2/snmp_exporter/snmp_exporter.go +++ b/internal/static/integrations/v2/snmp_exporter/snmp_exporter.go @@ -3,9 +3,9 @@ package snmp_exporter_v2 import ( "github.com/go-kit/log" - "github.com/grafana/agent/internal/static/integrations/snmp_exporter" - integrations_v2 "github.com/grafana/agent/internal/static/integrations/v2" - 
"github.com/grafana/agent/internal/static/integrations/v2/common" + "github.com/grafana/alloy/internal/static/integrations/snmp_exporter" + integrations_v2 "github.com/grafana/alloy/internal/static/integrations/v2" + "github.com/grafana/alloy/internal/static/integrations/v2/common" snmp_config "github.com/prometheus/snmp_exporter/config" ) diff --git a/internal/static/integrations/v2/subsystem.go b/internal/static/integrations/v2/subsystem.go index ce501b37c6..6f8572ef6a 100644 --- a/internal/static/integrations/v2/subsystem.go +++ b/internal/static/integrations/v2/subsystem.go @@ -1,8 +1,8 @@ package integrations import ( - "github.com/grafana/agent/internal/static/integrations/v2/autoscrape" - "github.com/grafana/agent/internal/static/metrics" + "github.com/grafana/alloy/internal/static/integrations/v2/autoscrape" + "github.com/grafana/alloy/internal/static/metrics" ) const ( diff --git a/internal/static/integrations/v2/subsystem_test.go b/internal/static/integrations/v2/subsystem_test.go index ddc223b736..33c5fc4adf 100644 --- a/internal/static/integrations/v2/subsystem_test.go +++ b/internal/static/integrations/v2/subsystem_test.go @@ -3,8 +3,8 @@ package integrations import ( "testing" - v1 "github.com/grafana/agent/internal/static/integrations" - "github.com/grafana/agent/internal/static/integrations/v2/common" + v1 "github.com/grafana/alloy/internal/static/integrations" + "github.com/grafana/alloy/internal/static/integrations/v2/common" "github.com/stretchr/testify/require" "gopkg.in/yaml.v2" ) diff --git a/internal/static/integrations/v2/utils.go b/internal/static/integrations/v2/utils.go index 138cf18b0c..7da61b5628 100644 --- a/internal/static/integrations/v2/utils.go +++ b/internal/static/integrations/v2/utils.go @@ -4,7 +4,7 @@ import ( "context" "net/http" - "github.com/grafana/agent/internal/util" + "github.com/grafana/alloy/internal/util" ) // FuncIntegration is a function that implements Integration. 
diff --git a/internal/static/integrations/v2/vmware_exporter/vmware_exporter.go b/internal/static/integrations/v2/vmware_exporter/vmware_exporter.go
index 257ed80812..c4a98614e0 100644
--- a/internal/static/integrations/v2/vmware_exporter/vmware_exporter.go
+++ b/internal/static/integrations/v2/vmware_exporter/vmware_exporter.go
@@ -6,9 +6,9 @@ import (
 	"time"
 
 	"github.com/go-kit/log"
-	"github.com/grafana/agent/internal/static/integrations/v2"
-	"github.com/grafana/agent/internal/static/integrations/v2/common"
-	"github.com/grafana/agent/internal/static/integrations/v2/metricsutils"
+	"github.com/grafana/alloy/internal/static/integrations/v2"
+	"github.com/grafana/alloy/internal/static/integrations/v2/common"
+	"github.com/grafana/alloy/internal/static/integrations/v2/metricsutils"
 	"github.com/grafana/vmware_exporter/vsphere"
 	config_util "github.com/prometheus/common/config"
 )
diff --git a/internal/static/integrations/vmware_exporter/vmware_exporter.go b/internal/static/integrations/vmware_exporter/vmware_exporter.go
index b84e209d97..329c562ea3 100644
--- a/internal/static/integrations/vmware_exporter/vmware_exporter.go
+++ b/internal/static/integrations/vmware_exporter/vmware_exporter.go
@@ -6,7 +6,7 @@ import (
 	"time"
 
 	"github.com/go-kit/log"
-	"github.com/grafana/agent/internal/static/integrations"
+	"github.com/grafana/alloy/internal/static/integrations"
 	"github.com/grafana/vmware_exporter/vsphere"
 	config_util "github.com/prometheus/common/config"
 )
diff --git a/internal/static/integrations/windows_exporter/config.go b/internal/static/integrations/windows_exporter/config.go
index f3c3cadafe..6803c4f986 100644
--- a/internal/static/integrations/windows_exporter/config.go
+++ b/internal/static/integrations/windows_exporter/config.go
@@ -2,9 +2,9 @@ package windows_exporter //nolint:golint
 
 import (
 	"github.com/go-kit/log"
-	"github.com/grafana/agent/internal/static/integrations"
-	integrations_v2 "github.com/grafana/agent/internal/static/integrations/v2"
-	"github.com/grafana/agent/internal/static/integrations/v2/metricsutils"
+	"github.com/grafana/alloy/internal/static/integrations"
+	integrations_v2 "github.com/grafana/alloy/internal/static/integrations/v2"
+	"github.com/grafana/alloy/internal/static/integrations/v2/metricsutils"
 )
 
 func init() {
diff --git a/internal/static/integrations/windows_exporter/windows_exporter.go b/internal/static/integrations/windows_exporter/windows_exporter.go
index 55d02ac496..c4b9daaaeb 100644
--- a/internal/static/integrations/windows_exporter/windows_exporter.go
+++ b/internal/static/integrations/windows_exporter/windows_exporter.go
@@ -8,7 +8,7 @@ import (
 	"github.com/go-kit/log"
 	"github.com/go-kit/log/level"
 
-	"github.com/grafana/agent/internal/static/integrations/config"
+	"github.com/grafana/alloy/internal/static/integrations/config"
 )
 
 // Integration is the windows_exporter integration. On non-Windows platforms,
diff --git a/internal/static/integrations/windows_exporter/windows_exporter_windows.go b/internal/static/integrations/windows_exporter/windows_exporter_windows.go
index 83faf6c27d..5ca468cf85 100644
--- a/internal/static/integrations/windows_exporter/windows_exporter_windows.go
+++ b/internal/static/integrations/windows_exporter/windows_exporter_windows.go
@@ -7,7 +7,7 @@ import (
 	"github.com/go-kit/log"
 	"github.com/go-kit/log/level"
 
-	"github.com/grafana/agent/internal/static/integrations"
+	"github.com/grafana/alloy/internal/static/integrations"
 	"github.com/prometheus-community/windows_exporter/pkg/collector"
 )
diff --git a/internal/static/logs/logs.go b/internal/static/logs/logs.go
index 8dd2035341..a35da83f89 100644
--- a/internal/static/logs/logs.go
+++ b/internal/static/logs/logs.go
@@ -4,7 +4,7 @@ package logs
 import (
 	_ "time/tzdata" // embed timezone data
 
-	"github.com/grafana/agent/internal/useragent"
+	"github.com/grafana/alloy/internal/useragent"
 	"github.com/grafana/loki/clients/pkg/promtail/client"
 	"github.com/grafana/loki/clients/pkg/promtail/config"
 	"github.com/grafana/loki/clients/pkg/promtail/server"
diff --git a/internal/static/metrics/agent.go b/internal/static/metrics/agent.go
index 6ef123c1ec..cb7f610842 100644
--- a/internal/static/metrics/agent.go
+++ b/internal/static/metrics/agent.go
@@ -9,10 +9,10 @@ import (
 	"fmt"
 	"time"
 
-	"github.com/grafana/agent/internal/static/metrics/cluster"
-	"github.com/grafana/agent/internal/static/metrics/cluster/client"
-	"github.com/grafana/agent/internal/static/metrics/instance"
-	"github.com/grafana/agent/internal/util"
+	"github.com/grafana/alloy/internal/static/metrics/cluster"
+	"github.com/grafana/alloy/internal/static/metrics/cluster/client"
+	"github.com/grafana/alloy/internal/static/metrics/instance"
+	"github.com/grafana/alloy/internal/util"
 )
 
 // DefaultConfig is the default settings for the Prometheus-lite client.
diff --git a/internal/static/metrics/agent_test.go b/internal/static/metrics/agent_test.go index 2d1d063b20..7669723743 100644 --- a/internal/static/metrics/agent_test.go +++ b/internal/static/metrics/agent_test.go @@ -4,7 +4,7 @@ import ( "errors" "testing" - "github.com/grafana/agent/internal/static/metrics/instance" + "github.com/grafana/alloy/internal/static/metrics/instance" "github.com/stretchr/testify/require" "gopkg.in/yaml.v2" ) diff --git a/internal/static/metrics/cluster/client/client.go b/internal/static/metrics/cluster/client/client.go index b4180ab3b0..03cb8de175 100644 --- a/internal/static/metrics/cluster/client/client.go +++ b/internal/static/metrics/cluster/client/client.go @@ -4,7 +4,7 @@ import ( "flag" "reflect" - "github.com/grafana/agent/internal/util" + "github.com/grafana/alloy/internal/util" "github.com/grafana/dskit/grpcclient" ) diff --git a/internal/static/metrics/cluster/config.go b/internal/static/metrics/cluster/config.go index f51a529c04..6774ac80a5 100644 --- a/internal/static/metrics/cluster/config.go +++ b/internal/static/metrics/cluster/config.go @@ -6,9 +6,9 @@ import ( "strings" "time" - "github.com/grafana/agent/internal/static/metrics/cluster/client" - flagutil "github.com/grafana/agent/internal/util" - util_log "github.com/grafana/agent/internal/util/log" + "github.com/grafana/alloy/internal/static/metrics/cluster/client" + flagutil "github.com/grafana/alloy/internal/util" + util_log "github.com/grafana/alloy/internal/util/log" "github.com/grafana/dskit/kv" "github.com/grafana/dskit/ring" ) diff --git a/internal/static/metrics/instance/instance.go b/internal/static/metrics/instance/instance.go index db8e22109c..59b3380901 100644 --- a/internal/static/metrics/instance/instance.go +++ b/internal/static/metrics/instance/instance.go @@ -9,7 +9,7 @@ import ( "fmt" "time" - "github.com/grafana/agent/internal/useragent" + "github.com/grafana/alloy/internal/useragent" "github.com/prometheus/prometheus/config" "github.com/prometheus/prometheus/model/relabel" "github.com/prometheus/prometheus/scrape" diff --git a/internal/static/metrics/wal/wal_test.go b/internal/static/metrics/wal/wal_test.go index 14bc252ff0..8563b0b400 100644 --- a/internal/static/metrics/wal/wal_test.go +++ b/internal/static/metrics/wal/wal_test.go @@ -10,7 +10,7 @@ import ( "time" "github.com/go-kit/log" - "github.com/grafana/agent/internal/util" + "github.com/grafana/alloy/internal/util" "github.com/prometheus/prometheus/model/exemplar" "github.com/prometheus/prometheus/model/labels" "github.com/prometheus/prometheus/model/value" diff --git a/internal/static/server/config.go b/internal/static/server/config.go index ce5ffa99e8..5a85a79997 100644 --- a/internal/static/server/config.go +++ b/internal/static/server/config.go @@ -3,7 +3,7 @@ package server import ( "flag" - "github.com/grafana/agent/internal/flow/logging" + "github.com/grafana/alloy/internal/flow/logging" "github.com/grafana/dskit/log" ) diff --git a/internal/static/traces/automaticloggingprocessor/automaticloggingprocessor.go b/internal/static/traces/automaticloggingprocessor/automaticloggingprocessor.go index 0624e1a54d..303f5d89f8 100644 --- a/internal/static/traces/automaticloggingprocessor/automaticloggingprocessor.go +++ b/internal/static/traces/automaticloggingprocessor/automaticloggingprocessor.go @@ -7,7 +7,7 @@ import ( "time" "github.com/go-kit/log" - util_log "github.com/grafana/agent/internal/util/log" + util_log "github.com/grafana/alloy/internal/util/log" "go.opentelemetry.io/collector/component" 
"go.opentelemetry.io/collector/consumer" "go.opentelemetry.io/collector/pdata/ptrace" diff --git a/internal/static/traces/automaticloggingprocessor/automaticloggingprocessor_test.go b/internal/static/traces/automaticloggingprocessor/automaticloggingprocessor_test.go index b02b7ba7f9..c4dc12ba4c 100644 --- a/internal/static/traces/automaticloggingprocessor/automaticloggingprocessor_test.go +++ b/internal/static/traces/automaticloggingprocessor/automaticloggingprocessor_test.go @@ -4,8 +4,8 @@ import ( "context" "testing" - "github.com/grafana/agent/internal/static/logs" - "github.com/grafana/agent/internal/util" + "github.com/grafana/alloy/internal/static/logs" + "github.com/grafana/alloy/internal/util" "github.com/stretchr/testify/require" "go.opentelemetry.io/collector/component/componenttest" "gopkg.in/yaml.v3" diff --git a/internal/static/traces/automaticloggingprocessor/factory.go b/internal/static/traces/automaticloggingprocessor/factory.go index bab898f972..0e2e753ebe 100644 --- a/internal/static/traces/automaticloggingprocessor/factory.go +++ b/internal/static/traces/automaticloggingprocessor/factory.go @@ -5,7 +5,7 @@ import ( "fmt" "time" - "github.com/grafana/agent/internal/static/logs" + "github.com/grafana/alloy/internal/static/logs" "go.opentelemetry.io/collector/component" "go.opentelemetry.io/collector/consumer" "go.opentelemetry.io/collector/processor" diff --git a/internal/static/traces/config.go b/internal/static/traces/config.go index 12aa4c784e..2c98e7bda0 100644 --- a/internal/static/traces/config.go +++ b/internal/static/traces/config.go @@ -11,7 +11,7 @@ import ( "strings" "time" - promsdconsumer "github.com/grafana/agent/internal/static/traces/promsdprocessor/consumer" + promsdconsumer "github.com/grafana/alloy/internal/static/traces/promsdprocessor/consumer" "github.com/mitchellh/mapstructure" "github.com/open-telemetry/opentelemetry-collector-contrib/exporter/loadbalancingexporter" "github.com/open-telemetry/opentelemetry-collector-contrib/exporter/prometheusexporter" @@ -40,14 +40,14 @@ import ( "go.uber.org/multierr" "gopkg.in/yaml.v2" - "github.com/grafana/agent/internal/static/logs" - "github.com/grafana/agent/internal/static/traces/automaticloggingprocessor" - "github.com/grafana/agent/internal/static/traces/noopreceiver" - "github.com/grafana/agent/internal/static/traces/promsdprocessor" - "github.com/grafana/agent/internal/static/traces/pushreceiver" - "github.com/grafana/agent/internal/static/traces/remotewriteexporter" - "github.com/grafana/agent/internal/static/traces/servicegraphprocessor" - "github.com/grafana/agent/internal/util" + "github.com/grafana/alloy/internal/static/logs" + "github.com/grafana/alloy/internal/static/traces/automaticloggingprocessor" + "github.com/grafana/alloy/internal/static/traces/noopreceiver" + "github.com/grafana/alloy/internal/static/traces/promsdprocessor" + "github.com/grafana/alloy/internal/static/traces/pushreceiver" + "github.com/grafana/alloy/internal/static/traces/remotewriteexporter" + "github.com/grafana/alloy/internal/static/traces/servicegraphprocessor" + "github.com/grafana/alloy/internal/util" ) const ( diff --git a/internal/static/traces/config_test.go b/internal/static/traces/config_test.go index fb8443abab..eb95372b22 100644 --- a/internal/static/traces/config_test.go +++ b/internal/static/traces/config_test.go @@ -6,7 +6,7 @@ import ( "strings" "testing" - "github.com/grafana/agent/internal/static/traces/pushreceiver" + "github.com/grafana/alloy/internal/static/traces/pushreceiver" 
"github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" "go.opentelemetry.io/collector/component" diff --git a/internal/static/traces/promsdprocessor/consumer/consumer.go b/internal/static/traces/promsdprocessor/consumer/consumer.go index a5825196d6..d3660558ee 100644 --- a/internal/static/traces/promsdprocessor/consumer/consumer.go +++ b/internal/static/traces/promsdprocessor/consumer/consumer.go @@ -9,7 +9,7 @@ import ( "github.com/go-kit/log" "github.com/go-kit/log/level" - "github.com/grafana/agent/internal/component/discovery" + "github.com/grafana/alloy/internal/component/discovery" "github.com/prometheus/common/model" "go.opentelemetry.io/collector/client" otelcomponent "go.opentelemetry.io/collector/component" diff --git a/internal/static/traces/promsdprocessor/consumer/consumer_test.go b/internal/static/traces/promsdprocessor/consumer/consumer_test.go index 87730834d7..758b4095e5 100644 --- a/internal/static/traces/promsdprocessor/consumer/consumer_test.go +++ b/internal/static/traces/promsdprocessor/consumer/consumer_test.go @@ -5,8 +5,8 @@ import ( "net" "testing" - "github.com/grafana/agent/internal/component/discovery" - "github.com/grafana/agent/internal/util" + "github.com/grafana/alloy/internal/component/discovery" + "github.com/grafana/alloy/internal/util" "github.com/stretchr/testify/require" "go.opentelemetry.io/collector/client" "go.opentelemetry.io/collector/consumer/consumertest" diff --git a/internal/static/traces/promsdprocessor/prom_sd_processor.go b/internal/static/traces/promsdprocessor/prom_sd_processor.go index c88bfbca33..181a8a05e9 100644 --- a/internal/static/traces/promsdprocessor/prom_sd_processor.go +++ b/internal/static/traces/promsdprocessor/prom_sd_processor.go @@ -6,9 +6,9 @@ import ( "github.com/go-kit/log" "github.com/go-kit/log/level" - "github.com/grafana/agent/internal/component/discovery" - promsdconsumer "github.com/grafana/agent/internal/static/traces/promsdprocessor/consumer" - util "github.com/grafana/agent/internal/util/log" + "github.com/grafana/alloy/internal/component/discovery" + promsdconsumer "github.com/grafana/alloy/internal/static/traces/promsdprocessor/consumer" + util "github.com/grafana/alloy/internal/util/log" "github.com/prometheus/prometheus/config" promdiscovery "github.com/prometheus/prometheus/discovery" "github.com/prometheus/prometheus/discovery/targetgroup" diff --git a/internal/static/traces/promsdprocessor/prom_sd_processor_test.go b/internal/static/traces/promsdprocessor/prom_sd_processor_test.go index 1c703a2220..3641168149 100644 --- a/internal/static/traces/promsdprocessor/prom_sd_processor_test.go +++ b/internal/static/traces/promsdprocessor/prom_sd_processor_test.go @@ -4,7 +4,7 @@ import ( "testing" "github.com/go-kit/log" - "github.com/grafana/agent/internal/component/discovery" + "github.com/grafana/alloy/internal/component/discovery" "github.com/prometheus/common/model" "github.com/prometheus/prometheus/discovery/targetgroup" "github.com/prometheus/prometheus/model/relabel" diff --git a/internal/static/traces/servicegraphprocessor/processor.go b/internal/static/traces/servicegraphprocessor/processor.go index b99648f38e..95c2ff36dd 100644 --- a/internal/static/traces/servicegraphprocessor/processor.go +++ b/internal/static/traces/servicegraphprocessor/processor.go @@ -8,7 +8,7 @@ import ( "github.com/go-kit/log" "github.com/go-kit/log/level" - util "github.com/grafana/agent/internal/util/log" + util "github.com/grafana/alloy/internal/util/log" "go.opentelemetry.io/collector/component" 
"go.opentelemetry.io/collector/consumer" "go.opentelemetry.io/collector/pdata/ptrace" diff --git a/internal/static/traces/servicegraphprocessor/processor_test.go b/internal/static/traces/servicegraphprocessor/processor_test.go index d2ba70f4b0..6337d17053 100644 --- a/internal/static/traces/servicegraphprocessor/processor_test.go +++ b/internal/static/traces/servicegraphprocessor/processor_test.go @@ -7,7 +7,7 @@ import ( "testing" "time" - "github.com/grafana/agent/internal/static/traces/traceutils" + "github.com/grafana/alloy/internal/static/traces/traceutils" "github.com/prometheus/client_golang/prometheus" "github.com/prometheus/client_golang/prometheus/testutil" "github.com/stretchr/testify/assert" diff --git a/internal/static/traces/traceutils/server.go b/internal/static/traces/traceutils/server.go index f22c445953..9ad1ed2a08 100644 --- a/internal/static/traces/traceutils/server.go +++ b/internal/static/traces/traceutils/server.go @@ -8,7 +8,7 @@ import ( "testing" "time" - "github.com/grafana/agent/internal/util" + "github.com/grafana/alloy/internal/util" "github.com/stretchr/testify/assert" "go.opentelemetry.io/collector/component" "go.opentelemetry.io/collector/confmap" diff --git a/internal/tools/docs_generator/compatible_components_page.go b/internal/tools/docs_generator/compatible_components_page.go index 960294ea4b..35689df5be 100644 --- a/internal/tools/docs_generator/compatible_components_page.go +++ b/internal/tools/docs_generator/compatible_components_page.go @@ -5,7 +5,7 @@ import ( "sort" "strings" - "github.com/grafana/agent/internal/component/metadata" + "github.com/grafana/alloy/internal/component/metadata" "golang.org/x/exp/maps" ) diff --git a/internal/tools/docs_generator/docs_generator.go b/internal/tools/docs_generator/docs_generator.go index 071f80eb50..9a9f33fddf 100644 --- a/internal/tools/docs_generator/docs_generator.go +++ b/internal/tools/docs_generator/docs_generator.go @@ -5,8 +5,8 @@ import ( "fmt" "os" - "github.com/grafana/agent/internal/component" - "github.com/grafana/agent/internal/component/metadata" + "github.com/grafana/alloy/internal/component" + "github.com/grafana/alloy/internal/component/metadata" ) type DocsGenerator interface { diff --git a/internal/tools/docs_generator/docs_updated_test.go b/internal/tools/docs_generator/docs_updated_test.go index 3c45abe4df..65358f908a 100644 --- a/internal/tools/docs_generator/docs_updated_test.go +++ b/internal/tools/docs_generator/docs_updated_test.go @@ -8,10 +8,10 @@ import ( "strings" "testing" - "github.com/grafana/agent/internal/component" - _ "github.com/grafana/agent/internal/component/all" - "github.com/grafana/agent/internal/component/metadata" - generator "github.com/grafana/agent/internal/tools/docs_generator" + "github.com/grafana/alloy/internal/component" + _ "github.com/grafana/alloy/internal/component/all" + "github.com/grafana/alloy/internal/component/metadata" + generator "github.com/grafana/alloy/internal/tools/docs_generator" "github.com/stretchr/testify/require" ) diff --git a/internal/tools/docs_generator/links_to_types.go b/internal/tools/docs_generator/links_to_types.go index e6960fe83b..4a49fd45f7 100644 --- a/internal/tools/docs_generator/links_to_types.go +++ b/internal/tools/docs_generator/links_to_types.go @@ -5,7 +5,7 @@ import ( "regexp" "strings" - "github.com/grafana/agent/internal/component/metadata" + "github.com/grafana/alloy/internal/component/metadata" ) type LinksToTypesGenerator struct { diff --git a/internal/usagestats/reporter.go 
b/internal/usagestats/reporter.go index a175e154b9..b8fd90228e 100644 --- a/internal/usagestats/reporter.go +++ b/internal/usagestats/reporter.go @@ -7,7 +7,7 @@ import ( "github.com/go-kit/log" "github.com/go-kit/log/level" - "github.com/grafana/agent/internal/agentseed" + "github.com/grafana/alloy/internal/agentseed" "github.com/grafana/dskit/backoff" "github.com/grafana/dskit/multierror" ) diff --git a/internal/usagestats/stats.go b/internal/usagestats/stats.go index 33fffd157e..b1b08c48dc 100644 --- a/internal/usagestats/stats.go +++ b/internal/usagestats/stats.go @@ -10,8 +10,8 @@ import ( "runtime" "time" - "github.com/grafana/agent/internal/agentseed" - "github.com/grafana/agent/internal/useragent" + "github.com/grafana/alloy/internal/agentseed" + "github.com/grafana/alloy/internal/useragent" "github.com/prometheus/common/version" ) diff --git a/internal/useragent/useragent.go b/internal/useragent/useragent.go index 1c6739fb50..10eb4fce6f 100644 --- a/internal/useragent/useragent.go +++ b/internal/useragent/useragent.go @@ -9,7 +9,7 @@ import ( "runtime" "strings" - "github.com/grafana/agent/internal/build" + "github.com/grafana/alloy/internal/build" ) const ( diff --git a/internal/useragent/useragent_test.go b/internal/useragent/useragent_test.go index 6a94fe0b28..3502ec2137 100644 --- a/internal/useragent/useragent_test.go +++ b/internal/useragent/useragent_test.go @@ -3,7 +3,7 @@ package useragent import ( "testing" - "github.com/grafana/agent/internal/build" + "github.com/grafana/alloy/internal/build" "github.com/stretchr/testify/require" ) diff --git a/internal/util/test_logger.go b/internal/util/test_logger.go index a158953af2..9a9795ee64 100644 --- a/internal/util/test_logger.go +++ b/internal/util/test_logger.go @@ -6,7 +6,7 @@ import ( "time" "github.com/go-kit/log" - "github.com/grafana/agent/internal/flow/logging" + "github.com/grafana/alloy/internal/flow/logging" "github.com/stretchr/testify/require" ) diff --git a/internal/util/testappender/testappender.go b/internal/util/testappender/testappender.go index 0417329000..88bd00a70f 100644 --- a/internal/util/testappender/testappender.go +++ b/internal/util/testappender/testappender.go @@ -5,7 +5,7 @@ package testappender import ( "fmt" - "github.com/grafana/agent/internal/util/testappender/internal/dtobuilder" + "github.com/grafana/alloy/internal/util/testappender/internal/dtobuilder" dto "github.com/prometheus/client_model/go" "github.com/prometheus/common/model" "github.com/prometheus/prometheus/model/exemplar" diff --git a/internal/util/testappender/testappender_test.go b/internal/util/testappender/testappender_test.go index 797f4cfde9..db291fbc56 100644 --- a/internal/util/testappender/testappender_test.go +++ b/internal/util/testappender/testappender_test.go @@ -4,7 +4,7 @@ import ( "fmt" "testing" - "github.com/grafana/agent/internal/util/testappender" + "github.com/grafana/alloy/internal/util/testappender" "github.com/prometheus/prometheus/model/exemplar" "github.com/prometheus/prometheus/model/labels" "github.com/prometheus/prometheus/model/metadata" diff --git a/internal/util/wildcard/match_test.go b/internal/util/wildcard/match_test.go index eb0728530b..f99b57e4c9 100644 --- a/internal/util/wildcard/match_test.go +++ b/internal/util/wildcard/match_test.go @@ -19,7 +19,7 @@ package wildcard_test import ( "testing" - "github.com/grafana/agent/internal/util/wildcard" + "github.com/grafana/alloy/internal/util/wildcard" ) // TestMatch - Tests validate the logic of wild card matching. 
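The hunks above and below are part of a purely mechanical rewrite of the Go module path from `github.com/grafana/agent` to `github.com/grafana/alloy`. A rename of this shape is normally scripted rather than hand-edited; the following is a minimal sketch of one way to do it for a single file. It is not part of this patch series, and the single-file CLI shape and the `golang.org/x/tools` dependency are assumptions for illustration only.

```go
// rewriteimports.go: rewrite one file's imports from the old module path
// to the new one and print the gofmt-formatted result to stdout.
package main

import (
	"fmt"
	"go/format"
	"go/parser"
	"go/token"
	"os"
	"strconv"
	"strings"

	"golang.org/x/tools/go/ast/astutil"
)

const (
	oldPrefix = "github.com/grafana/agent/"
	newPrefix = "github.com/grafana/alloy/"
)

func main() {
	if len(os.Args) != 2 {
		fmt.Fprintln(os.Stderr, "usage: rewriteimports <file.go>")
		os.Exit(2)
	}

	fset := token.NewFileSet()
	file, err := parser.ParseFile(fset, os.Args[1], nil, parser.ParseComments)
	if err != nil {
		fmt.Fprintln(os.Stderr, err)
		os.Exit(1)
	}

	// Rewrite every import that lives under the old module path; named
	// imports (e.g. promsdconsumer "…") keep their local name.
	for _, imp := range file.Imports {
		path, _ := strconv.Unquote(imp.Path.Value)
		if strings.HasPrefix(path, oldPrefix) {
			astutil.RewriteImport(fset, file, path, newPrefix+strings.TrimPrefix(path, oldPrefix))
		}
	}

	// Print the rewritten file; a real run would write it back in place
	// and then build the tree to verify nothing was missed.
	if err := format.Node(os.Stdout, fset, file); err != nil {
		fmt.Fprintln(os.Stderr, err)
		os.Exit(1)
	}
}
```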
diff --git a/internal/util/zapadapter/zapadapter_test.go b/internal/util/zapadapter/zapadapter_test.go index c0881ed4ba..8b6747740d 100644 --- a/internal/util/zapadapter/zapadapter_test.go +++ b/internal/util/zapadapter/zapadapter_test.go @@ -10,7 +10,7 @@ import ( "github.com/go-kit/log" "github.com/go-kit/log/level" - "github.com/grafana/agent/internal/util/zapadapter" + "github.com/grafana/alloy/internal/util/zapadapter" "github.com/stretchr/testify/require" "go.uber.org/zap" ) diff --git a/internal/vcs/git_test.go b/internal/vcs/git_test.go index a7614eb950..6ec47fa6d8 100644 --- a/internal/vcs/git_test.go +++ b/internal/vcs/git_test.go @@ -6,7 +6,7 @@ import ( "github.com/go-git/go-git/v5" "github.com/go-git/go-git/v5/config" - "github.com/grafana/agent/internal/vcs" + "github.com/grafana/alloy/internal/vcs" "github.com/stretchr/testify/require" ) diff --git a/internal/web/api/api.go b/internal/web/api/api.go index f57806c951..a6e34dc38e 100644 --- a/internal/web/api/api.go +++ b/internal/web/api/api.go @@ -10,9 +10,9 @@ import ( "path" "github.com/gorilla/mux" - "github.com/grafana/agent/internal/component" - "github.com/grafana/agent/internal/service" - "github.com/grafana/agent/internal/service/cluster" + "github.com/grafana/alloy/internal/component" + "github.com/grafana/alloy/internal/service" + "github.com/grafana/alloy/internal/service/cluster" "github.com/prometheus/prometheus/util/httputil" ) diff --git a/internal/web/ui/ui.go b/internal/web/ui/ui.go index 82a910415f..3e7ff81ce0 100644 --- a/internal/web/ui/ui.go +++ b/internal/web/ui/ui.go @@ -31,7 +31,7 @@ import ( // invoked after all other routes have been registered. // // RegisterRoutes is not intended for public use and will only work properly -// when called from github.com/grafana/agent. +// when called from github.com/grafana/alloy. 
func RegisterRoutes(pathPrefix string, router *mux.Router) { if !strings.HasSuffix(pathPrefix, "/") { pathPrefix = pathPrefix + "/" From cd210de297a8fcc50c43910b71ad095938e2118f Mon Sep 17 00:00:00 2001 From: Robert Fratto Date: Mon, 25 Mar 2024 15:38:36 -0400 Subject: [PATCH 053/136] misc: rename some references of grafana/agent to grafana/alloy --- Makefile | 2 +- README.md | 2 +- docs/developer/contributing.md | 10 +++++----- docs/developer/release/3-update-version-in-code.md | 2 +- docs/developer/release/6-publish-release.md | 10 +++++----- docs/developer/release/8-update-helm-charts.md | 2 +- docs/developer/release/9-announce-release.md | 10 +++++----- docs/developer/release/README.md | 2 +- docs/rfcs/0000-template.md | 2 +- docs/rfcs/0001-designing-in-the-open.md | 6 +++--- .../reference/components/prometheus.exporter.github.md | 2 +- .../reference/components/prometheus.exporter.unix.md | 2 +- docs/sources/tutorials/assets/runt.sh | 2 +- internal/cmd/agentlint/main.go | 2 +- .../prometheus/exporter/github/github_test.go | 8 ++++---- .../component/prometheus/exporter/statsd/config.go | 4 ++-- internal/component/prometheus/exporter/unix/config.go | 4 ++-- .../flow/internal/controller/component_registry.go | 2 +- internal/flowmode/cmd_run.go | 2 +- internal/static/server/server.go | 2 +- operations/river-jsonnet/README.md | 2 +- tools/gen-crd-docs/config.json | 4 ++-- 22 files changed, 42 insertions(+), 42 deletions(-) diff --git a/Makefile b/Makefile index 848f60077c..106191826a 100644 --- a/Makefile +++ b/Makefile @@ -102,7 +102,7 @@ GO_ENV := GOOS=$(GOOS) GOARCH=$(GOARCH) GOARM=$(GOARM) CGO_ENABLED=$(CGO_ENABLED VERSION ?= $(shell bash ./tools/image-tag) GIT_REVISION := $(shell git rev-parse --short HEAD) GIT_BRANCH := $(shell git rev-parse --abbrev-ref HEAD) -VPREFIX := github.com/grafana/agent/internal/build +VPREFIX := github.com/grafana/alloy/internal/build GO_LDFLAGS := -X $(VPREFIX).Branch=$(GIT_BRANCH) \ -X $(VPREFIX).Version=$(VERSION) \ -X $(VPREFIX).Revision=$(GIT_REVISION) \ diff --git a/README.md b/README.md index 0c7b9a1c05..078bfc57a5 100644 --- a/README.md +++ b/README.md @@ -129,7 +129,7 @@ To engage with the Alloy community: * Attend the monthly [community call][community-call]. -[discussions]: https://github.com/grafana/agent/discussions +[discussions]: https://github.com/grafana/alloy/discussions [issue]: https://github.com/grafana/alloy/issues/new [community-call]: https://docs.google.com/document/d/1TqaZD1JPfNadZ4V81OCBPCG_TksDYGlNlGdMnTWUSpo diff --git a/docs/developer/contributing.md b/docs/developer/contributing.md index 4d14fedbb7..5b4601fcd9 100644 --- a/docs/developer/contributing.md +++ b/docs/developer/contributing.md @@ -62,7 +62,7 @@ To build Grafana Agent from source code, please install the following tools: You can directly use the go tool to download and install the agent binary into your GOPATH: - $ GO111MODULE=on go install github.com/grafana/agent/cmd/alloy + $ GO111MODULE=on go install github.com/grafana/alloy/cmd/alloy $ alloy run your_config.river An example of the above configuration file can be found [here][example-config]. @@ -71,7 +71,7 @@ You can also clone the repository yourself and build using `make agent`: $ mkdir -p $GOPATH/src/github.com/grafana $ cd $GOPATH/src/github.com/grafana - $ git clone https://github.com/grafana/agent.git + $ git clone https://github.com/grafana/alloy.git $ cd agent $ make agent $ ./build/alloy run your_config.river @@ -197,13 +197,13 @@ acceptable, and remove the `replace` directive as soon as possible. 
If upstream is unresponsive, consider choosing a different dependency or making a hard fork (i.e., creating a new Go module with the same source). -[new-issue]: https://github.com/grafana/agent/issues/new +[new-issue]: https://github.com/grafana/alloy/issues/new [RFC]: ../rfcs/0001-designing-in-the-open.md [code-review-comments]: https://code.google.com/p/go-wiki/wiki/CodeReviewComments [best-practices]: https://peter.bourgon.org/go-in-production/#formatting-and-style [uber-style-guide]: https://github.com/uber-go/guide/blob/master/style.md -[CLA]: https://cla-assistant.io/grafana/agent -[good-first-issue]: https://github.com/grafana/agent/issues?q=is%3Aopen+is%3Aissue+label%3A%22good+first+issue%22 +[CLA]: https://cla-assistant.io/grafana/alloy +[good-first-issue]: https://github.com/grafana/alloy/issues?q=is%3Aopen+is%3Aissue+label%3A%22good+first+issue%22 [community-slack]: https://slack.grafana.com/ [example-config]: ../../cmd/alloy/example-config.river [go-modules]: https://golang.org/cmd/go/#hdr-Modules__module_versions__and_more diff --git a/docs/developer/release/3-update-version-in-code.md b/docs/developer/release/3-update-version-in-code.md index 7a3c9cdc3b..218f2508b0 100644 --- a/docs/developer/release/3-update-version-in-code.md +++ b/docs/developer/release/3-update-version-in-code.md @@ -10,7 +10,7 @@ The project must be updated to reference the upcoming release tag whenever a new ## Steps -1. Create a branch from `main` for [grafana/agent](https://github.com/grafana/agent). +1. Create a branch from `main` for [grafana/alloy](https://github.com/grafana/alloy). 2. Update the `CHANGELOG.md`: diff --git a/docs/developer/release/6-publish-release.md b/docs/developer/release/6-publish-release.md index 48a492aeb4..a5d85f7458 100644 --- a/docs/developer/release/6-publish-release.md +++ b/docs/developer/release/6-publish-release.md @@ -4,7 +4,7 @@ This is how to publish the release in GitHub. ## Before you begin -1. You should see a new draft release created [here](https://github.com/grafana/agent/releases). If not go back to [Tag Release](./4-tag-release.md). +1. You should see a new draft release created [here](https://github.com/grafana/alloy/releases). If not go back to [Tag Release](./4-tag-release.md). ## Steps @@ -14,9 +14,9 @@ This is how to publish the release in GitHub. 3. Add a footer to the `Notable Changes` section: - `For a full list of changes, please refer to the [CHANGELOG](https://github.com/grafana/agent/blob/RELEASE_VERSION/CHANGELOG.md)!` - - Do not substitute the value for `CHANGELOG`. + `For a full list of changes, please refer to the [CHANGELOG](https://github.com/grafana/alloy/blob/RELEASE_VERSION/CHANGELOG.md)!` + + Do not substitute the value for `CHANGELOG`. 4. At the bottom of the release page, perform the following: - Tick the check box to "add a discussion" under the category for "announcements". @@ -26,4 +26,4 @@ This is how to publish the release in GitHub. 5. Optionally, have other team members review the release draft if you wish to feel more comfortable with it. -6. Publish the release! \ No newline at end of file +6. Publish the release! diff --git a/docs/developer/release/8-update-helm-charts.md b/docs/developer/release/8-update-helm-charts.md index 00dbaaa916..961cb6260a 100644 --- a/docs/developer/release/8-update-helm-charts.md +++ b/docs/developer/release/8-update-helm-charts.md @@ -28,7 +28,7 @@ Our Helm charts require some version updates as well. 3. 
Open a PR, following the pattern in PR [#2233](https://github.com/grafana/helm-charts/pull/2233). -4. Create a branch from `main` for [grafana/agent](https://github.com/grafana/agent). +4. Create a branch from `main` for [grafana/alloy](https://github.com/grafana/alloy). 5. Update the helm chart code in `$agentRepo/operations/helm`: diff --git a/docs/developer/release/9-announce-release.md b/docs/developer/release/9-announce-release.md index 4319bb5115..4a117ecd42 100644 --- a/docs/developer/release/9-announce-release.md +++ b/docs/developer/release/9-announce-release.md @@ -10,8 +10,8 @@ You made it! This is the last step for any release. ``` :grafana-agent: Grafana Agent RELEASE_VERSION is now available! :grafana-agent: - Release: https://github.com/grafana/agent/releases/tag/RELEASE_VERSION - Full changelog: https://github.com/grafana/agent/blob/RELEASE_VERSION/CHANGELOG.md + Release: https://github.com/grafana/alloy/releases/tag/RELEASE_VERSION + Full changelog: https://github.com/grafana/alloy/blob/RELEASE_VERSION/CHANGELOG.md We'll be publishing STABLE_RELEASE_VERSION on STABLE_RELEASE_DATE if we haven't heard about any major issues. ``` @@ -19,6 +19,6 @@ You made it! This is the last step for any release. ``` :grafana-agent: Grafana Agent RELEASE_VERSION is now available! :grafana-agent: - Release: https://github.com/grafana/agent/releases/tag/RELEASE_VERSION - Full changelog: https://github.com/grafana/agent/blob/RELEASE_VERSION/CHANGELOG.md - ``` \ No newline at end of file + Release: https://github.com/grafana/alloy/releases/tag/RELEASE_VERSION + Full changelog: https://github.com/grafana/alloy/blob/RELEASE_VERSION/CHANGELOG.md + ``` diff --git a/docs/developer/release/README.md b/docs/developer/release/README.md index 116cf69665..5486eed7f4 100644 --- a/docs/developer/release/README.md +++ b/docs/developer/release/README.md @@ -1,7 +1,7 @@ # Releasing This document describes the process of creating a release for the -`grafana/agent` repo. A release includes release assets for everything inside +`grafana/alloy` repo. A release includes release assets for everything inside the repository, including Grafana Agent and Grafana Agent Operator. The processes described here are for v0.24.0 and above. diff --git a/docs/rfcs/0000-template.md b/docs/rfcs/0000-template.md index c565ea04e5..97eab801c0 100644 --- a/docs/rfcs/0000-template.md +++ b/docs/rfcs/0000-template.md @@ -2,4 +2,4 @@ * Date: YYYY-MM-DD * Author: Full Name (@github_username) -* PR: [grafana/agent#XXXX](https://github.com/grafana/agent/pull/XXXX) +* PR: [grafana/agent#XXXX](https://github.com/grafana/alloy/pull/XXXX) diff --git a/docs/rfcs/0001-designing-in-the-open.md b/docs/rfcs/0001-designing-in-the-open.md index 7419b060a3..7976a1b3dc 100644 --- a/docs/rfcs/0001-designing-in-the-open.md +++ b/docs/rfcs/0001-designing-in-the-open.md @@ -57,7 +57,7 @@ Public proposals may take one of two forms: ### Issues Issues are the quickest path towards proposing a change. Issue proposals must -be opened at the [grafana/agent issues page](https://github.com/grafana/agent/issues). +be opened at the [issues page](https://github.com/grafana/alloy/issues). 
There are no strict set of rules for issue-based proposals, but authors are recommended to prefix the issue title with `Proposal:` so it may be found more @@ -155,5 +155,5 @@ A few existing public proposal processes have been examined for inspiration: All of these processes are similar, but in the end, the current objective is to start collecting proposals publicly rather than to be prescriptive yet. -[rough consensus]: https://github.com/grafana/agent/blob/main/GOVERNANCE.md#technical-decisions -[governance]: https://github.com/grafana/agent/blob/main/GOVERNANCE.md#team-members +[rough consensus]: https://github.com/grafana/alloy/blob/main/GOVERNANCE.md#technical-decisions +[governance]: https://github.com/grafana/alloy/blob/main/GOVERNANCE.md#team-members diff --git a/docs/sources/reference/components/prometheus.exporter.github.md b/docs/sources/reference/components/prometheus.exporter.github.md index a803653ccd..0167fbb347 100644 --- a/docs/sources/reference/components/prometheus.exporter.github.md +++ b/docs/sources/reference/components/prometheus.exporter.github.md @@ -62,7 +62,7 @@ from `prometheus.exporter.github`: ```river prometheus.exporter.github "example" { api_token_file = "/etc/github-api-token" - repositories = ["grafana/agent"] + repositories = ["grafana/alloy"] } // Configure a prometheus.scrape component to collect github metrics. diff --git a/docs/sources/reference/components/prometheus.exporter.unix.md b/docs/sources/reference/components/prometheus.exporter.unix.md index feda1ab9f5..7384e2aecc 100644 --- a/docs/sources/reference/components/prometheus.exporter.unix.md +++ b/docs/sources/reference/components/prometheus.exporter.unix.md @@ -130,7 +130,7 @@ The following blocks are supported inside the definition of ### filesystem block -The default values can vary by the operating system the agent runs on - refer to the [integration source](https://github.com/grafana/agent/blob/main/internal/static/integrations/node_exporter/config.go) for up-to-date values on each OS. +The default values can vary by the operating system the agent runs on - refer to the [integration source](https://github.com/grafana/alloy/blob/main/internal/static/integrations/node_exporter/config.go) for up-to-date values on each OS. 
| Name | Type | Description | Default | Required | | ---------------------- | ---------- | ------------------------------------------------------------------- | ----------------------------------------------- | -------- | diff --git a/docs/sources/tutorials/assets/runt.sh b/docs/sources/tutorials/assets/runt.sh index 195da7ef78..61d1f62c0e 100644 --- a/docs/sources/tutorials/assets/runt.sh +++ b/docs/sources/tutorials/assets/runt.sh @@ -24,5 +24,5 @@ curl https://raw.githubusercontent.com/grafana/agent/main/docs/sources/flow/tuto mkdir -p ./grafana/dashboards curl https://raw.githubusercontent.com/grafana/agent/main/docs/sources/flow/tutorials/assets/grafana/dashboards/template.jsonnet -o ./grafana/dashboards/template.jsonnet curl https://raw.githubusercontent.com/grafana/agent/main/docs/sources/flow/tutorials/assets/grafana/dashboards/agent.json -o ./grafana/dashboards/agent.json -docker pull grafana/agent:main +docker pull grafana/agent:main CONFIG_FILE=$1 docker-compose -f ./docker-compose.yaml up diff --git a/internal/cmd/agentlint/main.go b/internal/cmd/agentlint/main.go index 01156d5ecb..30ebbbaff9 100644 --- a/internal/cmd/agentlint/main.go +++ b/internal/cmd/agentlint/main.go @@ -1,4 +1,4 @@ -// Command agentlint provides custom linting utilities for the grafana/agent +// Command agentlint provides custom linting utilities for the grafana/alloy // repo. package main diff --git a/internal/component/prometheus/exporter/github/github_test.go b/internal/component/prometheus/exporter/github/github_test.go index d22248ce96..2a729298a8 100644 --- a/internal/component/prometheus/exporter/github/github_test.go +++ b/internal/component/prometheus/exporter/github/github_test.go @@ -10,7 +10,7 @@ import ( func TestUnmarshalRiver(t *testing.T) { riverCfg := ` api_token_file = "/etc/github-api-token" - repositories = ["grafana/agent"] + repositories = ["grafana/alloy"] organizations = ["grafana", "prometheus"] users = ["jcreixell"] api_url = "https://some-other-api.github.com" @@ -19,7 +19,7 @@ func TestUnmarshalRiver(t *testing.T) { err := syntax.Unmarshal([]byte(riverCfg), &args) require.NoError(t, err) require.Equal(t, "/etc/github-api-token", args.APITokenFile) - require.Equal(t, []string{"grafana/agent"}, args.Repositories) + require.Equal(t, []string{"grafana/alloy"}, args.Repositories) require.Contains(t, args.Organizations, "grafana") require.Contains(t, args.Organizations, "prometheus") require.Equal(t, []string{"jcreixell"}, args.Users) @@ -29,7 +29,7 @@ func TestUnmarshalRiver(t *testing.T) { func TestConvert(t *testing.T) { args := Arguments{ APITokenFile: "/etc/github-api-token", - Repositories: []string{"grafana/agent"}, + Repositories: []string{"grafana/alloy"}, Organizations: []string{"grafana", "prometheus"}, Users: []string{"jcreixell"}, APIURL: "https://some-other-api.github.com", @@ -37,7 +37,7 @@ func TestConvert(t *testing.T) { res := args.Convert() require.Equal(t, "/etc/github-api-token", res.APITokenFile) - require.Equal(t, []string{"grafana/agent"}, res.Repositories) + require.Equal(t, []string{"grafana/alloy"}, res.Repositories) require.Contains(t, res.Organizations, "grafana") require.Contains(t, res.Organizations, "prometheus") require.Equal(t, []string{"jcreixell"}, res.Users) diff --git a/internal/component/prometheus/exporter/statsd/config.go b/internal/component/prometheus/exporter/statsd/config.go index 31a97fdf74..0bda74fbd6 100644 --- a/internal/component/prometheus/exporter/statsd/config.go +++ b/internal/component/prometheus/exporter/statsd/config.go @@ 
-35,7 +35,7 @@ type Arguments struct { // DefaultConfig holds non-zero default options for the Config when it is // unmarshaled from YAML. // -// Some defaults are populated from init functions in the github.com/grafana/agent/internal/static/integrations/statsd_exporter package. +// Some defaults are populated from init functions in the github.com/grafana/alloy/internal/static/integrations/statsd_exporter package. var DefaultConfig = Arguments{ ListenUDP: statsd_exporter.DefaultConfig.ListenUDP, @@ -56,7 +56,7 @@ var DefaultConfig = Arguments{ RelayPacketLength: statsd_exporter.DefaultConfig.RelayPacketLength, } -// Convert gives a config suitable for use with github.com/grafana/agent/internal/static/integrations/statsd_exporter. +// Convert gives a config suitable for use with github.com/grafana/alloy/internal/static/integrations/statsd_exporter. func (c *Arguments) Convert() (*statsd_exporter.Config, error) { var ( mappingConfig any diff --git a/internal/component/prometheus/exporter/unix/config.go b/internal/component/prometheus/exporter/unix/config.go index 2a89c5594b..04ce1a90cf 100644 --- a/internal/component/prometheus/exporter/unix/config.go +++ b/internal/component/prometheus/exporter/unix/config.go @@ -10,7 +10,7 @@ import ( // DefaultArguments holds non-zero default options for Arguments when it is // unmarshaled from YAML. // -// Some defaults are populated from init functions in the github.com/grafana/agent/internal/static/integrations/node_exporter package. +// Some defaults are populated from init functions in the github.com/grafana/alloy/internal/static/integrations/node_exporter package. var DefaultArguments = Arguments{ ProcFSPath: node_integration.DefaultConfig.ProcFSPath, RootFSPath: node_integration.DefaultConfig.RootFSPath, @@ -101,7 +101,7 @@ type Arguments struct { VMStat VMStatConfig `alloy:"vmstat,block,optional"` } -// Convert gives a config suitable for use with github.com/grafana/agent/internal/static/integrations/node_exporter. +// Convert gives a config suitable for use with github.com/grafana/alloy/internal/static/integrations/node_exporter. func (a *Arguments) Convert() *node_integration.Config { return &node_integration.Config{ IncludeExporterMetrics: a.IncludeExporterMetrics, diff --git a/internal/flow/internal/controller/component_registry.go b/internal/flow/internal/controller/component_registry.go index df75f1655e..1b585033f8 100644 --- a/internal/flow/internal/controller/component_registry.go +++ b/internal/flow/internal/controller/component_registry.go @@ -19,7 +19,7 @@ type defaultComponentRegistry struct { } // NewDefaultComponentRegistry creates a new [ComponentRegistry] which gets -// components registered to github.com/grafana/agent/component. +// components registered to github.com/grafana/alloy/internal/component. 
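// Purely illustrative (not part of this change): callers construct the
// default registry by passing the minimum stability level that loaded
// components must meet, along the lines of
//
//	registry := NewDefaultComponentRegistry(featuregate.StabilityStable)
//
// where StabilityStable stands in for whatever stability constant the
// caller actually gates on; the exact name used here is hypothetical.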
func NewDefaultComponentRegistry(minStability featuregate.Stability) ComponentRegistry { return defaultComponentRegistry{ minStability: minStability, diff --git a/internal/flowmode/cmd_run.go b/internal/flowmode/cmd_run.go index fd4238d7a6..817f147cdf 100644 --- a/internal/flowmode/cmd_run.go +++ b/internal/flowmode/cmd_run.go @@ -302,7 +302,7 @@ func (fr *flowRun) Run(configPath string) error { return nil, fmt.Errorf("reading config path %q: %w", configPath, err) } if err := f.LoadSource(flowSource, nil); err != nil { - return flowSource, fmt.Errorf("error during the initial grafana/agent load: %w", err) + return flowSource, fmt.Errorf("error during the initial load: %w", err) } return flowSource, nil diff --git a/internal/static/server/server.go b/internal/static/server/server.go index adae8c6a30..ffaae930f7 100644 --- a/internal/static/server/server.go +++ b/internal/static/server/server.go @@ -1,7 +1,7 @@ // Package server implements the HTTP and gRPC server used throughout Grafana // Agent. // -// It is a grafana/agent-specific fork of github.com/weaveworks/common/server. +// It is a grafana/alloy-specific fork of github.com/weaveworks/common/server. package server import ( diff --git a/operations/river-jsonnet/README.md b/operations/river-jsonnet/README.md index aaac13d610..3e5466c3f3 100644 --- a/operations/river-jsonnet/README.md +++ b/operations/river-jsonnet/README.md @@ -34,7 +34,7 @@ the literal River expression `env("HOME")`. ## Example ```jsonnet -local river = import 'github.com/grafana/agent/operations/river-jsonnet/main.libsonnet'; +local river = import 'github.com/grafana/alloy/operations/river-jsonnet/main.libsonnet'; river.manifestRiver({ attr_1: "Hello, world!", diff --git a/tools/gen-crd-docs/config.json b/tools/gen-crd-docs/config.json index 8ae1e273e8..b7791c157c 100644 --- a/tools/gen-crd-docs/config.json +++ b/tools/gen-crd-docs/config.json @@ -32,8 +32,8 @@ "docsURLTemplate": "https://pkg.go.dev/sigs.k8s.io/controller-runtime/pkg/client#Object" }, { - "typeMatchPrefix": "^github\\.com/grafana/agent/internal/static/operator/assets\\.SecretStore$", - "docsURLTemplate": "https://pkg.go.dev/github.com/grafana/agent/internal/static/operator/assets#SecretStore" + "typeMatchPrefix": "^github\\.com/grafana/alloy/internal/static/operator/assets\\.SecretStore$", + "docsURLTemplate": "https://pkg.go.dev/github.com/grafana/alloy/internal/static/operator/assets#SecretStore" } ], "typeDisplayNamePrefixOverrides": { From 5e64dc94a97249ff9c349d8d40711311105ab0e2 Mon Sep 17 00:00:00 2001 From: Robert Fratto Date: Mon, 25 Mar 2024 15:44:31 -0400 Subject: [PATCH 054/136] misc: rename agentseed to alloyseed --- .../agentseed.go => alloyseed/alloyseed.go} | 40 +++++++++++-------- .../alloyseed_test.go} | 2 +- internal/component/loki/write/write.go | 7 ++-- .../prometheus/remotewrite/remote_write.go | 7 ++-- internal/component/pyroscope/write/write.go | 7 ++-- internal/flowmode/cmd_run.go | 4 +- internal/service/remotecfg/remotecfg.go | 4 +- internal/usagestats/reporter.go | 10 ++--- internal/usagestats/reporter_test.go | 2 +- internal/usagestats/stats.go | 4 +- 10 files changed, 49 insertions(+), 38 deletions(-) rename internal/{agentseed/agentseed.go => alloyseed/alloyseed.go} (77%) rename internal/{agentseed/agentseed_test.go => alloyseed/alloyseed_test.go} (98%) diff --git a/internal/agentseed/agentseed.go b/internal/alloyseed/alloyseed.go similarity index 77% rename from internal/agentseed/agentseed.go rename to internal/alloyseed/alloyseed.go index 8ee9140fb0..13f1f661f7 100644 
--- a/internal/agentseed/agentseed.go +++ b/internal/alloyseed/alloyseed.go @@ -1,4 +1,4 @@ -package agentseed +package alloyseed import ( "encoding/json" @@ -15,17 +15,25 @@ import ( "github.com/prometheus/common/version" ) -// AgentSeed identifies a unique agent -type AgentSeed struct { +// Seed identifies a unique agent. +type Seed struct { UID string `json:"UID"` CreatedAt time.Time `json:"created_at"` Version string `json:"version"` } -const HeaderName = "X-Agent-Id" -const filename = "agent_seed.json" +const ( + // Both LegacyHeaderName and HeaderName should be used to identify the Alloy + // instance in the headers of requests. LegacyHeaderName is used for + // backwards compatibility. -var savedSeed *AgentSeed + LegacyHeaderName = "X-Agent-Id" // LegacyHeaderName represents the header name used prior to the Alloy release. + HeaderName = "X-Alloy-Id" // HeaderName represents the ID header to use for Alloy. +) + +const filename = "alloy_seed.json" + +var savedSeed *Seed var once sync.Once // Init should be called by an app entrypoint as soon as it can to configure where the unique seed will be stored. @@ -43,7 +51,7 @@ func Init(dir string, l log.Logger) { func loadOrGenerate(dir string, l log.Logger) { var err error - var seed *AgentSeed + var seed *Seed // list of paths in preference order. // we will always write to the first path paths := []string{} @@ -76,18 +84,18 @@ func loadOrGenerate(dir string, l log.Logger) { } } -func generateNew() *AgentSeed { - return &AgentSeed{ +func generateNew() *Seed { + return &Seed{ UID: uuid.NewString(), Version: version.Version, CreatedAt: time.Now(), } } -// Get will return a unique agent seed for this agent. +// Get returns a unique seed for this Alloy instance. // It will always return a valid seed, even if previous attempts to // load or save the seed file have failed -func Get() *AgentSeed { +func Get() *Seed { // Init should have been called before this. If not, call it now with defaults. 
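// Note (illustrative, not part of this patch): whether loaded or freshly
// generated, the seed is persisted as plain JSON matching the field tags
// on Seed above, e.g.
//
//	{"UID": "<some-uuid>", "created_at": "2024-03-25T15:44:31Z", "version": "v1.1.0"}
//
// written to alloy_seed.json; the UID, timestamp, and version shown here
// are made-up values.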
once.Do(func() { loadOrGenerate("", log.NewNopLogger()) @@ -100,14 +108,14 @@ func Get() *AgentSeed { return generateNew() } -// readSeedFile reads the agent seed file -func readSeedFile(path string, logger log.Logger) (*AgentSeed, error) { +// readSeedFile reads the Alloy seed file +func readSeedFile(path string, logger log.Logger) (*Seed, error) { data, err := os.ReadFile(path) if err != nil { level.Error(logger).Log("msg", "Reading seed file", "err", err) return nil, err } - seed := &AgentSeed{} + seed := &Seed{} err = json.Unmarshal(data, seed) if err != nil { level.Error(logger).Log("msg", "Decoding seed file", "err", err) @@ -134,8 +142,8 @@ func fileExists(path string) bool { return !errors.Is(err, os.ErrNotExist) } -// writeSeedFile writes the agent seed file -func writeSeedFile(seed *AgentSeed, path string, logger log.Logger) { +// writeSeedFile writes the Alloy seed file +func writeSeedFile(seed *Seed, path string, logger log.Logger) { data, err := json.Marshal(*seed) if err != nil { level.Error(logger).Log("msg", "Encoding seed file", "err", err) diff --git a/internal/agentseed/agentseed_test.go b/internal/alloyseed/alloyseed_test.go similarity index 98% rename from internal/agentseed/agentseed_test.go rename to internal/alloyseed/alloyseed_test.go index 91650c59e7..e0ea4fee07 100644 --- a/internal/agentseed/agentseed_test.go +++ b/internal/alloyseed/alloyseed_test.go @@ -1,4 +1,4 @@ -package agentseed +package alloyseed import ( "os" diff --git a/internal/component/loki/write/write.go b/internal/component/loki/write/write.go index 909214393c..4c86e5a01a 100644 --- a/internal/component/loki/write/write.go +++ b/internal/component/loki/write/write.go @@ -7,7 +7,7 @@ import ( "sync" "time" - "github.com/grafana/alloy/internal/agentseed" + "github.com/grafana/alloy/internal/alloyseed" "github.com/grafana/alloy/internal/component" "github.com/grafana/alloy/internal/component/common/loki" "github.com/grafana/alloy/internal/component/common/loki/client" @@ -163,13 +163,14 @@ func (c *Component) Update(args component.Arguments) error { cfgs := newArgs.convertClientConfigs() - uid := agentseed.Get().UID + uid := alloyseed.Get().UID for i := range cfgs { //cfgs is slice of struct values, so we set by index if cfgs[i].Headers == nil { cfgs[i].Headers = map[string]string{} } - cfgs[i].Headers[agentseed.HeaderName] = uid + cfgs[i].Headers[alloyseed.LegacyHeaderName] = uid + cfgs[i].Headers[alloyseed.HeaderName] = uid } walCfg := wal.Config{ Enabled: newArgs.WAL.Enabled, diff --git a/internal/component/prometheus/remotewrite/remote_write.go b/internal/component/prometheus/remotewrite/remote_write.go index 14bf08c8b4..747311a64f 100644 --- a/internal/component/prometheus/remotewrite/remote_write.go +++ b/internal/component/prometheus/remotewrite/remote_write.go @@ -10,7 +10,7 @@ import ( "time" "github.com/go-kit/log" - "github.com/grafana/alloy/internal/agentseed" + "github.com/grafana/alloy/internal/alloyseed" "github.com/grafana/alloy/internal/component" "github.com/grafana/alloy/internal/component/prometheus" "github.com/grafana/alloy/internal/featuregate" @@ -257,12 +257,13 @@ func (c *Component) Update(newConfig component.Arguments) error { if err != nil { return err } - uid := agentseed.Get().UID + uid := alloyseed.Get().UID for _, cfg := range convertedConfig.RemoteWriteConfigs { if cfg.Headers == nil { cfg.Headers = map[string]string{} } - cfg.Headers[agentseed.HeaderName] = uid + cfg.Headers[alloyseed.LegacyHeaderName] = uid + cfg.Headers[alloyseed.HeaderName] = uid } err = 
c.remoteStore.ApplyConfig(convertedConfig) if err != nil { diff --git a/internal/component/pyroscope/write/write.go b/internal/component/pyroscope/write/write.go index ff51ede6d2..067687aae9 100644 --- a/internal/component/pyroscope/write/write.go +++ b/internal/component/pyroscope/write/write.go @@ -7,7 +7,7 @@ import ( "time" "connectrpc.com/connect" - "github.com/grafana/alloy/internal/agentseed" + "github.com/grafana/alloy/internal/alloyseed" "github.com/grafana/alloy/internal/component/pyroscope" "github.com/grafana/alloy/internal/featuregate" "github.com/grafana/alloy/internal/flow/logging/level" @@ -159,12 +159,13 @@ type fanOutClient struct { // NewFanOut creates a new fan out client that will fan out to all endpoints. func NewFanOut(opts component.Options, config Arguments, metrics *metrics) (*fanOutClient, error) { clients := make([]pushv1connect.PusherServiceClient, 0, len(config.Endpoints)) - uid := agentseed.Get().UID + uid := alloyseed.Get().UID for _, endpoint := range config.Endpoints { if endpoint.Headers == nil { endpoint.Headers = map[string]string{} } - endpoint.Headers[agentseed.HeaderName] = uid + endpoint.Headers[alloyseed.LegacyHeaderName] = uid + endpoint.Headers[alloyseed.HeaderName] = uid httpClient, err := commonconfig.NewClientFromConfig(*endpoint.HTTPClientConfig.Convert(), endpoint.Name) if err != nil { return nil, err diff --git a/internal/flowmode/cmd_run.go b/internal/flowmode/cmd_run.go index 817f147cdf..fa37b1cac9 100644 --- a/internal/flowmode/cmd_run.go +++ b/internal/flowmode/cmd_run.go @@ -17,7 +17,7 @@ import ( "github.com/fatih/color" "github.com/go-kit/log" - "github.com/grafana/alloy/internal/agentseed" + "github.com/grafana/alloy/internal/alloyseed" "github.com/grafana/alloy/internal/boringcrypto" "github.com/grafana/alloy/internal/component" "github.com/grafana/alloy/internal/converter" @@ -274,7 +274,7 @@ func (fr *flowRun) Run(configPath string) error { } labelService := labelstore.New(l, reg) - agentseed.Init(fr.storagePath, l) + alloyseed.Init(fr.storagePath, l) f := flow.New(flow.Options{ Logger: l, diff --git a/internal/service/remotecfg/remotecfg.go b/internal/service/remotecfg/remotecfg.go index 8d9d7296a5..f45f80ae19 100644 --- a/internal/service/remotecfg/remotecfg.go +++ b/internal/service/remotecfg/remotecfg.go @@ -15,7 +15,7 @@ import ( "github.com/go-kit/log" agentv1 "github.com/grafana/agent-remote-config/api/gen/proto/go/agent/v1" "github.com/grafana/agent-remote-config/api/gen/proto/go/agent/v1/agentv1connect" - "github.com/grafana/alloy/internal/agentseed" + "github.com/grafana/alloy/internal/alloyseed" "github.com/grafana/alloy/internal/component/common/config" "github.com/grafana/alloy/internal/featuregate" "github.com/grafana/alloy/internal/flow/logging/level" @@ -72,7 +72,7 @@ type Arguments struct { // GetDefaultArguments populates the default values for the Arguments struct. 
func GetDefaultArguments() Arguments { return Arguments{ - ID: agentseed.Get().UID, + ID: alloyseed.Get().UID, Metadata: make(map[string]string), PollFrequency: 1 * time.Minute, HTTPClientConfig: config.CloneDefaultHTTPClientConfig(), diff --git a/internal/usagestats/reporter.go b/internal/usagestats/reporter.go index b8fd90228e..b5d37c55c1 100644 --- a/internal/usagestats/reporter.go +++ b/internal/usagestats/reporter.go @@ -7,7 +7,7 @@ import ( "github.com/go-kit/log" "github.com/go-kit/log/level" - "github.com/grafana/alloy/internal/agentseed" + "github.com/grafana/alloy/internal/alloyseed" "github.com/grafana/dskit/backoff" "github.com/grafana/dskit/multierror" ) @@ -21,7 +21,7 @@ var ( type Reporter struct { logger log.Logger - agentSeed *agentseed.AgentSeed + seed *alloyseed.Seed lastReport time.Time } @@ -36,14 +36,14 @@ func NewReporter(logger log.Logger) (*Reporter, error) { // Start inits the reporter seed and start sending report for every interval func (rep *Reporter) Start(ctx context.Context, metricsFunc func() map[string]interface{}) error { level.Info(rep.logger).Log("msg", "running usage stats reporter") - rep.agentSeed = agentseed.Get() + rep.seed = alloyseed.Get() // check every minute if we should report. ticker := time.NewTicker(reportCheckInterval) defer ticker.Stop() // find when to send the next report. - next := nextReport(reportInterval, rep.agentSeed.CreatedAt, time.Now()) + next := nextReport(reportInterval, rep.seed.CreatedAt, time.Now()) if rep.lastReport.IsZero() { // if we never reported assumed it was the last interval. rep.lastReport = next.Add(-reportInterval) @@ -77,7 +77,7 @@ func (rep *Reporter) reportUsage(ctx context.Context, interval time.Time, metric }) var errs multierror.MultiError for backoff.Ongoing() { - if err := sendReport(ctx, rep.agentSeed, interval, metrics); err != nil { + if err := sendReport(ctx, rep.seed, interval, metrics); err != nil { level.Info(rep.logger).Log("msg", "failed to send usage report", "retries", backoff.NumRetries(), "err", err) errs.Add(err) backoff.Wait() diff --git a/internal/usagestats/reporter_test.go b/internal/usagestats/reporter_test.go index bbe3c14f8f..54e71aea16 100644 --- a/internal/usagestats/reporter_test.go +++ b/internal/usagestats/reporter_test.go @@ -61,7 +61,7 @@ func Test_ReportLoop(t *testing.T) { for _, uid := range agentIDs { require.Equal(t, first, uid) } - require.Equal(t, first, r.agentSeed.UID) + require.Equal(t, first, r.seed.UID) } func Test_NextReport(t *testing.T) { diff --git a/internal/usagestats/stats.go b/internal/usagestats/stats.go index b1b08c48dc..aa73068e2b 100644 --- a/internal/usagestats/stats.go +++ b/internal/usagestats/stats.go @@ -10,7 +10,7 @@ import ( "runtime" "time" - "github.com/grafana/alloy/internal/agentseed" + "github.com/grafana/alloy/internal/alloyseed" "github.com/grafana/alloy/internal/useragent" "github.com/prometheus/common/version" ) @@ -32,7 +32,7 @@ type Report struct { DeployMode string `json:"deployMode"` } -func sendReport(ctx context.Context, seed *agentseed.AgentSeed, interval time.Time, metrics map[string]interface{}) error { +func sendReport(ctx context.Context, seed *alloyseed.Seed, interval time.Time, metrics map[string]interface{}) error { report := Report{ UsageStatsID: seed.UID, CreatedAt: seed.CreatedAt, From 6ecf77f00031cbf392d702dd83e3fc86d86346f1 Mon Sep 17 00:00:00 2001 From: Robert Fratto Date: Mon, 25 Mar 2024 15:46:38 -0400 Subject: [PATCH 055/136] misc: rename agentlint to alloylint --- Makefile | 10 +++++----- 
internal/cmd/agentlint/go.mod | 7 ------- internal/cmd/agentlint/go.sum | 6 ------ .../internal/findcomponents/findcomponents.go | 0 .../internal/syntaxtags/syntaxtags.go | 0 internal/cmd/{agentlint => alloylint}/main.go | 6 +++--- 6 files changed, 8 insertions(+), 21 deletions(-) delete mode 100644 internal/cmd/agentlint/go.mod delete mode 100644 internal/cmd/agentlint/go.sum rename internal/cmd/{agentlint => alloylint}/internal/findcomponents/findcomponents.go (100%) rename internal/cmd/{agentlint => alloylint}/internal/syntaxtags/syntaxtags.go (100%) rename internal/cmd/{agentlint => alloylint}/main.go (51%) diff --git a/Makefile b/Makefile index 106191826a..de4771c88f 100644 --- a/Makefile +++ b/Makefile @@ -77,7 +77,7 @@ include tools/make/*.mk ALLOY_IMAGE ?= grafana/alloy:latest ALLOY_BINARY ?= build/alloy SERVICE_BINARY ?= build/alloy-service -AGENTLINT_BINARY ?= build/agentlint +ALLOYLINT_BINARY ?= build/alloylint GOOS ?= $(shell go env GOOS) GOARCH ?= $(shell go env GOARCH) GOARM ?= $(shell go env GOARM) @@ -126,9 +126,9 @@ endif # .PHONY: lint -lint: agentlint +lint: alloylint find . -name go.mod -execdir golangci-lint run -v --timeout=10m \; - $(AGENTLINT_BINARY) ./... + $(ALLOYLINT_BINARY) ./... .PHONY: test # We have to run test twice: once for all packages with -race and then once @@ -169,11 +169,11 @@ else $(GO_ENV) go build $(GO_FLAGS) -o $(SERVICE_BINARY) ./cmd/alloy-service endif -agentlint: +alloylint: ifeq ($(USE_CONTAINER),1) $(RERUN_IN_CONTAINER) else - cd ./internal/cmd/agentlint && $(GO_ENV) go build $(GO_FLAGS) -o ../../../$(AGENTLINT_BINARY) . + cd ./internal/cmd/alloylint && $(GO_ENV) go build $(GO_FLAGS) -o ../../../$(ALLOYLINT_BINARY) . endif # diff --git a/internal/cmd/agentlint/go.mod b/internal/cmd/agentlint/go.mod deleted file mode 100644 index 31cf906db8..0000000000 --- a/internal/cmd/agentlint/go.mod +++ /dev/null @@ -1,7 +0,0 @@ -module github.com/grafana/alloy/internal/cmd/agentlint - -go 1.21 - -require golang.org/x/tools v0.17.0 - -require golang.org/x/mod v0.14.0 // indirect diff --git a/internal/cmd/agentlint/go.sum b/internal/cmd/agentlint/go.sum deleted file mode 100644 index 84d5fdb7e0..0000000000 --- a/internal/cmd/agentlint/go.sum +++ /dev/null @@ -1,6 +0,0 @@ -golang.org/x/mod v0.14.0 h1:dGoOF9QVLYng8IHTm7BAyWqCqSheQ5pYWGhzW00YJr0= -golang.org/x/mod v0.14.0/go.mod h1:hTbmBsO62+eylJbnUtE2MGJUyE7QWk4xUqPFrRgJ+7c= -golang.org/x/sync v0.6.0 h1:5BMeUDZ7vkXGfEr1x9B4bRcTH4lpkTkpdh0T/J+qjbQ= -golang.org/x/sync v0.6.0/go.mod h1:Czt+wKu1gCyEFDUtn0jG5QVvpJ6rzVqr5aXyt9drQfk= -golang.org/x/tools v0.17.0 h1:FvmRgNOcs3kOa+T20R1uhfP9F6HgG2mfxDv1vrx1Htc= -golang.org/x/tools v0.17.0/go.mod h1:xsh6VxdV005rRVaS6SSAf9oiAqljS7UZUacMZ8Bnsps= diff --git a/internal/cmd/agentlint/internal/findcomponents/findcomponents.go b/internal/cmd/alloylint/internal/findcomponents/findcomponents.go similarity index 100% rename from internal/cmd/agentlint/internal/findcomponents/findcomponents.go rename to internal/cmd/alloylint/internal/findcomponents/findcomponents.go diff --git a/internal/cmd/agentlint/internal/syntaxtags/syntaxtags.go b/internal/cmd/alloylint/internal/syntaxtags/syntaxtags.go similarity index 100% rename from internal/cmd/agentlint/internal/syntaxtags/syntaxtags.go rename to internal/cmd/alloylint/internal/syntaxtags/syntaxtags.go diff --git a/internal/cmd/agentlint/main.go b/internal/cmd/alloylint/main.go similarity index 51% rename from internal/cmd/agentlint/main.go rename to internal/cmd/alloylint/main.go index 30ebbbaff9..9ac7c80a99 100644 --- 
a/internal/cmd/agentlint/main.go +++ b/internal/cmd/alloylint/main.go @@ -1,10 +1,10 @@ -// Command agentlint provides custom linting utilities for the grafana/alloy +// Command alloylint provides custom linting utilities for the grafana/alloy // repo. package main import ( - "github.com/grafana/alloy/internal/cmd/agentlint/internal/findcomponents" - "github.com/grafana/alloy/internal/cmd/agentlint/internal/syntaxtags" + "github.com/grafana/alloy/internal/cmd/alloylint/internal/findcomponents" + "github.com/grafana/alloy/internal/cmd/alloylint/internal/syntaxtags" "golang.org/x/tools/go/analysis/multichecker" ) From 8ccd342ec3f1aa779b33473cbc53c41c1f65e25e Mon Sep 17 00:00:00 2001 From: Robert Fratto Date: Mon, 25 Mar 2024 16:11:33 -0400 Subject: [PATCH 056/136] docs/rfcs: remove all RFCs (#72) Remove all RFCs, as nearly every RFC was specific to grafana/agent. RFCs which also apply to Alloy will be rolled into the docs. This creates a fresh start for RFCs, and allows us to reevaluate how we want to support them. --- docs/rfcs/0000-template.md | 5 - docs/rfcs/0001-designing-in-the-open.md | 159 -------- docs/rfcs/0002-integrations-in-operator.md | 299 --------------- docs/rfcs/0003-new-metrics-subsystem.md | 279 -------------- docs/rfcs/0004-agent-flow.md | 195 ---------- docs/rfcs/0005-river.md | 408 --------------------- docs/rfcs/0006-clustering.md | 330 ----------------- docs/rfcs/0006-future-of-agent-operator.md | 35 -- docs/rfcs/0007-flow-modules.md | 230 ------------ docs/rfcs/0008-backwards-compatibility.md | 86 ----- 10 files changed, 2026 deletions(-) delete mode 100644 docs/rfcs/0000-template.md delete mode 100644 docs/rfcs/0001-designing-in-the-open.md delete mode 100644 docs/rfcs/0002-integrations-in-operator.md delete mode 100644 docs/rfcs/0003-new-metrics-subsystem.md delete mode 100644 docs/rfcs/0004-agent-flow.md delete mode 100644 docs/rfcs/0005-river.md delete mode 100644 docs/rfcs/0006-clustering.md delete mode 100644 docs/rfcs/0006-future-of-agent-operator.md delete mode 100644 docs/rfcs/0007-flow-modules.md delete mode 100644 docs/rfcs/0008-backwards-compatibility.md diff --git a/docs/rfcs/0000-template.md b/docs/rfcs/0000-template.md deleted file mode 100644 index 97eab801c0..0000000000 --- a/docs/rfcs/0000-template.md +++ /dev/null @@ -1,5 +0,0 @@ -# Title - -* Date: YYYY-MM-DD -* Author: Full Name (@github_username) -* PR: [grafana/agent#XXXX](https://github.com/grafana/alloy/pull/XXXX) diff --git a/docs/rfcs/0001-designing-in-the-open.md b/docs/rfcs/0001-designing-in-the-open.md deleted file mode 100644 index 7976a1b3dc..0000000000 --- a/docs/rfcs/0001-designing-in-the-open.md +++ /dev/null @@ -1,159 +0,0 @@ -# Designing in the Open - -* Date: 2021-11-02 -* Author: Robert Fratto (@rfratto) -* PR: [grafana/agent#1055](https://github.com/grafana/agent/pull/1055) - -## Summary - -Many open source projects start behind closed doors, where it's designed, -prototyped, and tested before being released publicly. This can be true -regardless of why the project is being made; even personal side projects likely -start by someone designing alone. - -Meanwhile, many open source projects might want to create a community of -developers. Much of the beauty of successful open source projects originates -from the varied backgrounds of its contributors: different people with -different use cases combining together to make a widely useful piece of -software. 
- -However, even with an intent to foster a community of developers, it's natural -to accidentally build a habit from the closed-door design process. Even when -once-private proposals are made public, potential external contributors can -find themselves simply as spectators: - -* Initial design is gated to core maintainers, in particular if they all work - for the same company. This leaves less room for new people to help out. -* New concerns are less impactful if the proposal already received core - maintainer consensus. -* Historical proposals with context and discussions become hard to find. - -I believe it takes a deliberate inversion of process to foster community -participation. This document proposes how Grafana Agent will utilize public -spaces for its primary home for future design proposals. - -## Goals - -* Outline options for proposing changes to Grafana Agent -* Lower the barrier to entry for interested parties to become maintainers - -## Non-Goals - -* Enforce that every change originates from a fully public proposal or - discussion. While all maintainers and contributors will be encouraged to - design openly, there may be legal, security, privacy, or business reasons - that prevent some or all context from being made public. - -* Be overly prescriptive: too many rules can hinder adoption of a process. This - document outlines intention, not hard policy. - -## Proposal - -Public proposals may take one of two forms: - -* Issue proposals -* RFC PR proposals (e.g., this document) - -### Issues - -Issues are the quickest path towards proposing a change. Issue proposals must -be opened at the [issues page](https://github.com/grafana/alloy/issues). - -There are no strict set of rules for issue-based proposals, but authors are -recommended to prefix the issue title with `Proposal:` so it may be found more -easily. - -### RFC PRs - -RFC PR proposals must at least: - -* Be placed in the `docs/rfcs` folder of the `grafana/agent` repository -* Have a lowercase filename in hyphen-case with an `.md` extension -* Prefix the filename with the RFC ID - * ID `xxxx` may be initially used until the final ID is known -* Contain valid markdown -* Start with the title of the proposal -* Contain a bullet point list of metadata of: - * The date the proposal was written - * The list of authors, with their names and GitHub usernames - * The PR where the proposal was posted - * The status of the proposal - -`0000-template.md` contains a template to use for writing proposals that -conforms to these rules. - -The remainder of the proposal may be formatted however the author wishes. Some -example sections in the RFC may be: - -* Summary: What is the background that lead to this proposal? -* Goals: What are the main goals of the proposal? -* Non-Goals: What _aren't_ the main goals of the proposal? -* Proposal: What is the proposal? -* Pros/Cons: - * Pros: What are the upsides to this proposal? - * Cons: What are the downsides to this proposal? -* Considered Alternatives: Why is this proposal the best path forward? What - were the alternatives? -* Open Questions: What questions still need to be answered? -* Prior Art: What was this proposal based on, if anything? 
- -#### RFC Status - -The "Status" field of an RFC must be one of the following: - -* Draft: This RFC is a work-in-progress and may change -* Implemented: Relevant code for this RFC has been merged to the main branch -* Deprecated: This RFC is no longer relevant to the current state of the - project - -RFCs may be merged in Draft state as work on them progresses. The _Draft_ state -is intended to signal to readers that an RFC is in flux. Once all relevant code -for an RFC is merged to main, the RFC may move to the _Implemented_ status. -RFCs without code, such as this RFC, may immediately be set as Implemented. - -If, for any reason, an RFC becomes no longer relevant (deprecated by another -RFC, code removed, etc.), its status should move to Deprecated. - -#### RFC Review - -RFCs should be opened as a PR to grafana/agent, ideally prefixed in the PR -title with `RFC:` to easily identify it amongst other PRs. - -### Google Docs Proposals - -Google Docs may be useful for early feedback rounds during a proposal. However, -it is not recommended for the permanent home of a proposal: - -* Change and comment history may not be available to all viewers. - -* The file owner may delete the proposal, leading to a gap in historical - context. - -Google Docs proposals will be permitted if linked to from an issue proposal. -Google Doc proposals must be converted into an RFC proposal prior to formally -accepting the proposal. Enforcing this ensures that historical context is -recorded, though it is still not ideal as it discards comment history. - -## Accepting Proposals - -All readers are encouraged to engage in reviewing proposals. However, whether a -proposal is accepted is determined by [rough consensus][] of the Grafana Agent -governance team. External contributors may eventually be invited to [join the -governance team][governance] if they have a history of making ongoing -contributions to the project or community. - -## Considered alternatives - -A few existing public proposal processes have been examined for inspiration: - -* [IETF's RFCs](https://www.ietf.org/standards/rfcs/) -* [Rust's RFCs](https://github.com/rust-lang/rfcs) -* [Joyent's Requests for Discussions](https://github.com/joyent/rfd) -* [OpenTelemetry's OTEPs](https://github.com/open-telemetry/oteps) -* [Kubernetes Enhancement Proposals (KEPs)](https://github.com/kubernetes/enhancements) - -All of these processes are similar, but in the end, the current objective is to -start collecting proposals publicly rather than to be prescriptive yet. - -[rough consensus]: https://github.com/grafana/alloy/blob/main/GOVERNANCE.md#technical-decisions -[governance]: https://github.com/grafana/alloy/blob/main/GOVERNANCE.md#team-members diff --git a/docs/rfcs/0002-integrations-in-operator.md b/docs/rfcs/0002-integrations-in-operator.md deleted file mode 100644 index ed54d40de6..0000000000 --- a/docs/rfcs/0002-integrations-in-operator.md +++ /dev/null @@ -1,299 +0,0 @@ -# Integrations in Grafana Agent Operator - -* Date: 2022-01-04 -* Author: Robert Fratto (@rfratto) -* PR: [grafana/agent#1224](https://github.com/grafana/agent/pull/1224) - -## Background - -Grafana Agent includes support for integrations, which are intended as -"batteries-included" features to assist with collecting telemetry data. 
With -the `integrations-next` feature enabled, there are multiple types of -integrations: - -* Integrations that generate metrics (i.e., `node_exporter`) -* Integrations that generate logs (i.e., `eventhandler`) -* Integrations that generate other types of telemetry are planned (i.e., an - upcoming `app_agent_receiver`) - -Generically, an integration is a specialized telemetry collector for some -system under observation. For example, a `redis` integration collects telemetry -for Redis. Integrations can generate any combination of Prometheus metrics, -Grafana Loki logs, or Grafana Tempo traces. - -This document proposes adding a way to add support for all current and future -integrations into the Grafana Agent Operator. - -This proposal supersedes [#883][], which was the first attempt at designing the -feature. This proposal takes advantage of the lessons I've learned and -minimizes the implementation effort. - -## Goals - -* Allow Grafana Agent Operator to deploy integrations -* Allow deployed integrations to write telemetry data -* Support integrations that must exist on every machine (i.e., `node_exporter`) -* Minimize development effort for creating new integrations - -## Non-Goals - -* Support externally collecting metrics from integrations - -## Proposal - -At a high level, the proposal is to: - -* Define a new `Integration` CRD which specifies a single instance of an - integration to run. -* Update `GrafanaAgent` to discover `Integration`s and run integrations. - -## Architecture - -### Running integrations - -The new CRD, Integration, will be used for supporting all current integrations. -The spec of Integration primarily revolves around three fields: - -* `name`: The name of the integration (e.g., `node_exporter`, `mysqld_exporter`) -* `type`: Information about the integration being deployed -* `config`: YAML configuration block for the integration - -The `type` field is an object with the following fields: - -* `allNodes`: True when the `name` integration should run on all Kubernetes - Nodes. -* `unique`: True when the `name` integration must be unique across a - GrafanaAgent resource hierarchy. - -> Example of a valid integration: -> -> ```yaml -> apiVersion: monitoring.grafana.com/v1alpha1 -> kind: Integration -> metadata: -> name: mysql -> namespace: default -> spec: -> name: mysqld_exporter -> type: -> allNodes: false # optional; false is default -> unique: false # optional; false is default -> config: -> data_source_name: root@(mysql.default:3306)/ -> disable_collectors: [slave_status] -> ``` - -GrafanaAgent will be updated to discover Integrations as part of its resource -hierarchy. During reconcile, the following Kubernetes objects will be deployed: - -* One DaemonSet and Secret if there is at least one integration in the resource - hierarchy where `type.allNodes` is true. - -* One Deployment and Secret if there is at least one integration in the - resource hierarchy where `type.allNodes` is false. - -Secrets hold the generated Grafana Agent configuration; a Secret is used as -integration configs may contain credentials. - -**NOTE**: As this functionality depends on [#1198][], integration pods will -always be deployed with the experimental feature flag -`-enable-feature=integrations-next` enabled. This also means that operator -support for integrations requires a release of the agent where that -experimental feature is available. - -### Integration validation - -The initial implementation of integrations support will have no knowledge of -what integrations exist. 
As a result, the `spec.type` and `spec.config` fields
-for an Integration MUST be configured correctly for an integration to work.
-Users must refer to documentation to discover how `type` should be configured
-for their specific integration, and what settings are valid for the `config`
-block. Configuration errors will only surface as runtime errors from the
-deployed agent.
-
-Future versions of the Operator may:
-
-* Add knowledge for some integrations and validate `type` and `config`
-  accordingly (though breaking changes to the config at the Agent level may
-  introduce extra complexity to this).
-
-* Update the `status` field of the root GrafanaAgent resource during reconcile
-  to expose any reconcile or runtime errors.
-
-### Additional settings for the Integration CRD
-
-Some integrations may require changes to the deployed Pods to function
-properly. Integrations will additionally support declaring `volumes`,
-`volumeMounts`, `secrets` and `configMaps`. These fields will be merged with
-the fields of the same name from the root GrafanaAgent resource when creating
-integration pods:
-
-> ```yaml
-> apiVersion: monitoring.grafana.com/v1alpha1
-> kind: Integration
-> metadata:
->   name: kafka
->   namespace: default
-> spec:
->   name: kafka_exporter
->   config: |
->     ca_file: /etc/grafana-agent/secrets/kafka-ca-file
->     # ...
->   # Same "secrets" field present in GrafanaAgent.spec, where each secret
->   # is loaded from the same namespace and gets exposed at
->   # /etc/grafana-agent/secrets/<secret_name>
->   secrets: [kafka-ca-file]
-> ```
-
-### Sending telemetry from integrations
-
-Because the operator will not have any knowledge about individual integrations, it
-also doesn't know how integrations generate telemetry data. Users must manually
-configure an integration to send its data to the appropriate instance.
-
-Users can refer to MetricsInstances and LogsInstances from the same resource
-hierarchy by `<namespace>/<name>` in their integration configs. This includes
-configuring `autoscrape` for collecting metrics from an exporter-based
-integration.
-
-Given the following resource hierarchy:
-
-> ```yaml
-> apiVersion: monitoring.grafana.com/v1alpha1
-> kind: GrafanaAgent
-> metadata:
->   name: grafana-agent-example
->   namespace: default
->   labels:
->     app: grafana-agent-example
-> spec:
->   metrics:
->     instanceSelector:
->       matchLabels:
->         agent: grafana-agent-example
->   integrations:
->     instanceSelector:
->       matchLabels:
->         agent: grafana-agent-example
-> ---
-> apiVersion: monitoring.grafana.com/v1alpha1
-> kind: MetricsInstance
-> metadata:
->   name: primary
->   namespace: default
->   labels:
->     app: grafana-agent-example
-> spec:
->   remoteWrite:
->   - url: http://prometheus:9090/api/v1/write
-> ---
-> apiVersion: monitoring.grafana.com/v1alpha1
-> kind: Integration
-> metadata:
->   name: mysql
->   namespace: default
->   labels:
->     app: grafana-agent-example
-> spec:
->   name: mysqld_exporter
->   config:
->     autoscrape:
->       enable: true
->       # MetricsInstance <namespace>/<name> to send metrics to
->       metrics_instance: default/primary
->     data_source_name: root@(mysql.default:3306)/
->     disable_collectors: [slave_status]
-> ```
-
-the Operator would generate the following agent config:
-
-```yaml
-metrics:
-  configs:
-  - name: default/primary
-    remote_write:
-    - url: http://prometheus:9090/api/v1/write
-integrations:
-  mysqld_exporter_configs:
-  - autoscrape:
-      enable: true
-      metrics_instance: default/primary
-    data_source_name: root@(mysql.default:3306)/
-    disable_collectors: [slave_status]
-```
-
-All integrations support some way of self-collecting their telemetry data. In
-the future, Integrations that support metrics could support being collected by
-an external source (i.e., a MetricsInstance). This is out of scope of this
-proposal, as we are focusing on lowest-common-denominator support for all
-integrations first.
-
-Note that the Integration config above is only contextually valid: it is only
-valid if it is part of a resource hierarchy where a `default/primary`
-MetricsInstance exists. This makes it impossible for an Integration to be fully
-validated independently of the resource hierarchy where it is discovered.
-
-## Pros/Cons
-
-Despite its limitations, this specific implementation is proposed for its
-simplicity. Its issues with validation can be resolved in the future without
-needing to change the CRD or introduce new CRDs.
-
-Pros:
-
-* Works for all known integrations
-* Supports future work for custom validation logic
-* No changes needed to support future integrations
-* You do not have to update the operator to use new integrations
-
-Cons:
-
-* Users must rely on documentation to configure `type` and `config` properly.
-* Without validation, configuration errors can be hard to debug.
-* An Integration may be discovered as part of two resource hierarchies, but
-  refer to a MetricsInstance that exists in one hierarchy but not the other.
-
-## Alternatives considered
-
-### Do nothing
-
-Instead of adding support for integrations, users could be expected to deploy
-exporters through custom means (i.e., a `node_exporter` Helm chart +
-ServiceMonitor).
-
-Pros:
-
-* Requires no additional effort to implement
-* Metrics can be scraped by any MetricsInstance
-* Feels like a natural fit for Kubernetes' deployment model
-
-Cons:
-
-* Prevents non-exporter integrations from working (i.e., `eventhandler` has no
-  separate container that can be run independently)
-* Prevents us from making agent-specific changes on top of exporters
-* Requires different documentation for people using the node_exporter
-  integration vs deploying the actual node_exporter
-
-### One CRD per integration
-
-Instead of a generic CRD, we could have a CRD per supported integration.
-
-Pros:
-
-* Allows creating Kubernetes-specific config schemas for integrations
-* Can be validated at the CRD level
-
-Cons:
-
-* Operator must be updated whenever a new integration is added
-* Adds extra development effort for creating new integrations
-* Requires custom config mapping code for each integration
-* Breaking changes to Grafana Agent can break the translation of the CRD to
-  Agent config.
-  * This is true for the current proposal as well, but there you can
-    fix the error in the Integration resource, while a custom CRD would need a
-    new operator version to fix the translation.
-
-[#883]: https://github.com/grafana/agent/issues/883
-[#1198]: https://github.com/grafana/agent/pull/1198
diff --git a/docs/rfcs/0003-new-metrics-subsystem.md b/docs/rfcs/0003-new-metrics-subsystem.md
deleted file mode 100644
index 336c0e4cc4..0000000000
--- a/docs/rfcs/0003-new-metrics-subsystem.md
+++ /dev/null
@@ -1,279 +0,0 @@
-# New metrics subsystem
-
-* Date: 2021-11-29
-* Author: Robert Fratto (@rfratto)
-* PR: [grafana/agent#1140](https://github.com/grafana/agent/pull/1140)
-* Status: Abandoned
-
-## Background
-
-There are several open issues discussing major changes to the metrics
-subsystem:
-
-* [#872][#872]: Per-target sharding
-* [#873][#873]: Reduce operational modes
-* [#875][#875]: Introduce agent-wide clustering mechanism
-* [#888][#888]: Remove internal instance manager system
-
-These are significant changes to the code base. With the exception of #872, all
-of the changes are mainly to reduce technical debt. The implementation effort
-and lack of end-user benefits make them hard to schedule, despite being
-genuinely beneficial for the maintenance of the project.
-
-This proposal suggests a redesign of the metrics subsystem which has native
-support for target sharding and lacks the technical debt from the current
-subsystem.
-
-## Goals
-
-* Enable dynamic target scraping with:
-  * Automatic scaling
-  * Automatic failover
-  * Target distribution
-
-## Non-Goals
-
-* Interaction with this new subsystem from existing subsystems
-* Utilization of the configuration management API
-
-## Implementation
-
-Given the size of the change, work on the new subsystem should be done in a new
-package (e.g., `pkg/metrics/next`), and exposed as an experimental change
-hidden behind a feature flag (e.g., `--enable-features=metrics-next`).
-
-## Design
-
-The existing metrics subsystem is focused around a runtime-configurable Metrics
-Instance system. Metrics Instances are primarily sourced from the config file
-(through the `metrics.configs` array), but can also be dynamically added when
-using integrations or the scraping service.
-
-The new metrics subsystem breaks Metrics Instances up into multiple co-operating
-parts:
-
-1. [Discoverers](#Discoverers)
-2. [Scrapers](#Scrapers)
-3. 
[Senders](#Senders) - -A Metrics Instance still exists conceptually, and is configured as normal -through the `metrics.configs` array. However, there will no longer be an -internal CRUD interface for dynamically managing them. - -Finally, an agent-wide clustering mechanism will be added. This clustering -mechanism will allow agents to be aware of other running agents, and will -expose methods for an individual agent to determine ownership of a resource. -The [Clustering](#Clustering) section will describe how this works in detail. - -All agents in the cluster will implement Discovers, Scrapers, and Senders. - -``` - +------------+ +------------+ -Scrape Configs | Config A | | Config B | - +------------+ +------------+ - \ / -(1) SD Distribution +------------/-------------+ - v----------------+ \ - +------------+ +------------+ +------------+ -(2) Discoverers | Agent A | | Agent B | | Agent C | - +------------+ +------------+ +------------+ - \ \ / -(3) Target Distribution +----+---------+-------------/---+ - v-----------------\-----------+ \ - +------------+ +------------+ +------------+ -(4) Scrapers & Senders | Agent A | | Agent B | | Agent C | - +------------+ +------------+ +------------+ - -+============================================================+ -|| || -|| (1) scrape_configs from runtime config are distributed || -|| amongst agents. Agent A owns Config B. Agent C owns || -|| Config A. || -|| || -|| (2) Agents perform service discovery for scrape configs. || -|| || -|| (3) Agents partition discovered targets amongst cluster. || -|| Agent A finds targets for Agent B and C. Agent C || -|| finds targets for Agent A. || -|| || -|| (3) Agents partition discovered targets amongst cluster. || -|| (4) Agents scrape targets from partitions they were sent || -|| and write metrics to WAL which is picked up by || -|| remote_write. || -|| || -+===========================================================+ -``` - -### Discoverers - -Discoverers discover Prometheus targets and distribute them to Scrapers across -the cluster. There is one Discoverer per Metrics Instance in the -`metrics.configs` array from the agent's runtime config. - -Each Discoverer runs a single Prometheus SD manager. The Discoverer will be -launched only with the set of SD jobs that the local agent owns, using the job -name as the ownership key. This introduces one layer of sharding, where each SD -job will only have one agent responsible for it. Note that relabeling rules are -not applied by the Discoverer. - -Discovered targets are flushed to Scrapers in multiple partitions. Partitions -contain a set of targets owned by the same agent in the cluster, and introduces -the second (and last) layer of sharding, where each target will only have one -agent responsible for it. Partitions also include the Metrics Instance name, -since the same job may exist across multiple instances. The `__address__` label -from the target is used as the ownership key. Once all partitions are created, -they are sent to the corresponding agents over gRPC. Partitions that are owned -by the same agent as the Discoverer may be sent through a non-network -mechanism. - -A partition will be created and sent to all agents in the cluster, even if the -partition is empty. This allows agents to know when they can stop scraping -something from a previous received partition. - -Discovered targets will be re-flushed whenever the set of agents in the cluster -changes. 
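-
-As a rough illustration of the ownership rule above, here is a simplified
-sketch of how a Discoverer could partition targets by their `__address__`
-label. It is illustrative only: the naive hash placement stands in for the
-cluster's hash ring, and `Target` is a stand-in for a discovered target.
-
-```go
-// Sketch: partition discovered targets so each target is owned by exactly
-// one agent. Empty partitions are still created, letting agents know when
-// to stop scraping targets from a previously received partition.
-package main
-
-import (
-	"fmt"
-	"hash/fnv"
-	"sort"
-)
-
-// Target is a simplified stand-in for a discovered Prometheus target.
-type Target map[string]string
-
-func partition(instance string, targets []Target, agents []string) map[string][]Target {
-	sort.Strings(agents) // all agents must agree on peer ordering
-
-	out := make(map[string][]Target, len(agents))
-	for _, agent := range agents {
-		out[agent] = nil // empty partitions are still sent
-	}
-	for _, t := range targets {
-		h := fnv.New32a()
-		h.Write([]byte(instance))         // the same job may exist in many instances
-		h.Write([]byte(t["__address__"])) // ownership key
-		owner := agents[h.Sum32()%uint32(len(agents))]
-		out[owner] = append(out[owner], t)
-	}
-	return out
-}
-
-func main() {
-	targets := []Target{
-		{"__address__": "10.0.0.1:9090"},
-		{"__address__": "10.0.0.2:9090"},
-	}
-	fmt.Println(partition("default", targets, []string{"agent-a", "agent-b"}))
-}
-```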
-
-### Scrapers
-
-Scrapers receive Prometheus targets from a Discoverer and scrape them,
-appending scraped metrics to a Sender.
-
-Specifically, Scrapers manage a dynamic set of Prometheus scrape managers. One
-scrape manager will exist per instance that has a non-empty target partition.
-Scrape managers will then be configured with the scrape jobs (including
-relabeling rules) if they received at least one target for that job. The
-definition of a scrape job is retrieved using the agent's runtime config.
-
-There may be more than one Discoverer performing SD. This means that a Scraper
-can expect to receive target partitions from multiple Discoverers, and that it
-needs a way to merge those partitions to determine the full set of targets to
-scrape.
-
-Scrapers utilize the knowledge that all targets from a scrape job are owned by
-exactly one Discoverer. This allows the merge logic to be simple: store targets
-by scrape job name which can be flattened into a single set. Jobs that do not
-exist in the agent's runtime config will be ignored when merging, and
-eventually removed in the background to limit memory growth.
-
-With a set of targets, Scrapers will perform relabeling rules, scrape targets,
-perform metric relabeling rules, and finally send the metrics to a Sender that
-is associated with the Instance name from the partition.
-
-### Senders
-
-Finally, Senders store data in a WAL and configure Prometheus remote_write to
-ship the WAL metrics to some remote system.
-
-There is one sender launched per Metrics Instance from the agent configuration
-file. Because other subsystems append samples to the WAL for delivery, Senders
-must always exist, even if there aren't any Scrapers sending metrics to them.
-
-The set of running Senders and their individual configurations will update
-whenever the agent's configuration file changes.
-
-### Clustering
-
-An agent-wide cluster is always available, even if the local agent is not
-connected to any remote agents.
-
-The cluster will initially use [grafana/ckit][ckit], an extremely light
-clustering toolkit that uses gossip for peer discovery and health checking. A
-hash ring is locally deterministically calculated based on known peers.
-
-Normally, gossip is done over a dedicated UDP connection to transmit messages
-between peers. Since gossip is only utilized here for the peer list and health
-checking, gossip is done over the existing gRPC protocol. This has the added
-benefits of health checking the gRPC connection directly and reducing the
-amount of things to configure when setting up clustering.
-
-Bootstrapping the cluster will be done through [go-discover][go-discover] and a
-`--cluster.discover-peers` command-line flag. This flag will be required to use
-clustering, otherwise agents will act as a one-node cluster.
-
-## Changes from the original design
-
-### No partition TTL
-
-The [original proposal][per-target sharding] for target-level sharding used a
-TTL to detect if targets from jobs have gone stale. This added unnecessary
-complexity to the implementation, and introduced bugs where clock drift could
-cause targets to go stale immediately.
-
-This new design avoids the need for a TTL by instead checking to see if an
-entire job has gone stale using the runtime configuration.
-
-## Edge Cases
-
-### Discoverer network partition
-
-A Discoverer network partition occurs when two Discoverers determine ownership
-of the same job. This will cause targets to be sent twice to Scrapers. If
-targets are sent to the same Scraper, no negative effect will occur: the
-merging logic of scrapers will ignore the first partition and use the second
-instead.
-
-However, if targets are sent to different scrapers, then a Scraper network
-partition occurs. This may also cause some targets to not be scraped by any
-agent, depending on the order in which partitions are received by Discoverers.
-Future changes may add resistance to ordering problems by using Lamport clocks.
-
-### Scraper network partition
-
-If two Scrapers are scraping the same target, Remote Write will reject the
-duplicate samples. Otherwise, no noticeable effect occurs.
-
-### Unhealthy Discoverer
-
-Targets sent by the unhealthy Discoverer will continue to be active. Once the
-unhealthy Discoverer is removed from the gossip memberlist, a new Discoverer
-will pick up its SD jobs and re-deliver targets to the appropriate Scrapers.
-
-### Unhealthy Scraper
-
-Targets owned by the Scraper will be unscraped for a brief period of time. The
-Scraper will be removed from the gossip memberlist, forcing Discoverers to
-re-flush targets. The targets will then be assigned to a new Scraper and the
-system state will recover.
-
-### Cluster networking failure
-
-Nodes must be able to communicate with one another. If this is not
-possible, the gossip memberlist will remove unreachable nodes and cause one or
-more network partitions.
-
-## Trade-offs
-
-### No runtime instance management
-
-This approach removes runtime instance management by using the loaded
-configuration file as the source of truth. Subsystems that previously
-dynamically launched instances can work around this by mutating the runtime
-config when the config is first loaded.
-
-### Complexity
-
-Using the network for distribution adds some level of complexity and fragility
-to the system. There may be unidentified edge cases or flaws in the design
-proposed here.
-
-### No Configuration Store API
-
-This approach doesn't support an external configuration store API. Such an API
-should be delegated to an external process that flushes state to a file for the
-agent to read.
-
-### Configuration Desync
-
-This approach requires that all agents have the same configuration file. This can be
-worked around by using [#1121][#1121] to help make sure all agents pull their
-configs from the same source. A new metric that hashes the runtime config can
-also enable alerting on config desync.
-
-[#872]: https://github.com/grafana/agent/issues/872
-[#873]: https://github.com/grafana/agent/issues/873
-[#875]: https://github.com/grafana/agent/issues/875
-[#888]: https://github.com/grafana/agent/issues/888
-[per-target sharding]: https://docs.google.com/document/d/1JI804iaut6bKvZprOydes3Gb5Awo_J0sX-3ORlyc5l0
-[ckit]: https://github.com/grafana/ckit
-[go-discover]: https://github.com/hashicorp/go-discover
-[#1121]: https://github.com/grafana/agent/issues/1121
diff --git a/docs/rfcs/0004-agent-flow.md b/docs/rfcs/0004-agent-flow.md
deleted file mode 100644
index 3c1052e926..0000000000
--- a/docs/rfcs/0004-agent-flow.md
+++ /dev/null
@@ -1,195 +0,0 @@
-# This provided the basis for Agent Flow, and though not all the concepts/ideas will make it into Flow, it is good to have the historical context for why we started down this path. 
-
-# Agent Flow - Agent Utilizing Components
-
-
-* Date: 2022-03-30
-* Author: Matt Durham (@mattdurham)
-* PRs:
-  * [grafana/agent#1538](https://github.com/grafana/agent/pull/1538) - Problem Statement
-  * [grafana/agent#1546](https://github.com/grafana/agent/pull/1546) - Messages and Expressions
-
-## Overarching Problem Statement
-
-The Agent's configuration and onboarding are difficult to work with, and viewing the effect of configuration changes on telemetry data is difficult. Making the configuration simpler, composable and intuitive alleviates these concerns.
-
-
-## Description
-
-Agent Flow is intended to solve real world needs that the Grafana Agent team has identified in conversations with users and developers.
-
-These broadly include:
-
-- Lack of introspection within the agent
-  - Questions about what telemetry data are being sent
-  - Are rules applying correctly?
-  - How does filtering work?
-- Users have been requesting additional capabilities, and adding new features is hard due to coupling between systems; some examples include
-  - Remote Write, Different Input Formats
-  - Different output formats
-  - Filtering ala Relabel Configs is complex, and it is hard to figure out when rules apply
-- Lack of understanding how telemetry data moves through the agent
-  - Other systems use pipeline/extensions to allow users to understand how data moves through the system
-
-# 1. Introduction and Goals
-
-This design document outlines Agent Flow, a system for describing a programmable pipeline for telemetry data.
-
-Agent Flow refers to the execution, configuration, and visual configurator of data flow.
-
-### Goals
-
-* Allow users to more easily understand the impact of their configuration
-* Allow users to collect integration metrics across a set of agents
-* Allow users to run components based on a dynamic environment
-* Allow developers to easily add components to the system
-* Maintain high performance on all currently-supported platforms
-* Produce machine-readable and machine-writable configs for tooling such as formatters or a GUI.
-
-### Non-goals
-
-* Discuss technical details: we instead focus on how a user would interact with a hypothetical implementation of Agent Flow.
-
-# 2. Broad Solution Path
-
-At a high level, Agent Flow:
-
-* Breaks apart the existing hierarchical configuration file into reusable components
-* Allows components to be connected, resulting in a programmable pipeline of telemetry data
-
-This document considers three potential approaches to allow users to connect components together:
-
-1. Message passing (i.e., an actor model)
-2. Expressions (i.e., directly referencing the output of another component)
-3. A hybrid of both messages and expressions
-
-The Flow should in general resemble a flowchart or node graph. The data flow diagram would conceptually look like the below, with each node being composable and connecting with other nodes. 
- -``` -┌─────────────────────────┐ ┌──────────────────┐ ┌─────────────────────┐ ┌───────────────────┐ -│ │ ┌─────▶│ Target Filter │─────────▶│ Redis Integration │──────▶│ Metric Filter │──┐ -│ │ │ └──────────────────┘ └─────────────────────┘ └───────────────────┘ │ -│ Service Discovery │──────┤ │ -│ │ │ │ -│ │ │ │ -└─────────────────────────┘ │ ┌─────────────────┐ ┌──────────────────────┐ ┌────────┘ - ├─────▶│ Target Filter │──────────▶│ MySQL Integrations │───────────┐ │ - │ └─────────────────┘ └──────────────────────┘ │ │ - │ │ │ - │ ┌─────────────────┐ ┌─────────────┐ │ │ - └──────▶│ Target Filter │─────────────▶│ Scraper │─────────────┐ │ │ ┌────────────────┐ - └─────────────────┘ └─────────────┘ └──┴┬───────┴─▶│ Remote Write │ - │ └────────────────┘ - │ - │ -┌──────────────────────────┐ │ -│ Remote Write Receiver │─────┐ ┌───────────────────────┐ │ -└──────────────────────────┘ │ ┌────▶│ Metric Transformer │─────────┘ - │ │ └───────────────────────┘ - │ │ -┌─────────────────────────┐ │ ┌────────────────────┐ │ -│ HTTP Receiver │──────┴─────▶│ Metric Filter │────┘ ┌──────────────────────────────────┐ -└─────────────────────────┘ └────────────────────┘ │ Global and Server Settings │ - └──────────────────────────────────┘ -``` - -**Note: Consider all examples pseudoconfig** - -## 2.1 Expression Based - -Expression based is writing expressions that allow referencing other components streams/outputs/values and using them directly. Expressions allow referencing other fields, along with complex programming concepts. (functions, arithmetic ect). For instance `field1 = len(service_discover1.targets)`. - -**Pros** - -* Easier to Implement, evaluating expressions can map directly to existing config structs -* Components are more reusable, you can pass basic types around (string, int, bool) in addition to custom types - -**Cons** -* Harder for users to wire things together - * References to components are more complex, which may be harder to understand -* Harder to build a GUI for - * Every field of a component is potentially dynamic, making it harder to represent visually - - -## 2.2 Message Based - -Message based is where components have no knowledge of other components and information is passed strictly via input and output streams. - -**Pros** - -* Easier for users to understand the dependencies between components -* Easier to build a GUI for - * Inputs and Outputs are well defined and less granular - * Connections are made by connecting two components directly, compared to expressions which connect subsets of a component's output -* References between components are no more than strings, making the text-based representation language agnostic (e.g., it could be YAML, JSON, or any language) - -**Cons** - -* More time consuming to implement, existing integrations/items would need to be componentized -* Larger type system needed -* More structured to keep the amount of types down - -Messages require a more rigid type structure to minimize the number of total components. - -For example, it would be preferable to have a single `Credential` type that can be emitted by an s3, Vault, or Consul component. These components would then need to set a field that marks their output as a specific kind of Credential (such as Basic Auth or Bearer Auth). 
- -If, instead, you had multiple Credential types, like `MySQLCredentials` and `RedisCredentials`, you would have the following components: - -* Vault component for MySQL credentials -* Vault component for Redis credentials -* S3 component for MySQL credentials -* S3 component for Redis credentials -* (and so on) - -## 2.3 Hybrid - -## 2.4 Examples - -### 2.4.1 Simple Example Mysql from Target Discovery - -**Expression** - -``` -discovery "mysql_pods" { - # some sort of config here to find pods -} - - -integration "mysql" { - # Create one mysql integration for every element in the array here - for_each = discovery.mysql_pods.targets - - # Each spawned mysql integration has its data_source_name derived from - # the address label of the input target. - data_source_name = "root@(${each.labels["__address__"]})" -} -``` - -**Message** - -``` -discovery "mysqlpods" { - relabel_config { - [ - { - source = "__address__" - match = "*mysql" - action = "replace" - replacement = "root@($1)" - } - ] - } -} - -# I think this would depend on convention, mysql would look at __address__ , and maybe optionally look for username/password -integration "mysql" {} - -connections { - [ - { - source = mysqlpods - destination = mysql - } - ] -} -``` diff --git a/docs/rfcs/0005-river.md b/docs/rfcs/0005-river.md deleted file mode 100644 index 3fa82a5f7e..0000000000 --- a/docs/rfcs/0005-river.md +++ /dev/null @@ -1,408 +0,0 @@ -# River: A Flow-optimized config language - -* Date: 2022-06-27 -* Author: Robert Fratto (@rfratto), Matt Durham (@mattdurham) -* PR: [grafana/agent#1839](https://github.com/grafana/agent/pull/1839) - -## Summary - -Grafana Agent developers have been working towards a feature called Grafana -Agent Flow ([RFC-0004][]), a component-based re-imagining of Grafana Agent -which compartmentalize the different configurable pieces of the agent, allowing -users to more easily understand and debug configuration issues. Grafana Agent -Flow was purposefully scoped broadly to allow for exploring many different -component-based approaches for prototyping the experimental feature. - -The current implementation strategy focuses around expressions: settings for -components can be derived from expressions which can reference and mutate the -outputs of other components. Values can refer to arbitrary Go values like -interfaces or channels, enabling component developers to easily allow users to -construct data pipelines using Go APIs without requiring knowledge of the -underlying implementation. - -The initial expressions prototype used [HCL][], which initially fit Flow's -needs during early prototyping. However, the growing dependency on passing -around arbitrary Go values to build pipelines started to conflict with the -limitations of HCL, making HCL increasingly insufficient for Flow's specific -use case. - -We examined alternatives to HCL such as YAML, CUE, Jsonnet, Lua, and Go itself. -Eventually, we determined that the way we use arbitrary Go values in -expressions for constructing pipelines is a new use case warranting a -custom-built language. - -This document proposes River, an HCL-inspired declarative expressions-based -language for continuous runtime evaluation. The decision to propose a new -language is not taken lightly, and is seen as the last resort. As such, much of -this proposal will focus on the rationale leading to this choice. 
- -## Goals - -* Minimize learning curve as much as possible to reduce friction -* Make it easy for developers to create Flow components which operate with - arbitrary Go values (interfaces, channels, etc.) -* Expose error messages in an easily understandable and actionable way -* Natively support using Go values of any type in config expressions -* Natively support passing around and invoking real Go functions in config expressions - -The language design will be scoped as small as possible, and new features will -only be added over time as they are determined to be strictly necessary for -Flow. - -## Non-Goals - -We are not aiming to create a general purpose configuration language. While it -would be possible for River to eventually be used in different contexts by -different projects, the primary goal today is specifically targeting Grafana -Agent Flow. - -We will not provide a full specification for River here, only lightly -describing it to allow implementation details to change over time. - -## Rationale - -### Why an expression language? Why not YAML? - -The entire rationale for creating a new language depends on the rationale that -expressions provide a useful amount of capabilities to users. Expressions -enable users to manipulate values to meet their own use cases in ways that -otherwise would require dedicated feature work, such as: - -* Allowing users to merge metadata together from distinct sources when adding - labels to metrics, such as merging labels from discovered Kubernetes - namespaces with discovered Kubernetes pods. - -* Allowing users to chain Prometheus service discoveries (e.g., feed the output - of Kubernetes Service Discovery into HTTP Service Discovery) - -* Allowing users to perform custom conditional logic, such as increasing rate - limits during busier business months. - -Without expressions, we would need more components for common tasks. A -`concat()` function call can be used to combine lists of discovered Prometheus -targets, but without expressions, there would likely need to be a dedicated -component for aggregating sets of targets together. - -The belief is that the work required to use and maintain an expression language -is far less than the combined work to implement features that would be handled -by expressions out of the box. - -YAML by itself does not support expressions. While expressions could be added -to YAML through the use of templates (e.g., `field_a: {{ some_variable + 5 -}}`), it is beyond the scope of what YAML was intended for and would be more -cumbersome to use compared to a language where expressions are a first-class -concept. - -### Why an embedded language? - -We are using the term "embedded languages" to refer to languages typically -known for the ability for maintainers of the project to expose APIs to users of -the embedded language, such as the Lua API used by Neovim. Embedded languages -typically imply tight integration with the application embedding them, as -opposed to something like YAML which is a language consumed once at load time. - -An embedded language is a good fit for Flow: - -* It makes it easy for developers to expose APIs which users can interact with - or pass around. These APIs can be opaque arbitrary Go types which the user - doesn't need to know the detail of, only that it refers to something like a - stream of metric samples. - -* It is well-suited for continuous evaluation (i.e., the core feature of Flow) - so configuration can adapt to a changing environment. - -### Why a declarative language? 
Why not Lua?
-
-The language Flow relies on should have a minimal learning curve. While a
-language like Lua could likely be a decent fit for Flow, imperative languages
-have steeper learning curves compared to declarative languages.
-
-Declarative languages natively map to configuration files, since configuration
-files are used to tell the application the desired state, reducing the learning
-curve for the language and making it easier for users to reason about what the
-final config state should be.
-
-### Why not HCL?
-
-> For some background, it's important to note that HCL can be considered two
-> separate projects: `hashicorp/hcl` (the language and expression evaluator)
-> and `zclconf/go-cty` (the value and type system used by HCL).
-
-HCL was the obvious first choice for the Flow prototype: it supports
-expressions, you can expose functions for users to call, and its syntax has a
-small learning curve.
-
-However, I found the [schema-driven processing][] API exposed by HCL to be
-difficult to work with for Flow, requiring a lot of boilerplate. While there is
-a library to interoperate with tagged Go structs, it was insufficient for
-passing around arbitrary Go values, requiring me to [fork][gohcl] both
-github.com/hashicorp/hcl/v2/gohcl and github.com/zclconf/go-cty/cty/gocty to
-reduce boilerplate. While the fork lets us avoid the boilerplate of hand-crafting
-schema definitions for components, it contains a non-trivial amount of changes
-that would need to be contributed upstream to be tenable long-term.
-
-Additionally, there is desired functionality that is not supported today in
-HCL/go-cty:
-
-1. A stronger focus on performance and memory usage, changing go-cty to operate
-   around Go values instead of converting Go values to a custom representation.
-   The performance gain will suit our needs for doing continuous evaluation of
-   expressions.
-2. Ability to disable go-cty's requirement that strings are UTF-8 encoded
-3. Pass around functions as go-cty values (e.g., to allow a clustering
-   component to expose a function to check for ownership of a key against a
-   hash ring)
-4. Ability to declare local variables in a scope without needing a `locals`
-   block like the one seen in Terraform.
-
-The combination of desired changes across gohcl and go-cty, the fork that was
-already necessary to make it easier to adopt HCL for Flow, and the desire to
-have a stronger interaction with arbitrary Go values led to the decision that a
-new Flow-specific language was warranted.
-
-### Why now?
-
-Grafana Agent Flow is already a dramatic change to the Agent. To avoid users
-being exhausted from the frequency of dramatic changes, it would be ideal for
-Grafana Agent Flow to ship with River instead of eventually migrating to River.
-
-## Minimizing impact
-
-New languages always have some amount of learning curve, and if the learning
-curve is too steep, the language will fail to be adopted.
-
-We will minimize the impact of a new language by:
-
-* Minimizing the learning curve as much as possible by not creating
-  too many novel ideas at the language level.
-
-* Tending the syntax towards allowing users to copy-and-paste examples to learn
-  as they go.
-
-* Heavily documenting the language so that all questions a user may have are
-  answered.
-
-* Ensuring that error messages explain the problem and make the resolution
-  obvious.
-
-## Proposal
-
-River's syntax is inspired by HCL. However, some of the syntax will be changed
-from HCL to make River more easily identifiable as a different language and
-avoid situations where users confuse the two.
-
-River focuses on expressions, attributes, and blocks.
-
-### Expressions
-
-Expressions resolve to values used by River. The types of expressions are:
-
-* Literal expressions:
-  * Booleans: `true`, `false`
-  * Numbers: `3`, `3.5`, `3e+10`, etc.
-  * Strings: `"Hello, world!"`
-* Unary operations:
-  * Logical NOT: `!true`
-  * Negative: `-5`
-* Binary operations:
-  * Math operators: `+`, `-`, `*`, `/`, `^` (pow)
-  * Equality operators: `==`, `!=`, `<`, `<=`, `>`, `>=`
-  * Logical operators: `||`, `&&`
-* Lists: `[1, 2, 3]`
-* Objects: `{ a = 5, b = 6 }`
-* Variable reference: `foobar`
-* Indexing: `some_list[0]`
-* Field access: `some_object.field_a`
-* Function calls: `concat([0, 1], [2, 3])`
-* Parenthesized expression: `(3 + 5)`
-
-### Attributes
-
-Attributes are key-value pairs which set individual settings, formatted as
-`<name> = <value>`:
-
-```
-log_level  = "debug"
-log_format = "logfmt"
-```
-
-### Blocks
-
-Blocks are named groupings of attributes, wrapped in curly braces. Blocks can
-also contain other blocks.
-
-```
-server {
-  http_address = "127.0.0.1:12345"
-}
-
-prometheus.storage {
-  remote_write {
-    url = "http://localhost:9090/api/v1/write"
-  }
-
-  remote_write {
-    url = "http://localhost:9091/api/v1/write"
-  }
-}
-```
-
-Block names must consist of one or more identifiers separated by `.`. Blocks
-can also be given user-specified labels, denoted as a string wrapped in quotes:
-
-```
-prometheus.storage "primary" {
-  // ...
-}
-
-prometheus.storage "secondary" {
-  // ...
-}
-```
-
-### Type system
-
-Values are categorized as being one of the following:
-
-* `number`
-* `bool`
-* `string`
-* `list`
-  * Elements within the list do not have to be the same type.
-* `object`
-* `function`
-  * Function values differentiate River from HCL/go-cty, which does not support
-    passing around or invoking function values.
-* `capsule`
-  * Capsule is a catch-all type which refers to some arbitrary Go value which
-    is not one of the other types. For example, `<-chan int` would be
-    represented as a capsule in River.
-
-River types map to Go types as follows:
-
-* `number`: Go `int*`, `uint*`, `float*`
-* `bool`: Go `bool`
-* `string`: Go `string`, `[]byte`
-* `list`: Go `[]T`, `[...]T`.
-* `object`: Go `map[string]T`, and structs
-* `function`: Any Go function.
-  * If the final return value of the Go function is an error, it will be
-    checked on calling; a non-nil error will cause the evaluation of the
-    function to fail.
-* `capsule`: All other Go values.
-  * Additionally, any type which implements `interface { RiverCapsuleMarker() }`
-    will also be treated as a capsule.
-
-River acts like a combination of a configuration language like HCL and an
-embedded language like Lua due to its focus on supporting all Go values,
-including values which cannot be directly represented by the user (such as Go
-interfaces). This enables developers to use native Go types for easily passing
-around business logic which users wire together through their configuration.
-
-### River struct tags
-
-River struct tags are used for converting between River values and Go structs.
-Tags take one of the following forms: - -* `river:"example,attr"`: required attribute named `example` -* `river:"example,attr,optional"`: optional attribute named `example` -* `river:"example,block"`: required block named `example` -* `river:"example,block,optional"`: optional block named `example` -* `river:",label"`: Used for decoding block labels into a `string`. - -Attribute and block names must be unique across the whole type. When encoding a -Go struct, inner blocks are converted into objects. Attributes are converted -into River values of the appropriate type. - -Fields without struct tags are ignored. - -### Errors - -There are multiple types of errors which may occur: - -* Lexing / parsing errors -* Evaluation errors (when evaluating an expression into a River value) -* Decoding errors (when converting a River value into a Go value) -* Validation errors (when Go code validates a value) - -Errors should be displayed to the user in a way that gives as much information -as possible. Errors which involve unexpected values should print the value to -ease debugging. - -For this `example.river` config file which expects the `targets` field to be a -list of objects: - -``` -prometheus.scrape "example1" { - targets = 5 -} - -prometheus.scrape "example2" { - targets = [5] -} - -prometheus.scrape "example3" { - targets = some_list_of_objects + 5 -} -``` - -Errors could be shown to the user like: - -``` -example.river:2:3: targets expects list value, got number - - | targets = 5 - - Value: - 5 - -example.river:6:3: list element 0 must be object, got number - - | targets = [5] - - Value: - 5 - -example.river:10:13: cannot perform `+` on types list and number - - | some_list_of_objects + 5 - - Expression: - [{}] + 5 -``` - -The errors print out the offending portion of the config file alongside the -offending values. Printing out the offending values is useful when the values -come from the result of referring to a variable or calling a function. - -### Concerns - -No existing tooling for River will exist from day one. While the initial -implementation should include a formatter, tools like syntax highlighting or -LSPs won't exist and will need to be created over time. - -## Alternatives considered - -### Handles - -Instead of passing around literal arbitrary Go values, handles could be used to -_refer_ to arbitrary Go values. For example, a number could refer to some entry -in an in-memory store which holds a Go channel or interface. - -Pros: -* Works better with HCL in its current state without needing the gohcl fork -* Would enable YAML, CUE, and Jsonnet to pass around arbitrary values - -Cons: -* Still wouldn't allow HCL to pass around functions as values -* More tedious for developers to work with (they now have to exchange handles - for values). -* Developers will have to deal with extra logic for handling stale handles, - whereas arbitrary Go values would continue to exist until they've been - garbage collected. 
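-
-To make the struct tags described earlier concrete, here is a hypothetical
-sketch of how a component's arguments could be annotated. The field names and
-types are illustrative only, not part of this proposal:
-
-```go
-// Hypothetical sketch of River struct tags on a component's arguments.
-package main
-
-import "fmt"
-
-// RemoteWrite models a nested, repeatable "remote_write" block.
-type RemoteWrite struct {
-	URL string `river:"url,attr"`
-}
-
-// Arguments demonstrates each tag form from the list above.
-type Arguments struct {
-	Label       string        `river:",label"`                      // decodes the block label
-	LogLevel    string        `river:"log_level,attr,optional"`     // optional attribute
-	Targets     []string      `river:"targets,attr"`                // required attribute
-	RemoteWrite []RemoteWrite `river:"remote_write,block,optional"` // optional block
-
-	internal int // no struct tag: ignored when encoding and decoding
-}
-
-func main() {
-	fmt.Printf("%+v\n", Arguments{Label: "primary", LogLevel: "debug"})
-}
-```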
-
-[RFC-0004]: ./0004-agent-flow.md
-[HCL]: https://github.com/hashicorp/hcl
-[go-cty]: https://github.com/zclconf/go-cty
-[gohcl]: https://github.com/rfratto/gohcl
-[schema-driven processing]: https://github.com/hashicorp/hcl/blob/main/spec.md#schema-driven-processing
diff --git a/docs/rfcs/0006-clustering.md b/docs/rfcs/0006-clustering.md
deleted file mode 100644
index b29070410e..0000000000
--- a/docs/rfcs/0006-clustering.md
+++ /dev/null
@@ -1,330 +0,0 @@
-# Agent Clustering
-
-* Date: 2023-03-02
-* Author: Paschalis Tsilias (@tpaschalis)
-* PR: [grafana/agent#3151](https://github.com/grafana/agent/pull/3151)
-
-## Summary - Background
-We routinely run agents with 1-10 million active series; we regularly see
-sharded agent deployments with ~30M series each run without hiccups.
-
-Our usual recommendation is to start thinking about horizontal scaling around
-the 2M mark. Unfortunately the current
-[options](https://grafana.com/docs/agent/latest/operation-guide/) have a number
-of challenges, and many of these are not even directly applicable to Flow mode:
-
-* Hashmod sharding is not dynamic and requires _all_ agents to update their
-  configuration and reshard whenever a member joins or leaves the cluster.
-* The scraping service requires another set of dependencies to be introduced
-  (etcd or consul), and can only shard on the configuration-file level which
-puts the responsibility on the developer to maintain multiple configuration
-files that, ideally, also balance the number of targets they expose.
-* Host filtering ties users to a daemonset-like deployment model and can put
-  unnecessary load on service discovery APIs.
-* Hand-writing configuration to distribute the load into different agent
-  deployments is simply not manageable.
-
-As Flow mode aims to solve many of the configuration woes of static mode, we
-would like to propose a new Flow-native clustering mode that allows the Agent
-to scale elastically with a single configuration file and an eventually
-consistent model.
-
-## Goals
-* Implement a clustered mode that allows the Agent to elastically scale without
-  changing the configuration
-* Enable Flow components to work together and distribute load across a cluster
-* Enable fine-grained scheduling of Flow components within a cluster
-* Provide an easy-to-use replacement for scraping service and hashmod sharding
-* Allow users to understand and debug the status of their cluster
-
-## Non-goals
-* Recreate the scraping service as-is. More specifically:
-  - Use an external store for configuration files
-  - Expose an API for managing configuration
-  - Running multiple configuration files at once.
-* Distribute load by merging multiple configuration files.
-
-## Proposal
-The proposal is based on prior art: https://github.com/grafana/agent/issues/872, https://github.com/grafana/agent/pull/1140
-
-* We will continue with a
-  [gossip-based](https://en.wikipedia.org/wiki/Gossip_protocol) approach using
-Hashicorp’s memberlist for our cluster
-* We will reuse the rfratto/ckit package code
-* We will use HTTP2 for communication between nodes
-* We will use go-discover for bootstrapping the cluster and discovering peers
-* A non-clustered Agent will work similar to a one-node cluster, which in the
-  future will be the default mode of operation
-
-## Implementation
-The feature will be behind a feature flag `--enable-features=clustering`. An
-agent can opt-in to clustering by passing a `--cluster.discover-peers`
-command-line flag with a comma-separated list of peers to connect to. Whenever
-an agent node receives a message about another node joining or leaving the
-cluster, it will propagate the message to its neighbors, and so on, until this
-information has reached all members of the cluster. The gossip memberlist will
-be utilized for the peer list, health checking and distribution of tokens
-between the agent nodes.
-
-All nodes will have access to a shared ckit.Sharder interface implementation
-which will be used to expose methods for each individual agent’s Flow
-controller to determine ownership of resources. As nodes enter and exit the
-cluster, the Sharder (eg. a consistent hashing ring) will redistribute tokens
-among the nodes in the cluster. The eventually consistent cluster state is when
-all nodes are working with the same configuration and have knowledge of each
-one of their peers in the cluster. The Sharder will be used to determine
-ownership of resources by hashing a desired value and checking the peers
-responsible for the corresponding ring token.
-
-When all nodes in the cluster have an up-to-date image of their peers, they
-will be able to independently agree on the ownership of a resource without
-having to communicate with each other, as local hashing will provide the same
-results for all nodes. For example, an agent node will be able to hash the
-label set of a target and check which of the peers is responsible for scraping
-that target. Similarly, an agent node will be able to hash the fully qualified
-component name and decide whether a component needs to be scheduled on this
-node or if another peer takes responsibility for it.
-
-On a more practical note, this clustering will most likely work with a
-[Kubernetes HPA](https://kubernetes.io/docs/tasks/run-application/horizontal-pod-autoscale/)
-so we can dynamically scale a fleet of agents. The target resource the
-autoscaler will look at is most likely memory usage. In the future we may
-allow scaling on different/custom metrics as well.
-
-We will start by creating the abstractions that will enable the two use-cases
-presented in the next section.
-
-## Use cases
-In the first iteration of agent clustering, we would like to start with the
-following use-cases. These two are distinct in the way that they make use of
-scheduling.
-
-The first one makes sure that we have a way of notifying components of cluster
-changes, calling their Update method, and continuously re-evaluating ownership
-of resources. When this is implemented, we can start thinking about the second
-one, which would provide a way to dynamically enable and disable components in
-the cluster.
-
-### Distributing scrape targets
-The main predictor for the size of an Agent deployment is the number of targets
-it is scraping/reading logs from. Components that use the Flow concept of a
-“target” as their Arguments should be able to distribute the target load
-between themselves. To do that we can introduce a layer of abstraction over
-the Targets definition that can interact with the Sharder provided by the
-clusterer and provide a simple API, for example:
-```go
-type Targets interface {
-	Get() []Target
-}
-```
-
-When clustering is enabled, this API will allow the component to distribute the
-targets using a consistent hashing approach. Ownership is determined using the
-entire label set’s fingerprint/hash and where it belongs on the implementation
-exposed by the shared Sharder interface, and each agent node will only scrape
-the targets that it is personally responsible for. When clustering is
-disabled, the components should work as if they are a standalone node and
-scrape the entire target set.
-
-We will be using each component’s Arguments to enable/disable the clustering
-functionality. The Targets abstraction will be registering and updating
-metrics that will detail the distribution of the targets between nodes, as well
-as how they change over time.
-
-I propose that we start with the following set of components that make use of
-this functionality: prometheus.scrape, loki.source.file,
-loki.source.kubernetes, and pyroscope.scrape.
-
-Here’s how the configuration for a component could look:
-```river
-prometheus.scrape "pods" {
-  clustering {
-    node_updates = true
-  }
-
-  targets    = discovery.kubernetes.pods.targets
-  forward_to = [prometheus.remote_write.default.receiver]
-}
-```
-
-Distribution of load should happen automatically for these components without
-any manual handling from the component authors. Components that enable
-clustering will register themselves to the Flow controller to be notified and
-have their Update method called whenever the state of the cluster changes. An
-idea of how this can work is to have something similar to how OnExportsChange
-works. This abstraction will allow components to communicate back to the
-controller whether a clustering-related Argument has changed. The controller
-will keep a record of these components and when a cluster change is detected
-these will have their Update method called (or just queued for re-evaluation).
-
-While the cluster is not in a consistent state, this might lead to temporarily
-missing or duplicated scrapes as not all nodes will have the same image of the
-cluster and agree on the distribution of tokens. As long as the cluster can
-stabilize within a time period comparable to the scraping interval, this should
-not be an issue.
-
-Finally, scaling the cluster up and down results in only 1/N targets’ ownership
-being transferred.
-
-### Component scheduling
-The exposed clustering functionality can also allow
-for fine-grained scheduling of specific Flow components. The widely used
-example is that, in the context of an agent cluster, we would only need to run
-a MySQL exporter once per MySQL database. I propose that we create a new
-health status called “Disabled”. Graph nodes should provide good hints in the
-UI in regards to which agent node the component was scheduled at and work
-similarly to Exited nodes. Disabled components should be registered and built
-by the controller and _not_ have their Run method called. Downstream
-dependencies will get the evaluated values of the component's exports, but a
-Disabled dependency should not have any other side-effects for now. This may
-warrant component changes. For example, the initial value of a LogsReceiver is
-a channel which will block fanning out to other entries; this should be fixed.
-
-The controller will have access to the Sharder implementation, and node
-ownership is determined by hashing the ComponentNode ID that we want to
-schedule. Once all the components have been loaded in, the controller will
-check if it should exclude any components from the slice of `runnables` that
-will be passed to the Flow Scheduler.
-
-The same logic should be applied during cluster changes. In cases where a new
-node might get ownership of the component, this loop will call Synchronize with
-the right set of components so that either they Run, or their context is
-terminated.
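-
-As a rough sketch of that ownership check (simplified; the real controller
-would use the shared ckit.Sharder implementation rather than the stand-in
-interface shown here):
-
-```go
-// Sketch: decide whether the local node should run a component by checking
-// ownership of the ComponentNode ID against the cluster's Sharder.
-package main
-
-import "fmt"
-
-// Sharder is a stand-in for the shared ckit.Sharder implementation.
-type Sharder interface {
-	Lookup(key string) []string // peers that own the given key
-}
-
-type staticSharder struct{ owner string }
-
-func (s staticSharder) Lookup(key string) []string { return []string{s.owner} }
-
-// shouldRun reports whether the local node owns the component and should
-// therefore pass it to the Flow scheduler instead of marking it Disabled.
-func shouldRun(sh Sharder, localNode, componentID string) bool {
-	for _, owner := range sh.Lookup(componentID) {
-		if owner == localNode {
-			return true
-		}
-	}
-	return false
-}
-
-func main() {
-	sh := staticSharder{owner: "agent-0"}
-	fmt.Println(shouldRun(sh, "agent-0", "prometheus.exporter.mysql.default"))
-	fmt.Println(shouldRun(sh, "agent-1", "prometheus.exporter.mysql.default"))
-}
-```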
-
-Finally, when clustering is enabled, each component will expose a set of
-metrics that use labels to announce the node it has been scheduled on. If this
-is a worry due to cardinality issues, we can find another way of providing this
-information.
-
-On a more practical note, we’ll have to choose how components opt in to
-component scheduling.
-
-For example, we could implement one of the following:
-* Adding a new Argument block that is implicitly present by default on
-_all_ components:
-```
---- cfg.river ---
-prometheus.scrape "default" {
-  clustering {
-    node_scheduling = true
-  }
-  ...
-}
-```
-
-* Having a new method exposed from the component’s Options to enable/disable
-clustering; component authors can decide when/how to call it:
-```
-func (c *Component) Update(args component.Arguments) error {
-	newArgs := args.(Arguments)
-	...
-	c.opts.EnableClusterScheduling(newArgs.Clustering)
-}
-```
-
-* Having a top-level configuration block that handles which components are
-scheduled:
-```
---- cfg.river ---
-cluster_scheduling {
-  enable = [prometheus.scrape.default]
-}
-```
-
-Since we cannot predict which of the growing list of components (up to 57
-currently, >110 planned) will require clustered scheduling, we should try and
-find a higher-level abstraction for it so it can be used by _any_ component.
-As such, I propose that we go with the _first_ option. It might be a little
-harder to implement, but is most in-line with the high-level abstraction that
-we’re aiming for.
-
-## Failure Modes
-
-### Configuration partition
-One of our axioms is that all agents in the same cluster run the same
-configuration file, can reach the same discovery APIs and remote endpoints, and
-have the same environment variable and network access. In case that agents
-_cannot_ run the same configuration file (eg. due to different versions), or
-that network issues prevent them from discovering or remote-writing correctly,
-it will be hard to debug and understand where the problem lies.
-
-At the very least, we should report what we can control, and that is the hash
-of the configuration file. Ideally, as a new configuration file is being
-applied to an agent cluster (eg. pods being rolled out), the state will
-eventually be consistent. Is this enough, or should we limit clustering to
-only nodes that have the same configuration hash?
-
-### Networking failures
-In case that a node is unreachable due to networking issues, it will be removed
-from the gossip memberlist and cause one or more network partitions.
-
-Also, in case that agent nodes lose connectivity with their cluster peers but
-not to scrape targets or remote write, they will fall back to behaving as
-single-node clusters leading them to overload themselves. We can recommend
-setting some limits per agent to avoid this, or have alerts to detect multiple
-single-node clusters running with the same config hash.
-
-### Scrape targets network partition
-If two agents are scraping the same target (unbeknownst to each other), the
-cluster will incur some extra load, but remote write will reject the duplicate
-sample (first one wins).
-
-### Scheduling network partition
-If two agents are scheduling the same component (unbeknownst to each other),
-similarly the cluster will incur some extra load, but the first sample wins.
-
-### Unhealthy node
-In case a node goes unhealthy, both its targets and scheduled components
-will end up not scraping any metrics for a period of time. 
When the node is -removed from the memberlist, then the component will be rescheduled on another -node, and its targets will be redistributed elsewhere. As long as the amount -of time required for the cluster state to recover is (how much?) smaller than -the typical scrape interval, then this behavior might not result in losing any -scrape cycles. - -## Debugging -The clustering implementation must provide tools so that users can understand -how clustering works at the agent node level, as well as the component level. - -On the _node level_, we can introduce a new tab on the Flow UI page which shows -the status of all nodes in the cluster and allows users to navigate to their UI -page. On the _component level_, the component’s debug info will contain -information regarding both the entire target set _and_ the targets the current -node is responsible for, as well as an indication of the work that other -components are doing, and provide a way to navigate to that node’s UI. - -We will expose some clustering-specific internal metrics that provide a view of -the cluster status, such as the hash of the configuration file applied, the -load on each cluster, the tokens the cluster is responsible for, the timestamp -it was last updated, as well as a set of dashboard that can give this -information out at a glance. - -## Questions - Concerns - Limitations - -### Component Arguments naming -I’m not yet 100% sold on the name of the Arguments that components can use to -enable clustering, I’m open to suggestions. More specifically, I’m not sure if -they should be tied to the specific use-case that we’re trying to achieve, or -be more generic. - -### Incurring load on SD API -Having the clustering happen on the targets layer means that an N sized cluster -will require putting N-times more load on the service discovery API. This is -true even today with N hashmod shards, but still might be something to look out -for if we go for larger cluster sizes. - -### Receiver-based components -The clustering approach is mainly useful for distributing _internal_ load in -pull-based pipelines. Push-based pipelines can use different external load -distribution mechanisms such as a load balancer placed in front of replicas so -the clustering approach described here is most likely not applicable. - - ## Future roadmap/ideas -* Should we enable replication from the clustering implementation itself? Eg. - allow targets to belong to _two_ nodes? -* Should we make the target distribution strategy configurable? (eg. determine - ownership by first grouping into `__address__` or some other field) -* Should the new “Disabled” health status propagate through the graph? Eg. if a - prometheus.scrape component only scrapes a prometheus.exporter.mysql -exporter, it should only get scheduled where its dependency is and not -elsewhere. diff --git a/docs/rfcs/0006-future-of-agent-operator.md b/docs/rfcs/0006-future-of-agent-operator.md deleted file mode 100644 index 3a5c3d2e56..0000000000 --- a/docs/rfcs/0006-future-of-agent-operator.md +++ /dev/null @@ -1,35 +0,0 @@ -# Status and future plans for the Grafana Agent Operator - -* Date: 2022-08-17 -* Author: Craig Peterson (@captncraig) -* PR: [grafana/agent#2046](https://github.com/grafana/agent/pull/2046) - -## Summary - -A recent [draft rfc](https://github.com/grafana/agent/pull/1565) discussed the possibility of deprecating the Agent Operator. 
## Questions - Concerns - Limitations

### Component Arguments naming
I'm not yet 100% sold on the name of the Arguments that components can use to enable clustering; I'm open to suggestions. More specifically, I'm not sure whether they should be tied to the specific use case that we're trying to achieve, or be more generic.

### Incurring load on SD API
Having the clustering happen on the targets layer means that an N-sized cluster will put N times more load on the service discovery API. This is true even today with N hashmod shards, but it is still something to look out for if we go for larger cluster sizes.

### Receiver-based components
The clustering approach is mainly useful for distributing _internal_ load in pull-based pipelines. Push-based pipelines can use external load-distribution mechanisms, such as a load balancer placed in front of replicas, so the clustering approach described here is most likely not applicable to them.

## Future roadmap/ideas
* Should we enable replication from the clustering implementation itself? E.g., allow targets to belong to _two_ nodes?
* Should we make the target distribution strategy configurable? (E.g., determine ownership by first grouping on `__address__` or some other field.)
* Should the new "Disabled" health status propagate through the graph? E.g., if a prometheus.scrape component only scrapes a prometheus.exporter.mysql exporter, it should only get scheduled where its dependency is and not elsewhere.
diff --git a/docs/rfcs/0006-future-of-agent-operator.md b/docs/rfcs/0006-future-of-agent-operator.md
deleted file mode 100644
index 3a5c3d2e56..0000000000
--- a/docs/rfcs/0006-future-of-agent-operator.md
+++ /dev/null
@@ -1,35 +0,0 @@
# Status and future plans for the Grafana Agent Operator

* Date: 2022-08-17
* Author: Craig Peterson (@captncraig)
* PR: [grafana/agent#2046](https://github.com/grafana/agent/pull/2046)

## Summary

A recent [draft RFC](https://github.com/grafana/agent/pull/1565) discussed the possibility of deprecating the Agent Operator. Although we ultimately closed that proposal, there are still valid concerns in the community about the long-term support to be expected around the operator. In the interest of full transparency, we'd like to lay out our goals and vision for the project and reaffirm our commitment to it.

## Goals of the operator

The operator serves two primary functions:

1. Allow users to reuse the same monitoring CRDs provided by the [Prometheus Operator](), such as `PodMonitor` and `ServiceMonitor`. This is important for dynamic monitoring of Kubernetes components, especially in environments where monitoring configuration is divided among multiple teams.
2. Allow the Agent itself to be installed and configured using `GrafanaAgent` and `MetricsInstance` CRDs. This often simplifies deployments and allows a declarative configuration style.

These two goals are somewhat independent of one another. Both use cases are important to us, and we are committed to supporting them into the future.

## Difficulties of the operator

The operator is a fairly complex piece of code, and has been slower than some other packages to receive updates and new Agent features. It has received less documentation attention than other areas, and is thus often misunderstood. These are all areas we hope to improve over the next few months.

## Future Plans for the operator

1. We intend to bring full support for the Prometheus Operator CRDs into the Grafana Agent itself in the coming months. That will make a good deal of the core functionality of the Operator available to all Agent deployments, whether created by the operator, a Helm chart, or whatever other method is chosen. That should also bring some performance and stability improvements, such as fewer agent reloads.
2. Doing that will allow us to reduce the scope of the operator itself while fully maintaining backward compatibility.
3. The operator will then be primarily useful for creating and configuring Grafana Agent instances declaratively. We can then potentially look into alternatives for generating Agent deployment manifests (Helm charts, jsonnet libraries, etc.) as our primary recommendation, but we will remain mindful of our commitment to the Operator and will make reasonable efforts to maintain backward compatibility as much as possible.

## Beta status

The Grafana Agent Operator is still considered beta software. It has received a better reception than anticipated, and is now an important part of the Agent project. We are committed to supporting the Operator into the future, but are going to leave the beta designation in place while making the larger refactorings described above. We make every effort to avoid breaking changes, and hope that custom resource definitions will remain compatible, but it is possible some changes will be necessary. We will make every effort to justify and communicate such scenarios as they arise.

Once we are confident we have an Operator we are happy with and that the resource definitions are stable, we will revisit the beta status as soon as we can.
diff --git a/docs/rfcs/0007-flow-modules.md b/docs/rfcs/0007-flow-modules.md
deleted file mode 100644
index 5058663dd3..0000000000
--- a/docs/rfcs/0007-flow-modules.md
+++ /dev/null
@@ -1,230 +0,0 @@
# Flow Modules

* Date: 2023-01-27
* Author: Matt Durham @mattdurham
* PR: [grafana/agent#2898](https://github.com/grafana/agent/pull/2898)

[Formatted link for ease of use](https://github.com/grafana/agent/blob/rfc_modules/docs/rfcs/0007-flow-modules.md)

## RFC Goals

* Explain the use cases of modules
* Explain what modules are
* Go over possible syntax for modules
* Go over pros and cons of modules

## Summary

One of the primary goals for the production usage of Agent Flow is parity with the static subsystem. One of the features of the static subsystem is [scraping service](https://github.com/grafana/agent/blob/main/docs/sources/configuration/scraping-service.md). Scraping service allows a user to run a fleet of agents and have thousands of scrape configurations distributed among running Agents. In discussions within the Agent team, we decided not to limit dynamic loading to scrape configs only, but to allow most components to be loaded and used.

During this time the Agent team saw a lot of potential in the idea of "modules." Modules package up sets of components for specific workflows, publishing common use cases and allowing better usage for internal customers in the Agent as a Service model.

## Goals

* Support single module loading via `module.string`
* Enable re-use of common patterns
* Allow loading a module from a string
* Allow modules to load other modules
* Modules should be sandboxed except via arguments and exports

### Enable re-use of common patterns

Common functionality can be wrapped in a set of common components that form a module. These shared modules can then be used instead of reinventing use cases.

### Allow loading a module from a string

Modules will not care about the source of a string. In the case of `module.string`, the module takes in one string of valid river configuration.

### Allow modules to load other modules

Modules will be able to load other modules, with reasonable safeguards. There will be a stack limit for the depth of sub-modules, and modules will be unable to load themselves.

### Modules should be sandboxed except via arguments and exports

Modules cannot directly access children or parent modules except through predefined arguments and exports.

## Non Goals

Non-goals are capabilities that will not be included in the initial release of modules but may come in later versions.

* Add additional capabilities to load strings
* Any type of versioning
* Any user interface work beyond ensuring it works as the UI currently does
* Any sort of security for modules and what modules are allowed
* Any sort of metadata

### Add additional capabilities to load strings

Modules will not care about the source of the string that represents the river syntax, nor will modules have any inherent reload semantics. The component supplying the string will be responsible for the source and will notify the module when the input changes so that it can utilize the new river configuration.

### Any type of versioning

Modules will not contain any sort of versioning, nor will they check for compatibility outside the normal river checks.
### Any user interface work beyond ensuring it works as the UI currently does

Users will not be able to drill into modules; they will be represented like any other component.

## Example

```river
// module
argument "password" {
  optional = false
  comment = "password for mysql"
}

argument "username" {
  optional = false
  comment = "username for mysql"
}

export "targets" {
  comment = "targets for the integration"
  value = integrations.mysql.server1.targets
}

integrations.mysql "server1" {
  username = argument.username.value
  password = argument.password.value
}
```

```river
// parent

local.file "mysql" {
  filename = "/test/mysql.river"
}

module.string "mysql" {
  content = local.file.mysql.content
  arguments = {
    "password" = PASSWORD,
    "username" = USERNAME,
  }
}

prometheus.scrape "scraper" {
  targets = module.string.mysql.exports.targets
}
```

## Limitations

* A module must not directly or indirectly load itself; this will not be enforced by the system.
* Singleton components are not supported at this time. Example: [node_exporter](https://grafana.com/docs/agent/latest/flow/reference/components/prometheus.integration.node_exporter/).
* Modules will not prevent competing resources, such as two modules starting a server on the same port.
* [Configuration blocks](https://grafana.com/docs/agent/latest/flow/reference/config-blocks/#configuration-blocks) will not be supported.
* Names of arguments and exports within a module must be unique across that module.

## Proposal

Add the ability to load `modules` as subgraphs of the primary `graph`. Modules may call other modules within a reasonable stack depth. Modules are represented as a river string that is interpreted with a defined set of arguments and exports.

The initial component will be `module.string`, which loads a single module. Internally these modules will be namespaced, so they cannot affect children or parent graphs except via arguments and exports.

Modules will have access to any standard function and any other component, excepting singletons. Internally, each component in the module will have an `id` that is prepended with the parent's `id` for identification purposes outside of the module. Within the module, a component can reference a sibling component normally. There are no known limits on the datatype that a module can use as an argument or export.

`modules.multiple` will use the key of the `map(string)` to uniquely identify a specific module.

### Component Options

Given the above example, the `id` of `integrations.mysql "server1"` would be `module.string.mysql.integrations.mysql.server1`. The `data-agent` field would also be prefixed. There are some inherent issues: deeply nested metrics are likely to run into Prometheus label value limits, and on Windows platforms there could be issues with the `data-agent` length. These issues already exist in Agent Flow but are more easily hit when using deeply nested modules.

### Failure Modes

#### Option 1: When a module fails, fail itself and its children

If an error occurs while re-evaluating a module, the module marks itself as unhealthy and unloads the original module and all children.

*Pros*

* Simple to implement
* Easy to understand

*Cons*

* One failure mode can cascade

#### Option 2: Modules keep the last good value

If an error occurs while re-evaluating a module, the module marks itself as unhealthy and attempts to keep the original module.
This may cause issues with cascading failures: if a module depends on another module, the system may enter an inconsistent state while applying and then rolling back the change.

For example, `Module A` has two sub-modules, `Module B` and `Module C`. During re-evaluation, `Module B` reloads appropriately but `Module C` fails. `Module A` unloads both modules and then reloads the last good string. If the last good string also fails, then `Module A` is unhealthy and non-functional, and `Module A`'s submodules do not exist.

*Pros*

* Allows more resilient usage

*Cons*

* Can create undefined behavior
* Complex to unload and reload

# Possible Example Documentation for `argument`

## Arguments

The following arguments are supported:

Name | Type | Description | Default | Required
---- | ---- | ----------- | ------- | --------
`optional` | `bool` | Whether the argument may be omitted. | `false` | no
`comment` | `string` | Comment describing what the argument is used for. | `""` | no
`default` | `any` | Default value used when the argument is unspecified. | | no

## Exported fields

The following fields are exported and can be referenced by other components:

Name | Type | Description
---- | ---- | -----------
`value` | `any` | The represented value of the argument.

# Example Documentation for `export`

## Arguments

The following arguments are supported:

Name | Type | Description | Default | Required
---- | ---- | ----------- | ------- | --------
`comment` | `string` | Comment describing what the export is used for. | `""` | no

## Exported fields

The following fields are exported and can be referenced by other components:

Name | Type | Description
---- | ---- | -----------
`value` | `any` | The represented value of the export.

# Example Documentation for `module.string`

## Arguments

The following arguments are supported:

Name | Type | Description | Default | Required
---- | ---- | ----------- | ------- | --------
`arguments` | `map(string)` | Map of items to pass to the module. Arguments the module does not need may be included; all of the module's required arguments must be provided. | `{}` | no
`content` | `string` | River configuration to be loaded. | | yes

## Exported fields

The following fields are exported and can be referenced by other components:

Name | Type | Description
---- | ---- | -----------
`exports` | `map(string)` | The set of exports, where the key is the name of an export and the value is its value.
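Putting the documented blocks together, here is a hypothetical module fragment (the argument and export names are made up) showing an optional `argument` with a `default`, re-exposed through an `export`:

```river
argument "scrape_interval" {
  optional = true
  default  = "60s"
  comment  = "How often to scrape targets."
}

export "interval" {
  comment = "The effective scrape interval, falling back to the default."
  value   = argument.scrape_interval.value
}
```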
diff --git a/docs/rfcs/0008-backwards-compatibility.md b/docs/rfcs/0008-backwards-compatibility.md
deleted file mode 100644
index 56d4bac647..0000000000
--- a/docs/rfcs/0008-backwards-compatibility.md
+++ /dev/null
@@ -1,86 +0,0 @@
# Grafana Agent backwards compatibility

* Date: 2023-05-25
* Author: Robert Fratto (@rfratto)
* PR: [grafana/agent#3981](https://github.com/grafana/agent/pull/3981)

Grafana Agent has been following [semantic versioning](https://semver.org/) since its inception. After three years of development and 33 minor releases, the project is on a trajectory to have a 1.0 release.

In the context of semantic versioning, a 1.0 release indicates that future minor releases are backwards compatible with older minor releases in the same major release; version 1.1 is backwards compatible with version 1.0. Having major and minor releases signals to users when an upgrade may take more time (major releases) and when they can upgrade with more confidence (minor releases).

However, Grafana Agent is a large project with a large surface area for what may be considered part of the backwards compatibility guarantees. This proposal formally establishes which parts of Grafana Agent will be protected by backwards compatibility.

## Goals

- Set expectations for what is covered by backwards compatibility.
- Set expectations for when upgrades to new major versions will be forced.

## Proposal

Backwards compatibility means that a user can upgrade their version of Grafana Agent without needing to make any changes to the way they interact with Grafana Agent, provided that interaction is within the scope of backwards compatibility.

### Scope of backwards compatibility

The following will be protected by backwards compatibility between minor releases:

- **User configuration**, including the syntax and functional semantics of the configuration file and command-line interface.

- **Versioned network APIs**, if any versioned APIs are introduced prior to the 1.0 release.

- **Telemetry data used in official dashboards**. This means that users will continue to be able to use the same set of dashboards we provide when upgrading minor releases.

  - Official dashboards are dashboards in the repository's `operations/` [directory](../../operations/).

- **Externally importable Go packages**. If a user is importing our code as a dependency, they should be able to upgrade to a new minor release without having to make changes to their code.

  The backwards compatibility rules of these packages follow the same expectations as the [Go 1 compatibility][] expectations.

- **The scope of backwards compatibility**. Backwards compatibility is only defined for major version 1; we reserve the right to change the definition of backwards compatibility between major versions.

If a breaking change is accidentally introduced in a minor release, and that breaking change is not covered by one of the [exceptions][] defined below, it is a bug. In these cases, a patch release should be introduced to undo the breaking change.

[exceptions]: #exceptions-to-backwards-compatibility
[Go 1 compatibility]: https://go.dev/doc/go1compat

### Exceptions to backwards compatibility

It's impossible to guarantee full backwards compatibility. There are some exceptions which may cause a breaking change without a new major version:

- Non-stable functionality: functionality which is explicitly marked as non-stable is exempt from backwards compatibility between minor releases.

  Non-stable functionality should be backwards compatible between patch releases, unless a breaking change is required for that patch release.

- Security: a breaking change may be made if a security fix requires it.

- Legal requirements: a breaking change may be made if functionality depends on software with a license incompatible with our own.

- Non-versioned network APIs: internal network APIs, such as the internal API used to drive the Flow web UI, are not subject to backwards compatibility guarantees.
- Undocumented behavior: relying on undocumented behavior may break between minor releases.

- Upstream dependencies: part of the public API of Grafana Agent may directly expose the public API of an upstream dependency. In these cases, if an upstream dependency introduces a breaking change, we may be required to make a breaking change to our public API as well.

- Other telemetry data: metrics, logs, and traces may change between releases. Only telemetry data which is used in official dashboards is protected under backwards compatibility.

### Avoiding major release burnout

Because a new major release implies that users must put extra effort into upgrading, it is possible to burn out users by releasing breaking changes too frequently.

We will attempt to limit new major versions to no more than one every 12 calendar months. This means that if Grafana Agent 1.0 was hypothetically released on August 4th, Grafana Agent 2.0 should not be released until at least August 4th of the following year. This is best-effort; if a new major release is required earlier, then we should not prevent ourselves from publishing such a release.

> **NOTE**: Here, "publishing a release" refers to creating a new versioned release associated with a Git tag and a GitHub release.
>
> Maintainers are free to queue breaking changes for the next major release in a branch at will.

Major releases should be aligned with breaking changes to the public API and not used as a way to hype a release. If hyping releases is required, there should be a version split between the API version of Grafana Agent and a project version (such as API v1.5, project version 2023.0).

### Supporting previous major releases

When a new major release is published, the previous major release should continue to receive security and bug fixes for a set amount of time. The announcement of a new major release should be coupled with a minimum Long-Term Support (LTS) period for the previous major release. For example, we may choose to announce that Grafana Agent 0.X will continue to be supported for at least 12 months.

LTS versions primarily receive security and bug fixes in the form of patch releases. New functionality in the form of minor releases is unlikely to be added to an LTS version, but may happen at the discretion of the project maintainers.

Enabling LTS versions will give users additional time they may need to upgrade, especially if there is a significant number of breaking changes to consider with the new major release.

The support timeframe for an LTS version is not fixed, and may change between major releases. For example, version 0.X may receive at least 12 months of LTS, while version 1.X may receive at least 4 months of LTS. Project maintainers will need to decide how long to support previous major versions based on the difficulty of upgrading to the latest major version.
From a7899e8927d89ac52b4ba7a06ffcdafe61de3954 Mon Sep 17 00:00:00 2001 From: Robert Fratto Date: Mon, 25 Mar 2024 16:21:36 -0400 Subject: [PATCH 057/136] operations: rename agent-mixin to alloy-mixin (#73) --- .../alerts.libsonnet | 0 .../alerts/clustering.libsonnet | 7 ++--- .../alerts/controller.libsonnet | 2 +- .../alerts/opentelemetry.libsonnet | 0 .../alerts/utils/alert.jsonnet | 0 .../dashboards.libsonnet | 0 .../dashboards/cluster-node.libsonnet | 8 +++--- .../dashboards/cluster-overview.libsonnet | 10 +++---- .../dashboards/controller.libsonnet | 20 +++++++------- .../dashboards/opentelemetry.libsonnet | 4 +-- .../dashboards/prometheus.libsonnet | 12 ++++----- .../dashboards/resources.libsonnet | 26 +++++++++---------- .../dashboards/utils/dashboard.jsonnet | 4 +-- .../dashboards/utils/panel.jsonnet | 0 .../grizzly.jsonnet | 0 .../grizzly/alerts.jsonnet | 0 .../grizzly/dashboards.jsonnet | 0 .../jsonnetfile.json | 0 .../mixin.libsonnet | 2 +- 19 files changed, 48 insertions(+), 47 deletions(-) rename operations/{agent-mixin => alloy-mixin}/alerts.libsonnet (100%) rename operations/{agent-mixin => alloy-mixin}/alerts/clustering.libsonnet (89%) rename operations/{agent-mixin => alloy-mixin}/alerts/controller.libsonnet (96%) rename operations/{agent-mixin => alloy-mixin}/alerts/opentelemetry.libsonnet (100%) rename operations/{agent-mixin => alloy-mixin}/alerts/utils/alert.jsonnet (100%) rename operations/{agent-mixin => alloy-mixin}/dashboards.libsonnet (100%) rename operations/{agent-mixin => alloy-mixin}/dashboards/cluster-node.libsonnet (96%) rename operations/{agent-mixin => alloy-mixin}/dashboards/cluster-overview.libsonnet (92%) rename operations/{agent-mixin => alloy-mixin}/dashboards/controller.libsonnet (95%) rename operations/{agent-mixin => alloy-mixin}/dashboards/opentelemetry.libsonnet (98%) rename operations/{agent-mixin => alloy-mixin}/dashboards/prometheus.libsonnet (97%) rename operations/{agent-mixin => alloy-mixin}/dashboards/resources.libsonnet (89%) rename operations/{agent-mixin => alloy-mixin}/dashboards/utils/dashboard.jsonnet (97%) rename operations/{agent-mixin => alloy-mixin}/dashboards/utils/panel.jsonnet (100%) rename operations/{agent-mixin => alloy-mixin}/grizzly.jsonnet (100%) rename operations/{agent-mixin => alloy-mixin}/grizzly/alerts.jsonnet (100%) rename operations/{agent-mixin => alloy-mixin}/grizzly/dashboards.jsonnet (100%) rename operations/{agent-mixin => alloy-mixin}/jsonnetfile.json (100%) rename operations/{agent-mixin => alloy-mixin}/mixin.libsonnet (56%) diff --git a/operations/agent-mixin/alerts.libsonnet b/operations/alloy-mixin/alerts.libsonnet similarity index 100% rename from operations/agent-mixin/alerts.libsonnet rename to operations/alloy-mixin/alerts.libsonnet diff --git a/operations/agent-mixin/alerts/clustering.libsonnet b/operations/alloy-mixin/alerts/clustering.libsonnet similarity index 89% rename from operations/agent-mixin/alerts/clustering.libsonnet rename to operations/alloy-mixin/alerts/clustering.libsonnet index 5e2ad3c026..f77cc171a8 100644 --- a/operations/agent-mixin/alerts/clustering.libsonnet +++ b/operations/alloy-mixin/alerts/clustering.libsonnet @@ -14,14 +14,15 @@ alert.newGroup( alert.newRule( 'ClusterNodeCountMismatch', // Assert that the number of known peers (regardless of state) reported by each - // agent matches the number of running agents in the same cluster - // and namespace as reported by a count of Prometheus metrics. 
+ // Alloy instance matches the number of running Alloy instances in the + // same cluster and namespace as reported by a count of Prometheus + // metrics. ||| sum without (state) (cluster_node_peers) != on (cluster, namespace) group_left count by (cluster, namespace) (cluster_node_info) |||, - 'Nodes report different number of peers vs. the count of observed agent metrics. Some agent metrics may be missing or the cluster is in a split brain state.', + 'Nodes report different number of peers vs. the count of observed Alloy metrics. Some Alloy metrics may be missing or the cluster is in a split brain state.', '15m', ), diff --git a/operations/agent-mixin/alerts/controller.libsonnet b/operations/alloy-mixin/alerts/controller.libsonnet similarity index 96% rename from operations/agent-mixin/alerts/controller.libsonnet rename to operations/alloy-mixin/alerts/controller.libsonnet index 3aeb5eabbb..b1c419a168 100644 --- a/operations/agent-mixin/alerts/controller.libsonnet +++ b/operations/alloy-mixin/alerts/controller.libsonnet @@ -1,7 +1,7 @@ local alert = import './utils/alert.jsonnet'; alert.newGroup( - 'agent_controller', + 'alloy_controller', [ // Component evaluations are taking too long, which can lead to e.g. stale targets. alert.newRule( diff --git a/operations/agent-mixin/alerts/opentelemetry.libsonnet b/operations/alloy-mixin/alerts/opentelemetry.libsonnet similarity index 100% rename from operations/agent-mixin/alerts/opentelemetry.libsonnet rename to operations/alloy-mixin/alerts/opentelemetry.libsonnet diff --git a/operations/agent-mixin/alerts/utils/alert.jsonnet b/operations/alloy-mixin/alerts/utils/alert.jsonnet similarity index 100% rename from operations/agent-mixin/alerts/utils/alert.jsonnet rename to operations/alloy-mixin/alerts/utils/alert.jsonnet diff --git a/operations/agent-mixin/dashboards.libsonnet b/operations/alloy-mixin/dashboards.libsonnet similarity index 100% rename from operations/agent-mixin/dashboards.libsonnet rename to operations/alloy-mixin/dashboards.libsonnet diff --git a/operations/agent-mixin/dashboards/cluster-node.libsonnet b/operations/alloy-mixin/dashboards/cluster-node.libsonnet similarity index 96% rename from operations/agent-mixin/dashboards/cluster-node.libsonnet rename to operations/alloy-mixin/dashboards/cluster-node.libsonnet index 18ba94026d..e7de6b5a12 100644 --- a/operations/agent-mixin/dashboards/cluster-node.libsonnet +++ b/operations/alloy-mixin/dashboards/cluster-node.libsonnet @@ -1,12 +1,12 @@ local dashboard = import './utils/dashboard.jsonnet'; local panel = import './utils/panel.jsonnet'; -local filename = 'agent-cluster-node.json'; +local filename = 'alloy-cluster-node.json'; { [filename]: - dashboard.new(name='Grafana Agent Flow / Cluster Node') + + dashboard.new(name='Alloy / Cluster Node') + dashboard.withDocsLink( - url='https://grafana.com/docs/agent/latest/flow/reference/cli/run/#clustered-mode-experimental', + url='https://grafana.com/docs/alloy/latest/reference/cli/run/#clustered-mode', desc='Clustering documentation', ) + dashboard.withDashboardsLink() + @@ -198,7 +198,7 @@ local filename = 'agent-cluster-node.json'; panel.withDescription(||| The number of packets enqueued currently to be decoded or encoded and sent during communication with other nodes. - The incoming and outgoing packet queue should be as empty as possible; a growing queue means that the Agent cannot keep up with the number of messages required to have all nodes informed of cluster changes, and the nodes may not converge in a timely fashion. 
+ The incoming and outgoing packet queue should be as empty as possible; a growing queue means that Alloy cannot keep up with the number of messages required to have all nodes informed of cluster changes, and the nodes may not converge in a timely fashion. |||) + panel.withPosition({ h: 8, diff --git a/operations/agent-mixin/dashboards/cluster-overview.libsonnet b/operations/alloy-mixin/dashboards/cluster-overview.libsonnet similarity index 92% rename from operations/agent-mixin/dashboards/cluster-overview.libsonnet rename to operations/alloy-mixin/dashboards/cluster-overview.libsonnet index 265d185641..6537020e9f 100644 --- a/operations/agent-mixin/dashboards/cluster-overview.libsonnet +++ b/operations/alloy-mixin/dashboards/cluster-overview.libsonnet @@ -1,13 +1,13 @@ local dashboard = import './utils/dashboard.jsonnet'; local panel = import './utils/panel.jsonnet'; -local filename = 'agent-cluster-overview.json'; -local cluster_node_filename = 'agent-cluster-node.json'; +local filename = 'alloy-cluster-overview.json'; +local cluster_node_filename = 'alloy-cluster-node.json'; { [filename]: - dashboard.new(name='Grafana Agent Flow / Cluster Overview') + + dashboard.new(name='Alloy / Cluster Overview') + dashboard.withDocsLink( - url='https://grafana.com/docs/agent/latest/flow/reference/cli/run/#clustered-mode-experimental', + url='https://grafana.com/docs/alloy/latest/reference/cli/run/#clustered-mode', desc='Clustering documentation', ) + dashboard.withDashboardsLink() + @@ -97,7 +97,7 @@ local cluster_node_filename = 'agent-cluster-node.json'; { targetBlank: false, title: 'Detail dashboard for node', - url: '/d/%(uid)s/grafana-agent-flow-cluster-node?var-instance=${__data.fields.instance}&var-datasource=${datasource}&var-loki_datasource=${loki_datasource}&var-cluster=${cluster}&var-namespace=${namespace}' % { uid: std.md5(cluster_node_filename) }, + url: '/d/%(uid)s/alloy-cluster-node?var-instance=${__data.fields.instance}&var-datasource=${datasource}&var-loki_datasource=${loki_datasource}&var-cluster=${cluster}&var-namespace=${namespace}' % { uid: std.md5(cluster_node_filename) }, }, ], }, diff --git a/operations/agent-mixin/dashboards/controller.libsonnet b/operations/alloy-mixin/dashboards/controller.libsonnet similarity index 95% rename from operations/agent-mixin/dashboards/controller.libsonnet rename to operations/alloy-mixin/dashboards/controller.libsonnet index ec059de981..a55b56a3fa 100644 --- a/operations/agent-mixin/dashboards/controller.libsonnet +++ b/operations/alloy-mixin/dashboards/controller.libsonnet @@ -1,12 +1,12 @@ local dashboard = import './utils/dashboard.jsonnet'; local panel = import './utils/panel.jsonnet'; -local filename = 'agent-flow-controller.json'; +local filename = 'alloy-controller.json'; { [filename]: - dashboard.new(name='Grafana Agent Flow / Controller') + + dashboard.new(name='Alloy / Controller') + dashboard.withDocsLink( - url='https://grafana.com/docs/agent/latest/flow/concepts/component_controller/', + url='https://grafana.com/docs/alloy/latest/concepts/component_controller/', desc='Component controller documentation', ) + dashboard.withDashboardsLink() + @@ -24,12 +24,12 @@ local filename = 'agent-flow-controller.json'; dashboard.newLokiAnnotation('Deployments', '{cluster="$cluster", container="kube-diff-logger"} | json | namespace_extracted="grafana-agent" | name_extracted=~"grafana-agent.*"', 'rgba(0, 211, 255, 1)'), ]) + dashboard.withPanelsMixin([ - // Running agents + // Running instances ( - panel.newSingleStat('Running agents') + - 
panel.withUnit('agents') + + panel.newSingleStat('Running instances') + + panel.withUnit('instances') + panel.withDescription(||| - The number of Grafana Agent Flow instances whose metrics are being sent and reported. + The number of Alloy instances whose metrics are being sent and reported. |||) + panel.withPosition({ x: 0, y: 0, w: 10, h: 4 }) + panel.withQueries([ @@ -44,7 +44,7 @@ local filename = 'agent-flow-controller.json'; panel.newSingleStat('Running components') + panel.withUnit('components') + panel.withDescription(||| - The number of running components across all running agents. + The number of running components across all running instances. |||) + panel.withPosition({ x: 0, y: 4, w: 10, h: 4 }) + panel.withQueries([ @@ -139,7 +139,7 @@ local filename = 'agent-flow-controller.json'; }, } + panel.withDescription(||| - Breakdown of components by health across all running agents. + Breakdown of components by health across all running instances. * Healthy: components have been evaluated completely and are reporting themselves as healthy. * Unhealthy: Components either could not be evaluated or are reporting themselves as unhealthy. @@ -147,7 +147,7 @@ local filename = 'agent-flow-controller.json'; * Exited: A component has exited. It will not return to the running state. More information on a component's health state can be retrieved using - the Grafana Agent Flow UI. + the Alloy UI. Note that components may be in a degraded state even if they report themselves as healthy. Use component-specific dashboards and alerts diff --git a/operations/agent-mixin/dashboards/opentelemetry.libsonnet b/operations/alloy-mixin/dashboards/opentelemetry.libsonnet similarity index 98% rename from operations/agent-mixin/dashboards/opentelemetry.libsonnet rename to operations/alloy-mixin/dashboards/opentelemetry.libsonnet index cd3aeb1efc..9ba72856b8 100644 --- a/operations/agent-mixin/dashboards/opentelemetry.libsonnet +++ b/operations/alloy-mixin/dashboards/opentelemetry.libsonnet @@ -1,6 +1,6 @@ local dashboard = import './utils/dashboard.jsonnet'; local panel = import './utils/panel.jsonnet'; -local filename = 'agent-flow-opentelemetry.json'; +local filename = 'alloy-opentelemetry.json'; local stackedPanelMixin = { fieldConfig+: { @@ -16,7 +16,7 @@ local stackedPanelMixin = { { [filename]: - dashboard.new(name='Grafana Agent Flow / OpenTelemetry') + + dashboard.new(name='Alloy / OpenTelemetry') + dashboard.withDashboardsLink() + dashboard.withUID(std.md5(filename)) + dashboard.withTemplateVariablesMixin([ diff --git a/operations/agent-mixin/dashboards/prometheus.libsonnet b/operations/alloy-mixin/dashboards/prometheus.libsonnet similarity index 97% rename from operations/agent-mixin/dashboards/prometheus.libsonnet rename to operations/alloy-mixin/dashboards/prometheus.libsonnet index 21ae79f3b0..0242388cb5 100644 --- a/operations/agent-mixin/dashboards/prometheus.libsonnet +++ b/operations/alloy-mixin/dashboards/prometheus.libsonnet @@ -1,6 +1,6 @@ local dashboard = import './utils/dashboard.jsonnet'; local panel = import './utils/panel.jsonnet'; -local filename = 'agent-flow-prometheus-remote-write.json'; +local filename = 'alloy-prometheus-remote-write.json'; local stackedPanelMixin = { fieldConfig+: { @@ -30,7 +30,7 @@ local scrapePanels(y_offset) = [ across all the namespaces in the selected cluster. Low success rates can indicate a problem with scrape targets, - stale service discovery, or agent misconfiguration. + stale service discovery, or Alloy misconfiguration. 
|||) + panel.withPosition({ x: 0, y: 1 + y_offset, w: 12, h: 10 }) + panel.withQueries([ @@ -55,7 +55,7 @@ local scrapePanels(y_offset) = [ This metric should be below your configured scrape interval. High durations can indicate a problem with a scrape target or - a performance issue with the agent. + a performance issue with Alloy. |||) + panel.withPosition({ x: 12, y: 1 + y_offset, w: 12, h: 10 }) + panel.withQueries([ @@ -346,7 +346,7 @@ local remoteWritePanels(y_offset) = [ panel.withUnit('short') + panel.withDescription(||| Total number of active series which are currently being tracked by - prometheus.remote_write components, with separate lines for each agent instance. + prometheus.remote_write components, with separate lines for each Alloy instance. An "active series" is a series that prometheus.remote_write recently received a sample for. Active series are garbage collected whenever a @@ -389,9 +389,9 @@ local remoteWritePanels(y_offset) = [ { [filename]: - dashboard.new(name='Grafana Agent Flow / Prometheus Components') + + dashboard.new(name='Alloy / Prometheus Components') + dashboard.withDocsLink( - url='https://grafana.com/docs/agent/latest/flow/reference/components/prometheus.remote_write/', + url='https://grafana.com/docs/alloy/latest/reference/components/prometheus.remote_write/', desc='Component documentation', ) + dashboard.withDashboardsLink() + diff --git a/operations/agent-mixin/dashboards/resources.libsonnet b/operations/alloy-mixin/dashboards/resources.libsonnet similarity index 89% rename from operations/agent-mixin/dashboards/resources.libsonnet rename to operations/alloy-mixin/dashboards/resources.libsonnet index 95ceb4f348..e1cd7a6209 100644 --- a/operations/agent-mixin/dashboards/resources.libsonnet +++ b/operations/alloy-mixin/dashboards/resources.libsonnet @@ -1,6 +1,6 @@ local dashboard = import './utils/dashboard.jsonnet'; local panel = import './utils/panel.jsonnet'; -local filename = 'agent-flow-resources.json'; +local filename = 'alloy-resources.json'; local pointsMixin = { fieldConfig+: { @@ -28,7 +28,7 @@ local stackedPanelMixin = { { [filename]: - dashboard.new(name='Grafana Agent Flow / Resources') + + dashboard.new(name='Alloy / Resources') + dashboard.withDashboardsLink() + dashboard.withUID(std.md5(filename)) + dashboard.withTemplateVariablesMixin([ @@ -52,7 +52,7 @@ local stackedPanelMixin = { panel.new(title='CPU usage', type='timeseries') + panel.withUnit('percentunit') + panel.withDescription(||| - CPU usage of the Grafana Agent process relative to 1 CPU core. + CPU usage of the Alloy process relative to 1 CPU core. For example, 100% means using one entire CPU core. |||) + @@ -70,7 +70,7 @@ local stackedPanelMixin = { panel.new(title='Memory (RSS)', type='timeseries') + panel.withUnit('decbytes') + panel.withDescription(||| - Resident memory size of the Grafana Agent process. + Resident memory size of the Alloy process. |||) + panel.withPosition({ x: 12, y: 0, w: 12, h: 8 }) + panel.withQueries([ @@ -87,13 +87,13 @@ local stackedPanelMixin = { pointsMixin + panel.withUnit('ops') + panel.withDescription(||| - Rate at which the Grafana Agent process performs garbage collections. + Rate at which the Alloy process performs garbage collections. |||) + panel.withPosition({ x: 0, y: 8, w: 8, h: 8 }) + panel.withQueries([ panel.newQuery( // Lots of programs export go_goroutines so we ignore anything that - // doesn't also have a Grafana Agent-specific metric (i.e., + // doesn't also have an Alloy-specific metric (i.e., // agent_build_info). 
expr=||| rate(go_gc_duration_seconds_count{cluster="$cluster",namespace="$namespace",instance=~"$instance"}[5m]) @@ -117,7 +117,7 @@ local stackedPanelMixin = { panel.withQueries([ panel.newQuery( // Lots of programs export go_goroutines so we ignore anything that - // doesn't also have a Grafana Agent-specific metric (i.e., + // doesn't also have an Alloy-specific metric (i.e., // agent_build_info). expr=||| go_goroutines{cluster="$cluster",namespace="$namespace",instance=~"$instance"} @@ -134,13 +134,13 @@ local stackedPanelMixin = { panel.new(title='Memory (heap inuse)', type='timeseries') + panel.withUnit('decbytes') + panel.withDescription(||| - Heap memory currently in use by the Grafana Agent process. + Heap memory currently in use by the Alloy process. |||) + panel.withPosition({ x: 16, y: 8, w: 8, h: 8 }) + panel.withQueries([ panel.newQuery( // Lots of programs export go_memstats_heap_inuse_bytes so we ignore - // anything that doesn't also have a Grafana Agent-specific metric + // anything that doesn't also have an Alloy-specific metric // (i.e., agent_build_info). expr=||| go_memstats_heap_inuse_bytes{cluster="$cluster",namespace="$namespace",instance=~"$instance"} @@ -159,10 +159,10 @@ local stackedPanelMixin = { panel.withUnit('Bps') + panel.withDescription(||| Rate of data received across all network interfaces for the machine - Grafana Agent is running on. + Alloy is running on. Data shown here is across all running processes and not exclusive to - the running Grafana Agent process. + the running Alloy process. |||) + panel.withPosition({ x: 0, y: 16, w: 12, h: 8 }) + panel.withQueries([ @@ -182,10 +182,10 @@ local stackedPanelMixin = { panel.withUnit('Bps') + panel.withDescription(||| Rate of data sent across all network interfaces for the machine - Grafana Agent is running on. + Alloy is running on. Data shown here is across all running processes and not exclusive to - the running Grafana Agent process. + the running Alloy process. 
|||) + panel.withPosition({ x: 12, y: 16, w: 12, h: 8 }) + panel.withQueries([ diff --git a/operations/agent-mixin/dashboards/utils/dashboard.jsonnet b/operations/alloy-mixin/dashboards/utils/dashboard.jsonnet similarity index 97% rename from operations/agent-mixin/dashboards/utils/dashboard.jsonnet rename to operations/alloy-mixin/dashboards/utils/dashboard.jsonnet index e17b0ff678..40b5f85ced 100644 --- a/operations/agent-mixin/dashboards/utils/dashboard.jsonnet +++ b/operations/alloy-mixin/dashboards/utils/dashboard.jsonnet @@ -8,7 +8,7 @@ refresh: '10s', schemaVersion: 36, graphTooltip: 1, // shared crosshair for all graphs - tags: ['grafana-agent-mixin'], + tags: ['alloy-mixin'], templating: { list: [{ name: 'datasource', @@ -122,7 +122,7 @@ icon: 'external link', includeVars: true, keepTime: true, - tags: ['grafana-agent-mixin'], + tags: ['alloy-mixin'], targetBlank: false, }], }, diff --git a/operations/agent-mixin/dashboards/utils/panel.jsonnet b/operations/alloy-mixin/dashboards/utils/panel.jsonnet similarity index 100% rename from operations/agent-mixin/dashboards/utils/panel.jsonnet rename to operations/alloy-mixin/dashboards/utils/panel.jsonnet diff --git a/operations/agent-mixin/grizzly.jsonnet b/operations/alloy-mixin/grizzly.jsonnet similarity index 100% rename from operations/agent-mixin/grizzly.jsonnet rename to operations/alloy-mixin/grizzly.jsonnet diff --git a/operations/agent-mixin/grizzly/alerts.jsonnet b/operations/alloy-mixin/grizzly/alerts.jsonnet similarity index 100% rename from operations/agent-mixin/grizzly/alerts.jsonnet rename to operations/alloy-mixin/grizzly/alerts.jsonnet diff --git a/operations/agent-mixin/grizzly/dashboards.jsonnet b/operations/alloy-mixin/grizzly/dashboards.jsonnet similarity index 100% rename from operations/agent-mixin/grizzly/dashboards.jsonnet rename to operations/alloy-mixin/grizzly/dashboards.jsonnet diff --git a/operations/agent-mixin/jsonnetfile.json b/operations/alloy-mixin/jsonnetfile.json similarity index 100% rename from operations/agent-mixin/jsonnetfile.json rename to operations/alloy-mixin/jsonnetfile.json diff --git a/operations/agent-mixin/mixin.libsonnet b/operations/alloy-mixin/mixin.libsonnet similarity index 56% rename from operations/agent-mixin/mixin.libsonnet rename to operations/alloy-mixin/mixin.libsonnet index 3a49e7f8f6..741c943035 100644 --- a/operations/agent-mixin/mixin.libsonnet +++ b/operations/alloy-mixin/mixin.libsonnet @@ -1,3 +1,3 @@ -{ grafanaDashboardFolder: 'Grafana Agent Flow' } + +{ grafanaDashboardFolder: 'Alloy' } + (import './dashboards.libsonnet') + (import './alerts.libsonnet') From 073c2881a41052b0ab26f083ab5633089f748bad Mon Sep 17 00:00:00 2001 From: Robert Fratto Date: Mon, 25 Mar 2024 16:26:10 -0400 Subject: [PATCH 058/136] operations: rename river-jsonnet to alloy-syntax-jsonnet (#74) --- operations/alloy-syntax-jsonnet/README.md | 65 +++++++++++++++++++ .../builder.jsonnet | 6 +- .../internal/utils.jsonnet | 0 .../jsonnetfile.json | 0 .../main.libsonnet | 0 .../main_test.jsonnet | 34 +++++----- .../manifest.jsonnet | 22 +++---- operations/river-jsonnet/README.md | 65 ------------------- 8 files changed, 96 insertions(+), 96 deletions(-) create mode 100644 operations/alloy-syntax-jsonnet/README.md rename operations/{river-jsonnet => alloy-syntax-jsonnet}/builder.jsonnet (74%) rename operations/{river-jsonnet => alloy-syntax-jsonnet}/internal/utils.jsonnet (100%) rename operations/{river-jsonnet => alloy-syntax-jsonnet}/jsonnetfile.json (100%) rename operations/{river-jsonnet => 
alloy-syntax-jsonnet}/main.libsonnet (100%)
 rename operations/{river-jsonnet => alloy-syntax-jsonnet}/main_test.jsonnet (79%)
 rename operations/{river-jsonnet => alloy-syntax-jsonnet}/manifest.jsonnet (90%)
 delete mode 100644 operations/river-jsonnet/README.md

diff --git a/operations/alloy-syntax-jsonnet/README.md b/operations/alloy-syntax-jsonnet/README.md
new file mode 100644
index 0000000000..b63ef8d8cd
--- /dev/null
+++ b/operations/alloy-syntax-jsonnet/README.md
@@ -0,0 +1,65 @@
+# `alloy-syntax-jsonnet` library
+
+The `alloy-syntax-jsonnet` library makes it possible to create Alloy syntax
+config files using Jsonnet.
+
+To manifest a configuration file, call `alloy.manifestAlloy(value)`.
+
+Field names from objects are expected to follow one of the three forms:
+
+* `<name>` for Alloy attributes (e.g., `foobar`).
+* `block <name>` for unlabeled Alloy blocks (e.g., `block exporter.unix`).
+* `block <name> <label>` for labeled Alloy blocks (e.g., `block prometheus.scrape default`).
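A minimal sketch of how these field-name forms combine, assuming the import path matches this library's location in the repository and that field names follow the forms above:

```jsonnet
local alloy = import 'operations/alloy-syntax-jsonnet/main.libsonnet';

alloy.manifestAlloy({
  // `block <name>`: an unlabeled block whose body sets plain attributes.
  'block logging': {
    level: 'debug',
  },
  // `block <name> <label>`: a labeled block containing a nested block.
  'block prometheus.remote_write default': {
    'block endpoint': {
      url: 'http://localhost:9009/api/prom/push',
    },
  },
})
```

Under those assumptions, this would manifest Alloy configuration equivalent to a `logging` block and a labeled `prometheus.remote_write "default"` block with a nested `endpoint` block.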